diff --git a/.buildkite/pipelines/ecs-dynamic-template-tests.yml b/.buildkite/pipelines/ecs-dynamic-template-tests.yml
index a8145c61a2d40..1c6c18983b082 100644
--- a/.buildkite/pipelines/ecs-dynamic-template-tests.yml
+++ b/.buildkite/pipelines/ecs-dynamic-template-tests.yml
@@ -10,5 +10,7 @@ steps:
 notify:
   - slack: "#es-delivery"
     if: build.state == "failed"
+  - slack: "#es-data-management"
+    if: build.state == "failed"
   - email: "logs-plus@elastic.co"
     if: build.state == "failed"
diff --git a/.buildkite/pipelines/intake.template.yml b/.buildkite/pipelines/intake.template.yml
index 32b0a12f06a0e..66b989d94455c 100644
--- a/.buildkite/pipelines/intake.template.yml
+++ b/.buildkite/pipelines/intake.template.yml
@@ -32,6 +32,14 @@ steps:
       image: family/elasticsearch-ubuntu-2004
       machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
+  - label: part4
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart4
+    timeout_in_minutes: 300
+    agents:
+      provider: gcp
+      image: family/elasticsearch-ubuntu-2004
+      machineType: n1-standard-32
+      buildDirectory: /dev/shm/bk
   - group: bwc-snapshots
     steps:
       - label: "{{matrix.BWC_VERSION}} / bwc-snapshots"
diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml
index fd0684d666d64..49c2d34df7e31 100644
--- a/.buildkite/pipelines/intake.yml
+++ b/.buildkite/pipelines/intake.yml
@@ -33,6 +33,14 @@ steps:
       image: family/elasticsearch-ubuntu-2004
       machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
+  - label: part4
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart4
+    timeout_in_minutes: 300
+    agents:
+      provider: gcp
+      image: family/elasticsearch-ubuntu-2004
+      machineType: n1-standard-32
+      buildDirectory: /dev/shm/bk
   - group: bwc-snapshots
     steps:
       - label: "{{matrix.BWC_VERSION}} / bwc-snapshots"
@@ -40,7 +48,7 @@ steps:
         timeout_in_minutes: 300
         matrix:
           setup:
-            BWC_VERSION: ["7.17.15", "8.11.1", "8.12.0"]
+            BWC_VERSION: ["7.17.16", "8.11.2", "8.12.0"]
         agents:
           provider: gcp
           image: family/elasticsearch-ubuntu-2004
diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml
index 3043872845779..fab90c8ed6d17 100644
--- a/.buildkite/pipelines/periodic-packaging.yml
+++ b/.buildkite/pipelines/periodic-packaging.yml
@@ -1073,6 +1073,22 @@ steps:
     env:
       BWC_VERSION: 7.17.15
 
+  - label: "{{matrix.image}} / 7.17.16 / packaging-tests-upgrade"
+    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.16
+    timeout_in_minutes: 300
+    matrix:
+      setup:
+        image:
+          - rocky-8
+          - ubuntu-2004
+    agents:
+      provider: gcp
+      image: family/elasticsearch-{{matrix.image}}
+      machineType: custom-16-32768
+      buildDirectory: /dev/shm/bk
+    env:
+      BWC_VERSION: 7.17.16
+
   - label: "{{matrix.image}} / 8.0.0 / packaging-tests-upgrade"
     command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.0.0
     timeout_in_minutes: 300
@@ -1713,6 +1729,22 @@ steps:
     env:
       BWC_VERSION: 8.11.1
 
+  - label: "{{matrix.image}} / 8.11.2 / packaging-tests-upgrade"
+    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.11.2
+    timeout_in_minutes: 300
+    matrix:
+      setup:
+        image:
+          - rocky-8
+          - ubuntu-2004
+    agents:
+      provider: gcp
+      image: family/elasticsearch-{{matrix.image}}
+      machineType: custom-16-32768
+      buildDirectory: /dev/shm/bk
+    env:
+      BWC_VERSION: 8.11.2
+
   - label: "{{matrix.image}} / 8.12.0 / packaging-tests-upgrade"
     command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.12.0
     timeout_in_minutes: 300
diff --git a/.buildkite/pipelines/periodic.bwc.template.yml b/.buildkite/pipelines/periodic.bwc.template.yml
index 8a8c43d75e3ef..34e9aa656e340 100644
--- a/.buildkite/pipelines/periodic.bwc.template.yml
+++ b/.buildkite/pipelines/periodic.bwc.template.yml
@@ -4,7 +4,7 @@
   agents:
     provider: gcp
     image: family/elasticsearch-ubuntu-2004
-    machineType: custom-32-98304
+    machineType: n1-standard-32
     buildDirectory: /dev/shm/bk
   env:
     BWC_VERSION: $BWC_VERSION
\ No newline at end of file
diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml
index e1ea27c2468e3..88738c88ef5a0 100644
--- a/.buildkite/pipelines/periodic.yml
+++ b/.buildkite/pipelines/periodic.yml
@@ -8,7 +8,7 @@ steps:
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 7.0.0
@@ -18,7 +18,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.0.1
@@ -28,7 +28,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.1.0
@@ -38,7 +38,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.1.1
@@ -48,7 +48,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.2.0
@@ -58,7 +58,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.2.1
@@ -68,7 +68,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.3.0
@@ -78,7 +78,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.3.1
@@ -88,7 +88,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.3.2
@@ -98,7 +98,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.4.0
@@ -108,7 +108,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.4.1
@@ -118,7 +118,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.4.2
@@ -128,7 +128,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.5.0
@@ -138,7 +138,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.5.1
@@ -148,7 +148,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.5.2
@@ -158,7 +158,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.6.0
@@ -168,7 +168,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.6.1
@@ -178,7 +178,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.6.2
@@ -188,7 +188,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.7.0
@@ -198,7 +198,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.7.1
@@ -208,7 +208,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.8.0
@@ -218,7 +218,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.8.1
@@ -228,7 +228,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.9.0
@@ -238,7 +238,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.9.1
@@ -248,7 +248,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.9.2
@@ -258,7 +258,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.9.3
@@ -268,7 +268,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.10.0
@@ -278,7 +278,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.10.1
@@ -288,7 +288,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.10.2
@@ -298,7 +298,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.11.0
@@ -308,7 +308,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.11.1
@@ -318,7 +318,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.11.2
@@ -328,7 +328,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.12.0
@@ -338,7 +338,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.12.1
@@ -348,7 +348,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.13.0
@@ -358,7 +358,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.13.1
@@ -368,7 +368,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.13.2
@@ -378,7 +378,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.13.3
@@ -388,7 +388,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.13.4
@@ -398,7 +398,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.14.0
@@ -408,7 +408,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.14.1
@@ -418,7 +418,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.14.2
@@ -428,7 +428,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.15.0
@@ -438,7 +438,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.15.1
@@ -448,7 +448,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.15.2
@@ -458,7 +458,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.16.0
@@ -468,7 +468,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.16.1
@@ -478,7 +478,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.16.2
@@ -488,7 +488,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.16.3
@@ -498,7 +498,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.17.0
@@ -508,7 +508,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.17.1
@@ -518,7 +518,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.17.2
@@ -528,7 +528,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.17.3
@@ -538,7 +538,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.17.4
@@ -548,7 +548,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.17.5
@@ -558,7 +558,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.17.6
@@ -568,7 +568,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.17.7
@@ -578,7 +578,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.17.8
@@ -588,7 +588,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.17.9
@@ -598,7 +598,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.17.10
@@ -608,7 +608,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.17.11
@@ -618,7 +618,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.17.12
@@ -628,7 +628,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.17.13
@@ -638,7 +638,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.17.14
@@ -648,17 +648,27 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 7.17.15
+  - label: 7.17.16 / bwc
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.16#bwcTest
+    timeout_in_minutes: 300
+    agents:
+      provider: gcp
+      image: family/elasticsearch-ubuntu-2004
+      machineType: custom-32-98304
+      buildDirectory: /dev/shm/bk
+    env:
+      BWC_VERSION: 7.17.16
   - label: 8.0.0 / bwc
     command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.0.0#bwcTest
     timeout_in_minutes: 300
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.0.0
@@ -668,7 +678,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.0.1
@@ -678,7 +688,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.1.0
@@ -688,7 +698,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.1.1
@@ -698,7 +708,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.1.2
@@ -708,7 +718,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.1.3
@@ -718,7 +728,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.2.0
@@ -728,7 +738,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.2.1
@@ -738,7 +748,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.2.2
@@ -748,7 +758,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.2.3
@@ -758,7 +768,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.3.0
@@ -768,7 +778,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.3.1
@@ -778,7 +788,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.3.2
@@ -788,7 +798,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.3.3
@@ -798,7 +808,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.4.0
@@ -808,7 +818,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.4.1
@@ -818,7 +828,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.4.2
@@ -828,7 +838,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.4.3
@@ -838,7 +848,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.5.0
@@ -848,7 +858,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.5.1
@@ -858,7 +868,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.5.2
@@ -868,7 +878,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.5.3
@@ -878,7 +888,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.6.0
@@ -888,7 +898,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.6.1
@@ -898,7 +908,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.6.2
@@ -908,7 +918,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.7.0
@@ -918,7 +928,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.7.1
@@ -928,7 +938,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.8.0
@@ -938,7 +948,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.8.1
@@ -948,7 +958,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.8.2
@@ -958,7 +968,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.9.0
@@ -968,7 +978,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.9.1
@@ -978,7 +988,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.9.2
@@ -988,7 +998,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.10.0
@@ -998,7 +1008,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.10.1
@@ -1008,7 +1018,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.10.2
@@ -1018,7 +1028,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.10.3
@@ -1028,7 +1038,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.10.4
@@ -1038,7 +1048,7 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.11.0
@@ -1048,17 +1058,27 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.11.1
+  - label: 8.11.2 / bwc
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.11.2#bwcTest
+    timeout_in_minutes: 300
+    agents:
+      provider: gcp
+      image: family/elasticsearch-ubuntu-2004
+      machineType: n1-standard-32
+      buildDirectory: /dev/shm/bk
+    env:
+      BWC_VERSION: 8.11.2
   - label: 8.12.0 / bwc
     command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.12.0#bwcTest
     timeout_in_minutes: 300
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
    env:
      BWC_VERSION: 8.12.0
diff --git a/.buildkite/pipelines/pull-request/bwc-snapshots.yml b/.buildkite/pipelines/pull-request/bwc-snapshots.yml
index 21873475056ea..5a9fc2d938ac0 100644
--- a/.buildkite/pipelines/pull-request/bwc-snapshots.yml
+++ b/.buildkite/pipelines/pull-request/bwc-snapshots.yml
@@ -16,5 +16,5 @@ steps:
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
-      machineType: custom-32-98304
+      machineType: n1-standard-32
      buildDirectory: /dev/shm/bk
diff --git a/.buildkite/pipelines/pull-request/part-4-fips.yml b/.buildkite/pipelines/pull-request/part-4-fips.yml
new file mode 100644
index 0000000000000..11a50456ca4c0
--- /dev/null
+++ b/.buildkite/pipelines/pull-request/part-4-fips.yml
@@ -0,0 +1,11 @@
+config:
+  allow-labels: "Team:Security"
+steps:
+  - label: part-4-fips
+    command: .ci/scripts/run-gradle.sh -Dignore.tests.seed -Dtests.fips.enabled=true checkPart4
+    timeout_in_minutes: 300
+    agents:
+      provider: gcp
+      image: family/elasticsearch-ubuntu-2004
+      machineType: custom-32-98304
+      buildDirectory: /dev/shm/bk
diff --git a/.buildkite/pipelines/pull-request/part-4-windows.yml b/.buildkite/pipelines/pull-request/part-4-windows.yml
new file mode 100644
index 0000000000000..0493e8af0cf8f
--- /dev/null
+++ b/.buildkite/pipelines/pull-request/part-4-windows.yml
@@ -0,0 +1,14 @@
+config:
+  allow-labels: "test-windows"
+steps:
+  - label: part-4-windows
+    command: .\.buildkite\scripts\run-script.ps1 bash .buildkite/scripts/windows-run-gradle.sh
+    timeout_in_minutes: 300
+    agents:
+      provider: gcp
+      image: family/elasticsearch-windows-2022
+      machineType: custom-32-98304
+      diskType: pd-ssd
+      diskSizeGb: 350
+    env:
+      GRADLE_TASK: checkPart4
diff --git a/.buildkite/pipelines/pull-request/part-4.yml b/.buildkite/pipelines/pull-request/part-4.yml
new file mode 100644
index 0000000000000..af11f08953d07
--- /dev/null
+++ b/.buildkite/pipelines/pull-request/part-4.yml
@@ -0,0 +1,11 @@
+config:
+  skip-target-branches: "7.17"
+steps:
+  - label: part-4
+    command: .ci/scripts/run-gradle.sh -Dignore.tests.seed checkPart4
+    timeout_in_minutes: 300
+    agents:
+      provider: gcp
+      image: family/elasticsearch-ubuntu-2004
+      machineType: custom-32-98304
+      buildDirectory: /dev/shm/bk
diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json
index b59bdc79ad293..c4aa43c775b1e 100644
--- a/.buildkite/pull-requests.json
+++ b/.buildkite/pull-requests.json
@@ -11,7 +11,7 @@
       "set_commit_status": false,
       "build_on_commit": true,
       "build_on_comment": true,
-      "trigger_comment_regex": "run\\W+elasticsearch-ci.+",
+      "trigger_comment_regex": "(run\\W+elasticsearch-ci.+)|(^\\s*(buildkite\\s*)?test\\s+this(\\s+please)?)",
       "cancel_intermediate_builds": true,
       "cancel_intermediate_builds_on_comment": false
     },
diff --git a/.buildkite/scripts/dra-workflow.sh b/.buildkite/scripts/dra-workflow.sh
index 4611379009a08..ecfb8088072a0 100755
--- a/.buildkite/scripts/dra-workflow.sh
+++ b/.buildkite/scripts/dra-workflow.sh
@@ -13,8 +13,8 @@ fi
 echo --- Preparing
 
 # TODO move this to image
-sudo apt-get update -y
-sudo apt-get install -y libxml2-utils python3.10-venv
+sudo NEEDRESTART_MODE=l apt-get update -y
+sudo NEEDRESTART_MODE=l apt-get install -y libxml2-utils python3.10-venv
 
 RM_BRANCH="$BRANCH"
 if [[ "$BRANCH" == "main" ]]; then
diff --git a/.buildkite/scripts/pull-request/pipeline.test.ts b/.buildkite/scripts/pull-request/pipeline.test.ts
index e13b1e1f73278..d0634752260e4 100644
--- a/.buildkite/scripts/pull-request/pipeline.test.ts
+++ b/.buildkite/scripts/pull-request/pipeline.test.ts
@@ -12,21 +12,28 @@ describe("generatePipelines", () => {
     process.env["GITHUB_PR_TRIGGER_COMMENT"] = "";
   });
 
-  test("should generate correct pipelines with a non-docs change", () => {
-    const pipelines = generatePipelines(`${import.meta.dir}/mocks/pipelines`, ["build.gradle", "docs/README.asciidoc"]);
+  // Helper for testing pipeline generations that should be the same when using the overall ci trigger comment "buildkite test this"
+  const testWithTriggerCheck = (directory: string, changedFiles?: string[]) => {
+    const pipelines = generatePipelines(directory, changedFiles);
     expect(pipelines).toMatchSnapshot();
+
+    process.env["GITHUB_PR_TRIGGER_COMMENT"] = "buildkite test this";
+    const pipelinesWithTriggerComment = generatePipelines(directory, changedFiles);
+    expect(pipelinesWithTriggerComment).toEqual(pipelines);
+  };
+
+  test("should generate correct pipelines with a non-docs change", () => {
+    testWithTriggerCheck(`${import.meta.dir}/mocks/pipelines`, ["build.gradle", "docs/README.asciidoc"]);
   });
 
   test("should generate correct pipelines with only docs changes", () => {
-    const pipelines = generatePipelines(`${import.meta.dir}/mocks/pipelines`, ["docs/README.asciidoc"]);
-    expect(pipelines).toMatchSnapshot();
+    testWithTriggerCheck(`${import.meta.dir}/mocks/pipelines`, ["docs/README.asciidoc"]);
   });
 
   test("should generate correct pipelines with full BWC expansion", () => {
     process.env["GITHUB_PR_LABELS"] = "test-full-bwc";
-    const pipelines = generatePipelines(`${import.meta.dir}/mocks/pipelines`, ["build.gradle"]);
-    expect(pipelines).toMatchSnapshot();
+    testWithTriggerCheck(`${import.meta.dir}/mocks/pipelines`, ["build.gradle"]);
   });
 
   test("should generate correct pipeline when using a trigger comment for it", () => {
diff --git a/.buildkite/scripts/pull-request/pipeline.ts b/.buildkite/scripts/pull-request/pipeline.ts
index 600e0373d9cfc..65aec47fe3cc8 100644
--- a/.buildkite/scripts/pull-request/pipeline.ts
+++ b/.buildkite/scripts/pull-request/pipeline.ts
@@ -144,8 +144,12 @@ export const generatePipelines = (
     (pipeline) => changedFilesIncludedCheck(pipeline, changedFiles),
   ];
 
-  // When triggering via comment, we ONLY want to run pipelines that match the trigger phrase, regardless of labels, etc
-  if (process.env["GITHUB_PR_TRIGGER_COMMENT"]) {
+  // When triggering via the "run elasticsearch-ci/step-name" comment, we ONLY want to run pipelines that match the trigger phrase, regardless of labels, etc
+  // However, if we're using the overall CI trigger "[buildkite] test this [please]", we should use the regular filters above
+  if (
+    process.env["GITHUB_PR_TRIGGER_COMMENT"] &&
+    !process.env["GITHUB_PR_TRIGGER_COMMENT"].match(/^\s*(buildkite\s*)?test\s+this(\s+please)?/i)
+  ) {
     filters = [triggerCommentCheck];
   }
 
diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index 688d84e1c49c8..581ec2f1565b6 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -64,6 +64,7 @@ BWC_VERSION:
   - "7.17.13"
   - "7.17.14"
   - "7.17.15"
+  - "7.17.16"
   - "8.0.0"
   - "8.0.1"
   - "8.1.0"
@@ -104,4 +105,5 @@ BWC_VERSION:
   - "8.10.4"
   - "8.11.0"
   - "8.11.1"
+  - "8.11.2"
   - "8.12.0"
diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions
index fe40ec8fd1d29..7970d655f4014 100644
--- a/.ci/snapshotBwcVersions
+++ b/.ci/snapshotBwcVersions
@@ -1,4 +1,4 @@
 BWC_VERSION:
-  - "7.17.15"
-  - "8.11.1"
+  - "7.17.16"
+  - "8.11.2"
   - "8.12.0"
diff --git a/BUILDING.md b/BUILDING.md
index 814a9fb60ded8..127d422fad089 100644
--- a/BUILDING.md
+++ b/BUILDING.md
@@ -3,7 +3,7 @@ Building Elasticsearch with Gradle
 
 Elasticsearch is built using the [Gradle](https://gradle.org/) open source build tools.
 
-This document provides a general guidelines for using and working on the elasticsearch build logic.
+This document provides general guidelines for using and working on the Elasticsearch build logic.
 
 ## Build logic organisation
 
@@ -11,56 +11,56 @@ The Elasticsearch project contains 3 build-related projects that are included in
 
 ### `build-conventions`
 
-This project contains build conventions that are applied to all elasticsearch projects.
+This project contains build conventions that are applied to all Elasticsearch projects.
 
 ### `build-tools`
 
-This project contains all build logic that we publish for third party elasticsearch plugin authors.
+This project contains all build logic that we publish for third party Elasticsearch plugin authors.
 We provide the following plugins:
 
-- `elasticsearch.esplugin` - A gradle plugin for building an elasticsearch plugin.
-- `elasticsearch.testclusters` - A gradle plugin for setting up es clusters for testing within a build.
+- `elasticsearch.esplugin` - A Gradle plugin for building an Elasticsearch plugin.
+- `elasticsearch.testclusters` - A Gradle plugin for setting up es clusters for testing within a build.
 
-This project is published as part of the elasticsearch release and accessible by
+This project is published as part of the Elasticsearch release and accessible by
 `org.elasticsearch.gradle:build-tools:`.
 
 These build tools are also used by the `elasticsearch-hadoop` project maintained by elastic.
 
 ### `build-tools-internal`
 
-This project contains all elasticsearch project specific build logic that is not meant to be shared
+This project contains all Elasticsearch project specific build logic that is not meant to be shared
 with other internal or external projects.
 
 ## Build guidelines
 
 This is an intentionally small set of guidelines to build users and authors
-to ensure we keep the build consistent. We also publish elasticsearch build logic
-as `build-tools` to be usuable by thirdparty elasticsearch plugin authors. This is
+to ensure we keep the build consistent. We also publish Elasticsearch build logic
+as `build-tools` to be usable by third-party Elasticsearch plugin authors. This is
 also used by other elastic teams like `elasticsearch-hadoop`.
 Breaking changes should therefore be avoided and an appropriate deprecation cycle
 should be followed.
 
 ### Stay up to date
 
-The elasticsearch build usually uses the latest Gradle GA release. We stay as close to the
+The Elasticsearch build usually uses the latest Gradle GA release. We stay as close to the
 latest Gradle releases as possible. In certain cases an update is blocked by a breaking behaviour
-in Gradle. We're usually in contact with the gradle team here or working on a fix
+in Gradle. We're usually in contact with the Gradle team here or working on a fix
 in our build logic to resolve this.
 
 **The Elasticsearch build will fail if any deprecated Gradle API is used.**
 
 ### Follow Gradle best practices
 
-Tony Robalik has compiled a good list of rules that aligns with ours when it comes to writing and maintaining elasticsearch
-gradle build logic at http://autonomousapps.com/blog/rules-for-gradle-plugin-authors.html.
+Tony Robalik has compiled a good list of rules that aligns with ours when it comes to writing and maintaining Elasticsearch
+Gradle build logic at http://autonomousapps.com/blog/rules-for-gradle-plugin-authors.html.
 Our current build does not yet tick off all those rules everywhere but the ultimate goal is to follow these principles.
-The reasons for following those rules besides better readability or maintenance are also the goal to support newer gradle
+The reasons for following those rules besides better readability or maintenance are also the goal to support newer Gradle
 features that we will benefit from in terms of performance and reliability.
 E.g. [configuration-cache support](https://github.com/elastic/elasticsearch/issues/57918), [Project Isolation]([https://gradle.github.io/configuration-cache/#project_isolation) or
 [predictive test selection](https://gradle.com/gradle-enterprise-solutions/predictive-test-selection/)
 
 ### Make a change in the build
 
-There are a few guidelines to follow that should make your life easier to make changes to the elasticsearch build.
+There are a few guidelines to follow that should make your life easier to make changes to the Elasticsearch build.
 Please add a member of the `es-delivery` team as a reviewer if you're making non-trivial changes to the build.
 
 #### Adding or updating a dependency
 
@@ -93,13 +93,13 @@ We prefer sha256 checksums as md5 and sha1 are not considered safe anymore these
 will have the `origin` attribute been set to `Generated by Gradle`.
 
 >A manual confirmation of the Gradle generated checksums is currently not mandatory.
->If you want to add a level of verification you can manually confirm the checksum (e.g by looking it up on the website of the library)
+>If you want to add a level of verification you can manually confirm the checksum (e.g. by looking it up on the website of the library)
 >Please replace the content of the `origin` attribute by `official site` in that case.
 >
 
-#### Custom Plugin and Task implementations
+#### Custom plugin and task implementations
 
-Build logic that is used across multiple subprojects should considered to be moved into a Gradle plugin with according Gradle task implmentation.
+Build logic that is used across multiple subprojects should be considered to be moved into a Gradle plugin with a corresponding Gradle task implementation.
 Elasticsearch specific build logic is located in the `build-tools-internal` subproject including integration tests.
 
 - Gradle plugins and Tasks should be written in Java
@@ -108,7 +108,7 @@ Elasticsearch specific build logic is located in the `build-tools-internal` subp
 
 #### Declaring tasks
 
-The elasticsearch build makes use of the [task avoidance API](https://docs.gradle.org/current/userguide/task_configuration_avoidance.html) to keep the configuration time of the build low.
+The Elasticsearch build makes use of the [task avoidance API](https://docs.gradle.org/current/userguide/task_configuration_avoidance.html) to keep the configuration time of the build low.
 
 When declaring tasks (in build scripts or custom plugins) this means that we want to _register_ a task like:
 
@@ -118,18 +118,18 @@ instead of eagerly _creating_ the task:
 
     task someTask { ... }
 
-The major difference between these two syntaxes is, that the configuration block of an registered task will only be executed when the task is actually created due to the build requires that task to run. The configuration block of an eagerly created tasks will be executed immediately.
+The major difference between these two syntaxes is that the configuration block of a registered task will only be executed when the task is actually created because the build requires that task to run. The configuration block of an eagerly created task will be executed immediately.
 
-By actually doing less in the gradle configuration time as only creating tasks that are requested as part of the build and by only running the configurations for those requested tasks, using the task avoidance api contributes a major part in keeping our build fast.
+By actually doing less at Gradle configuration time, as only tasks that are requested as part of the build are created and only the configurations for those requested tasks are run, using the task avoidance API contributes a major part in keeping our build fast.
 
 #### Registering test clusters
 
-When using the elasticsearch test cluster plugin we want to use (similar to the task avoidance API) a Gradle API to create domain objects lazy or only if required by the build.
+When using the Elasticsearch test cluster plugin we want to use (similar to the task avoidance API) a Gradle API to create domain objects lazily or only if required by the build.
 Therefore we register test cluster by using the following syntax:
 
     def someClusterProvider = testClusters.register('someCluster') { ... }
 
-This registers a potential testCluster named `somecluster` and provides a provider instance, but doesn't create it yet nor configures it. This makes the gradle configuration phase more efficient by
+This registers a potential testCluster named `somecluster` and provides a provider instance, but doesn't create it yet nor configures it. This makes the Gradle configuration phase more efficient by
 doing less.
 
 To wire this registered cluster into a `TestClusterAware` task (e.g. `RestIntegTest`) you can resolve the actual cluster from the provider instance:
@@ -139,23 +139,23 @@ To wire this registered cluster into a `TestClusterAware` task (e.g. `RestIntegT
       nonInputProperties.systemProperty 'tests.leader_host', "${-> someClusterProvider.get().getAllHttpSocketURI().get(0)}"
   }
 
-#### Adding additional integration tests
+#### Adding integration tests
 
-Additional integration tests for a certain elasticsearch modules that are specific to certain cluster configuration can be declared in a separate so called `qa` subproject of your module.
+Additional integration tests for a certain Elasticsearch module that are specific to a certain cluster configuration can be declared in a separate so called `qa` subproject of your module.
 
 The benefit of a dedicated project for these tests are:
 
-- `qa` projects are dedicated two specific usecases and easier to maintain
+- `qa` projects are dedicated to specific use-cases and easier to maintain
 - It keeps the specific test logic separated from the common test logic.
 - You can run those tests in parallel to other projects of the build.
 
 #### Using test fixtures
 
-Sometimes we want to share test fixtures to setup the code under test across multiple projects. There are basically two ways doing so.
+Sometimes we want to share test fixtures to set up the code under test across multiple projects. There are basically two ways of doing so.
 
-Ideally we would use the build-in [java-test-fixtures](https://docs.gradle.org/current/userguide/java_testing.html#sec:java_test_fixtures) gradle plugin.
+Ideally we would use the built-in [java-test-fixtures](https://docs.gradle.org/current/userguide/java_testing.html#sec:java_test_fixtures) Gradle plugin.
 This plugin relies on having a separate sourceSet for the test fixtures code.
 
-In the elasticsearch codebase we have test fixtures and actual tests within the same sourceSet. Therefore we introduced the `elasticsearch.internal-test-artifact` plugin to provides another build artifact of your project based on the `test` sourceSet.
+In the Elasticsearch codebase we have test fixtures and actual tests within the same sourceSet. Therefore we introduced the `elasticsearch.internal-test-artifact` plugin to provide another build artifact of your project based on the `test` sourceSet.
 
 This artifact can be resolved by the consumer project as shown in the example below:
 
@@ -168,9 +168,9 @@ dependencies {
 ```
 
 This test artifact mechanism makes use of the concept of [component capabilities](https://docs.gradle.org/current/userguide/component_capabilities.html)
-similar to how the gradle build-in `java-test-fixtures` plugin works.
+similar to how the Gradle built-in `java-test-fixtures` plugin works.
 
-`testArtifact` is a shortcut declared in the elasticsearch build. Alternatively you can declare the dependency via
+`testArtifact` is a shortcut declared in the Elasticsearch build. Alternatively you can declare the dependency via
 
 ```
 dependencies {
@@ -186,7 +186,7 @@ dependencies {
 
 To test an unreleased development version of a third party dependency you have several options.
 
-#### How to use a maven based third party dependency via mavenlocal?
+#### How to use a Maven based third party dependency via `mavenlocal`?
 
 1. Clone the third party repository locally
 2. Run `mvn install` to install copy into your `~/.m2/repository` folder.
 3. Add this to the root build script:
 
    ```
     allprojects {
       repositories {
         mavenLocal()
       }
     }
   ```
 4. Update the version in your dependency declaration accordingly (likely a snapshot version)
-5. Run the gradle build as needed
+5. Run the Gradle build as needed
 
-#### How to use a maven built based third party dependency with jitpack repository?
+#### How to use a Maven built based third party dependency with JitPack repository?
 
-https://jitpack.io is an adhoc repository that supports building maven projects transparently in the background when
-resolving unreleased snapshots from a github repository. This approach also works as temporally solution
+https://jitpack.io is an ad hoc repository that supports building Maven projects transparently in the background when
+resolving unreleased snapshots from a GitHub repository. This approach also works as a temporary solution
 and is compliant with our CI builds.
 
 1. Add the JitPack repository to the root build file:
-
 ```
 allprojects {
   repositories {
@@ -227,7 +226,7 @@ dependencies {
 
 As version you could also use a certain short commit hash or `main-SNAPSHOT`.
 In addition to snapshot builds JitPack supports building Pull Requests. Simply use PR-SNAPSHOT as the version.
 
-3. Run the gradle build as needed. Keep in mind the initial resolution might take a bit longer as this needs to be built
+3. Run the Gradle build as needed. Keep in mind the initial resolution might take a bit longer as this needs to be built
 by JitPack in the background before we can resolve the ad hoc built dependency.
 
 ---
@@ -240,7 +239,7 @@ not want to ship unreleased libraries into our releases.
 
 #### How to use a custom third party artifact?
 
-For third party libraries that are not built with maven (e.g. ant) or provided as a plain jar artifact we can leverage
+For third party libraries that are not built with Maven (e.g. Ant) or provided as a plain jar artifact we can leverage
 a flat directory repository that resolves artifacts from a flat directory on your filesystem.
 
 1. Put the jar artifact with the format `artifactName-version.jar` into a directory named `localRepo` (you have to create this manually)
@@ -264,7 +263,7 @@ allprojects {
       implementation 'x:jmxri:1.2.1'
   }
 ```
-4. Run the gradle build as needed with `--write-verification-metadata` to ensure the gradle dependency verification does not fail on your custom dependency.
+4. Run the Gradle build as needed with `--write-verification-metadata` to ensure the Gradle dependency verification does not fail on your custom dependency.
 
 ---
 **NOTE**
@@ -273,5 +272,5 @@ As Gradle prefers to use modules whose descriptor has been created from real met
 flat directory repositories cannot be used to override artifacts with real meta-data from other repositories declared in the build.
 For example, if Gradle finds only `jmxri-1.2.1.jar` in a flat directory repository, but `jmxri-1.2.1.pom` in another repository
 that supports meta-data, it will use the second repository to provide the module.
-Therefore, it is recommended to declare a version that is not resolvable from public repositories we use (e.g. maven central)
+Therefore, it is recommended to declare a version that is not resolvable from public repositories we use (e.g. Maven Central)
 ---
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 4f9f432bca467..db8cca17a5606 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,4 +1,4 @@
-Contributing to elasticsearch
+Contributing to Elasticsearch
 =============================
 Elasticsearch is a free and open project and we love to receive contributions from our community — you! There are many ways to contribute, from writing tutorials or blog posts, improving the documentation, submitting bug reports and feature requests or writing code which can be incorporated into Elasticsearch itself.
@@ -54,7 +54,7 @@ The process for contributing to any of the [Elastic repositories](https://github
 
 ### Fork and clone the repository
 
 You will need to fork the main Elasticsearch code or documentation repository and clone it to your local machine. See
-[github help page](https://help.github.com/articles/fork-a-repo) for help.
+[GitHub help page](https://help.github.com/articles/fork-a-repo) for help.
 
 Further instructions for specific projects are given below.
 
@@ -69,7 +69,7 @@ cycle.
 
 * Lines that are not part of your change should not be edited (e.g. don't format unchanged lines, don't reorder existing imports)
 * Add the appropriate [license headers](#license-headers) to any new files
-* For contributions involving the elasticsearch build you can find details about the build setup in the
+* For contributions involving the Elasticsearch build you can find details about the build setup in the
   [BUILDING](BUILDING.md) file
 
 ### Submitting your changes
 
@@ -89,7 +89,6 @@ Once your changes and tests are ready to submit for review:
 
     Update your local repository with the most recent code from the main Elasticsearch repository, and rebase your branch on top of the latest main branch. We prefer your initial changes to be squashed into a single commit. Later, if we ask you to make changes, add them as separate commits. This makes them easier to review. As a final step before merging we will either ask you to squash all commits yourself or we'll do it for you.
-
 4. Submit a pull request
 
     Push your local changes to your forked copy of the repository and [submit a pull request](https://help.github.com/articles/using-pull-requests). In the pull request, choose a title which sums up the changes that you have made, and in the body provide more details about what your changes do. Also mention the number of the issue where discussion has taken place, eg "Closes #123".
@@ -121,8 +120,7 @@ using the wrapper via the `gradlew` script on Unix systems or `gradlew.bat`
 script on Windows in the root of the repository. The examples below show the
 usage on Unix.
 
-We support development in IntelliJ versions IntelliJ 2020.1 and
-onwards.
+We support development in [IntelliJ IDEA] versions 2020.1 and onwards.
 
 [Docker](https://docs.docker.com/install/) is required for building some Elasticsearch artifacts and executing certain test suites. You can run Elasticsearch without building all the artifacts with:
@@ -135,7 +133,7 @@ specifically these lines tell you that Elasticsearch is ready:
 
     [2020-05-29T14:50:35,167][INFO ][o.e.h.AbstractHttpServerTransport] [runTask-0] publish_address {127.0.0.1:9200}, bound_addresses {[::1]:9200}, {127.0.0.1:9200}
     [2020-05-29T14:50:35,169][INFO ][o.e.n.Node               ] [runTask-0] started
 
-But to be honest its typically easier to wait until the console stops scrolling
+But to be honest it's typically easier to wait until the console stops scrolling
 and then run `curl` in another window like this:
 
     curl -u elastic:password localhost:9200
 
@@ -143,7 +141,7 @@ and then run `curl` in another window like this:
 
 ### Importing the project into IntelliJ IDEA
 
-The minimum IntelliJ IDEA version required to import the Elasticsearch project is 2020.1
+The minimum IntelliJ IDEA version required to import the Elasticsearch project is 2020.1.
 Elasticsearch builds using Java 17. When importing into IntelliJ you will need
 to define an appropriate SDK. The convention is that **this SDK should be named
 "17"** so that the project import will detect it automatically. For more details
@@ -173,7 +171,7 @@ action is required.
 
 #### Formatting
 
-Elasticsearch code is automatically formatted with [spotless], backed by the
+Elasticsearch code is automatically formatted with [Spotless], backed by the
 Eclipse formatter. You can do the same in IntelliJ with the
 [Eclipse Code Formatter] so that you can apply the correct formatting directly
 in your IDE. The configuration for the plugin is held in
@@ -198,7 +196,7 @@ Alternative manual steps for IntelliJ.
 
 3. Navigate to the file `build-conventions/formatterConfig.xml`
 4. Click "OK"
 
-### REST Endpoint Conventions
+### REST endpoint conventions
 
 Elasticsearch typically uses singular nouns rather than plurals in URLs.
 For example:
 
@@ -214,7 +212,7 @@ but not:
 
 You may find counterexamples, but new endpoints should use the singular form.
 
-### Java Language Formatting Guidelines
+### Java language formatting guidelines
 
 Java files in the Elasticsearch codebase are automatically formatted using
 the [Spotless Gradle] plugin. All new projects are automatically formatted,
@@ -249,13 +247,13 @@ Please follow these formatting guidelines:
   only do this where the benefit clearly outweighs the decrease in formatting
   consistency.
 * Note that Javadoc and block comments i.e. `/* ... */` are not formatted,
-  but line comments i.e `// ...` are.
+  but line comments i.e. `// ...` are.
 * Negative boolean expressions must use the form `foo == false` instead of
   `!foo` for better readability of the code. This is enforced via
   Checkstyle. Conversely, you should not write e.g. `if (foo == true)`, but
   just `if (foo)`.
 
-#### Editor / IDE Support
+#### Editor / IDE support
 
 IntelliJ IDEs can
 [import](https://blog.jetbrains.com/idea/2014/01/intellij-idea-13-importing-code-formatter-settings-from-eclipse/)
@@ -316,7 +314,7 @@ is to be helpful, not to turn writing code into a chore.
    this is critical to understanding the code e.g. documenting the
    subtleties of the implementation of a private method. The point here
    is that implementations will change over time, and the Javadoc is
-   less likely to become out-of-date if it only talks about the what is
+   less likely to become out-of-date if it only talks about the purpose of
    the code, not what it does.
 8. Examples in Javadoc can be very useful, so feel free to add some if
    you can reasonably do so i.e. if it takes a whole page of code to set
@@ -362,7 +360,7 @@ Finally, use your judgement! Base your decisions on what will help other
 developers - including yourself, when you come back to some code
 3 months in the future, having forgotten how it works.
 
-### License Headers
+### License headers
 
 We require license headers on all Java files. With the exception of the
 top-level `x-pack` directory, all contributed code should have the following
@@ -433,7 +431,7 @@ In rare situations you may want to configure your `Logger` slightly
 differently, perhaps specifying a different class or maybe using one of the
 methods on `org.elasticsearch.common.logging.Loggers` instead.
 
-If the log message includes values from your code then you must use use
+If the log message includes values from your code then you must use
 placeholders rather than constructing the string yourself using simple
 concatenation.
 Consider wrapping the values in `[...]` to help distinguish them from the
 static part of the message:
 
@@ -461,18 +459,18 @@ unit tests, especially if there is complex logic for computing what is logged
 and when to log it. You can use a `org.elasticsearch.test.MockLogAppender` to
 make assertions about the logs that are being emitted.
 
-Logging is a powerful diagnostic technique but it is not the only possibility.
+Logging is a powerful diagnostic technique, but it is not the only possibility.
 You should also consider exposing some information about your component via an
-API instead of in logs. For instance you can implement APIs to report its
+API instead of in logs. For instance, you can implement APIs to report its
 current status, various statistics, and maybe even details of recent failures.
 
 #### Log levels
 
-Each log message is written at a particular _level_. By default Elasticsearch
+Each log message is written at a particular _level_. By default, Elasticsearch
 will suppress messages at the two most verbose levels, `TRACE` and `DEBUG`, and
 will output messages at all other levels. Users can configure which levels of
 message are written by each logger at runtime, but you should expect everyone
-to run with the default configuration almost all of the time and choose your
+to run with the default configuration almost all the time and choose your
 levels accordingly.
 
 The guidance in this section is subjective in some areas. When in doubt,
@@ -570,7 +568,7 @@ an index template is created or updated:
 
 `INFO`-level logging is enabled by default so its target audience is the
 general population of users and administrators. You should use user-facing
 terminology and ensure that messages at this level are self-contained. In
-general you shouldn't log unusual events, particularly exceptions with stack
+general, you shouldn't log unusual events, particularly exceptions with stack
 traces, at `INFO` level. If the event is relatively benign then use `DEBUG`,
 whereas if the user should be notified then use `WARN`.
 
@@ -629,7 +627,7 @@ the logs.
 
 ##### `ERROR`
 
-This is the next least verbose level after `WARN`. In theory it is possible for
+This is the next least verbose level after `WARN`. In theory, it is possible for
 users to suppress messages at `WARN` and below, believing this to help them
 focus on the most important `ERROR` messages, but in practice in Elasticsearch
 this will hide so much useful information that the resulting logs will be
 
@@ -660,7 +658,7 @@ numbering scheme separate to release version. The main ones are
 inter-node binary protocol and index data + metadata respectively.
 
 Separated version numbers are comprised of an integer number. The semantic
-meaing of a version number are defined within each `*Version` class. There
+meaning of a version number is defined within each `*Version` class. There
 is no direct mapping between separated version numbers and the release version.
 The versions used by any particular instance of Elasticsearch can be obtained
 by querying `/_nodes/info` on the node.
 
@@ -675,13 +673,29 @@ number, there are a few rules that need to be followed:
    once merged into `main`.
 2. To create a new component version, add a new constant to the respective
    class with a descriptive name of the change being made. Increment the integer
-   number according to the partciular `*Version` class.
+   number according to the particular `*Version` class.
If your pull request has a conflict around your new version constant, you need to update your PR from `main` and change your PR to use the next available version number. -### Creating A Distribution +### Checking for cluster features + +As part of developing a new feature or change, you might need to determine +if all nodes in a cluster have been upgraded to support your new feature. +This can be done using `FeatureService`. To define and check for a new +feature in a cluster: + +1. Define a new `NodeFeature` constant with a unique id for the feature + in a class related to the change you're doing. +2. Return that constant from an instance of `FeatureSpecification.getFeatures`, + either an existing implementation or a new implementation. Make sure + the implementation is added as an SPI implementation in `module-info.java` + and `META-INF/services`. +3. To check if all nodes in the cluster support the new feature, call +`FeatureService.clusterHasFeature(ClusterState, NodeFeature)` + +### Creating a distribution Run all build commands from within the root directory: @@ -711,7 +725,7 @@ The archive distributions (tar and zip) can be found under: ./distribution/archives/(darwin-tar|linux-tar|windows-zip|oss-darwin-tar|oss-linux-tar|oss-windows-zip)/build/distributions/ -### Running The Full Test Suite +### Running the full test suite Before submitting your changes, run the test suite to make sure that nothing is broken, with: @@ -736,14 +750,14 @@ a test that passes locally, may actually fail later due to random settings or data input. To make tests repeatable, a `REPRODUCE` line in CI will also include the `-Dtests.seed` parameter. -When running locally, gradle does its best to take advantage of cached results. +When running locally, Gradle does its best to take advantage of cached results. So, if the code is unchanged, running the same test with the same `-Dtests.seed` repeatedly may not actually run the test if it has passed with that seed in the previous execution. A way around this is to pass a separate parameter -to adjust the command options seen by gradle. +to adjust the command options seen by Gradle. A simple option may be to add the parameter `-Dtests.timestamp=$(date +%s)` which will give the current time stamp as a parameter, thus making the parameters -sent to gradle unique and bypassing the cache. +sent to Gradle unique and bypassing the cache. ### Project layout @@ -760,9 +774,9 @@ Builds our tar and zip archives and our rpm and deb packages. Libraries used to build other parts of the project. These are meant to be internal rather than general purpose. We have no plans to [semver](https://semver.org/) their APIs or accept feature requests for them. -We publish them to maven central because they are dependencies of our plugin -test framework, high level rest client, and jdbc driver but they really aren't -general purpose enough to *belong* in maven central. We're still working out +We publish them to Maven Central because they are dependencies of our plugin +test framework, high level rest client, and jdbc driver, but they really aren't +general purpose enough to *belong* in Maven Central. We're still working out what to do here. #### `modules` @@ -773,7 +787,7 @@ they depend on libraries that we don't believe *all* of Elasticsearch should depend on. 
For example, reindex requires the `connect` permission so it can perform -reindex-from-remote but we don't believe that the *all* of Elasticsearch should +reindex-from-remote, but we don't believe that the *all* of Elasticsearch should have the "connect". For another example, Painless is implemented using antlr4 and asm and we don't believe that *all* of Elasticsearch should have access to them. @@ -812,7 +826,7 @@ qa project, open a PR and be ready to discuss options. #### `server` The server component of Elasticsearch that contains all of the modules and -plugins. Right now things like the high level rest client depend on the server +plugins. Right now things like the high level rest client depend on the server, but we'd like to fix that in the future. #### `test` @@ -832,7 +846,7 @@ the `qa` subdirectory functions just like the top level `qa` subdirectory. The `plugin` subdirectory contains the x-pack module which runs inside the Elasticsearch process. -### Gradle Build +### Gradle build We use Gradle to build Elasticsearch because it is flexible enough to not only build and package Elasticsearch, but also orchestrate all of the ways that we @@ -849,16 +863,20 @@ common configurations in our build and how we use them: at compile and runtime but are not exposed as a compile dependency to other dependent projects. Dependencies added to the `implementation` configuration are considered an implementation detail that can be changed at a later date without affecting any dependent projects. +
`api`
Dependencies that are used as compile and runtime dependencies of a project and are considered part of the external api of the project.
+
`runtimeOnly`
Dependencies that are not on the classpath at compile time but are on the classpath at runtime. We mostly use this configuration to make sure that we do not accidentally compile against dependencies of our dependencies, also known as "transitive" dependencies.
+
`compileOnly`
Code that is on the classpath at compile time but that should not be shipped with the project because it is "provided" by the runtime somehow. Elasticsearch plugins use this configuration to include dependencies that are bundled with Elasticsearch's server.
+
`testImplementation`
Code that is on the classpath for compiling tests that are part of this project but not production code. The canonical example of this is `junit`.
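As an illustrative sketch of how these configurations might be combined in a `build.gradle` file (the coordinates below are placeholders, not real dependencies of any Elasticsearch project):

```groovy
dependencies {
  implementation 'org.example:internal-helper:1.0'  // implementation detail, hidden from dependent projects
  api 'org.example:public-model:1.0'                // part of this project's external api
  runtimeOnly 'org.example:runtime-driver:1.0'      // needed at runtime, never compiled against
  compileOnly 'org.example:server-provided:1.0'     // "provided" by the Elasticsearch server at runtime
  testImplementation 'junit:junit:4.13.2'           // test-only, the canonical example
}
```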
@@ -881,7 +899,7 @@ time is very limited. In some cases the time we would need to spend on reviews would outweigh the benefits of a change by preventing us from working on other more beneficial changes instead. -Please discuss your change in a Github issue before spending much time on its +Please discuss your change in a GitHub issue before spending much time on its implementation. We sometimes have to reject contributions that duplicate other efforts, take the wrong approach to solving a problem, or solve a problem which does not need solving. An up-front discussion often saves a good deal of wasted @@ -964,8 +982,8 @@ Finally, we require that you run `./gradlew check` before submitting a non-documentation contribution. This is mentioned above, but it is worth repeating in this section because it has come up in this context. -[intellij]: https://blog.jetbrains.com/idea/2017/07/intellij-idea-2017-2-is-here-smart-sleek-and-snappy/ +[IntelliJ IDEA]: https://www.jetbrains.com/idea/ [Checkstyle]: https://plugins.jetbrains.com/plugin/1065-checkstyle-idea -[spotless]: https://github.com/diffplug/spotless +[Spotless]: https://github.com/diffplug/spotless [Eclipse Code Formatter]: https://plugins.jetbrains.com/plugin/6546-eclipse-code-formatter [Spotless Gradle]: https://github.com/diffplug/spotless/tree/main/plugin-gradle diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 0393cf92776fa..96f94755a2758 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -45,7 +45,7 @@ run it using Gradle: ==== Launching and debugging from an IDE -If you want to run Elasticsearch from your IDE, the `./gradlew run` task +If you want to run and debug Elasticsearch from your IDE, the `./gradlew run` task supports a remote debugging option. Run the following from your terminal: --------------------------------------------------------------------------- @@ -55,7 +55,7 @@ supports a remote debugging option. Run the following from your terminal: Next start the "Debug Elasticsearch" run configuration in IntelliJ. This will enable the IDE to connect to the process and allow debug functionality. -As such the IDE needs to be instructed to listen for connections on this port. +As such the IDE needs to be instructed to listen for connections on the debug port. Since we might run multiple JVMs as part of configuring and starting the cluster it's recommended to configure the IDE to initiate multiple listening attempts. In case of IntelliJ, this option is called "Auto restart" and needs to be checked. @@ -64,6 +64,22 @@ NOTE: If you have imported the project into IntelliJ according to the instructio link:/CONTRIBUTING.md#importing-the-project-into-intellij-idea[CONTRIBUTING.md] then a debug run configuration named "Debug Elasticsearch" will be created for you and configured appropriately. +===== Debugging the CLI launcher + +The gradle task does not start the Elasticsearch server process directly; like in the Elasticsearch distribution, +the job of starting the server process is delegated to a launcher CLI tool. If you need to debug the launcher itself, +add the following option to the `run` task: +--------------------------------------------------------------------------- +./gradlew run --debug-cli-jvm +--------------------------------------------------------------------------- +This option can be specified in isolation or combined with `--debug-jvm`. 
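For example, to debug both the launcher and the server process in the same run (using the same listening setup described above):
---------------------------------------------------------------------------
./gradlew run --debug-jvm --debug-cli-jvm
---------------------------------------------------------------------------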
Since the CLI launcher lifespan may overlap +with the server process lifespan, the CLI launcher process will be started on a different port (5107 for the first node, +5108 and following for additional cluster nodes). + +As with the `--debug-jvm` command, the IDE needs to be instructed to listen for connections on the debug port. +You need to configure and start an appropriate Remote JVM Debug configuration, e.g. by cloning and editing +the "Debug Elasticsearch" run configuration to point to the correct debug port. + ==== Disabling assertions When running Elasticsearch with `./gradlew run`, assertions are enabled by @@ -103,7 +119,8 @@ password: `elastic-password`. - In order to start a node with a different max heap space add: `-Dtests.heap.size=4G` - In order to use a custom data directory: `--data-dir=/tmp/foo` - In order to preserve data in between executions: `--preserve-data` -- In order to remotely attach a debugger to the process: `--debug-jvm` +- In order to remotely attach a debugger to the server process: `--debug-jvm` +- In order to remotely attach a debugger to the CLI launcher process: `--debug-cli-jvm` - In order to set a different keystore password: `--keystore-password` - In order to set an Elasticsearch setting, provide a setting with the following prefix: `-Dtests.es.` - In order to pass a JVM setting, e.g. to disable assertions: `-Dtests.jvm.argline="-da"` diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/MultivalueDedupeBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/MultivalueDedupeBenchmark.java index c6d5cd91e7ecb..09cdc8b269ad3 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/MultivalueDedupeBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/MultivalueDedupeBenchmark.java @@ -159,18 +159,18 @@ public void setup() { @Benchmark @OperationsPerInvocation(AggregatorBenchmark.BLOCK_LENGTH) public void adaptive() { - MultivalueDedupe.dedupeToBlockAdaptive(Block.Ref.floating(block), BlockFactory.getNonBreakingInstance()).close(); + MultivalueDedupe.dedupeToBlockAdaptive(block, BlockFactory.getNonBreakingInstance()).close(); } @Benchmark @OperationsPerInvocation(AggregatorBenchmark.BLOCK_LENGTH) public void copyAndSort() { - MultivalueDedupe.dedupeToBlockUsingCopyAndSort(Block.Ref.floating(block), BlockFactory.getNonBreakingInstance()).close(); + MultivalueDedupe.dedupeToBlockUsingCopyAndSort(block, BlockFactory.getNonBreakingInstance()).close(); } @Benchmark @OperationsPerInvocation(AggregatorBenchmark.BLOCK_LENGTH) public void copyMissing() { - MultivalueDedupe.dedupeToBlockUsingCopyMissing(Block.Ref.floating(block), BlockFactory.getNonBreakingInstance()).close(); + MultivalueDedupe.dedupeToBlockUsingCopyMissing(block, BlockFactory.getNonBreakingInstance()).close(); } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/TopNBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/TopNBenchmark.java index 84f7cec47b737..d723ea3e1a6b3 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/TopNBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/TopNBenchmark.java @@ -24,6 +24,7 @@ import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.topn.TopNEncoder; import org.elasticsearch.compute.operator.topn.TopNOperator; +import 
org.elasticsearch.indices.breaker.CircuitBreakerMetrics; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.openjdk.jmh.annotations.Benchmark; @@ -103,6 +104,7 @@ private static Operator operator(String data, int topCount) { default -> throw new IllegalArgumentException("unsupported data type [" + data + "]"); }; CircuitBreakerService breakerService = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, Settings.EMPTY, List.of(), ClusterSettings.createBuiltInClusterSettings() diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java index 9fa876a00c35c..afe8377d3e58c 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java @@ -8,8 +8,11 @@ package org.elasticsearch.benchmark.compute.operator; +import org.apache.lucene.document.FieldType; import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.StoredField; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; @@ -19,6 +22,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; @@ -30,14 +34,16 @@ import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.LongVector; import org.elasticsearch.compute.data.Page; -import org.elasticsearch.compute.lucene.BlockReaderFactories; import org.elasticsearch.compute.lucene.LuceneSourceOperator; import org.elasticsearch.compute.lucene.ValuesSourceReaderOperator; import org.elasticsearch.compute.operator.topn.TopNOperator; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.BlockLoader; import org.elasticsearch.index.mapper.KeywordFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.search.lookup.SearchLookup; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -56,7 +62,9 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.PrimitiveIterator; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.stream.IntStream; @@ -93,18 +101,118 @@ public class ValuesSourceReaderBenchmark { } } - private static BlockLoader blockLoader(String name) { + private static List fields(String name) { return switch (name) { - case "long" -> numericBlockLoader(name, NumberFieldMapper.NumberType.LONG); - case "int" -> numericBlockLoader(name, NumberFieldMapper.NumberType.INTEGER); - case "double" -> numericBlockLoader(name, NumberFieldMapper.NumberType.DOUBLE); - case "keyword" -> new 
KeywordFieldMapper.KeywordFieldType(name).blockLoader(null); - default -> throw new IllegalArgumentException("can't read [" + name + "]"); + case "3_stored_keywords" -> List.of( + new ValuesSourceReaderOperator.FieldInfo("keyword_1", List.of(blockLoader("stored_keyword_1"))), + new ValuesSourceReaderOperator.FieldInfo("keyword_2", List.of(blockLoader("stored_keyword_2"))), + new ValuesSourceReaderOperator.FieldInfo("keyword_3", List.of(blockLoader("stored_keyword_3"))) + ); + default -> List.of(new ValuesSourceReaderOperator.FieldInfo(name, List.of(blockLoader(name)))); }; } - private static BlockLoader numericBlockLoader(String name, NumberFieldMapper.NumberType numberType) { - return new NumberFieldMapper.NumberFieldType(name, numberType).blockLoader(null); + enum Where { + DOC_VALUES, + SOURCE, + STORED; + } + + private static BlockLoader blockLoader(String name) { + Where where = Where.DOC_VALUES; + if (name.startsWith("stored_")) { + name = name.substring("stored_".length()); + where = Where.STORED; + } else if (name.startsWith("source_")) { + name = name.substring("source_".length()); + where = Where.SOURCE; + } + switch (name) { + case "long": + return numericBlockLoader(name, where, NumberFieldMapper.NumberType.LONG); + case "int": + return numericBlockLoader(name, where, NumberFieldMapper.NumberType.INTEGER); + case "double": + return numericBlockLoader(name, where, NumberFieldMapper.NumberType.DOUBLE); + case "keyword": + name = "keyword_1"; + } + if (name.startsWith("keyword")) { + boolean syntheticSource = false; + FieldType ft = new FieldType(KeywordFieldMapper.Defaults.FIELD_TYPE); + switch (where) { + case DOC_VALUES: + break; + case SOURCE: + ft.setDocValuesType(DocValuesType.NONE); + break; + case STORED: + ft.setStored(true); + ft.setDocValuesType(DocValuesType.NONE); + syntheticSource = true; + break; + } + ft.freeze(); + return new KeywordFieldMapper.KeywordFieldType( + name, + ft, + Lucene.KEYWORD_ANALYZER, + Lucene.KEYWORD_ANALYZER, + Lucene.KEYWORD_ANALYZER, + new KeywordFieldMapper.Builder(name, IndexVersion.current()).docValues(ft.docValuesType() != DocValuesType.NONE), + syntheticSource + ).blockLoader(new MappedFieldType.BlockLoaderContext() { + @Override + public String indexName() { + return "benchmark"; + } + + @Override + public SearchLookup lookup() { + throw new UnsupportedOperationException(); + } + + @Override + public Set sourcePaths(String name) { + return Set.of(name); + } + + @Override + public String parentField(String field) { + throw new UnsupportedOperationException(); + } + }); + } + throw new IllegalArgumentException("can't read [" + name + "]"); + } + + private static BlockLoader numericBlockLoader(String name, Where where, NumberFieldMapper.NumberType numberType) { + boolean stored = false; + boolean docValues = true; + switch (where) { + case DOC_VALUES: + break; + case SOURCE: + stored = true; + docValues = false; + break; + case STORED: + throw new UnsupportedOperationException(); + } + return new NumberFieldMapper.NumberFieldType( + name, + numberType, + true, + stored, + docValues, + true, + null, + Map.of(), + null, + false, + null, + null + ).blockLoader(null); } /** @@ -122,7 +230,7 @@ private static BlockLoader numericBlockLoader(String name, NumberFieldMapper.Num @Param({ "in_order", "shuffled", "shuffled_singles" }) public String layout; - @Param({ "long", "int", "double", "keyword" }) + @Param({ "long", "int", "double", "keyword", "stored_keyword", "3_stored_keywords" }) public String name; private Directory directory; @@ -134,9 
+242,11 @@ private static BlockLoader numericBlockLoader(String name, NumberFieldMapper.Num public void benchmark() { ValuesSourceReaderOperator op = new ValuesSourceReaderOperator( BlockFactory.getNonBreakingInstance(), - List.of(BlockReaderFactories.loaderToFactory(reader, blockLoader(name))), - 0, - name + fields(name), + List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> { + throw new UnsupportedOperationException("can't load _source here"); + })), + 0 ); long sum = 0; for (Page page : pages) { @@ -160,7 +270,7 @@ public void benchmark() { sum += (long) values.getDouble(p); } } - case "keyword" -> { + case "keyword", "stored_keyword" -> { BytesRef scratch = new BytesRef(); BytesRefVector values = op.getOutput().getBlock(1).asVector(); for (int p = 0; p < values.getPositionCount(); p++) { @@ -170,21 +280,59 @@ public void benchmark() { sum += Integer.parseInt(r.utf8ToString()); } } + case "3_stored_keywords" -> { + BytesRef scratch = new BytesRef(); + Page out = op.getOutput(); + for (BytesRefVector values : new BytesRefVector[] { + out.getBlock(1).asVector(), + out.getBlock(2).asVector(), + out.getBlock(3).asVector() }) { + + for (int p = 0; p < values.getPositionCount(); p++) { + BytesRef r = values.getBytesRef(p, scratch); + r.offset++; + r.length--; + sum += Integer.parseInt(r.utf8ToString()); + } + } + } } } - long expected; - if (name.equals("keyword")) { - expected = 0; - for (int i = 0; i < INDEX_SIZE; i++) { - expected += i % 1000; - } - } else { - expected = INDEX_SIZE; - expected = expected * (expected - 1) / 2; + long expected = 0; + switch (name) { + case "keyword", "stored_keyword": + for (int i = 0; i < INDEX_SIZE; i++) { + expected += i % 1000; + } + break; + case "3_stored_keywords": + for (int i = 0; i < INDEX_SIZE; i++) { + expected += 3 * (i % 1000); + } + break; + default: + expected = INDEX_SIZE; + expected = expected * (expected - 1) / 2; } if (expected != sum) { throw new AssertionError("[" + layout + "][" + name + "] expected [" + expected + "] but was [" + sum + "]"); } + boolean foundStoredFieldLoader = false; + ValuesSourceReaderOperator.Status status = (ValuesSourceReaderOperator.Status) op.status(); + for (Map.Entry e : status.readersBuilt().entrySet()) { + if (e.getKey().indexOf("stored_fields") >= 0) { + foundStoredFieldLoader = true; + } + } + if (name.indexOf("stored") >= 0) { + if (foundStoredFieldLoader == false) { + throw new AssertionError("expected to use a stored field loader but only had: " + status.readersBuilt()); + } + } else { + if (foundStoredFieldLoader) { + throw new AssertionError("expected not to use a stored field loader but only had: " + status.readersBuilt()); + } + } } @Setup @@ -195,15 +343,23 @@ public void setup() throws IOException { private void setupIndex() throws IOException { directory = new ByteBuffersDirectory(); + FieldType keywordFieldType = new FieldType(KeywordFieldMapper.Defaults.FIELD_TYPE); + keywordFieldType.setStored(true); + keywordFieldType.freeze(); try (IndexWriter iw = new IndexWriter(directory, new IndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE))) { for (int i = 0; i < INDEX_SIZE; i++) { String c = Character.toString('a' - ((i % 1000) % 26) + 26); iw.addDocument( List.of( new NumericDocValuesField("long", i), + new StoredField("long", i), new NumericDocValuesField("int", i), + new StoredField("int", i), new NumericDocValuesField("double", NumericUtils.doubleToSortableLong(i)), - new KeywordFieldMapper.KeywordField("keyword", new BytesRef(c + i % 1000), 
KeywordFieldMapper.Defaults.FIELD_TYPE) + new StoredField("double", (double) i), + new KeywordFieldMapper.KeywordField("keyword_1", new BytesRef(c + i % 1000), keywordFieldType), + new KeywordFieldMapper.KeywordField("keyword_2", new BytesRef(c + i % 1000), keywordFieldType), + new KeywordFieldMapper.KeywordField("keyword_3", new BytesRef(c + i % 1000), keywordFieldType) ) ); if (i % COMMIT_INTERVAL == 0) { diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java index 5b139f800cb39..8e60a7435cbc7 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java @@ -34,6 +34,7 @@ import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.support.NestedScope; +import org.elasticsearch.indices.breaker.CircuitBreakerMetrics; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -108,7 +109,12 @@ public class AggConstructionContentionBenchmark { @Setup public void setup() { breakerService = switch (breaker) { - case "real", "preallocate" -> new HierarchyCircuitBreakerService(Settings.EMPTY, List.of(), clusterSettings); + case "real", "preallocate" -> new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, + Settings.EMPTY, + List.of(), + clusterSettings + ); case "noop" -> new NoneCircuitBreakerService(); default -> throw new UnsupportedOperationException(); }; diff --git a/build-tools-internal/build.gradle b/build-tools-internal/build.gradle index c134638bcd6b6..66001e66f2486 100644 --- a/build-tools-internal/build.gradle +++ b/build-tools-internal/build.gradle @@ -35,6 +35,10 @@ gradlePlugin { id = 'elasticsearch.build' implementationClass = 'org.elasticsearch.gradle.internal.BuildPlugin' } + buildComplete { + id = 'elasticsearch.build-complete' + implementationClass = 'org.elasticsearch.gradle.internal.ElasticsearchBuildCompletePlugin' + } distro { id = 'elasticsearch.distro' implementationClass = 'org.elasticsearch.gradle.internal.distribution.ElasticsearchDistributionPlugin' @@ -158,7 +162,7 @@ gradlePlugin { stringTemplate { id = 'elasticsearch.string-templates' implementationClass = 'org.elasticsearch.gradle.internal.StringTemplatePlugin' - } + } testFixtures { id = 'elasticsearch.test.fixtures' implementationClass = 'org.elasticsearch.gradle.internal.testfixtures.TestFixturesPlugin' @@ -266,6 +270,8 @@ dependencies { api buildLibs.apache.rat api buildLibs.jna api buildLibs.shadow.plugin + api buildLibs.gradle.enterprise + // for our ide tweaking api buildLibs.idea.ext // When upgrading forbidden apis, ensure dependency version is bumped in ThirdPartyPrecommitPlugin as well @@ -280,6 +286,7 @@ dependencies { api buildLibs.asm.tree api buildLibs.httpclient api buildLibs.httpcore + compileOnly buildLibs.checkstyle runtimeOnly "org.elasticsearch.gradle:reaper:$version" testImplementation buildLibs.checkstyle diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractRestResourcesFuncTest.groovy 
b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractRestResourcesFuncTest.groovy index 2756b9745bc7f..21582b6823b81 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractRestResourcesFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractRestResourcesFuncTest.groovy @@ -31,7 +31,8 @@ abstract class AbstractRestResourcesFuncTest extends AbstractGradleFuncTest { } """ - subProject(":distribution:archives:integ-test-zip") << "configurations { extracted }" + subProject(":distribution:archives:integ-test-zip") << "configurations.create('extracted')\n" + subProject(":distribution:archives:integ-test-zip") << "configurations.create('default')\n" } void setupRestResources(List apis, List tests = [], List xpackTests = []) { diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/BuildPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/BuildPluginFuncTest.groovy index e31594ad2e4a6..96e342e995a36 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/BuildPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/BuildPluginFuncTest.groovy @@ -31,7 +31,7 @@ class BuildPluginFuncTest extends AbstractGradleFuncTest { Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - + 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright @@ -39,7 +39,7 @@ class BuildPluginFuncTest extends AbstractGradleFuncTest { documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. - + THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. @@ -58,11 +58,11 @@ class BuildPluginFuncTest extends AbstractGradleFuncTest { id 'java' id 'elasticsearch.global-build-info' } - + apply plugin:'elasticsearch.build' group = 'org.acme' description = "some example project" - + repositories { maven { name = "local-test" @@ -73,7 +73,7 @@ class BuildPluginFuncTest extends AbstractGradleFuncTest { } mavenCentral() } - + dependencies { jarHell 'org.elasticsearch:elasticsearch-core:current' } @@ -89,7 +89,7 @@ class BuildPluginFuncTest extends AbstractGradleFuncTest { * Side Public License, v 1. 
*/ package org.elasticsearch; - + public class SampleClass { } """.stripIndent() @@ -117,7 +117,7 @@ class BuildPluginFuncTest extends AbstractGradleFuncTest { noticeFile.set(file("NOTICE")) """ when: - def result = gradleRunner("assemble").build() + def result = gradleRunner("assemble", "-x", "generateHistoricalFeaturesMetadata").build() then: result.task(":assemble").outcome == TaskOutcome.SUCCESS file("build/distributions/hello-world.jar").exists() @@ -146,7 +146,7 @@ class BuildPluginFuncTest extends AbstractGradleFuncTest { } licenseFile.set(file("LICENSE")) noticeFile.set(file("NOTICE")) - + tasks.named("forbiddenApisMain").configure {enabled = false } tasks.named('checkstyleMain').configure { enabled = false } tasks.named('loggerUsageCheck').configure { enabled = false } diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy index e17f9c7537777..9d32eaadf7aec 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy @@ -29,7 +29,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { id 'elasticsearch.java' id 'elasticsearch.publish' } - + version = "1.0" group = 'org.acme' description = "custom project description" @@ -92,11 +92,11 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { id 'elasticsearch.publish' id 'com.github.johnrengelman.shadow' } - + repositories { mavenCentral() } - + dependencies { implementation 'org.slf4j:log4j-over-slf4j:1.7.30' shadow 'org.slf4j:slf4j-api:1.7.30' @@ -110,8 +110,8 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { } } version = "1.0" - group = 'org.acme' - description = 'some description' + group = 'org.acme' + description = 'some description' """ when: @@ -179,7 +179,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { } dependencies { - shadow project(":someLib") + shadow project(":someLib") } publishing { repositories { @@ -192,10 +192,10 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { allprojects { apply plugin: 'elasticsearch.java' version = "1.0" - group = 'org.acme' + group = 'org.acme' } - description = 'some description' + description = 'some description' """ when: @@ -263,13 +263,13 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { id 'elasticsearch.publish' id 'com.github.johnrengelman.shadow' } - + esplugin { name = 'hello-world-plugin' classname 'org.acme.HelloWorldPlugin' description = "custom project description" } - + publishing { repositories { maven { @@ -277,17 +277,17 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { } } } - + // requires elasticsearch artifact available tasks.named('bundlePlugin').configure { enabled = false } licenseFile.set(file('license.txt')) noticeFile.set(file('notice.txt')) version = "1.0" - group = 'org.acme' + group = 'org.acme' """ when: - def result = gradleRunner('assemble', '--stacktrace').build() + def result = gradleRunner('assemble', '--stacktrace', '-x', 'generateHistoricalFeaturesMetadata').build() then: result.task(":generatePom").outcome == TaskOutcome.SUCCESS @@ -348,19 +348,19 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { id 'elasticsearch.internal-es-plugin' id 'elasticsearch.publish' } - + esplugin { name = 'hello-world-plugin' classname 
'org.acme.HelloWorldPlugin' description = "custom project description" } - + // requires elasticsearch artifact available tasks.named('bundlePlugin').configure { enabled = false } licenseFile.set(file('license.txt')) noticeFile.set(file('notice.txt')) version = "2.0" - group = 'org.acme' + group = 'org.acme' """ when: @@ -420,9 +420,9 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { apply plugin:'elasticsearch.publish' version = "1.0" - group = 'org.acme' + group = 'org.acme' description = "just a test project" - + ext.projectLicenses.set(['The Apache Software License, Version 2.0': 'http://www.apache.org/licenses/LICENSE-2.0']) """ diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy index 888c0cc83fc15..94fa329af1715 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy @@ -24,8 +24,6 @@ class LegacyYamlRestTestPluginFuncTest extends AbstractRestResourcesFuncTest { def "yamlRestTest does nothing when there are no tests"() { given: - // RestIntegTestTask not cc compatible due to - configurationCacheCompatible = false buildFile << """ plugins { id 'elasticsearch.legacy-yaml-rest-test' @@ -43,8 +41,6 @@ class LegacyYamlRestTestPluginFuncTest extends AbstractRestResourcesFuncTest { def "yamlRestTest executes and copies api and tests to correct source set"() { given: - // RestIntegTestTask not cc compatible due to - configurationCacheCompatible = false internalBuild() buildFile << """ apply plugin: 'elasticsearch.legacy-yaml-rest-test' @@ -56,9 +52,10 @@ class LegacyYamlRestTestPluginFuncTest extends AbstractRestResourcesFuncTest { // can't actually spin up test cluster from this test tasks.withType(Test).configureEach{ enabled = false } + def clazzpath = sourceSets.yamlRestTest.runtimeClasspath tasks.register("printYamlRestTestClasspath").configure { doLast { - println sourceSets.yamlRestTest.runtimeClasspath.asPath + println clazzpath.asPath } } """ diff --git a/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle b/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle deleted file mode 100644 index 1a0afe6d7d344..0000000000000 --- a/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -import org.elasticsearch.gradle.util.GradleUtils - -import java.nio.file.Files - -String buildNumber = System.getenv('BUILD_NUMBER') ?: System.getenv('BUILDKITE_BUILD_NUMBER') -String performanceTest = System.getenv('BUILD_PERFORMANCE_TEST') -Boolean isNested = System.getProperty("scan.tag.NESTED") != null - -if (buildNumber && performanceTest == null && GradleUtils.isIncludedBuild(project) == false && isNested == false) { - def uploadFilePath = "build/${buildNumber}.tar.bz2" - File uploadFile = file(uploadFilePath) - project.gradle.buildFinished { result -> - println "build complete, generating: $uploadFile" - if (uploadFile.exists()) { - project.delete(uploadFile) - } - - try { - ant.tar(destfile: uploadFile, compression: "bzip2", longfile: "gnu") { - fileset(dir: projectDir) { - Set fileSet = fileTree(projectDir) { - include("**/*.hprof") - include("**/build/test-results/**/*.xml") - include("**/build/testclusters/**") - include("**/build/testrun/*/temp/**") - include("**/build/**/hs_err_pid*.log") - exclude("**/build/testclusters/**/data/**") - exclude("**/build/testclusters/**/distro/**") - exclude("**/build/testclusters/**/repo/**") - exclude("**/build/testclusters/**/extract/**") - exclude("**/build/testclusters/**/tmp/**") - exclude("**/build/testrun/*/temp/**/data/**") - exclude("**/build/testrun/*/temp/**/distro/**") - exclude("**/build/testrun/*/temp/**/repo/**") - exclude("**/build/testrun/*/temp/**/extract/**") - exclude("**/build/testrun/*/temp/**/tmp/**") - } - .files - .findAll { Files.isRegularFile(it.toPath()) } - - if (fileSet.empty) { - // In cases where we don't match any workspace files, exclude everything - ant.exclude(name: "**/*") - } else { - fileSet.each { - ant.include(name: projectDir.toPath().relativize(it.toPath())) - } - } - } - - fileset(dir: "${gradle.gradleUserHomeDir}/daemon/${gradle.gradleVersion}", followsymlinks: false) { - include(name: "**/daemon-${ProcessHandle.current().pid()}*.log") - } - - fileset(dir: "${gradle.gradleUserHomeDir}/workers", followsymlinks: false) - - fileset(dir: "${project.projectDir}/.gradle/reaper", followsymlinks: false, erroronmissingdir: false) - } - } catch (Exception e) { - logger.lifecycle("Failed to archive additional logs", e) - } - - if (uploadFile.exists() && System.getenv("BUILDKITE") == "true") { - try { - println "Uploading buildkite artifact: ${uploadFilePath}..." - new ProcessBuilder("buildkite-agent", "artifact", "upload", uploadFilePath) - .start() - .waitFor() - - println "Generating buildscan link for artifact..." 
- - def process = new ProcessBuilder("buildkite-agent", "artifact", "search", uploadFilePath, "--step", System.getenv('BUILDKITE_JOB_ID'), "--format", "%i").start() - process.waitFor() - def artifactUuid = (process.text ?: "").trim() - - println "Artifact UUID: ${artifactUuid}" - if (artifactUuid) { - buildScan.link 'Artifact Upload', "https://buildkite.com/organizations/elastic/pipelines/${System.getenv('BUILDKITE_PIPELINE_SLUG')}/builds/${buildNumber}/jobs/${System.getenv('BUILDKITE_JOB_ID')}/artifacts/${artifactUuid}" - } - } catch (Exception e) { - logger.lifecycle("Failed to upload buildkite artifact", e) - } - } - } -} diff --git a/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle b/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle index 2bb00faae38be..b7f3932effa96 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle @@ -132,6 +132,7 @@ buildScan { } buildFinished { result -> + buildScanPublished { scan -> // Attach build scan link as build metadata // See: https://buildkite.com/docs/pipelines/build-meta-data diff --git a/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle b/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle index 08fbc5b67e978..f691d4bd996a7 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle @@ -23,9 +23,15 @@ if (BuildParams.inFipsJvm) { File fipsSecurity = new File(fipsResourcesDir, javaSecurityFilename) File fipsPolicy = new File(fipsResourcesDir, 'fips_java.policy') File fipsTrustStore = new File(fipsResourcesDir, 'cacerts.bcfks') - def bcFips = dependencies.create('org.bouncycastle:bc-fips:1.0.2') - def bcTlsFips = dependencies.create('org.bouncycastle:bctls-fips:1.0.9') - + def bcFips = dependencies.create('org.bouncycastle:bc-fips:1.0.2.4') + def bcTlsFips = dependencies.create('org.bouncycastle:bctls-fips:1.0.17') + def manualDebug = false; //change this to manually debug bouncy castle in an IDE + if(manualDebug) { + bcFips = dependencies.create('org.bouncycastle:bc-fips-debug:1.0.2.4') + bcTlsFips = dependencies.create('org.bouncycastle:bctls-fips:1.0.17'){ + exclude group: 'org.bouncycastle', module: 'bc-fips' // to avoid jar hell + } + } pluginManager.withPlugin('java-base') { TaskProvider fipsResourcesTask = project.tasks.register('fipsResources', ExportElasticsearchBuildResourcesTask) fipsResourcesTask.configure { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java index f709600fc7979..63147040a289d 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java @@ -13,6 +13,7 @@ import org.elasticsearch.gradle.internal.conventions.util.Util; import org.elasticsearch.gradle.internal.info.BuildParams; import org.elasticsearch.gradle.internal.precommit.JarHellPrecommitPlugin; +import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin; import org.elasticsearch.gradle.plugin.PluginBuildPlugin; import org.elasticsearch.gradle.plugin.PluginPropertiesExtension; import org.elasticsearch.gradle.testclusters.ElasticsearchCluster; @@ -36,6 +37,7 @@ public void 
apply(Project project) { project.getPluginManager().apply(PluginBuildPlugin.class); project.getPluginManager().apply(JarHellPrecommitPlugin.class); project.getPluginManager().apply(ElasticsearchJavaPlugin.class); + project.getPluginManager().apply(HistoricalFeaturesMetadataPlugin.class); // Clear default dependencies added by public PluginBuildPlugin as we add our // own project dependencies for internal builds // TODO remove once we removed default dependencies from PluginBuildPlugin @@ -72,13 +74,12 @@ public void doCall() { } }); + boolean isModule = GradleUtils.isModuleProject(project.getPath()); + boolean isXPackModule = isModule && project.getPath().startsWith(":x-pack"); + if (isModule == false || isXPackModule) { + addNoticeGeneration(project, extension); + } project.afterEvaluate(p -> { - boolean isModule = GradleUtils.isModuleProject(p.getPath()); - boolean isXPackModule = isModule && p.getPath().startsWith(":x-pack"); - if (isModule == false || isXPackModule) { - addNoticeGeneration(p, extension); - } - @SuppressWarnings("unchecked") NamedDomainObjectContainer testClusters = (NamedDomainObjectContainer) project .getExtensions() diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java index 6849796579ad9..6c7bc6753531c 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java @@ -11,6 +11,7 @@ import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.internal.precommit.InternalPrecommitTasks; import org.elasticsearch.gradle.internal.snyk.SnykDependencyMonitoringGradlePlugin; +import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin; import org.gradle.api.InvalidUserDataException; import org.gradle.api.Plugin; import org.gradle.api.Project; @@ -61,6 +62,7 @@ public void apply(final Project project) { project.getPluginManager().apply(ElasticsearchJavadocPlugin.class); project.getPluginManager().apply(DependenciesInfoPlugin.class); project.getPluginManager().apply(SnykDependencyMonitoringGradlePlugin.class); + project.getPluginManager().apply(HistoricalFeaturesMetadataPlugin.class); InternalPrecommitTasks.create(project, true); configureLicenseAndNotice(project); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java new file mode 100644 index 0000000000000..4902168d9b4ff --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java @@ -0,0 +1,220 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.gradle.internal; + +import com.gradle.scan.plugin.BuildScanExtension; + +import org.apache.commons.compress.archivers.tar.TarArchiveEntry; +import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; +import org.apache.commons.compress.compressors.bzip2.BZip2CompressorOutputStream; +import org.apache.commons.io.IOUtils; +import org.elasticsearch.gradle.util.GradleUtils; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.file.FileSystemOperations; +import org.gradle.api.flow.FlowAction; +import org.gradle.api.flow.FlowParameters; +import org.gradle.api.flow.FlowProviders; +import org.gradle.api.flow.FlowScope; +import org.gradle.api.internal.file.FileOperations; +import org.gradle.api.provider.ListProperty; +import org.gradle.api.provider.Property; +import org.gradle.api.tasks.Input; +import org.jetbrains.annotations.NotNull; + +import java.io.*; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; + +import javax.inject.Inject; + +public abstract class ElasticsearchBuildCompletePlugin implements Plugin { + + @Inject + protected abstract FlowScope getFlowScope(); + + @Inject + protected abstract FlowProviders getFlowProviders(); + + @Inject + protected abstract FileOperations getFileOperations(); + + @Override + public void apply(Project target) { + String buildNumber = System.getenv("BUILD_NUMBER") != null + ? System.getenv("BUILD_NUMBER") + : System.getenv("BUILDKITE_BUILD_NUMBER"); + String performanceTest = System.getenv("BUILD_PERFORMANCE_TEST"); + if (buildNumber != null && performanceTest == null && GradleUtils.isIncludedBuild(target) == false) { + File targetFile = target.file("build/" + buildNumber + ".tar.bz2"); + File projectDir = target.getProjectDir(); + File gradleWorkersDir = new File(target.getGradle().getGradleUserHomeDir(), "workers/"); + BuildScanExtension extension = target.getExtensions().getByType(BuildScanExtension.class); + File daemonsLogDir = new File(target.getGradle().getGradleUserHomeDir(), "daemon/" + target.getGradle().getGradleVersion()); + + getFlowScope().always(BuildFinishedFlowAction.class, spec -> { + spec.getParameters().getBuildScan().set(extension); + spec.getParameters().getUploadFile().set(targetFile); + spec.getParameters().getProjectDir().set(projectDir); + spec.getParameters().getFilteredFiles().addAll(getFlowProviders().getBuildWorkResult().map((result) -> { + System.out.println("Build Finished Action: Collecting archive files..."); + List files = new ArrayList<>(); + files.addAll(resolveProjectLogs(projectDir)); + if (files.isEmpty() == false) { + files.addAll(resolveDaemonLogs(daemonsLogDir)); + files.addAll(getFileOperations().fileTree(gradleWorkersDir).getFiles()); + files.addAll(getFileOperations().fileTree(new File(projectDir, ".gradle/reaper/")).getFiles()); + } + return files; + })); + }); + } + } + + private List resolveProjectLogs(File projectDir) { + var projectDirFiles = getFileOperations().fileTree(projectDir); + projectDirFiles.include("**/*.hprof"); + projectDirFiles.include("**/build/test-results/**/*.xml"); + projectDirFiles.include("**/build/testclusters/**"); + projectDirFiles.include("**/build/testrun/*/temp/**"); + projectDirFiles.include("**/build/**/hs_err_pid*.log"); + projectDirFiles.exclude("**/build/testclusters/**/data/**"); + projectDirFiles.exclude("**/build/testclusters/**/distro/**"); + projectDirFiles.exclude("**/build/testclusters/**/repo/**"); + 
projectDirFiles.exclude("**/build/testclusters/**/extract/**"); + projectDirFiles.exclude("**/build/testclusters/**/tmp/**"); + projectDirFiles.exclude("**/build/testrun/*/temp/**/data/**"); + projectDirFiles.exclude("**/build/testrun/*/temp/**/distro/**"); + projectDirFiles.exclude("**/build/testrun/*/temp/**/repo/**"); + projectDirFiles.exclude("**/build/testrun/*/temp/**/extract/**"); + projectDirFiles.exclude("**/build/testrun/*/temp/**/tmp/**"); + return projectDirFiles.getFiles().stream().filter(f -> Files.isRegularFile(f.toPath())).toList(); + } + + private List resolveDaemonLogs(File daemonsLogDir) { + var gradleDaemonFileSet = getFileOperations().fileTree(daemonsLogDir); + gradleDaemonFileSet.include("**/daemon-" + ProcessHandle.current().pid() + "*.log"); + return gradleDaemonFileSet.getFiles().stream().filter(f -> Files.isRegularFile(f.toPath())).toList(); + } + + public abstract static class BuildFinishedFlowAction implements FlowAction { + interface Parameters extends FlowParameters { + @Input + Property getUploadFile(); + + @Input + Property getProjectDir(); + + @Input + ListProperty getFilteredFiles(); + + @Input + Property getBuildScan(); + + } + + @Inject + protected abstract FileSystemOperations getFileSystemOperations(); + + @SuppressWarnings("checkstyle:DescendantToken") + @Override + public void execute(BuildFinishedFlowAction.Parameters parameters) throws FileNotFoundException { + File uploadFile = parameters.getUploadFile().get(); + if (uploadFile.exists()) { + getFileSystemOperations().delete(spec -> spec.delete(uploadFile)); + } + uploadFile.getParentFile().mkdirs(); + createBuildArchiveTar(parameters.getFilteredFiles().get(), parameters.getProjectDir().get(), uploadFile); + if (uploadFile.exists() && System.getenv("BUILDKITE").equals("true")) { + String uploadFilePath = "build/" + uploadFile.getName(); + try { + System.out.println("Uploading buildkite artifact: " + uploadFilePath + "..."); + new ProcessBuilder("buildkite-agent", "artifact", "upload", uploadFilePath).start().waitFor(); + + System.out.println("Generating buildscan link for artifact..."); + + Process process = new ProcessBuilder( + "buildkite-agent", + "artifact", + "search", + uploadFilePath, + "--step", + System.getenv("BUILDKITE_JOB_ID"), + "--format", + "%i" + ).start(); + process.waitFor(); + String processOutput; + try { + processOutput = IOUtils.toString(process.getInputStream()); + } catch (IOException e) { + processOutput = ""; + } + String artifactUuid = processOutput.trim(); + + System.out.println("Artifact UUID: " + artifactUuid); + if (artifactUuid.isEmpty() == false) { + String buildkitePipelineSlug = System.getenv("BUILDKITE_PIPELINE_SLUG"); + String targetLink = "https://buildkite.com/organizations/elastic/pipelines/" + + buildkitePipelineSlug + + "/builds/" + + System.getenv("BUILD_NUMBER") + + "/jobs/" + + System.getenv("BUILDKITE_JOB_ID") + + "/artifacts/" + + artifactUuid; + parameters.getBuildScan().get().link("Artifact Upload", targetLink); + } + } catch (Exception e) { + System.out.println("Failed to upload buildkite artifact " + e.getMessage()); + } + } + + } + + private static void createBuildArchiveTar(List files, File projectDir, File uploadFile) { + try ( + OutputStream fOut = Files.newOutputStream(uploadFile.toPath()); + BufferedOutputStream buffOut = new BufferedOutputStream(fOut); + BZip2CompressorOutputStream bzOut = new BZip2CompressorOutputStream(buffOut); + TarArchiveOutputStream tOut = new TarArchiveOutputStream(bzOut) + ) { + Path projectPath = 
projectDir.toPath(); + tOut.setLongFileMode(TarArchiveOutputStream.LONGFILE_GNU); + tOut.setBigNumberMode(TarArchiveOutputStream.BIGNUMBER_STAR); + for (Path path : files.stream().map(File::toPath).toList()) { + if (!Files.isRegularFile(path)) { + throw new IOException("Support only file!"); + } + + TarArchiveEntry tarEntry = new TarArchiveEntry(path.toFile(), calculateArchivePath(path, projectPath)); + tarEntry.setSize(Files.size(path)); + tOut.putArchiveEntry(tarEntry); + + // copy file to TarArchiveOutputStream + Files.copy(path, tOut); + tOut.closeArchiveEntry(); + + } + tOut.flush(); + tOut.finish(); + + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @NotNull + private static String calculateArchivePath(Path path, Path projectPath) { + return path.startsWith(projectPath) ? projectPath.relativize(path).toString() : path.getFileName().toString(); + } + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java index 7a5bead71fb0e..4f9a7284c83e1 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java @@ -91,7 +91,6 @@ private static void disableTransitiveDependenciesForSourceSet(Project project, S List sourceSetConfigurationNames = List.of( sourceSet.getApiConfigurationName(), sourceSet.getImplementationConfigurationName(), - sourceSet.getImplementationConfigurationName(), sourceSet.getCompileOnlyConfigurationName(), sourceSet.getRuntimeOnlyConfigurationName() ); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmptyDirTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmptyDirTask.java index 867ccb203de0d..15a224b0ff206 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmptyDirTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmptyDirTask.java @@ -9,7 +9,7 @@ import org.gradle.api.DefaultTask; import org.gradle.api.tasks.Input; -import org.gradle.api.tasks.Internal; +import org.gradle.api.tasks.OutputDirectory; import org.gradle.api.tasks.TaskAction; import org.gradle.internal.file.Chmod; @@ -39,7 +39,7 @@ public Chmod getChmod() { throw new UnsupportedOperationException(); } - @Internal + @OutputDirectory public File getDir() { return dir; } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionArchiveCheckPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionArchiveCheckPlugin.java index d249cf756ca8d..6fafe513662c5 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionArchiveCheckPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionArchiveCheckPlugin.java @@ -207,29 +207,12 @@ private static void assertLinesInFile(Path path, List expectedLines) { } } - private static boolean toolExists(Project project) { - if (project.getName().contains("tar")) { - return tarExists(); - } else { - assert project.getName().contains("zip"); - return zipExists(); - } - } - private static void assertNoClassFile(File file) { if (file.getName().endsWith(".class")) { throw new GradleException("Detected class file in distribution ('" + 
file.getName() + "')"); } } - private static boolean zipExists() { - return new File("/bin/unzip").exists() || new File("/usr/bin/unzip").exists() || new File("/usr/local/bin/unzip").exists(); - } - - private static boolean tarExists() { - return new File("/bin/tar").exists() || new File("/usr/bin/tar").exists() || new File("/usr/local/bin/tar").exists(); - } - private Object distTaskOutput(TaskProvider buildDistTask) { return new Callable() { @Override diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/NoticeTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/NoticeTask.java index 1e2506908d108..751ac92512dad 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/NoticeTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/NoticeTask.java @@ -16,11 +16,11 @@ import org.gradle.api.file.ProjectLayout; import org.gradle.api.file.SourceDirectorySet; import org.gradle.api.internal.file.FileOperations; -import org.gradle.api.model.ObjectFactory; import org.gradle.api.provider.ListProperty; import org.gradle.api.tasks.CacheableTask; import org.gradle.api.tasks.InputFile; import org.gradle.api.tasks.InputFiles; +import org.gradle.api.tasks.Internal; import org.gradle.api.tasks.Optional; import org.gradle.api.tasks.OutputFile; import org.gradle.api.tasks.PathSensitive; @@ -43,7 +43,7 @@ * A task to create a notice file which includes dependencies' notices. */ @CacheableTask -public class NoticeTask extends DefaultTask { +public abstract class NoticeTask extends DefaultTask { @InputFile @PathSensitive(PathSensitivity.RELATIVE) @@ -57,19 +57,17 @@ public class NoticeTask extends DefaultTask { /** * Directories to include notices from */ - private final ListProperty licensesDirs; + @Internal + abstract ListProperty getLicenseDirs(); private final FileOperations fileOperations; - private ObjectFactory objectFactory; @Inject - public NoticeTask(BuildLayout buildLayout, ProjectLayout projectLayout, FileOperations fileOperations, ObjectFactory objectFactory) { - this.objectFactory = objectFactory; + public NoticeTask(BuildLayout buildLayout, ProjectLayout projectLayout, FileOperations fileOperations) { this.fileOperations = fileOperations; setDescription("Create a notice file from dependencies"); // Default licenses directory is ${projectDir}/licenses (if it exists) - licensesDirs = objectFactory.listProperty(File.class); - licensesDirs.add(projectLayout.getProjectDirectory().dir("licenses").getAsFile()); + getLicenseDirs().add(projectLayout.getProjectDirectory().dir("licenses").getAsFile()); inputFile = new File(buildLayout.getRootDirectory(), "NOTICE.txt"); outputFile = projectLayout.getBuildDirectory().dir("notices/" + getName()).get().file("NOTICE.txt").getAsFile(); } @@ -78,7 +76,7 @@ public NoticeTask(BuildLayout buildLayout, ProjectLayout projectLayout, FileOper * Add notices from the specified directory. 
*/ public void licensesDir(File licensesDir) { - licensesDirs.add(licensesDir); + getLicenseDirs().add(licensesDir); } public void source(Object source) { @@ -185,7 +183,7 @@ public FileCollection getNoticeFiles() { } private List existingLicenseDirs() { - return licensesDirs.get().stream().filter(d -> d.exists()).collect(Collectors.toList()); + return getLicenseDirs().get().stream().filter(d -> d.exists()).collect(Collectors.toList()); } @InputFiles diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java index b32c566363e88..cafa02941d77c 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java @@ -28,22 +28,14 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag ListMultimap, String> map = ArrayListMultimap.create(1, 200); map.put(LegacyRestTestBasePlugin.class, ":docs"); map.put(LegacyRestTestBasePlugin.class, ":distribution:docker"); - map.put(LegacyRestTestBasePlugin.class, ":modules:analysis-common"); - map.put(LegacyRestTestBasePlugin.class, ":modules:ingest-attachment"); - map.put(LegacyRestTestBasePlugin.class, ":modules:ingest-common"); - map.put(LegacyRestTestBasePlugin.class, ":modules:ingest-user-agent"); - map.put(LegacyRestTestBasePlugin.class, ":modules:kibana"); map.put(LegacyRestTestBasePlugin.class, ":modules:lang-expression"); map.put(LegacyRestTestBasePlugin.class, ":modules:lang-mustache"); - map.put(LegacyRestTestBasePlugin.class, ":modules:lang-painless"); map.put(LegacyRestTestBasePlugin.class, ":modules:mapper-extras"); map.put(LegacyRestTestBasePlugin.class, ":modules:parent-join"); map.put(LegacyRestTestBasePlugin.class, ":modules:percolator"); map.put(LegacyRestTestBasePlugin.class, ":modules:rank-eval"); map.put(LegacyRestTestBasePlugin.class, ":modules:reindex"); - map.put(LegacyRestTestBasePlugin.class, ":modules:repository-s3"); map.put(LegacyRestTestBasePlugin.class, ":modules:repository-url"); - map.put(LegacyRestTestBasePlugin.class, ":modules:runtime-fields-common"); map.put(LegacyRestTestBasePlugin.class, ":modules:transport-netty4"); map.put(LegacyRestTestBasePlugin.class, ":plugins:analysis-icu"); map.put(LegacyRestTestBasePlugin.class, ":plugins:analysis-kuromoji"); @@ -74,14 +66,13 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":qa:system-indices"); map.put(LegacyRestTestBasePlugin.class, ":qa:unconfigured-node-name"); map.put(LegacyRestTestBasePlugin.class, ":qa:verify-version-constants"); + map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-apm-integration"); map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-delayed-aggs"); map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-die-with-dignity"); - map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-apm-integration"); map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-error-query"); map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-latency-simulating-directory"); map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-seek-tracking-directory"); map.put(LegacyRestTestBasePlugin.class, ":test:yaml-rest-runner"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin"); 
map.put(LegacyRestTestBasePlugin.class, ":distribution:archives:integ-test-zip"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:core"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ent-search"); @@ -92,17 +83,13 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:mapper-version"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:vector-tile"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:wildcard"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:kerberos-tests"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:mixed-tier-cluster"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:password-protected-keystore"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:reindex-tests-with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:repository-old-versions"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:rolling-upgrade"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:rolling-upgrade-basic"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:rolling-upgrade-multi-cluster"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:runtime-fields:core-with-mapped"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:runtime-fields:core-with-search"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:saml-idp-tests"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:security-example-spi-extension"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:security-setup-password-tests"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:smoke-test-plugins"); @@ -115,12 +102,10 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:multi-cluster-search-security:legacy-with-basic-license"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:multi-cluster-search-security:legacy-with-full-license"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:multi-cluster-search-security:legacy-with-restricted-trust"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:runtime-fields:with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:third-party:jira"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:third-party:pagerduty"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:third-party:slack"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:async-search:qa:rest"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:async-search:qa:security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:autoscaling:qa:rest"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ccr:qa:downgrade-to-basic-license"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ccr:qa:multi-cluster"); @@ -130,7 +115,6 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ccr:qa:security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:deprecation:qa:early-deprecation-rest"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:deprecation:qa:rest"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:downsample:qa:rest"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:downsample:qa:with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:enrich:qa:rest"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:enrich:qa:rest-with-advanced-security"); @@ -139,55 +123,40 @@ private static ListMultimap, String> 
createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:eql:qa:ccs-rolling-upgrade"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:eql:qa:correctness"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:eql:qa:mixed-node"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:eql:qa:multi-cluster-with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:multi-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:single-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:fleet:qa:rest"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:graph:qa:with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:identity-provider:qa:idp-rest-tests"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ilm:qa:multi-cluster"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ilm:qa:multi-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ilm:qa:rest"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ilm:qa:with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ml:qa:basic-multi-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ml:qa:disabled"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ml:qa:ml-with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ml:qa:multi-cluster-tests-with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ml:qa:native-multi-node-tests"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ml:qa:single-node-tests"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:repositories-metering-api:qa:s3"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:searchable-snapshots:qa:hdfs"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:searchable-snapshots:qa:minio"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:searchable-snapshots:qa:rest"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:searchable-snapshots:qa:s3"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:searchable-snapshots:qa:url"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:security:qa:operator-privileges-tests"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:security:qa:profile"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:security:qa:security-disabled"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:security:qa:smoke-test-all-realms"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:security:qa:tls-basic"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:shutdown:qa:multi-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:shutdown:qa:rolling-upgrade"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:slm:qa:multi-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:slm:qa:rest"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:slm:qa:with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:snapshot-based-recoveries:qa:fs"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:snapshot-based-recoveries:qa:license-enforcing"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:snapshot-based-recoveries:qa:s3"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:snapshot-repo-test-kit:qa:hdfs"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:snapshot-repo-test-kit:qa:minio"); 
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:snapshot-repo-test-kit:qa:rest"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:snapshot-repo-test-kit:qa:s3"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:jdbc:multi-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:jdbc:no-sql"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:jdbc:single-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:jdbc:security:with-ssl"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:jdbc:security:without-ssl"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:mixed-node"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:server:multi-cluster-with-security"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:server:multi-node"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:server:single-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:server:security:with-ssl"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:server:security:without-ssl"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:stack:qa:rest"); @@ -197,8 +166,8 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:transform:qa:single-node-tests"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:vector-tile:qa:multi-cluster"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:watcher:qa:rest"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:watcher:qa:with-monitoring"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:watcher:qa:with-security"); + map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:mixed-cluster"); return map; } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java index bb0b8dcf04437..d69a355a3595d 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java @@ -10,6 +10,7 @@ import de.thetaphi.forbiddenapis.Checker; import de.thetaphi.forbiddenapis.Constants; +import de.thetaphi.forbiddenapis.ForbiddenApiException; import de.thetaphi.forbiddenapis.Logger; import de.thetaphi.forbiddenapis.ParseException; import groovy.lang.Closure; @@ -43,6 +44,7 @@ import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.SkipWhenEmpty; import org.gradle.api.tasks.TaskAction; +import org.gradle.api.tasks.VerificationException; import org.gradle.api.tasks.VerificationTask; import org.gradle.api.tasks.util.PatternFilterable; import org.gradle.api.tasks.util.PatternSet; @@ -469,6 +471,8 @@ public void execute() { } checker.run(); writeMarker(getParameters().getSuccessMarker().getAsFile().get()); + } catch (ForbiddenApiException e) { + throw new VerificationException("Forbidden API verification failed", e); } catch (Exception e) { throw new RuntimeException(e); } finally { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java index a25ad34a241d4..d2ba86bb99cf2 100644 --- 
a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java @@ -218,7 +218,8 @@ public void runThirdPartyAudit() throws IOException { if (bogousExcludesCount != 0 && bogousExcludesCount == missingClassExcludes.size() + violationsExcludes.size()) { logForbiddenAPIsOutput(forbiddenApisOutput); throw new IllegalStateException( - "All excluded classes seem to have no issues. This is sometimes an indication that the check silently failed" + "All excluded classes seem to have no issues. This is sometimes an indication that the check silently failed " + + "or that exclusions are configured unnecessarily" ); } assertNoPointlessExclusions("are not missing", missingClassExcludes, missingClasses); @@ -261,10 +262,6 @@ private void logForbiddenAPIsOutput(String forbiddenApisOutput) { getLogger().error("Forbidden APIs output:\n{}==end of forbidden APIs==", forbiddenApisOutput); } - private void throwNotConfiguredCorrectlyException() { - throw new IllegalArgumentException("Audit of third party dependencies is not configured correctly"); - } - /** * Ideally we would do unpacking already via artifact transform and keep unpacked jars across builds. * At the moment transform target folder is not configurable and forbidden CLI only takes one common diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java index bcbe1740630ce..42d3a770dbbcc 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java @@ -101,7 +101,7 @@ public void apply(Project project) { addDistributionSysprop(t, DISTRIBUTION_SYSPROP, distribution::getFilepath); addDistributionSysprop(t, EXAMPLE_PLUGIN_SYSPROP, () -> examplePlugin.getSingleFile().toString()); t.exclude("**/PackageUpgradeTests.class"); - }, distribution.getArchiveDependencies(), examplePlugin.getDependencies()); + }, distribution, examplePlugin.getDependencies()); if (distribution.getPlatform() == Platform.WINDOWS) { windowsTestTasks.add(destructiveTask); @@ -235,6 +235,7 @@ private static ElasticsearchDistribution createDistro( d.setBundledJdk(bundledJdk); } d.setVersion(version); + d.setPreferArchive(true); }); // Allow us to gracefully omit building Docker distributions if Docker is not available on the system. diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataPlugin.java new file mode 100644 index 0000000000000..bd9df6d3903ca --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataPlugin.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.gradle.internal.test; + +import org.elasticsearch.gradle.dependencies.CompileOnlyResolvePlugin; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.artifacts.Configuration; +import org.gradle.api.artifacts.type.ArtifactTypeDefinition; +import org.gradle.api.tasks.SourceSet; +import org.gradle.api.tasks.SourceSetContainer; +import org.gradle.api.tasks.TaskProvider; + +import java.util.Map; + +/** + * Extracts historical feature metadata into a machine-readable format for use in backward compatibility testing. + */ +public class HistoricalFeaturesMetadataPlugin implements Plugin { + public static final String HISTORICAL_FEATURES_JSON = "historical-features.json"; + public static final String FEATURES_METADATA_TYPE = "features-metadata-json"; + public static final String FEATURES_METADATA_CONFIGURATION = "featuresMetadata"; + + @Override + public void apply(Project project) { + Configuration featureMetadataExtractorConfig = project.getConfigurations().create("featuresMetadataExtractor", c -> { + // Don't bother adding this dependency if the project doesn't exist which simplifies testing + if (project.findProject(":test:metadata-extractor") != null) { + c.defaultDependencies(d -> d.add(project.getDependencies().project(Map.of("path", ":test:metadata-extractor")))); + } + }); + + SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); + SourceSet mainSourceSet = sourceSets.getByName(SourceSet.MAIN_SOURCE_SET_NAME); + + TaskProvider generateTask = project.getTasks() + .register("generateHistoricalFeaturesMetadata", HistoricalFeaturesMetadataTask.class, task -> { + task.setClasspath( + featureMetadataExtractorConfig.plus(mainSourceSet.getRuntimeClasspath()) + .plus(project.getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME)) + ); + task.getOutputFile().convention(project.getLayout().getBuildDirectory().file(HISTORICAL_FEATURES_JSON)); + }); + + Configuration featuresMetadataArtifactConfig = project.getConfigurations().create(FEATURES_METADATA_CONFIGURATION, c -> { + c.setCanBeResolved(false); + c.setCanBeConsumed(true); + c.attributes(a -> { a.attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, FEATURES_METADATA_TYPE); }); + }); + + project.getArtifacts().add(featuresMetadataArtifactConfig.getName(), generateTask); + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataTask.java new file mode 100644 index 0000000000000..0891225d1e1ef --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataTask.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.gradle.internal.test; + +import org.elasticsearch.gradle.LoggedExec; +import org.gradle.api.DefaultTask; +import org.gradle.api.file.ConfigurableFileCollection; +import org.gradle.api.file.FileCollection; +import org.gradle.api.file.RegularFileProperty; +import org.gradle.api.tasks.CacheableTask; +import org.gradle.api.tasks.Classpath; +import org.gradle.api.tasks.OutputFile; +import org.gradle.api.tasks.TaskAction; +import org.gradle.process.ExecOperations; +import org.gradle.workers.WorkAction; +import org.gradle.workers.WorkParameters; +import org.gradle.workers.WorkerExecutor; + +import javax.inject.Inject; + +@CacheableTask +public abstract class HistoricalFeaturesMetadataTask extends DefaultTask { + private FileCollection classpath; + + @OutputFile + public abstract RegularFileProperty getOutputFile(); + + @Classpath + public FileCollection getClasspath() { + return classpath; + } + + public void setClasspath(FileCollection classpath) { + this.classpath = classpath; + } + + @Inject + public abstract WorkerExecutor getWorkerExecutor(); + + @TaskAction + public void execute() { + getWorkerExecutor().noIsolation().submit(HistoricalFeaturesMetadataWorkAction.class, params -> { + params.getClasspath().setFrom(getClasspath()); + params.getOutputFile().set(getOutputFile()); + }); + } + + public interface HistoricalFeaturesWorkParameters extends WorkParameters { + ConfigurableFileCollection getClasspath(); + + RegularFileProperty getOutputFile(); + } + + public abstract static class HistoricalFeaturesMetadataWorkAction implements WorkAction { + private final ExecOperations execOperations; + + @Inject + public HistoricalFeaturesMetadataWorkAction(ExecOperations execOperations) { + this.execOperations = execOperations; + } + + @Override + public void execute() { + LoggedExec.javaexec(execOperations, spec -> { + spec.getMainClass().set("org.elasticsearch.extractor.features.HistoricalFeaturesMetadataExtractor"); + spec.classpath(getParameters().getClasspath()); + spec.args(getParameters().getOutputFile().get().getAsFile().getAbsolutePath()); + }); + } + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java index be6e3eb377aa1..b29efbfab069f 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java @@ -29,11 +29,8 @@ import org.gradle.api.specs.NotSpec; import org.gradle.api.specs.Spec; import org.gradle.api.tasks.Sync; -import org.gradle.api.tasks.TaskContainer; import org.gradle.api.tasks.bundling.Zip; -import java.util.Collections; - import javax.inject.Inject; import static org.elasticsearch.gradle.internal.RestrictedBuildApiService.BUILD_API_RESTRICTIONS_SYS_PROPERTY; @@ -131,11 +128,7 @@ public void apply(Project project) { } private void configureCacheability(StandaloneRestIntegTestTask testTask) { - TaskContainer tasks = project.getTasks(); - Spec taskSpec = t -> tasks.withType(StandaloneRestIntegTestTask.class) - .stream() - .filter(task -> task != testTask) - .anyMatch(task -> Collections.disjoint(task.getClusters(), testTask.getClusters()) == false); + Spec taskSpec = task -> testTask.getClusters().stream().anyMatch(ElasticsearchCluster::isShared); testTask.getOutputs() .doNotCacheIf( "Caching disabled for this task 
since it uses a cluster shared by other tasks", diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithSslPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithSslPlugin.java index 524f3dfedf95f..833c7ad546a4a 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithSslPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithSslPlugin.java @@ -85,7 +85,7 @@ public void apply(Project project) { NamedDomainObjectContainer clusters = (NamedDomainObjectContainer) project .getExtensions() .getByName(TestClustersPlugin.EXTENSION_NAME); - clusters.all(c -> { + clusters.configureEach(c -> { if (BuildParams.isInFipsJvm()) { c.setting("xpack.security.transport.ssl.key", "test-node.key"); c.keystore("xpack.security.transport.ssl.secure_key_passphrase", "test-node-key-password"); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java index 32e7f10d14355..a7e72b55f9117 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java @@ -21,6 +21,7 @@ import org.elasticsearch.gradle.internal.ElasticsearchTestBasePlugin; import org.elasticsearch.gradle.internal.InternalDistributionDownloadPlugin; import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin; import org.elasticsearch.gradle.plugin.BasePluginBuildPlugin; import org.elasticsearch.gradle.plugin.PluginBuildPlugin; import org.elasticsearch.gradle.plugin.PluginPropertiesExtension; @@ -35,6 +36,7 @@ import org.gradle.api.Task; import org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.Dependency; +import org.gradle.api.artifacts.DependencySet; import org.gradle.api.artifacts.ProjectDependency; import org.gradle.api.artifacts.type.ArtifactTypeDefinition; import org.gradle.api.attributes.Attribute; @@ -74,6 +76,9 @@ public class RestTestBasePlugin implements Plugin { private static final String PLUGINS_CONFIGURATION = "clusterPlugins"; private static final String EXTRACTED_PLUGINS_CONFIGURATION = "extractedPlugins"; private static final Attribute CONFIGURATION_ATTRIBUTE = Attribute.of("test-cluster-artifacts", String.class); + private static final String FEATURES_METADATA_CONFIGURATION = "featuresMetadataDeps"; + private static final String DEFAULT_DISTRO_FEATURES_METADATA_CONFIGURATION = "defaultDistrofeaturesMetadataDeps"; + private static final String TESTS_FEATURES_METADATA_PATH = "tests.features.metadata.path"; private final ProviderFactory providerFactory; @@ -107,6 +112,36 @@ public void apply(Project project) { extractedPluginsConfiguration.extendsFrom(pluginsConfiguration); configureArtifactTransforms(project); + // Create configuration for aggregating historical feature metadata + FileCollection featureMetadataConfig = project.getConfigurations().create(FEATURES_METADATA_CONFIGURATION, c -> { + c.setCanBeConsumed(false); + c.setCanBeResolved(true); + c.attributes( + a -> a.attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, HistoricalFeaturesMetadataPlugin.FEATURES_METADATA_TYPE) + ); + c.defaultDependencies(d -> 
d.add(project.getDependencies().project(Map.of("path", ":server")))); + c.withDependencies(dependencies -> { + // We can't just use Configuration#extendsFrom() here as we'd inherit the wrong project configuration + copyDependencies(project, dependencies, modulesConfiguration); + copyDependencies(project, dependencies, pluginsConfiguration); + }); + }); + + FileCollection defaultDistroFeatureMetadataConfig = project.getConfigurations() + .create(DEFAULT_DISTRO_FEATURES_METADATA_CONFIGURATION, c -> { + c.setCanBeConsumed(false); + c.setCanBeResolved(true); + c.attributes( + a -> a.attribute( + ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, + HistoricalFeaturesMetadataPlugin.FEATURES_METADATA_TYPE + ) + ); + c.defaultDependencies( + d -> d.add(project.getDependencies().project(Map.of("path", ":distribution", "configuration", "featuresMetadata"))) + ); + }); + // For plugin and module projects, register the current project plugin bundle as a dependency project.getPluginManager().withPlugin("elasticsearch.esplugin", plugin -> { if (GradleUtils.isModuleProject(project.getPath())) { @@ -124,6 +159,10 @@ public void apply(Project project) { task.dependsOn(integTestDistro, modulesConfiguration); registerDistributionInputs(task, integTestDistro); + // Pass feature metadata on to tests + task.getInputs().files(featureMetadataConfig).withPathSensitivity(PathSensitivity.NONE); + nonInputSystemProperties.systemProperty(TESTS_FEATURES_METADATA_PATH, () -> featureMetadataConfig.getAsPath()); + // Enable parallel execution for these tests since each test gets its own cluster task.setMaxParallelForks(task.getProject().getGradle().getStartParameter().getMaxWorkerCount() / 2); nonInputSystemProperties.systemProperty(TESTS_MAX_PARALLEL_FORKS_SYSPROP, () -> String.valueOf(task.getMaxParallelForks())); @@ -163,6 +202,11 @@ public Void call(Object... args) { DEFAULT_DISTRIBUTION_SYSPROP, providerFactory.provider(() -> defaultDistro.getExtracted().getSingleFile().getPath()) ); + + // If we are using the default distribution we need to register all module feature metadata + task.getInputs().files(defaultDistroFeatureMetadataConfig).withPathSensitivity(PathSensitivity.NONE); + nonInputSystemProperties.systemProperty(TESTS_FEATURES_METADATA_PATH, defaultDistroFeatureMetadataConfig::getAsPath); + return null; } }); @@ -198,6 +242,14 @@ public Void call(Object... 
args) { }); } + private void copyDependencies(Project project, DependencySet dependencies, Configuration configuration) { + configuration.getDependencies() + .stream() + .filter(d -> d instanceof ProjectDependency) + .map(d -> project.getDependencies().project(Map.of("path", ((ProjectDependency) d).getDependencyProject().getPath()))) + .forEach(dependencies::add); + } + private ElasticsearchDistribution createDistribution(Project project, String name, String version) { return createDistribution(project, name, version, null); } diff --git a/build-tools-internal/src/main/resources/fips_java.policy b/build-tools-internal/src/main/resources/fips_java.policy index 4ef62e03c2546..bbfc1caf7593a 100644 --- a/build-tools-internal/src/main/resources/fips_java.policy +++ b/build-tools-internal/src/main/resources/fips_java.policy @@ -1,6 +1,10 @@ grant { permission java.security.SecurityPermission "putProviderProperty.BCFIPS"; permission java.security.SecurityPermission "putProviderProperty.BCJSSE"; + permission java.security.SecurityPermission "getProperty.keystore.type.compat"; + permission java.security.SecurityPermission "getProperty.jdk.tls.disabledAlgorithms"; + permission java.security.SecurityPermission "getProperty.jdk.certpath.disabledAlgorithms"; + permission java.security.SecurityPermission "getProperty.jdk.tls.server.defaultDHEParameters"; permission java.lang.RuntimePermission "getProtectionDomain"; permission java.util.PropertyPermission "java.runtime.name", "read"; permission org.bouncycastle.crypto.CryptoServicesPermission "tlsAlgorithmsEnabled"; diff --git a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt index 34f39bbc4ca54..48c888acd35e2 100644 --- a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt +++ b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt @@ -158,6 +158,8 @@ org.elasticsearch.cluster.ClusterState#compatibilityVersions() @defaultMessage ClusterFeatures#nodeFeatures is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster. org.elasticsearch.cluster.ClusterFeatures#nodeFeatures() +@defaultMessage ClusterFeatures#allNodeFeatures is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster. +org.elasticsearch.cluster.ClusterFeatures#allNodeFeatures() @defaultMessage ClusterFeatures#clusterHasFeature is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster. 
org.elasticsearch.cluster.ClusterFeatures#clusterHasFeature(org.elasticsearch.features.NodeFeature) diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index dc43523b747b3..98d3ad1eff10b 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -20,10 +20,9 @@ google_oauth_client = 1.34.1 antlr4 = 4.11.1 # when updating this version, you need to ensure compatibility with: -# - modules/ingest-attachment (transitive dependency, check the upstream POM) # - distribution/tools/plugin-cli # - x-pack/plugin/security -bouncycastle=1.64 +bouncycastle=1.76 # used by security and idp (need to be in sync due to cross-dependency in testing) opensaml = 4.3.0 @@ -42,6 +41,12 @@ junit5 = 5.7.1 hamcrest = 2.1 mocksocket = 1.2 +# test container dependencies +testcontainer = 1.19.2 +dockerJava = 3.3.4 +ductTape = 1.0.8 +commonsCompress = 1.24.0 + # benchmark dependencies jmh = 1.26 diff --git a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy index 719fae2b463c0..22efa8d08d3e7 100644 --- a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy +++ b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy @@ -166,7 +166,7 @@ class TestClustersPluginFuncTest extends AbstractGradleFuncTest { then: result.output.contains("Task ':myTask' is not up-to-date because:\n" + - " Input property 'clusters.myCluster\$0.nodes.\$0.$propertyName'") + " Input property 'clusters.myCluster\$0.$propertyName'") result.output.contains("elasticsearch-keystore script executed!") assertEsOutputContains("myCluster", "Starting Elasticsearch process") assertEsOutputContains("myCluster", "Stopping node") diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java index e12523870b15b..fb8416b24d052 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java @@ -79,6 +79,7 @@ public void apply(Project project) { } private void setupDistributionContainer(Project project, Property dockerAvailable) { + distributionsContainer = project.container(ElasticsearchDistribution.class, name -> { Configuration fileConfiguration = project.getConfigurations().create(DISTRO_CONFIG_PREFIX + name); Configuration extractedConfiguration = project.getConfigurations().create(DISTRO_EXTRACTED_CONFIG_PREFIX + name); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java b/build-tools/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java index f9805680ce8d4..fab6926008d6c 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java @@ -53,8 +53,9 @@ public String toString() { private final Property platform; private final Property bundledJdk; private final Property failIfUnavailable; + private final Property preferArchive; private final ConfigurableFileCollection extracted; - private Action distributionFinalizer; + private transient Action distributionFinalizer; private boolean frozen = false; ElasticsearchDistribution( @@ -75,6 +76,7 @@ public String 
toString() { this.platform = objectFactory.property(Platform.class); this.bundledJdk = objectFactory.property(Boolean.class); this.failIfUnavailable = objectFactory.property(Boolean.class).convention(true); + this.preferArchive = objectFactory.property(Boolean.class).convention(false); this.extracted = extractedConfiguration; this.distributionFinalizer = distributionFinalizer; } @@ -141,6 +143,14 @@ public void setFailIfUnavailable(boolean failIfUnavailable) { this.failIfUnavailable.set(failIfUnavailable); } + public boolean getPreferArchive() { + return preferArchive.get(); + } + + public void setPreferArchive(boolean preferArchive) { + this.preferArchive.set(preferArchive); + } + public void setArchitecture(Architecture architecture) { this.architecture.set(architecture); } @@ -188,7 +198,9 @@ public TaskDependency getBuildDependencies() { return task -> Collections.emptySet(); } else { maybeFreeze(); - return getType().shouldExtract() ? extracted.getBuildDependencies() : configuration.getBuildDependencies(); + return getType().shouldExtract() && (preferArchive.get() == false) + ? extracted.getBuildDependencies() + : configuration.getBuildDependencies(); } } @@ -253,13 +265,4 @@ void finalizeValues() { type.finalizeValue(); bundledJdk.finalizeValue(); } - - public TaskDependency getArchiveDependencies() { - if (skippingDockerDistributionBuild()) { - return task -> Collections.emptySet(); - } else { - maybeFreeze(); - return configuration.getBuildDependencies(); - } - } } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java index f6705bdb62faa..bf539efaf3c30 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java @@ -14,7 +14,11 @@ import org.gradle.api.Named; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Project; +import org.gradle.api.artifacts.Configuration; +import org.gradle.api.artifacts.Dependency; +import org.gradle.api.artifacts.type.ArtifactTypeDefinition; import org.gradle.api.file.ArchiveOperations; +import org.gradle.api.file.ConfigurableFileCollection; import org.gradle.api.file.FileCollection; import org.gradle.api.file.FileSystemOperations; import org.gradle.api.file.RegularFile; @@ -22,10 +26,15 @@ import org.gradle.api.logging.Logger; import org.gradle.api.logging.Logging; import org.gradle.api.provider.Provider; +import org.gradle.api.tasks.Classpath; +import org.gradle.api.tasks.InputFiles; import org.gradle.api.tasks.Internal; import org.gradle.api.tasks.Nested; +import org.gradle.api.tasks.PathSensitive; +import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.Sync; import org.gradle.api.tasks.TaskProvider; +import org.gradle.api.tasks.bundling.AbstractArchiveTask; import org.gradle.api.tasks.bundling.Zip; import org.gradle.process.ExecOperations; @@ -35,6 +44,8 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.security.GeneralSecurityException; +import java.util.Collection; +import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -46,6 +57,9 @@ import java.util.function.Supplier; import java.util.stream.Collectors; +import static org.elasticsearch.gradle.plugin.BasePluginBuildPlugin.EXPLODED_BUNDLE_CONFIG; +import static 
org.elasticsearch.gradle.testclusters.TestClustersPlugin.BUNDLE_ATTRIBUTE; + public class ElasticsearchCluster implements TestClusterConfiguration, Named { private static final Logger LOGGER = Logging.getLogger(ElasticsearchNode.class); @@ -59,7 +73,7 @@ public class ElasticsearchCluster implements TestClusterConfiguration, Named { private final FileOperations fileOperations; private final File workingDirBase; private final LinkedHashMap> waitConditions = new LinkedHashMap<>(); - private final Project project; + private final transient Project project; private final Provider reaper; private final FileSystemOperations fileSystemOperations; private final ArchiveOperations archiveOperations; @@ -68,6 +82,10 @@ public class ElasticsearchCluster implements TestClusterConfiguration, Named { private final Function isReleasedVersion; private int nodeIndex = 0; + private final ConfigurableFileCollection pluginAndModuleConfiguration; + + private boolean shared = false; + public ElasticsearchCluster( String path, String clusterName, @@ -93,6 +111,7 @@ public ElasticsearchCluster( this.runtimeJava = runtimeJava; this.isReleasedVersion = isReleasedVersion; this.nodes = project.container(ElasticsearchNode.class); + this.pluginAndModuleConfiguration = project.getObjects().fileCollection(); this.nodes.add( new ElasticsearchNode( safeName(clusterName), @@ -113,6 +132,29 @@ public ElasticsearchCluster( addWaitForClusterHealth(); } + /** + * This cluster is marked as shared across TestClustersAware tasks. + */ + @Internal + public boolean isShared() { + return shared; + } + + protected void setShared(boolean shared) { + this.shared = shared; + } + + @Classpath + public FileCollection getInstalledClasspath() { + return pluginAndModuleConfiguration.getAsFileTree().filter(f -> f.getName().endsWith(".jar")); + } + + @InputFiles + @PathSensitive(PathSensitivity.RELATIVE) + public FileCollection getInstalledFiles() { + return pluginAndModuleConfiguration.getAsFileTree().filter(f -> f.getName().endsWith(".jar") == false); + } + public void setNumberOfNodes(int numberOfNodes) { checkFrozen(); @@ -195,34 +237,70 @@ public void setTestDistribution(TestDistribution distribution) { nodes.all(each -> each.setTestDistribution(distribution)); } - @Override - public void plugin(Provider plugin) { - nodes.all(each -> each.plugin(plugin)); + private void registerExtractedConfig(Provider pluginProvider) { + Dependency pluginDependency = this.project.getDependencies().create(project.files(pluginProvider)); + Configuration extractedConfig = project.getConfigurations().detachedConfiguration(pluginDependency); + extractedConfig.getAttributes().attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE); + extractedConfig.getAttributes().attribute(BUNDLE_ATTRIBUTE, true); + pluginAndModuleConfiguration.from(extractedConfig); } @Override + public void plugin(String pluginProjectPath) { + plugin(maybeCreatePluginOrModuleDependency(pluginProjectPath, "zip")); + } + public void plugin(TaskProvider plugin) { - nodes.all(each -> each.plugin(plugin)); + plugin(plugin.flatMap(AbstractArchiveTask::getArchiveFile)); } @Override - public void plugin(String pluginProjectPath) { - nodes.all(each -> each.plugin(pluginProjectPath)); + public void plugin(Provider plugin) { + registerExtractedConfig(plugin); + nodes.all(each -> each.plugin(plugin)); } @Override public void module(Provider module) { + registerExtractedConfig(module); nodes.all(each -> each.module(module)); } - @Override public void
module(TaskProvider module) { - nodes.all(each -> each.module(module)); + module(project.getLayout().file(module.map(Sync::getDestinationDir))); } @Override public void module(String moduleProjectPath) { - nodes.all(each -> each.module(moduleProjectPath)); + module(maybeCreatePluginOrModuleDependency(moduleProjectPath, EXPLODED_BUNDLE_CONFIG)); + } + + private final Map pluginAndModuleConfigurations = new HashMap<>(); + + // package protected so only TestClustersAware can access + @Internal + Collection getPluginAndModuleConfigurations() { + return pluginAndModuleConfigurations.values(); + } + + // creates a configuration to depend on the given plugin project, then wraps that configuration + // to grab the zip as a file provider + private Provider maybeCreatePluginOrModuleDependency(String path, String consumingConfiguration) { + var configuration = pluginAndModuleConfigurations.computeIfAbsent(path, key -> { + var bundleDependency = this.project.getDependencies().project(Map.of("path", path, "configuration", consumingConfiguration)); + return project.getConfigurations().detachedConfiguration(bundleDependency); + }); + + Provider fileProvider = configuration.getElements() + .map( + s -> s.stream() + .findFirst() + .orElseThrow( + () -> new IllegalStateException(consumingConfiguration + " configuration of project " + path + " had no files") + ) + .getAsFile() + ); + return project.getLayout().file(fileProvider); } @Override @@ -579,4 +657,5 @@ public int hashCode() { public String toString() { return "cluster{" + path + ":" + clusterName + "}"; } + } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index f0ab67fe51a34..ce4fd7502f417 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -21,18 +21,12 @@ import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.distribution.ElasticsearchDistributionTypes; -import org.elasticsearch.gradle.transform.UnzipTransform; import org.elasticsearch.gradle.util.Pair; import org.gradle.api.Action; import org.gradle.api.Named; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Project; -import org.gradle.api.artifacts.Configuration; -import org.gradle.api.artifacts.Dependency; -import org.gradle.api.artifacts.type.ArtifactTypeDefinition; -import org.gradle.api.attributes.Attribute; import org.gradle.api.file.ArchiveOperations; -import org.gradle.api.file.ConfigurableFileCollection; import org.gradle.api.file.FileCollection; import org.gradle.api.file.FileSystemOperations; import org.gradle.api.file.FileTree; @@ -52,7 +46,6 @@ import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.Sync; import org.gradle.api.tasks.TaskProvider; -import org.gradle.api.tasks.bundling.AbstractArchiveTask; import org.gradle.api.tasks.bundling.Zip; import org.gradle.api.tasks.util.PatternFilterable; import org.gradle.process.ExecOperations; @@ -75,7 +68,6 @@ import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -98,7 +90,6 @@ import static java.util.Objects.requireNonNull; import static java.util.Optional.ofNullable; -import static 
org.elasticsearch.gradle.plugin.BasePluginBuildPlugin.EXPLODED_BUNDLE_CONFIG; public class ElasticsearchNode implements TestClusterConfiguration { @@ -130,7 +121,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { private final String path; private final String name; - private final Project project; + transient private final Project project; private final Provider reaperServiceProvider; private final FileSystemOperations fileSystemOperations; private final ArchiveOperations archiveOperations; @@ -140,8 +131,6 @@ public class ElasticsearchNode implements TestClusterConfiguration { private final Path workingDir; private final LinkedHashMap> waitConditions = new LinkedHashMap<>(); - private final Map pluginAndModuleConfigurations = new HashMap<>(); - private final ConfigurableFileCollection pluginAndModuleConfiguration; private final List> plugins = new ArrayList<>(); private final List> modules = new ArrayList<>(); private final LazyPropertyMap settings = new LazyPropertyMap<>("Settings", this); @@ -151,6 +140,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { private final LazyPropertyMap systemProperties = new LazyPropertyMap<>("System properties", this); private final LazyPropertyMap environment = new LazyPropertyMap<>("Environment", this); private final LazyPropertyList jvmArgs = new LazyPropertyList<>("JVM arguments", this); + private final LazyPropertyList cliJvmArgs = new LazyPropertyList<>("CLI JVM arguments", this); private final LazyPropertyMap extraConfigFiles = new LazyPropertyMap<>("Extra config files", this, FileEntry::new); private final LazyPropertyList extraJarConfigurations = new LazyPropertyList<>("Extra jar files", this); private final List> credentials = new ArrayList<>(); @@ -171,12 +161,10 @@ public class ElasticsearchNode implements TestClusterConfiguration { private final Provider runtimeJava; private final Function isReleasedVersion; private final List distributions = new ArrayList<>(); - private final Attribute bundleAttribute = Attribute.of("bundle", Boolean.class); - private int currentDistro = 0; private TestDistribution testDistribution; private volatile Process esProcess; - private Function nameCustomization = Function.identity(); + private Function nameCustomization = s -> s; private boolean isWorkingDirConfigured = false; private String httpPort = "0"; private String transportPort = "0"; @@ -223,10 +211,8 @@ public class ElasticsearchNode implements TestClusterConfiguration { waitConditions.put("ports files", this::checkPortsFilesExistWithDelay); defaultConfig.put("cluster.name", clusterName); - pluginAndModuleConfiguration = project.getObjects().fileCollection(); setTestDistribution(TestDistribution.INTEG_TEST); setVersion(VersionProperties.getElasticsearch()); - configureArtifactTransforms(); } @Input @@ -302,84 +288,34 @@ private void setDistributionType(ElasticsearchDistribution distribution, TestDis } } - // package protected so only TestClustersAware can access - @Internal - Collection getPluginAndModuleConfigurations() { - return pluginAndModuleConfigurations.values(); - } - - // creates a configuration to depend on the given plugin project, then wraps that configuration - // to grab the zip as a file provider - private Provider maybeCreatePluginOrModuleDependency(String path, String consumingConfiguration) { - var configuration = pluginAndModuleConfigurations.computeIfAbsent(path, key -> { - var bundleDependency = this.project.getDependencies().project(Map.of("path", path, "configuration", 
consumingConfiguration)); - return project.getConfigurations().detachedConfiguration(bundleDependency); - }); - - Provider fileProvider = configuration.getElements() - .map( - s -> s.stream() - .findFirst() - .orElseThrow( - () -> new IllegalStateException(consumingConfiguration + " configuration of project " + path + " had no files") - ) - .getAsFile() - ); - return project.getLayout().file(fileProvider); - } - @Override public void plugin(Provider plugin) { checkFrozen(); - registerExtractedConfig(plugin); this.plugins.add(plugin.map(RegularFile::getAsFile)); } @Override public void plugin(String pluginProjectPath) { - plugin(maybeCreatePluginOrModuleDependency(pluginProjectPath, "zip")); + throw new UnsupportedOperationException("Not Supported API"); } public void plugin(TaskProvider plugin) { - plugin(plugin.flatMap(AbstractArchiveTask::getArchiveFile)); + throw new UnsupportedOperationException("Not Supported API"); } @Override public void module(Provider module) { checkFrozen(); - registerExtractedConfig(module); this.modules.add(module.map(RegularFile::getAsFile)); } public void module(TaskProvider module) { - module(project.getLayout().file(module.map(Sync::getDestinationDir))); - } - - private void registerExtractedConfig(Provider pluginProvider) { - Dependency pluginDependency = this.project.getDependencies().create(project.files(pluginProvider)); - Configuration extractedConfig = project.getConfigurations().detachedConfiguration(pluginDependency); - extractedConfig.getAttributes().attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE); - extractedConfig.getAttributes().attribute(bundleAttribute, true); - pluginAndModuleConfiguration.from(extractedConfig); - } - - private void configureArtifactTransforms() { - project.getDependencies().getAttributesSchema().attribute(bundleAttribute); - project.getDependencies().getArtifactTypes().maybeCreate(ArtifactTypeDefinition.ZIP_TYPE); - project.getDependencies().registerTransform(UnzipTransform.class, transformSpec -> { - transformSpec.getFrom() - .attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.ZIP_TYPE) - .attribute(bundleAttribute, true); - transformSpec.getTo() - .attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE) - .attribute(bundleAttribute, true); - transformSpec.getParameters().setAsFiletreeOutput(true); - }); + throw new IllegalStateException("Not Supported API"); } @Override public void module(String moduleProjectPath) { - module(maybeCreatePluginOrModuleDependency(moduleProjectPath, EXPLODED_BUNDLE_CONFIG)); + throw new IllegalStateException("Not Supported API"); } @Override @@ -471,6 +407,10 @@ public void jvmArgs(String... values) { jvmArgs.addAll(Arrays.asList(values)); } + public void cliJvmArgs(String... values) { + cliJvmArgs.addAll(Arrays.asList(values)); + } + @Internal public Path getConfigDir() { return configFile.getParent(); @@ -932,6 +872,10 @@ private void startElasticsearchProcess() { // Don't inherit anything from the environment for as that would lack reproducibility environment.clear(); environment.putAll(getESEnvironment()); + if (cliJvmArgs.isEmpty() == false) { + String cliJvmArgsString = String.join(" ", cliJvmArgs); + environment.put("CLI_JAVA_OPTS", cliJvmArgsString); + } // Direct the stderr to the ES log file. This should capture any jvm problems to start. // Stdout is discarded because ES duplicates the log file to stdout when run in the foreground. 
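[Editor's aside, not part of the patch: a minimal, self-contained sketch of the CLI_JAVA_OPTS plumbing the hunks above introduce. The class name and the port value are illustrative; only cliJvmArgs, CLI_JAVA_OPTS, the JDWP agent string, and the 5107 base port come from this change.]

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative sketch: ElasticsearchNode collects per-node CLI JVM arguments
// (populated by enableCliDebug() when RunTask is invoked with --debug-cli-jvm)
// and exports them as a single CLI_JAVA_OPTS value, so the debug agent attaches
// to the CLI launcher JVM rather than the server JVM.
public class CliJvmOptsSketch {
    public static void main(String[] args) {
        List<String> cliJvmArgs = new ArrayList<>();
        // enableCliDebug() adds one JDWP agent per node, starting at port 5107
        cliJvmArgs.add("-agentlib:jdwp=transport=dt_socket,server=n,suspend=y,address=5107");

        Map<String, String> environment = new HashMap<>();
        if (cliJvmArgs.isEmpty() == false) {
            environment.put("CLI_JAVA_OPTS", String.join(" ", cliJvmArgs));
        }
        System.out.println(environment); // {CLI_JAVA_OPTS=-agentlib:jdwp=...}
    }
}

[With ./gradlew run --debug-cli-jvm, each node gets its own port (5107, 5108, ...), matching the enableCliDebug() default added to TestClustersAware further down.]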
@@ -1551,17 +1495,6 @@ private Path getExtractedDistributionDir() { return distributions.get(currentDistro).getExtracted().getSingleFile().toPath(); } - @Classpath - public FileCollection getInstalledClasspath() { - return pluginAndModuleConfiguration.getAsFileTree().filter(f -> f.getName().endsWith(".jar")); - } - - @InputFiles - @PathSensitive(PathSensitivity.RELATIVE) - public FileCollection getInstalledFiles() { - return pluginAndModuleConfiguration.getAsFileTree().filter(f -> f.getName().endsWith(".jar") == false); - } - @Classpath public List getDistributionClasspath() { return getDistributionFiles(filter -> filter.include("**/*.jar")); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/MockApmServer.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/MockApmServer.java index 7ec74ee19d1bb..7c1d4b6015d2e 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/MockApmServer.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/MockApmServer.java @@ -25,10 +25,10 @@ * This is a server which just accepts lines of JSON code and if the JSON * is valid and the root node is "transaction", then adds that JSON object * to a transaction list which is accessible externally to the class. - * + *
<p>
* The Elastic agent sends lines of JSON code, and so this mock server * can be used as a basic APM server for testing. - * + *
<p>
* The HTTP server used is the JDK embedded com.sun.net.httpserver */ public class MockApmServer { @@ -54,12 +54,16 @@ public static void main(String[] args) throws IOException, InterruptedException /** * Start the Mock APM server. Just returns empty JSON structures for every incoming message + * * @return - the port the Mock APM server started on * @throws IOException */ public synchronized int start() throws IOException { if (instance != null) { - throw new IOException("MockApmServer: Ooops, you can't start this instance more than once"); + String hostname = instance.getAddress().getHostName(); + int port = instance.getAddress().getPort(); + logger.lifecycle("MockApmServer is already running. Reusing on address:port " + hostname + ":" + port); + return port; } InetSocketAddress addr = new InetSocketAddress("0.0.0.0", port); HttpServer server = HttpServer.create(addr, 10); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java index 953c0447ec71b..86df3544ddfc6 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java @@ -40,6 +40,7 @@ public abstract class RunTask extends DefaultTestClustersTask { private static final String transportCertificate = "private-cert2.p12"; private Boolean debug = false; + private Boolean cliDebug = false; private Boolean apmServerEnabled = false; private Boolean preserveData = false; @@ -62,11 +63,21 @@ public void setDebug(boolean enabled) { this.debug = enabled; } + @Option(option = "debug-cli-jvm", description = "Enable debugging configuration, to allow attaching a debugger to the cli launcher.") + public void setCliDebug(boolean enabled) { + this.cliDebug = enabled; + } + @Input public Boolean getDebug() { return debug; } + @Input + public Boolean getCliDebug() { + return cliDebug; + } + @Input public Boolean getApmServerEnabled() { return apmServerEnabled; @@ -194,7 +205,10 @@ public void beforeStart() { } catch (IOException e) { logger.warn("Unable to start APM server", e); } - + } else if (node.getSettingKeys().contains("telemetry.metrics.enabled") == false) { + // in serverless metrics are enabled by default + // if metrics were not enabled explicitly for gradlew run we should disable them + node.setting("telemetry.metrics.enabled", "false"); } } @@ -202,6 +216,9 @@ public void beforeStart() { if (debug) { enableDebug(); } + if (cliDebug) { + enableCliDebug(); + } } @TaskAction diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java index ba2a5a20c4fbb..5e6b33aa980f0 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java @@ -69,13 +69,15 @@ public Collection getClusters() { @Override @Internal public List getSharedResources() { + // Since we need to have the buildservice registered for configuration cache compatibility, + // we already get one lock for throttle service List locks = new ArrayList<>(super.getSharedResources()); BuildServiceRegistryInternal serviceRegistry = getServices().get(BuildServiceRegistryInternal.class); BuildServiceProvider serviceProvider = serviceRegistry.consume(THROTTLE_SERVICE_NAME, 
TestClustersThrottle.class); SharedResource resource = serviceRegistry.forService(serviceProvider); int nodeCount = clusters.stream().mapToInt(cluster -> cluster.getNodes().size()).sum(); if (nodeCount > 0) { - for (int i = 0; i < Math.min(nodeCount, resource.getMaxUsages()); i++) { + for (int i = 0; i < Math.min(nodeCount, resource.getMaxUsages() - 1); i++) { locks.add(resource.getResourceLock()); } } @@ -88,6 +90,7 @@ public WorkResult delete(Object... objects) { @Override public void beforeStart() { + TestClustersAware.super.beforeStart(); if (debugServer) { enableDebug(); } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java index 09066d4b26e88..3fef77688c48d 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java @@ -8,16 +8,15 @@ package org.elasticsearch.gradle.testclusters; import org.gradle.api.Task; -import org.gradle.api.artifacts.Configuration; import org.gradle.api.provider.Property; import org.gradle.api.provider.Provider; import org.gradle.api.services.ServiceReference; import org.gradle.api.tasks.Nested; import java.util.Collection; -import java.util.concurrent.Callable; import static org.elasticsearch.gradle.testclusters.TestClustersPlugin.REGISTRY_SERVICE_NAME; +import static org.elasticsearch.gradle.testclusters.TestClustersPlugin.TEST_CLUSTER_TASKS_SERVICE; public interface TestClustersAware extends Task { @@ -27,6 +26,9 @@ public interface TestClustersAware extends Task { @ServiceReference(REGISTRY_SERVICE_NAME) Property getRegistery(); + @ServiceReference(TEST_CLUSTER_TASKS_SERVICE) + Property getTasksService(); + default void useCluster(ElasticsearchCluster cluster) { if (cluster.getPath().equals(getProject().getPath()) == false) { throw new TestClustersException("Task " + getPath() + " can't use test cluster from" + " another project " + cluster); @@ -34,7 +36,7 @@ default void useCluster(ElasticsearchCluster cluster) { cluster.getNodes() .all(node -> node.getDistributions().forEach(distro -> dependsOn(getProject().provider(() -> distro.maybeFreeze())))); - cluster.getNodes().all(node -> dependsOn((Callable>) node::getPluginAndModuleConfigurations)); + dependsOn(cluster.getPluginAndModuleConfigurations()); getClusters().add(cluster); } @@ -42,7 +44,9 @@ default void useCluster(Provider cluster) { useCluster(cluster.get()); } - default void beforeStart() {} + default void beforeStart() { + getTasksService().get().register(this); + } default void enableDebug() { int debugPort = 5007; @@ -54,4 +58,19 @@ default void enableDebug() { } } } + + default void enableCliDebug() { + int cliDebugPort = 5107; + for (ElasticsearchCluster cluster : getClusters()) { + for (ElasticsearchNode node : cluster.getNodes()) { + getLogger().lifecycle( + "Running cli launcher in debug mode, {} expecting running debug server on port {}", + node, + cliDebugPort + ); + node.cliJvmArgs("-agentlib:jdwp=transport=dt_socket,server=n,suspend=y,address=" + cliDebugPort); + cliDebugPort += 1; + } + } + } } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java index d2ccda1c1f8c7..d4ae65d43893a 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java +++ 
b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java @@ -11,11 +11,14 @@ import org.elasticsearch.gradle.ReaperPlugin; import org.elasticsearch.gradle.ReaperService; import org.elasticsearch.gradle.Version; +import org.elasticsearch.gradle.transform.UnzipTransform; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.Task; +import org.gradle.api.artifacts.type.ArtifactTypeDefinition; +import org.gradle.api.attributes.Attribute; import org.gradle.api.file.ArchiveOperations; import org.gradle.api.file.FileSystemOperations; import org.gradle.api.internal.file.FileOperations; @@ -37,7 +40,6 @@ import java.io.File; import java.util.HashMap; import java.util.Map; -import java.util.Set; import java.util.function.Function; import javax.inject.Inject; @@ -46,12 +48,15 @@ public class TestClustersPlugin implements Plugin { + public static final Attribute BUNDLE_ATTRIBUTE = Attribute.of("bundle", Boolean.class); + public static final String EXTENSION_NAME = "testClusters"; public static final String THROTTLE_SERVICE_NAME = "testClustersThrottle"; private static final String LIST_TASK_NAME = "listTestClusters"; public static final String REGISTRY_SERVICE_NAME = "testClustersRegistry"; private static final Logger logger = Logging.getLogger(TestClustersPlugin.class); + public static final String TEST_CLUSTER_TASKS_SERVICE = "testClusterTasksService"; private final ProviderFactory providerFactory; private Provider runtimeJavaProvider; private Function isReleasedVersion = v -> true; @@ -110,7 +115,7 @@ public void apply(Project project) { project.getGradle().getSharedServices().registerIfAbsent(REGISTRY_SERVICE_NAME, TestClustersRegistry.class, noop()); // register throttle so we only run at most max-workers/2 nodes concurrently - project.getGradle() + Provider testClustersThrottleProvider = project.getGradle() .getSharedServices() .registerIfAbsent( THROTTLE_SERVICE_NAME, @@ -118,8 +123,23 @@ public void apply(Project project) { spec -> spec.getMaxParallelUsages().set(Math.max(1, project.getGradle().getStartParameter().getMaxWorkerCount() / 2)) ); - // register cluster hooks + project.getTasks().withType(TestClustersAware.class).configureEach(task -> { task.usesService(testClustersThrottleProvider); }); project.getRootProject().getPluginManager().apply(TestClustersHookPlugin.class); + configureArtifactTransforms(project); + } + + private void configureArtifactTransforms(Project project) { + project.getDependencies().getAttributesSchema().attribute(BUNDLE_ATTRIBUTE); + project.getDependencies().getArtifactTypes().maybeCreate(ArtifactTypeDefinition.ZIP_TYPE); + project.getDependencies().registerTransform(UnzipTransform.class, transformSpec -> { + transformSpec.getFrom() + .attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.ZIP_TYPE) + .attribute(BUNDLE_ATTRIBUTE, true); + transformSpec.getTo() + .attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE) + .attribute(BUNDLE_ATTRIBUTE, true); + transformSpec.getParameters().setAsFiletreeOutput(true); + }); } private NamedDomainObjectContainer createTestClustersContainerExtension( @@ -157,13 +177,13 @@ private void createListClustersTask(Project project, NamedDomainObjectContainer< (Task t) -> container.forEach(cluster -> logger.lifecycle(" * {}: {}", cluster.getName(), cluster.getNumberOfNodes())) ); }); - } static abstract 
class TestClustersHookPlugin implements Plugin { @Inject public abstract BuildEventsListenerRegistry getEventsListenerRegistry(); + @SuppressWarnings("checkstyle:RedundantModifier") @Inject public TestClustersHookPlugin() {} @@ -178,10 +198,9 @@ public void apply(Project project) { Provider testClusterTasksService = project.getGradle() .getSharedServices() - .registerIfAbsent("testClusterTasksService", TaskEventsService.class, spec -> {}); + .registerIfAbsent(TEST_CLUSTER_TASKS_SERVICE, TaskEventsService.class, spec -> {}); TestClustersRegistry registry = registryProvider.get(); - // When we know what tasks will run, we claim the clusters of those tasks to differentiate between clusters // that are defined in the build script and the ones that will actually be used in this invocation of gradle // we use this information to determine when the last task that required the cluster executed so that we can @@ -220,7 +239,6 @@ private void configureStartClustersHook( .filter(task -> task instanceof TestClustersAware) .map(task -> (TestClustersAware) task) .forEach(awareTask -> { - testClusterTasksService.get().register(awareTask.getPath(), awareTask); awareTask.doFirst(task -> { awareTask.beforeStart(); awareTask.getClusters().forEach(awareTask.getRegistery().get()::maybeStartCluster); @@ -230,25 +248,17 @@ } } - public static void maybeStartCluster(ElasticsearchCluster cluster, Set runningClusters) { - if (runningClusters.contains(cluster)) { - return; - } - runningClusters.add(cluster); - cluster.start(); - } - static public abstract class TaskEventsService implements BuildService, OperationCompletionListener { Map tasksMap = new HashMap<>(); private TestClustersRegistry registryProvider; - public void register(String path, TestClustersAware task) { - tasksMap.put(path, task); + public void register(TestClustersAware task) { + tasksMap.put(task.getPath(), task); } public void registry(TestClustersRegistry registry) { - registryProvider = registry; + this.registryProvider = registry; } @Override diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java index b46e86ca84bdd..1d6efdabcd59f 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java @@ -22,11 +22,16 @@ public abstract class TestClustersRegistry implements BuildService claimsInventory = new HashMap<>(); + private final Set runningClusters = new HashSet<>(); public void claimCluster(ElasticsearchCluster cluster) { cluster.freeze(); - claimsInventory.put(cluster, claimsInventory.getOrDefault(cluster, 0) + 1); + int claim = claimsInventory.getOrDefault(cluster, 0) + 1; + claimsInventory.put(cluster, claim); + if (claim > 1) { + cluster.setShared(true); + } } public void maybeStartCluster(ElasticsearchCluster cluster) { @@ -63,7 +68,6 @@ public void stopCluster(ElasticsearchCluster cluster, boolean taskFailed) { } else { int currentClaims = claimsInventory.getOrDefault(cluster, 0) - 1; claimsInventory.put(cluster, currentClaims); - if (currentClaims <= 0 && runningClusters.contains(cluster)) { cluster.stop(false); runningClusters.remove(cluster); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersThrottle.java
b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersThrottle.java index 6ba3261691cb9..4dfd950e0aaf1 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersThrottle.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersThrottle.java @@ -10,4 +10,6 @@ import org.gradle.api.services.BuildService; import org.gradle.api.services.BuildServiceParameters; -public abstract class TestClustersThrottle implements BuildService {} +public abstract class TestClustersThrottle implements BuildService { + +} diff --git a/build.gradle b/build.gradle index d05c2bf53f660..c0b613beefea4 100644 --- a/build.gradle +++ b/build.gradle @@ -29,8 +29,8 @@ plugins { id 'lifecycle-base' id 'elasticsearch.docker-support' id 'elasticsearch.global-build-info' - id 'elasticsearch.build-scan' id 'elasticsearch.build-complete' + id 'elasticsearch.build-scan' id 'elasticsearch.jdk-download' id 'elasticsearch.internal-distribution-download' id 'elasticsearch.runtime-jdk-provision' @@ -161,8 +161,10 @@ tasks.register("verifyVersions") { String versionMapping = backportConfig.get("branchLabelMapping").fields().find { it.value.textValue() == 'main' }.key String expectedMapping = "^v${versions.elasticsearch.replaceAll('-SNAPSHOT', '')}\$" if (versionMapping != expectedMapping) { - throw new GradleException("Backport label mapping for branch 'main' is '${versionMapping}' but should be " + - "'${expectedMapping}'. Update .backportrc.json.") + throw new GradleException( + "Backport label mapping for branch 'main' is '${versionMapping}' but should be " + + "'${expectedMapping}'. Update .backportrc.json." + ) } } } @@ -211,9 +213,9 @@ allprojects { project.ext { // for ide hacks... isEclipse = providers.systemProperty("eclipse.launcher").isPresent() || // Detects gradle launched from Eclipse's IDE - providers.systemProperty("eclipse.application").isPresent() || // Detects gradle launched from the Eclipse compiler server - gradle.startParameter.taskNames.contains('eclipse') || // Detects gradle launched from the command line to do eclipse stuff - gradle.startParameter.taskNames.contains('cleanEclipse') + providers.systemProperty("eclipse.application").isPresent() || // Detects gradle launched from the Eclipse compiler server + gradle.startParameter.taskNames.contains('eclipse') || // Detects gradle launched from the command line to do eclipse stuff + gradle.startParameter.taskNames.contains('cleanEclipse') } ext.bwc_tests_enabled = bwc_tests_enabled @@ -229,10 +231,10 @@ allprojects { eclipse.classpath.file.whenMerged { classpath -> if (false == forbiddenApisTest.bundledSignatures.contains('jdk-non-portable')) { classpath.entries - .findAll { it.kind == "con" && it.toString().contains("org.eclipse.jdt.launching.JRE_CONTAINER") } - .each { - it.accessRules.add(new AccessRule("accessible", "com/sun/net/httpserver/*")) - } + .findAll { it.kind == "con" && it.toString().contains("org.eclipse.jdt.launching.JRE_CONTAINER") } + .each { + it.accessRules.add(new AccessRule("accessible", "com/sun/net/httpserver/*")) + } } } } @@ -248,6 +250,8 @@ allprojects { plugins.withId('lifecycle-base') { if (project.path.startsWith(":x-pack:")) { if (project.path.contains("security") || project.path.contains(":ml")) { + tasks.register('checkPart4') { dependsOn 'check' } + } else if (project.path == ":x-pack:plugin" || project.path.contains("ql") || project.path.contains("smoke-test")) { tasks.register('checkPart3') { dependsOn 'check' } } else { 
tasks.register('checkPart2') { dependsOn 'check' } @@ -256,7 +260,7 @@ allprojects { tasks.register('checkPart1') { dependsOn 'check' } } - tasks.register('functionalTests') { dependsOn 'check'} + tasks.register('functionalTests') { dependsOn 'check' } } /* @@ -281,7 +285,7 @@ allprojects { // :test:framework:test cannot run before and after :server:test return } - tasks.matching { it.name.equals('integTest')}.configureEach {integTestTask -> + tasks.matching { it.name.equals('integTest') }.configureEach { integTestTask -> integTestTask.mustRunAfter tasks.matching { it.name.equals("test") } } @@ -290,7 +294,7 @@ allprojects { Project upstreamProject = dep.dependencyProject if (project.path != upstreamProject?.path) { for (String taskName : ['test', 'integTest']) { - project.tasks.matching { it.name == taskName }.configureEach {task -> + project.tasks.matching { it.name == taskName }.configureEach { task -> task.shouldRunAfter(upstreamProject.tasks.matching { upStreamTask -> upStreamTask.name == taskName }) } } diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index 743f64b3b28d3..bcbc73f643298 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -61,11 +61,6 @@ tasks.named('forbiddenApisMain').configure { signaturesFiles += files('src/main/resources/forbidden/rest-high-level-signatures.txt') } -tasks.named('splitPackagesAudit').configure { - // the client package should be owned by the client, but server has some classes there too - ignoreClasses 'org.elasticsearch.client.*' -} - // we don't have tests now, as HLRC is in the process of being removed tasks.named("test").configure {enabled = false } diff --git a/client/rest-high-level/roles.yml b/client/rest-high-level/roles.yml deleted file mode 100644 index d3d0630f43058..0000000000000 --- a/client/rest-high-level/roles.yml +++ /dev/null @@ -1,12 +0,0 @@ -admin: - cluster: - - all - indices: - - names: '*' - privileges: - - all - run_as: [ '*' ] - applications: - - application: '*' - privileges: [ '*' ] - resources: [ '*' ] diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java deleted file mode 100644 index fca1e5d29efaf..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ /dev/null @@ -1,554 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.client; - -import org.apache.http.HttpEntity; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NByteArrayEntity; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchScrollRequest; -import org.elasticsearch.action.support.ActiveShardCount; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; -import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.lucene.uid.Versions; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.VersionType; -import org.elasticsearch.index.seqno.SequenceNumbers; -import org.elasticsearch.rest.action.search.RestSearchAction; -import org.elasticsearch.xcontent.DeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentType; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.nio.charset.Charset; -import java.util.HashMap; -import java.util.Locale; -import java.util.Map; -import java.util.StringJoiner; - -final class RequestConverters { - static final XContentType REQUEST_BODY_CONTENT_TYPE = XContentType.JSON; - - private RequestConverters() { - // Contains only status utility methods - } - - static Request bulk(BulkRequest bulkRequest) throws IOException { - Request request = new Request(HttpPost.METHOD_NAME, "/_bulk"); - - Params parameters = new Params(); - parameters.withTimeout(bulkRequest.timeout()); - parameters.withRefreshPolicy(bulkRequest.getRefreshPolicy()); - parameters.withPipeline(bulkRequest.pipeline()); - parameters.withRouting(bulkRequest.routing()); - // Bulk API only supports newline delimited JSON or Smile. Before executing - // the bulk, we need to check that all requests have the same content-type - // and this content-type is supported by the Bulk API. 
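A note on the framing described in the comment above: the converter emits one metadata line per action, optionally followed by a source line, each terminated by the content-type's stream separator, which is what makes the body newline-delimited. The sketch below illustrates that framing with plain JDK types only; it is an editorial illustration under assumed names (NdjsonBulkBodySketch, body) and is not part of this patch.

import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;

final class NdjsonBulkBodySketch {
    // Join pre-serialized metadata/source lines into one newline-delimited body.
    static byte[] body(String... lines) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        for (String line : lines) {
            byte[] bytes = line.getBytes(StandardCharsets.UTF_8);
            out.write(bytes, 0, bytes.length);
            out.write('\n'); // every line, including the last, ends with the separator
        }
        return out.toByteArray();
    }

    public static void main(String[] args) {
        byte[] body = body(
            "{\"index\":{\"_index\":\"logs\",\"_id\":\"1\"}}", // action metadata line
            "{\"message\":\"hello\"}"                          // document source line
        );
        System.out.print(new String(body, StandardCharsets.UTF_8));
    }
}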
- XContentType bulkContentType = null; - for (int i = 0; i < bulkRequest.numberOfActions(); i++) { - DocWriteRequest action = bulkRequest.requests().get(i); - - DocWriteRequest.OpType opType = action.opType(); - if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { - bulkContentType = enforceSameContentType((IndexRequest) action, bulkContentType); - - } else if (opType == DocWriteRequest.OpType.UPDATE) { - UpdateRequest updateRequest = (UpdateRequest) action; - if (updateRequest.doc() != null) { - bulkContentType = enforceSameContentType(updateRequest.doc(), bulkContentType); - } - if (updateRequest.upsertRequest() != null) { - bulkContentType = enforceSameContentType(updateRequest.upsertRequest(), bulkContentType); - } - } - } - - if (bulkContentType == null) { - bulkContentType = XContentType.JSON; - } - - final byte separator = bulkContentType.xContent().streamSeparator(); - final ContentType requestContentType = createContentType(bulkContentType); - - ByteArrayOutputStream content = new ByteArrayOutputStream(); - for (DocWriteRequest action : bulkRequest.requests()) { - DocWriteRequest.OpType opType = action.opType(); - - try (XContentBuilder metadata = XContentBuilder.builder(bulkContentType.xContent())) { - metadata.startObject(); - { - metadata.startObject(opType.getLowercase()); - if (Strings.hasLength(action.index())) { - metadata.field("_index", action.index()); - } - if (Strings.hasLength(action.id())) { - metadata.field("_id", action.id()); - } - if (Strings.hasLength(action.routing())) { - metadata.field("routing", action.routing()); - } - if (action.version() != Versions.MATCH_ANY) { - metadata.field("version", action.version()); - } - - VersionType versionType = action.versionType(); - if (versionType != VersionType.INTERNAL) { - if (versionType == VersionType.EXTERNAL) { - metadata.field("version_type", "external"); - } else if (versionType == VersionType.EXTERNAL_GTE) { - metadata.field("version_type", "external_gte"); - } - } - - if (action.ifSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { - metadata.field("if_seq_no", action.ifSeqNo()); - metadata.field("if_primary_term", action.ifPrimaryTerm()); - } - - if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { - IndexRequest indexRequest = (IndexRequest) action; - if (Strings.hasLength(indexRequest.getPipeline())) { - metadata.field("pipeline", indexRequest.getPipeline()); - } - } else if (opType == DocWriteRequest.OpType.UPDATE) { - UpdateRequest updateRequest = (UpdateRequest) action; - if (updateRequest.retryOnConflict() > 0) { - metadata.field("retry_on_conflict", updateRequest.retryOnConflict()); - } - if (updateRequest.fetchSource() != null) { - metadata.field("_source", updateRequest.fetchSource()); - } - } - metadata.endObject(); - } - metadata.endObject(); - - BytesRef metadataSource = BytesReference.bytes(metadata).toBytesRef(); - content.write(metadataSource.bytes, metadataSource.offset, metadataSource.length); - content.write(separator); - } - - BytesRef source = null; - if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { - IndexRequest indexRequest = (IndexRequest) action; - BytesReference indexSource = indexRequest.source(); - XContentType indexXContentType = indexRequest.getContentType(); - - try ( - XContentParser parser = XContentHelper.createParser( - /* - * EMPTY and THROW are fine here because we just call - * copyCurrentStructure which doesn't touch the - * registry or deprecation. 
- */ - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - indexSource, - indexXContentType - ) - ) { - try (XContentBuilder builder = XContentBuilder.builder(bulkContentType.xContent())) { - builder.copyCurrentStructure(parser); - source = BytesReference.bytes(builder).toBytesRef(); - } - } - } else if (opType == DocWriteRequest.OpType.UPDATE) { - source = XContentHelper.toXContent((UpdateRequest) action, bulkContentType, false).toBytesRef(); - } - - if (source != null) { - content.write(source.bytes, source.offset, source.length); - content.write(separator); - } - } - request.addParameters(parameters.asMap()); - request.setEntity(new NByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType)); - return request; - } - - static Request index(IndexRequest indexRequest) { - String method = Strings.hasLength(indexRequest.id()) ? HttpPut.METHOD_NAME : HttpPost.METHOD_NAME; - - String endpoint; - if (indexRequest.opType() == DocWriteRequest.OpType.CREATE) { - endpoint = endpoint(indexRequest.index(), "_create", indexRequest.id()); - } else { - endpoint = endpoint(indexRequest.index(), indexRequest.id()); - } - - Request request = new Request(method, endpoint); - - Params parameters = new Params(); - parameters.withRouting(indexRequest.routing()); - parameters.withTimeout(indexRequest.timeout()); - parameters.withVersion(indexRequest.version()); - parameters.withVersionType(indexRequest.versionType()); - parameters.withIfSeqNo(indexRequest.ifSeqNo()); - parameters.withIfPrimaryTerm(indexRequest.ifPrimaryTerm()); - parameters.withPipeline(indexRequest.getPipeline()); - parameters.withRefreshPolicy(indexRequest.getRefreshPolicy()); - parameters.withWaitForActiveShards(indexRequest.waitForActiveShards()); - parameters.withRequireAlias(indexRequest.isRequireAlias()); - - BytesRef source = indexRequest.source().toBytesRef(); - ContentType contentType = createContentType(indexRequest.getContentType()); - request.addParameters(parameters.asMap()); - request.setEntity(new NByteArrayEntity(source.bytes, source.offset, source.length, contentType)); - return request; - } - - /** - * Convert a {@linkplain SearchRequest} into a {@linkplain Request}. - * @param searchRequest the request to convert - * @param searchEndpoint the name of the search endpoint. {@literal _search} - * for standard searches and {@literal _rollup_search} for rollup - * searches. 
- */ - static Request search(SearchRequest searchRequest, String searchEndpoint) throws IOException { - Request request = new Request(HttpPost.METHOD_NAME, endpoint(searchRequest.indices(), searchEndpoint)); - - Params params = new Params(); - addSearchRequestParams(params, searchRequest); - - if (searchRequest.source() != null) { - request.setEntity(createEntity(searchRequest.source(), REQUEST_BODY_CONTENT_TYPE)); - } - request.addParameters(params.asMap()); - return request; - } - - static void addSearchRequestParams(Params params, SearchRequest searchRequest) { - params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true"); - params.withRouting(searchRequest.routing()); - params.withPreference(searchRequest.preference()); - if (SearchRequest.DEFAULT_INDICES_OPTIONS.equals(searchRequest.indicesOptions()) == false) { - params.withIndicesOptions(searchRequest.indicesOptions()); - } - params.withSearchType(searchRequest.searchType().name().toLowerCase(Locale.ROOT)); - if (searchRequest.isCcsMinimizeRoundtrips() != SearchRequest.defaultCcsMinimizeRoundtrips(searchRequest)) { - params.putParam("ccs_minimize_roundtrips", Boolean.toString(searchRequest.isCcsMinimizeRoundtrips())); - } - if (searchRequest.getPreFilterShardSize() != null) { - params.putParam("pre_filter_shard_size", Integer.toString(searchRequest.getPreFilterShardSize())); - } - params.withMaxConcurrentShardRequests(searchRequest.getMaxConcurrentShardRequests()); - if (searchRequest.requestCache() != null) { - params.withRequestCache(searchRequest.requestCache()); - } - if (searchRequest.allowPartialSearchResults() != null) { - params.withAllowPartialResults(searchRequest.allowPartialSearchResults()); - } - params.withBatchedReduceSize(searchRequest.getBatchedReduceSize()); - if (searchRequest.scroll() != null) { - params.putParam("scroll", searchRequest.scroll().keepAlive()); - } - } - - static Request searchScroll(SearchScrollRequest searchScrollRequest) throws IOException { - Request request = new Request(HttpPost.METHOD_NAME, "/_search/scroll"); - request.setEntity(createEntity(searchScrollRequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - - static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException { - return createEntity(toXContent, xContentType, ToXContent.EMPTY_PARAMS); - } - - static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType, ToXContent.Params toXContentParams) - throws IOException { - BytesRef source = XContentHelper.toXContent(toXContent, xContentType, toXContentParams, false).toBytesRef(); - return new NByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); - } - - @Deprecated - static String endpoint(String index, String type, String id) { - return new EndpointBuilder().addPathPart(index, type, id).build(); - } - - static String endpoint(String index, String id) { - return new EndpointBuilder().addPathPart(index, "_doc", id).build(); - } - - @Deprecated - static String endpoint(String index, String type, String id, String endpoint) { - return new EndpointBuilder().addPathPart(index, type, id).addPathPartAsIs(endpoint).build(); - } - - static String endpoint(String[] indices, String endpoint) { - return new EndpointBuilder().addCommaSeparatedPathParts(indices).addPathPartAsIs(endpoint).build(); - } - - @Deprecated - static String endpoint(String[] indices, String[] types, String endpoint) { - return new EndpointBuilder().addCommaSeparatedPathParts(indices) - .addCommaSeparatedPathParts(types) - 
.addPathPartAsIs(endpoint) - .build(); - } - - @Deprecated - static String endpoint(String[] indices, String endpoint, String type) { - return new EndpointBuilder().addCommaSeparatedPathParts(indices).addPathPartAsIs(endpoint).addPathPart(type).build(); - } - - /** - * Returns a {@link ContentType} from a given {@link XContentType}. - * - * @param xContentType the {@link XContentType} - * @return the {@link ContentType} - */ - @SuppressForbidden(reason = "Only allowed place to convert a XContentType to a ContentType") - public static ContentType createContentType(final XContentType xContentType) { - return ContentType.create(xContentType.mediaTypeWithoutParameters(), (Charset) null); - } - - /** - * Utility class to help with common parameter names and patterns. Wraps - * a {@link Request} and adds the parameters to it directly. - */ - static class Params { - private final Map parameters = new HashMap<>(); - - Params() {} - - Params putParam(String name, String value) { - if (Strings.hasLength(value)) { - parameters.put(name, value); - } - return this; - } - - Params putParam(String key, TimeValue value) { - if (value != null) { - return putParam(key, value.getStringRep()); - } - return this; - } - - Map asMap() { - return parameters; - } - - Params withPipeline(String pipeline) { - return putParam("pipeline", pipeline); - } - - Params withPreference(String preference) { - return putParam("preference", preference); - } - - Params withSearchType(String searchType) { - return putParam("search_type", searchType); - } - - Params withMaxConcurrentShardRequests(int maxConcurrentShardRequests) { - return putParam("max_concurrent_shard_requests", Integer.toString(maxConcurrentShardRequests)); - } - - Params withBatchedReduceSize(int batchedReduceSize) { - return putParam("batched_reduce_size", Integer.toString(batchedReduceSize)); - } - - Params withRequestCache(boolean requestCache) { - return putParam("request_cache", Boolean.toString(requestCache)); - } - - Params withAllowPartialResults(boolean allowPartialSearchResults) { - return putParam("allow_partial_search_results", Boolean.toString(allowPartialSearchResults)); - } - - Params withRefreshPolicy(RefreshPolicy refreshPolicy) { - if (refreshPolicy != RefreshPolicy.NONE) { - return putParam("refresh", refreshPolicy.getValue()); - } - return this; - } - - Params withRouting(String routing) { - return putParam("routing", routing); - } - - Params withTimeout(TimeValue timeout) { - return putParam("timeout", timeout); - } - - Params withVersion(long version) { - if (version != Versions.MATCH_ANY) { - return putParam("version", Long.toString(version)); - } - return this; - } - - Params withVersionType(VersionType versionType) { - if (versionType != VersionType.INTERNAL) { - return putParam("version_type", versionType.name().toLowerCase(Locale.ROOT)); - } - return this; - } - - Params withIfSeqNo(long ifSeqNo) { - if (ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) { - return putParam("if_seq_no", Long.toString(ifSeqNo)); - } - return this; - } - - Params withIfPrimaryTerm(long ifPrimaryTerm) { - if (ifPrimaryTerm != SequenceNumbers.UNASSIGNED_PRIMARY_TERM) { - return putParam("if_primary_term", Long.toString(ifPrimaryTerm)); - } - return this; - } - - Params withWaitForActiveShards(ActiveShardCount activeShardCount) { - return withWaitForActiveShards(activeShardCount, ActiveShardCount.DEFAULT); - } - - Params withWaitForActiveShards(ActiveShardCount activeShardCount, ActiveShardCount defaultActiveShardCount) { - if (activeShardCount != null && 
activeShardCount != defaultActiveShardCount) { - return putParam("wait_for_active_shards", activeShardCount.toString().toLowerCase(Locale.ROOT)); - } - return this; - } - - Params withRequireAlias(boolean requireAlias) { - if (requireAlias) { - return putParam("require_alias", Boolean.toString(requireAlias)); - } - return this; - } - - Params withIndicesOptions(IndicesOptions indicesOptions) { - if (indicesOptions != null) { - withIgnoreUnavailable(indicesOptions.ignoreUnavailable()); - putParam("allow_no_indices", Boolean.toString(indicesOptions.allowNoIndices())); - String expandWildcards; - if (indicesOptions.expandWildcardExpressions() == false) { - expandWildcards = "none"; - } else { - StringJoiner joiner = new StringJoiner(","); - if (indicesOptions.expandWildcardsOpen()) { - joiner.add("open"); - } - if (indicesOptions.expandWildcardsClosed()) { - joiner.add("closed"); - } - expandWildcards = joiner.toString(); - } - putParam("expand_wildcards", expandWildcards); - putParam("ignore_throttled", Boolean.toString(indicesOptions.ignoreThrottled())); - } - return this; - } - - Params withIgnoreUnavailable(boolean ignoreUnavailable) { - // Always explicitly place the ignore_unavailable value. - putParam("ignore_unavailable", Boolean.toString(ignoreUnavailable)); - return this; - } - } - - /** - * Ensure that the {@link IndexRequest}'s content type is supported by the Bulk API and that it conforms - * to the current {@link BulkRequest}'s content type (if it's known at the time of this method get called). - * - * @return the {@link IndexRequest}'s content type - */ - static XContentType enforceSameContentType(IndexRequest indexRequest, @Nullable XContentType xContentType) { - XContentType requestContentType = indexRequest.getContentType(); - if (requestContentType.canonical() != XContentType.JSON && requestContentType.canonical() != XContentType.SMILE) { - throw new IllegalArgumentException( - "Unsupported content-type found for request with content-type [" - + requestContentType - + "], only JSON and SMILE are supported" - ); - } - if (xContentType == null) { - return requestContentType; - } - if (requestContentType.canonical() != xContentType.canonical()) { - throw new IllegalArgumentException( - "Mismatching content-type found for request with content-type [" - + requestContentType - + "], previous requests have content-type [" - + xContentType - + "]" - ); - } - return xContentType; - } - - /** - * Utility class to build request's endpoint given its parts as strings - */ - static class EndpointBuilder { - - private final StringJoiner joiner = new StringJoiner("/", "/", ""); - - EndpointBuilder addPathPart(String... parts) { - for (String part : parts) { - if (Strings.hasLength(part)) { - joiner.add(encodePart(part)); - } - } - return this; - } - - EndpointBuilder addCommaSeparatedPathParts(String[] parts) { - addPathPart(String.join(",", parts)); - return this; - } - - EndpointBuilder addPathPartAsIs(String... parts) { - for (String part : parts) { - if (Strings.hasLength(part)) { - joiner.add(part); - } - } - return this; - } - - String build() { - return joiner.toString(); - } - - private static String encodePart(String pathPart) { - try { - // encode each part (e.g. 
index, type and id) separately before merging them into the path - // we prepend "/" to the path part to make this path absolute, otherwise there can be issues with - // paths that start with `-` or contain `:` - // the authority must be an empty string and not null, else paths that begin with slashes could have them - // misinterpreted as part of the authority. - URI uri = new URI(null, "", "/" + pathPart, null, null); - // manually encode any slash that each part may contain - return uri.getRawPath().substring(1).replaceAll("/", "%2F"); - } catch (URISyntaxException e) { - throw new IllegalArgumentException("Path part [" + pathPart + "] couldn't be encoded", e); - } - } - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java deleted file mode 100644 index b0998957910a2..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ /dev/null @@ -1,1042 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.client; - -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchScrollRequest; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.aggregations.bucket.adjacency.AdjacencyMatrixAggregationBuilder; -import org.elasticsearch.aggregations.bucket.adjacency.ParsedAdjacencyMatrix; -import org.elasticsearch.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder; -import org.elasticsearch.aggregations.bucket.histogram.ParsedAutoDateHistogram; -import org.elasticsearch.aggregations.bucket.timeseries.ParsedTimeSeries; -import org.elasticsearch.aggregations.bucket.timeseries.TimeSeriesAggregationBuilder; -import org.elasticsearch.aggregations.pipeline.DerivativePipelineAggregationBuilder; -import org.elasticsearch.client.analytics.ParsedStringStats; -import org.elasticsearch.client.analytics.ParsedTopMetrics; -import org.elasticsearch.client.analytics.StringStatsAggregationBuilder; -import org.elasticsearch.client.analytics.TopMetricsAggregationBuilder; -import org.elasticsearch.client.core.MainResponse; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.util.concurrent.FutureUtils; -import org.elasticsearch.common.util.concurrent.ListenableFuture; -import org.elasticsearch.core.CheckedConsumer; -import org.elasticsearch.core.CheckedFunction; -import
org.elasticsearch.plugins.spi.NamedXContentProvider; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.search.aggregations.Aggregation; -import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.composite.ParsedComposite; -import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.filter.ParsedFilter; -import org.elasticsearch.search.aggregations.bucket.filter.ParsedFilters; -import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid; -import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoTileGrid; -import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.global.ParsedGlobal; -import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.histogram.ParsedDateHistogram; -import org.elasticsearch.search.aggregations.bucket.histogram.ParsedHistogram; -import org.elasticsearch.search.aggregations.bucket.histogram.ParsedVariableWidthHistogram; -import org.elasticsearch.search.aggregations.bucket.histogram.VariableWidthHistogramAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.missing.MissingAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.missing.ParsedMissing; -import org.elasticsearch.search.aggregations.bucket.nested.NestedAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.nested.ParsedNested; -import org.elasticsearch.search.aggregations.bucket.nested.ParsedReverseNested; -import org.elasticsearch.search.aggregations.bucket.nested.ReverseNestedAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.range.DateRangeAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.range.GeoDistanceAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.range.IpRangeAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.range.ParsedBinaryRange; -import org.elasticsearch.search.aggregations.bucket.range.ParsedDateRange; -import org.elasticsearch.search.aggregations.bucket.range.ParsedGeoDistance; -import org.elasticsearch.search.aggregations.bucket.range.ParsedRange; -import org.elasticsearch.search.aggregations.bucket.range.RangeAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.sampler.InternalSampler; -import org.elasticsearch.search.aggregations.bucket.sampler.ParsedSampler; -import org.elasticsearch.search.aggregations.bucket.terms.DoubleTerms; -import org.elasticsearch.search.aggregations.bucket.terms.LongRareTerms; -import org.elasticsearch.search.aggregations.bucket.terms.LongTerms; -import org.elasticsearch.search.aggregations.bucket.terms.ParsedDoubleTerms; -import org.elasticsearch.search.aggregations.bucket.terms.ParsedLongRareTerms; -import org.elasticsearch.search.aggregations.bucket.terms.ParsedLongTerms; -import 
org.elasticsearch.search.aggregations.bucket.terms.ParsedSignificantLongTerms; -import org.elasticsearch.search.aggregations.bucket.terms.ParsedSignificantStringTerms; -import org.elasticsearch.search.aggregations.bucket.terms.ParsedStringRareTerms; -import org.elasticsearch.search.aggregations.bucket.terms.ParsedStringTerms; -import org.elasticsearch.search.aggregations.bucket.terms.SignificantLongTerms; -import org.elasticsearch.search.aggregations.bucket.terms.SignificantStringTerms; -import org.elasticsearch.search.aggregations.bucket.terms.StringRareTerms; -import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; -import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.CardinalityAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.ExtendedStatsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.GeoBoundsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.GeoCentroidAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentileRanks; -import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentiles; -import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentileRanks; -import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentiles; -import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.ParsedAvg; -import org.elasticsearch.search.aggregations.metrics.ParsedCardinality; -import org.elasticsearch.search.aggregations.metrics.ParsedExtendedStats; -import org.elasticsearch.search.aggregations.metrics.ParsedGeoBounds; -import org.elasticsearch.search.aggregations.metrics.ParsedGeoCentroid; -import org.elasticsearch.search.aggregations.metrics.ParsedHDRPercentileRanks; -import org.elasticsearch.search.aggregations.metrics.ParsedHDRPercentiles; -import org.elasticsearch.search.aggregations.metrics.ParsedMax; -import org.elasticsearch.search.aggregations.metrics.ParsedMedianAbsoluteDeviation; -import org.elasticsearch.search.aggregations.metrics.ParsedMin; -import org.elasticsearch.search.aggregations.metrics.ParsedScriptedMetric; -import org.elasticsearch.search.aggregations.metrics.ParsedStats; -import org.elasticsearch.search.aggregations.metrics.ParsedSum; -import org.elasticsearch.search.aggregations.metrics.ParsedTDigestPercentileRanks; -import org.elasticsearch.search.aggregations.metrics.ParsedTDigestPercentiles; -import org.elasticsearch.search.aggregations.metrics.ParsedTopHits; -import org.elasticsearch.search.aggregations.metrics.ParsedValueCount; -import org.elasticsearch.search.aggregations.metrics.ParsedWeightedAvg; -import org.elasticsearch.search.aggregations.metrics.ScriptedMetricAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.StatsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.TopHitsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.WeightedAvgAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.ExtendedStatsBucketPipelineAggregationBuilder; -import 
org.elasticsearch.search.aggregations.pipeline.InternalBucketMetricValue; -import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; -import org.elasticsearch.search.aggregations.pipeline.ParsedBucketMetricValue; -import org.elasticsearch.search.aggregations.pipeline.ParsedDerivative; -import org.elasticsearch.search.aggregations.pipeline.ParsedExtendedStatsBucket; -import org.elasticsearch.search.aggregations.pipeline.ParsedPercentilesBucket; -import org.elasticsearch.search.aggregations.pipeline.ParsedSimpleValue; -import org.elasticsearch.search.aggregations.pipeline.ParsedStatsBucket; -import org.elasticsearch.search.aggregations.pipeline.PercentilesBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.StatsBucketPipelineAggregationBuilder; -import org.elasticsearch.search.suggest.Suggest; -import org.elasticsearch.search.suggest.completion.CompletionSuggestion; -import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestion; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder; -import org.elasticsearch.search.suggest.term.TermSuggestion; -import org.elasticsearch.search.suggest.term.TermSuggestionBuilder; -import org.elasticsearch.xcontent.ContextParser; -import org.elasticsearch.xcontent.DeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; - -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.ServiceLoader; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static java.util.Collections.emptySet; -import static java.util.stream.Collectors.toList; - -/** - * High level REST client that wraps an instance of the low level {@link RestClient} and allows to build requests and read responses. The - * {@link RestClient} instance is internally built based on the provided {@link RestClientBuilder} and it gets closed automatically when - * closing the {@link RestHighLevelClient} instance that wraps it. - *

<p> - * - * In case an already existing instance of a low-level REST client needs to be provided, this class can be subclassed and the - * {@link #RestHighLevelClient(RestClient, CheckedConsumer, List)} constructor can be used. - * <p> - * - * This class can also be sub-classed to expose additional client methods that make use of endpoints added to Elasticsearch through plugins, - * or to add support for custom response sections, again added to Elasticsearch through plugins. - * <p> - * - * The majority of the methods in this class come in two flavors, a blocking and an asynchronous version (e.g. - * {@link #search(SearchRequest, RequestOptions)} and {@link #searchAsync(SearchRequest, RequestOptions, ActionListener)}, where the latter - * takes an implementation of an {@link ActionListener} as an argument that needs to implement methods that handle successful responses and - * failure scenarios. Most of the blocking calls can throw an {@link IOException} or an unchecked {@link ElasticsearchException} in the - * following cases: - * - *
<ul> - * <li>an {@link IOException} is usually thrown in case of failing to parse the REST response in the high-level REST client, the request - * times out or similar cases where there is no response coming back from the Elasticsearch server</li> - * <li>an {@link ElasticsearchException} is usually thrown in case where the server returns a 4xx or 5xx error code. The high-level client - * then tries to parse the response body error details into a generic ElasticsearchException and suppresses the original - * {@link ResponseException}</li> - * </ul>
- * - * @deprecated The High Level Rest Client is deprecated in favor of the - * - * Elasticsearch Java API Client - */ -@Deprecated(since = "7.16.0", forRemoval = true) -@SuppressWarnings("removal") -public class RestHighLevelClient implements Closeable { - - private static final Logger logger = LogManager.getLogger(RestHighLevelClient.class); - /** - * Environment variable determining whether to send the 7.x compatibility header - */ - public static final String API_VERSIONING_ENV_VARIABLE = "ELASTIC_CLIENT_APIVERSIONING"; - - // To be called using performClientRequest and performClientRequestAsync to ensure version compatibility check - private final RestClient client; - private final XContentParserConfiguration parserConfig; - private final CheckedConsumer doClose; - private final boolean useAPICompatibility; - - /** Do not access directly but through getVersionValidationFuture() */ - private volatile ListenableFuture> versionValidationFuture; - - /** - * Creates a {@link RestHighLevelClient} given the low level {@link RestClientBuilder} that allows to build the - * {@link RestClient} to be used to perform requests. - */ - public RestHighLevelClient(RestClientBuilder restClientBuilder) { - this(restClientBuilder.build(), RestClient::close, Collections.emptyList()); - } - - /** - * Creates a {@link RestHighLevelClient} given the low level {@link RestClient} that it should use to perform requests and - * a list of entries that allow to parse custom response sections added to Elasticsearch through plugins. - * This constructor can be called by subclasses in case an externally created low-level REST client needs to be provided. - * The consumer argument allows to control what needs to be done when the {@link #close()} method is called. - * Also subclasses can provide parsers for custom response sections added to Elasticsearch through plugins. - */ - protected RestHighLevelClient( - RestClient restClient, - CheckedConsumer doClose, - List namedXContentEntries - ) { - this(restClient, doClose, namedXContentEntries, null); - } - - /** - * Creates a {@link RestHighLevelClient} given the low level {@link RestClient} that it should use to perform requests and - * a list of entries that allow to parse custom response sections added to Elasticsearch through plugins. - * This constructor can be called by subclasses in case an externally created low-level REST client needs to be provided. - * The consumer argument allows to control what needs to be done when the {@link #close()} method is called. - * Also subclasses can provide parsers for custom response sections added to Elasticsearch through plugins. - */ - private RestHighLevelClient( - RestClient restClient, - CheckedConsumer doClose, - List namedXContentEntries, - Boolean useAPICompatibility - ) { - this.client = Objects.requireNonNull(restClient, "restClient must not be null"); - this.doClose = Objects.requireNonNull(doClose, "doClose consumer must not be null"); - NamedXContentRegistry registry = new NamedXContentRegistry( - Stream.of(getDefaultNamedXContents().stream(), getProvidedNamedXContents().stream(), namedXContentEntries.stream()) - .flatMap(Function.identity()) - .collect(toList()) - ); - /* - * Ignores deprecation warnings. This is appropriate because it is only - * used to parse responses from Elasticsearch. Any deprecation warnings - * emitted there just mean that you are talking to an old version of - * Elasticsearch. There isn't anything you can do about the deprecation. 
- */ - this.parserConfig = XContentParserConfiguration.EMPTY.withRegistry(registry) - .withDeprecationHandler(DeprecationHandler.IGNORE_DEPRECATIONS); - if (useAPICompatibility == null && "true".equals(System.getenv(API_VERSIONING_ENV_VARIABLE))) { - this.useAPICompatibility = true; - } else { - this.useAPICompatibility = Boolean.TRUE.equals(useAPICompatibility); - } - } - - /** - * Returns the low-level client that the current high-level client instance is using to perform requests - */ - public final RestClient getLowLevelClient() { - return client; - } - - public final XContentParserConfiguration getParserConfig() { - return parserConfig; - } - - @Override - public final void close() throws IOException { - doClose.accept(client); - } - - /** - * Asynchronously executes a bulk request using the Bulk API. - * See Bulk API on elastic.co - * @param bulkRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @param listener the listener to be notified upon request completion - * @return cancellable that may be used to cancel the request - */ - public final Cancellable bulkAsync(BulkRequest bulkRequest, RequestOptions options, ActionListener listener) { - return performRequestAsyncAndParseEntity( - bulkRequest, - RequestConverters::bulk, - options, - BulkResponse::fromXContent, - listener, - emptySet() - ); - } - - /** - * Index a document using the Index API. - * See Index API on elastic.co - * @param indexRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - */ - public final IndexResponse index(IndexRequest indexRequest, RequestOptions options) throws IOException { - return performRequestAndParseEntity(indexRequest, RequestConverters::index, options, IndexResponse::fromXContent, emptySet()); - } - - /** - * Executes a search request using the Search API. - * See Search API on elastic.co - * @param searchRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - */ - public final SearchResponse search(SearchRequest searchRequest, RequestOptions options) throws IOException { - return performRequestAndParseEntity( - searchRequest, - r -> RequestConverters.search(r, "_search"), - options, - SearchResponse::fromXContent, - emptySet() - ); - } - - /** - * Asynchronously executes a search using the Search API. - * See Search API on elastic.co - * @param searchRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @param listener the listener to be notified upon request completion - * @return cancellable that may be used to cancel the request - */ - public final Cancellable searchAsync(SearchRequest searchRequest, RequestOptions options, ActionListener listener) { - return performRequestAsyncAndParseEntity( - searchRequest, - r -> RequestConverters.search(r, "_search"), - options, - SearchResponse::fromXContent, - listener, - emptySet() - ); - } - - /** - * Executes a search using the Search Scroll API. - * See Search - * Scroll API on elastic.co - * @param searchScrollRequest the request - * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - */ - public final SearchResponse scroll(SearchScrollRequest searchScrollRequest, RequestOptions options) throws IOException { - return performRequestAndParseEntity( - searchScrollRequest, - RequestConverters::searchScroll, - options, - SearchResponse::fromXContent, - emptySet() - ); - } - - /** - * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation - * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. - */ - @Deprecated - private Resp performRequestAndParseEntity( - Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction entityParser, - Set ignores - ) throws IOException { - return performRequest(request, requestConverter, options, response -> parseEntity(response.getEntity(), entityParser), ignores); - } - - /** - * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation - * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. - */ - @Deprecated - private Resp performRequest( - Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction responseConverter, - Set ignores - ) throws IOException { - ActionRequestValidationException validationException = request.validate(); - if (validationException != null && validationException.validationErrors().isEmpty() == false) { - throw validationException; - } - return internalPerformRequest(request, requestConverter, options, responseConverter, ignores); - } - - /** - * Provides common functionality for performing a request. - */ - private Resp internalPerformRequest( - Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction responseConverter, - Set ignores - ) throws IOException { - Request req = requestConverter.apply(request); - req.setOptions(options); - Response response; - try { - response = performClientRequest(req); - } catch (ResponseException e) { - if (ignores.contains(e.getResponse().getStatusLine().getStatusCode())) { - try { - return responseConverter.apply(e.getResponse()); - } catch (Exception innerException) { - // the exception is ignored as we now try to parse the response as an error. - // this covers cases like get where 404 can either be a valid document not found response, - // or an error for which parsing is completely different. We try to consider the 404 response as a valid one - // first. If parsing of the response breaks, we fall back to parsing it as an error. - throw parseResponseException(e); - } - } - throw parseResponseException(e); - } - - try { - return responseConverter.apply(response); - } catch (Exception e) { - throw new IOException("Unable to parse response body for " + response, e); - } - } - - /** - * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation - * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. 
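The @deprecated notice on the removed class points migrators at the Java API Client. A minimal sketch of that replacement, assuming the separate elasticsearch-java dependency is on the classpath and a cluster is reachable on localhost:9200 (both illustrative):

```java
import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.json.jackson.JacksonJsonpMapper;
import co.elastic.clients.transport.ElasticsearchTransport;
import co.elastic.clients.transport.rest_client.RestClientTransport;
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;

public class JavaApiClientMigration {
    public static void main(String[] args) throws Exception {
        // The low-level RestClient survives this PR; only the high-level wrapper is removed.
        RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build();
        // Wrap it in a transport with a JSON mapper and create the new typed client.
        ElasticsearchTransport transport = new RestClientTransport(restClient, new JacksonJsonpMapper());
        ElasticsearchClient client = new ElasticsearchClient(transport);
        System.out.println(client.info().version().number());
        transport.close(); // also closes the underlying RestClient
    }
}
```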
- * @return Cancellable instance that may be used to cancel the request - */ - @Deprecated - private Cancellable performRequestAsyncAndParseEntity( - Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction entityParser, - ActionListener listener, - Set ignores - ) { - return performRequestAsync( - request, - requestConverter, - options, - response -> parseEntity(response.getEntity(), entityParser), - listener, - ignores - ); - } - - /** - * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation - * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. - * @return Cancellable instance that may be used to cancel the request - */ - @Deprecated - private Cancellable performRequestAsync( - Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction responseConverter, - ActionListener listener, - Set ignores - ) { - ActionRequestValidationException validationException = request.validate(); - if (validationException != null && validationException.validationErrors().isEmpty() == false) { - listener.onFailure(validationException); - return Cancellable.NO_OP; - } - return internalPerformRequestAsync(request, requestConverter, options, responseConverter, listener, ignores); - } - - /** - * Provides common functionality for asynchronously performing a request. - * @return Cancellable instance that may be used to cancel the request - */ - private Cancellable internalPerformRequestAsync( - Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction responseConverter, - ActionListener listener, - Set ignores - ) { - Request req; - try { - req = requestConverter.apply(request); - } catch (Exception e) { - listener.onFailure(e); - return Cancellable.NO_OP; - } - req.setOptions(options); - - ResponseListener responseListener = wrapResponseListener(responseConverter, listener, ignores); - return performClientRequestAsync(req, responseListener); - } - - private ResponseListener wrapResponseListener( - CheckedFunction responseConverter, - ActionListener actionListener, - Set ignores - ) { - return new ResponseListener() { - @Override - public void onSuccess(Response response) { - try { - actionListener.onResponse(responseConverter.apply(response)); - } catch (Exception e) { - IOException ioe = new IOException("Unable to parse response body for " + response, e); - onFailure(ioe); - } - } - - @Override - public void onFailure(Exception exception) { - if (exception instanceof ResponseException responseException) { - Response response = responseException.getResponse(); - if (ignores.contains(response.getStatusLine().getStatusCode())) { - try { - actionListener.onResponse(responseConverter.apply(response)); - } catch (Exception innerException) { - // the exception is ignored as we now try to parse the response as an error. - // this covers cases like get where 404 can either be a valid document not found response, - // or an error for which parsing is completely different. We try to consider the 404 response as a valid one - // first. If parsing of the response breaks, we fall back to parsing it as an error. 
- actionListener.onFailure(parseResponseException(responseException)); - } - } else { - actionListener.onFailure(parseResponseException(responseException)); - } - } else { - actionListener.onFailure(exception); - } - } - }; - } - - /** - * Converts a {@link ResponseException} obtained from the low level REST client into an {@link ElasticsearchException}. - * If a response body was returned, tries to parse it as an error returned from Elasticsearch. - * If no response body was returned or anything goes wrong while parsing the error, returns a new {@link ElasticsearchStatusException} - * that wraps the original {@link ResponseException}. The potential exception obtained while parsing is added to the returned - * exception as a suppressed exception. This method is guaranteed to not throw any exception eventually thrown while parsing. - */ - private ElasticsearchStatusException parseResponseException(ResponseException responseException) { - Response response = responseException.getResponse(); - HttpEntity entity = response.getEntity(); - ElasticsearchStatusException elasticsearchException; - RestStatus restStatus = RestStatus.fromCode(response.getStatusLine().getStatusCode()); - - if (entity == null) { - elasticsearchException = new ElasticsearchStatusException(responseException.getMessage(), restStatus, responseException); - } else { - try { - elasticsearchException = parseEntity(entity, RestResponse::errorFromXContent); - elasticsearchException.addSuppressed(responseException); - } catch (Exception e) { - elasticsearchException = new ElasticsearchStatusException("Unable to parse response body", restStatus, responseException); - elasticsearchException.addSuppressed(e); - } - } - return elasticsearchException; - } - - private Resp parseEntity(final HttpEntity entity, final CheckedFunction entityParser) - throws IOException { - if (entity == null) { - throw new IllegalStateException("Response body expected but not returned"); - } - if (entity.getContentType() == null) { - throw new IllegalStateException("Elasticsearch didn't return the [Content-Type] header, unable to parse response body"); - } - XContentType xContentType = XContentType.fromMediaType(entity.getContentType().getValue()); - if (xContentType == null) { - throw new IllegalStateException("Unsupported Content-Type: " + entity.getContentType().getValue()); - } - try (XContentParser parser = xContentType.xContent().createParser(parserConfig, entity.getContent())) { - return entityParser.apply(parser); - } - } - - private enum EntityType { - JSON() { - @Override - public String header() { - return "application/json"; - } - - @Override - public String compatibleHeader() { - return "application/vnd.elasticsearch+json; compatible-with=7"; - } - }, - NDJSON() { - @Override - public String header() { - return "application/x-ndjson"; - } - - @Override - public String compatibleHeader() { - return "application/vnd.elasticsearch+x-ndjson; compatible-with=7"; - } - }, - STAR() { - @Override - public String header() { - return "application/*"; - } - - @Override - public String compatibleHeader() { - return "application/vnd.elasticsearch+json; compatible-with=7"; - } - }, - YAML() { - @Override - public String header() { - return "application/yaml"; - } - - @Override - public String compatibleHeader() { - return "application/vnd.elasticsearch+yaml; compatible-with=7"; - } - }, - SMILE() { - @Override - public String header() { - return "application/smile"; - } - - @Override - public String compatibleHeader() { - return 
"application/vnd.elasticsearch+smile; compatible-with=7"; - } - }, - CBOR() { - @Override - public String header() { - return "application/cbor"; - } - - @Override - public String compatibleHeader() { - return "application/vnd.elasticsearch+cbor; compatible-with=7"; - } - }; - - public abstract String header(); - - public abstract String compatibleHeader(); - - @Override - public String toString() { - return header(); - } - } - - private Cancellable performClientRequestAsync(Request request, ResponseListener listener) { - // Add compatibility request headers if compatibility mode has been enabled - if (this.useAPICompatibility) { - modifyRequestForCompatibility(request); - } - - ListenableFuture> versionCheck = getVersionValidationFuture(); - - // Create a future that tracks cancellation of this method's result and forwards cancellation to the actual LLRC request. - CompletableFuture cancellationForwarder = new CompletableFuture<>(); - Cancellable result = new Cancellable() { - @Override - public void cancel() { - // Raise the flag by completing the future - FutureUtils.cancel(cancellationForwarder); - } - - @Override - void runIfNotCancelled(Runnable runnable) { - if (cancellationForwarder.isCancelled()) { - throw newCancellationException(); - } - runnable.run(); - } - }; - - // Send the request after we have done the version compatibility check. Note that if it has already happened, the listener will - // be called immediately on the same thread with no asynchronous scheduling overhead. - versionCheck.addListener(new ActionListener<>() { - @Override - public void onResponse(Optional validation) { - if (validation.isPresent() == false) { - // Send the request and propagate cancellation - Cancellable call = client.performRequestAsync(request, listener); - cancellationForwarder.whenComplete((r, t) -> - // Forward cancellation to the actual request (no need to check parameters as the - // only way for cancellationForwarder to be completed is by being cancelled). - call.cancel()); - } else { - // Version validation wasn't successful, fail the request with the validation result. - listener.onFailure(new ElasticsearchException(validation.get())); - } - } - - @Override - public void onFailure(Exception e) { - // Propagate validation request failure. This will be transient since `getVersionValidationFuture` clears the validation - // future if the request fails, leading to retries at the next HLRC request (see comments below). - listener.onFailure(e); - } - }); - - return result; - } - - /** - * Go through all the request's existing headers, looking for {@code headerName} headers and if they exist, - * changing them to use version compatibility. 
If no request headers are changed, modify the entity type header if appropriate - */ - private boolean addCompatibilityFor(RequestOptions.Builder newOptions, Header entityHeader, String headerName) { - // Modify any existing "Content-Type" headers on the request to use the version compatibility, if available - boolean contentTypeModified = false; - for (Header header : new ArrayList<>(newOptions.getHeaders())) { - if (headerName.equalsIgnoreCase(header.getName()) == false) { - continue; - } - contentTypeModified = contentTypeModified || modifyHeader(newOptions, header, headerName); - } - - // If there were no request-specific headers, modify the request entity's header to be compatible - if (entityHeader != null && contentTypeModified == false) { - contentTypeModified = modifyHeader(newOptions, entityHeader, headerName); - } - - return contentTypeModified; - } - - /** - * Modify the given header to be version compatible, if necessary. - * Returns true if a modification was made, false otherwise. - */ - private boolean modifyHeader(RequestOptions.Builder newOptions, Header header, String headerName) { - for (EntityType type : EntityType.values()) { - final String headerValue = header.getValue(); - if (headerValue.startsWith(type.header())) { - String newHeaderValue = headerValue.replace(type.header(), type.compatibleHeader()); - newOptions.removeHeader(header.getName()); - newOptions.addHeader(headerName, newHeaderValue); - return true; - } - } - return false; - } - - /** - * Make all necessary changes to support API compatibility for the given request. This includes - * modifying the "Content-Type" and "Accept" headers if present, or modifying the header based - * on the request's entity type. - */ - private void modifyRequestForCompatibility(Request request) { - final Header entityHeader = request.getEntity() == null ? null : request.getEntity().getContentType(); - final RequestOptions.Builder newOptions = request.getOptions().toBuilder(); - - addCompatibilityFor(newOptions, entityHeader, "Content-Type"); - if (request.getOptions().containsHeader("Accept")) { - addCompatibilityFor(newOptions, entityHeader, "Accept"); - } else { - // There is no entity, and no existing accept header, but we still need one - // with compatibility, so use the compatible JSON (default output) format - newOptions.addHeader("Accept", EntityType.JSON.compatibleHeader()); - } - request.setOptions(newOptions); - } - - private Response performClientRequest(Request request) throws IOException { - // Add compatibility request headers if compatibility mode has been enabled - if (this.useAPICompatibility) { - modifyRequestForCompatibility(request); - } - - Optional versionValidation; - try { - final var future = new PlainActionFuture>(); - getVersionValidationFuture().addListener(future); - versionValidation = future.get(); - } catch (InterruptedException | ExecutionException e) { - // Unlikely to happen - throw new ElasticsearchException(e); - } - - if (versionValidation.isPresent() == false) { - return client.performRequest(request); - } else { - throw new ElasticsearchException(versionValidation.get()); - } - } - - /** - * Returns a future that asynchronously validates the Elasticsearch product version. Its result is an optional string: if empty then - * validation was successful, if present it contains the validation error. API requests should be chained to this future and check - * the validation result before going further. - *

- * This future is a memoization of the first successful request to the "/" endpoint and the subsequent compatibility check - * ({@see #versionValidationFuture}). Further client requests reuse its result. - *
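As a compact illustration of the memoization this javadoc describes (and of the clear-on-failure retry behaviour covered in its final paragraph below), a hypothetical stand-in using CompletableFuture rather than the removed ListenableFuture:

```java
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;

final class VersionCheckSketch {
    private final Supplier<Optional<String>> infoCall; // stands in for GET "/" plus validation
    private volatile CompletableFuture<Optional<String>> validationFuture;

    VersionCheckSketch(Supplier<Optional<String>> infoCall) {
        this.infoCall = infoCall;
    }

    CompletableFuture<Optional<String>> getValidationFuture() {
        CompletableFuture<Optional<String>> current = validationFuture;
        if (current != null) {
            return current; // memoized: later requests reuse the first result
        }
        synchronized (this) {
            current = validationFuture;
            if (current != null) {
                return current; // another thread won the race
            }
            CompletableFuture<Optional<String>> future = new CompletableFuture<>();
            validationFuture = future;
            CompletableFuture.runAsync(() -> {
                try {
                    future.complete(infoCall.get()); // empty Optional = compatible
                } catch (RuntimeException e) {
                    validationFuture = null; // transient failure: retry on the next request
                    future.completeExceptionally(e);
                }
            });
            return future;
        }
    }

    public static void main(String[] args) {
        VersionCheckSketch sketch = new VersionCheckSketch(Optional::empty);
        System.out.println(sketch.getValidationFuture().join()); // Optional.empty
    }
}
```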

- * If the version check request fails (e.g. network error), {@link #versionValidationFuture} is cleared so that a new validation - * request is sent at the next HLRC request. This allows retries to happen while avoiding a busy retry loop (LLRC retries on the node - * pool still happen). - */ - private ListenableFuture> getVersionValidationFuture() { - ListenableFuture> currentFuture = this.versionValidationFuture; - if (currentFuture != null) { - return currentFuture; - } else { - synchronized (this) { - // Re-check in synchronized block - currentFuture = this.versionValidationFuture; - if (currentFuture != null) { - return currentFuture; - } - ListenableFuture> future = new ListenableFuture<>(); - this.versionValidationFuture = future; - - // Asynchronously call the info endpoint and complete the future with the version validation result. - Request req = new Request("GET", "/"); - // These status codes are nominal in the context of product version verification - req.addParameter("ignore", "401,403"); - client.performRequestAsync(req, new ResponseListener() { - @Override - public void onSuccess(Response response) { - Optional validation; - try { - validation = getVersionValidation(response); - } catch (Exception e) { - logger.error("Failed to parse info response", e); - validation = Optional.of( - "Failed to parse info response. Check logs for detailed information - " + e.getMessage() - ); - } - future.onResponse(validation); - } - - @Override - public void onFailure(Exception exception) { - - // Fail the requests (this one and the ones waiting for it) and clear the future - // so that we retry the next time the client executes a request. - versionValidationFuture = null; - future.onFailure(exception); - } - }); - - return future; - } - } - } - - /** - * Validates that the response info() is a compatible Elasticsearch version. - * - * @return an optional string. If empty, version is compatible. Otherwise, it's the message to return to the application. - */ - private Optional getVersionValidation(Response response) throws IOException { - // Let requests go through if the client doesn't have permissions for the info endpoint. - int statusCode = response.getStatusLine().getStatusCode(); - if (statusCode == 401 || statusCode == 403) { - return Optional.empty(); - } - - MainResponse mainResponse; - try { - mainResponse = parseEntity(response.getEntity(), MainResponse::fromXContent); - } catch (ResponseException e) { - throw parseResponseException(e); - } - - String version = mainResponse.getVersion().getNumber(); - if (Strings.hasLength(version) == false) { - return Optional.of("Missing version.number in info response"); - } - - String[] parts = version.split("\\."); - if (parts.length < 2) { - return Optional.of("Wrong version.number format in info response"); - } - - int major = Integer.parseInt(parts[0]); - int minor = Integer.parseInt(parts[1]); - - if (major < 6) { - return Optional.of("Elasticsearch version 6 or more is required"); - } - - if (major == 6 || (major == 7 && minor < 14)) { - if ("You Know, for Search".equalsIgnoreCase(mainResponse.getTagline()) == false) { - return Optional.of("Invalid or missing tagline [" + mainResponse.getTagline() + "]"); - } - - return Optional.empty(); - } - - String header = response.getHeader("X-Elastic-Product"); - if (header == null) { - return Optional.of( - "Missing [X-Elastic-Product] header. Please check that you are connecting to an Elasticsearch " - + "instance, and that any networking filters are preserving that header." 
- ); - } - - if ("Elasticsearch".equals(header) == false) { - return Optional.of("Invalid value [" + header + "] for [X-Elastic-Product] header."); - } - - return Optional.empty(); - } - - private static List getDefaultNamedXContents() { - Map> map = new HashMap<>(); - map.put(CardinalityAggregationBuilder.NAME, (p, c) -> ParsedCardinality.fromXContent(p, (String) c)); - map.put(InternalHDRPercentiles.NAME, (p, c) -> ParsedHDRPercentiles.fromXContent(p, (String) c)); - map.put(InternalHDRPercentileRanks.NAME, (p, c) -> ParsedHDRPercentileRanks.fromXContent(p, (String) c)); - map.put(InternalTDigestPercentiles.NAME, (p, c) -> ParsedTDigestPercentiles.fromXContent(p, (String) c)); - map.put(InternalTDigestPercentileRanks.NAME, (p, c) -> ParsedTDigestPercentileRanks.fromXContent(p, (String) c)); - map.put(PercentilesBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedPercentilesBucket.fromXContent(p, (String) c)); - map.put(MedianAbsoluteDeviationAggregationBuilder.NAME, (p, c) -> ParsedMedianAbsoluteDeviation.fromXContent(p, (String) c)); - map.put(MinAggregationBuilder.NAME, (p, c) -> ParsedMin.fromXContent(p, (String) c)); - map.put(MaxAggregationBuilder.NAME, (p, c) -> ParsedMax.fromXContent(p, (String) c)); - map.put(SumAggregationBuilder.NAME, (p, c) -> ParsedSum.fromXContent(p, (String) c)); - map.put(AvgAggregationBuilder.NAME, (p, c) -> ParsedAvg.fromXContent(p, (String) c)); - map.put(WeightedAvgAggregationBuilder.NAME, (p, c) -> ParsedWeightedAvg.fromXContent(p, (String) c)); - map.put(ValueCountAggregationBuilder.NAME, (p, c) -> ParsedValueCount.fromXContent(p, (String) c)); - map.put(InternalSimpleValue.NAME, (p, c) -> ParsedSimpleValue.fromXContent(p, (String) c)); - map.put(DerivativePipelineAggregationBuilder.NAME, (p, c) -> ParsedDerivative.fromXContent(p, (String) c)); - map.put(InternalBucketMetricValue.NAME, (p, c) -> ParsedBucketMetricValue.fromXContent(p, (String) c)); - map.put(StatsAggregationBuilder.NAME, (p, c) -> ParsedStats.fromXContent(p, (String) c)); - map.put(StatsBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedStatsBucket.fromXContent(p, (String) c)); - map.put(ExtendedStatsAggregationBuilder.NAME, (p, c) -> ParsedExtendedStats.fromXContent(p, (String) c)); - map.put(ExtendedStatsBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedExtendedStatsBucket.fromXContent(p, (String) c)); - map.put(GeoBoundsAggregationBuilder.NAME, (p, c) -> ParsedGeoBounds.fromXContent(p, (String) c)); - map.put(GeoCentroidAggregationBuilder.NAME, (p, c) -> ParsedGeoCentroid.fromXContent(p, (String) c)); - map.put(HistogramAggregationBuilder.NAME, (p, c) -> ParsedHistogram.fromXContent(p, (String) c)); - map.put(DateHistogramAggregationBuilder.NAME, (p, c) -> ParsedDateHistogram.fromXContent(p, (String) c)); - map.put(AutoDateHistogramAggregationBuilder.NAME, (p, c) -> ParsedAutoDateHistogram.fromXContent(p, (String) c)); - map.put(VariableWidthHistogramAggregationBuilder.NAME, (p, c) -> ParsedVariableWidthHistogram.fromXContent(p, (String) c)); - map.put(StringTerms.NAME, (p, c) -> ParsedStringTerms.fromXContent(p, (String) c)); - map.put(LongTerms.NAME, (p, c) -> ParsedLongTerms.fromXContent(p, (String) c)); - map.put(DoubleTerms.NAME, (p, c) -> ParsedDoubleTerms.fromXContent(p, (String) c)); - map.put(LongRareTerms.NAME, (p, c) -> ParsedLongRareTerms.fromXContent(p, (String) c)); - map.put(StringRareTerms.NAME, (p, c) -> ParsedStringRareTerms.fromXContent(p, (String) c)); - map.put(MissingAggregationBuilder.NAME, (p, c) -> ParsedMissing.fromXContent(p, (String) c)); - 
map.put(NestedAggregationBuilder.NAME, (p, c) -> ParsedNested.fromXContent(p, (String) c)); - map.put(ReverseNestedAggregationBuilder.NAME, (p, c) -> ParsedReverseNested.fromXContent(p, (String) c)); - map.put(GlobalAggregationBuilder.NAME, (p, c) -> ParsedGlobal.fromXContent(p, (String) c)); - map.put(FilterAggregationBuilder.NAME, (p, c) -> ParsedFilter.fromXContent(p, (String) c)); - map.put(InternalSampler.PARSER_NAME, (p, c) -> ParsedSampler.fromXContent(p, (String) c)); - map.put(GeoHashGridAggregationBuilder.NAME, (p, c) -> ParsedGeoHashGrid.fromXContent(p, (String) c)); - map.put(GeoTileGridAggregationBuilder.NAME, (p, c) -> ParsedGeoTileGrid.fromXContent(p, (String) c)); - map.put(RangeAggregationBuilder.NAME, (p, c) -> ParsedRange.fromXContent(p, (String) c)); - map.put(DateRangeAggregationBuilder.NAME, (p, c) -> ParsedDateRange.fromXContent(p, (String) c)); - map.put(GeoDistanceAggregationBuilder.NAME, (p, c) -> ParsedGeoDistance.fromXContent(p, (String) c)); - map.put(FiltersAggregationBuilder.NAME, (p, c) -> ParsedFilters.fromXContent(p, (String) c)); - map.put(AdjacencyMatrixAggregationBuilder.NAME, (p, c) -> ParsedAdjacencyMatrix.fromXContent(p, (String) c)); - map.put(SignificantLongTerms.NAME, (p, c) -> ParsedSignificantLongTerms.fromXContent(p, (String) c)); - map.put(SignificantStringTerms.NAME, (p, c) -> ParsedSignificantStringTerms.fromXContent(p, (String) c)); - map.put(ScriptedMetricAggregationBuilder.NAME, (p, c) -> ParsedScriptedMetric.fromXContent(p, (String) c)); - map.put(IpRangeAggregationBuilder.NAME, (p, c) -> ParsedBinaryRange.fromXContent(p, (String) c)); - map.put(TopHitsAggregationBuilder.NAME, (p, c) -> ParsedTopHits.fromXContent(p, (String) c)); - map.put(CompositeAggregationBuilder.NAME, (p, c) -> ParsedComposite.fromXContent(p, (String) c)); - map.put(StringStatsAggregationBuilder.NAME, (p, c) -> ParsedStringStats.PARSER.parse(p, (String) c)); - map.put(TopMetricsAggregationBuilder.NAME, (p, c) -> ParsedTopMetrics.PARSER.parse(p, (String) c)); - map.put(TimeSeriesAggregationBuilder.NAME, (p, c) -> ParsedTimeSeries.fromXContent(p, (String) (c))); - List entries = map.entrySet() - .stream() - .map(entry -> new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(entry.getKey()), entry.getValue())) - .collect(Collectors.toList()); - entries.add( - new NamedXContentRegistry.Entry( - Suggest.Suggestion.class, - new ParseField(TermSuggestionBuilder.SUGGESTION_NAME), - (parser, context) -> TermSuggestion.fromXContent(parser, (String) context) - ) - ); - entries.add( - new NamedXContentRegistry.Entry( - Suggest.Suggestion.class, - new ParseField(PhraseSuggestionBuilder.SUGGESTION_NAME), - (parser, context) -> PhraseSuggestion.fromXContent(parser, (String) context) - ) - ); - entries.add( - new NamedXContentRegistry.Entry( - Suggest.Suggestion.class, - new ParseField(CompletionSuggestionBuilder.SUGGESTION_NAME), - (parser, context) -> CompletionSuggestion.fromXContent(parser, (String) context) - ) - ); - return entries; - } - - /** - * Loads and returns the {@link NamedXContentRegistry.Entry} parsers provided by plugins. 
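getProvidedNamedXContents (below) relies on the JDK ServiceLoader to discover plugin-supplied parsers. A minimal, self-contained sketch of that lookup pattern, with a hypothetical Provider interface standing in for NamedXContentProvider:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.ServiceLoader;

// Hypothetical provider interface; the real one is NamedXContentProvider,
// with implementations registered under META-INF/services on the classpath.
interface Provider {
    List<String> entries();
}

final class ServiceLoaderSketch {
    static List<String> loadAll() {
        List<String> entries = new ArrayList<>();
        for (Provider service : ServiceLoader.load(Provider.class)) {
            entries.addAll(service.entries());
        }
        return entries;
    }

    public static void main(String[] args) {
        System.out.println(loadAll()); // [] unless a provider is registered
    }
}
```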
- */ - private static List getProvidedNamedXContents() { - List entries = new ArrayList<>(); - for (NamedXContentProvider service : ServiceLoader.load(NamedXContentProvider.class)) { - entries.addAll(service.getNamedXContentParsers()); - } - return entries; - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java deleted file mode 100644 index b7635f7054299..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.client; - -import java.util.Optional; - -/** - * Defines a validation layer for Requests. - */ -public interface Validatable { - - Validatable EMPTY = new Validatable() { - }; - - /** - * Perform validation. This method does not have to be overridden in the event that no validation needs to be done, - * or the validation was done during object construction time. A {@link ValidationException} that is not null is - * assumed to contain validation errors and will be thrown. - * - * @return An {@link Optional} {@link ValidationException} that contains a list of validation errors. - */ - default Optional validate() { - return Optional.empty(); - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java deleted file mode 100644 index d5701c5723096..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.client; - -import org.elasticsearch.core.Nullable; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -/** - * Encapsulates an accumulation of validation errors - */ -public class ValidationException extends IllegalArgumentException { - - /** - * Creates {@link ValidationException} instance initialized with given error messages. - * @param error the errors to add - * @return {@link ValidationException} instance - */ - public static ValidationException withError(String... error) { - return withErrors(Arrays.asList(error)); - } - - /** - * Creates {@link ValidationException} instance initialized with given error messages. 
- * @param errors the list of errors to add - * @return {@link ValidationException} instance - */ - public static ValidationException withErrors(List errors) { - ValidationException e = new ValidationException(); - for (String error : errors) { - e.addValidationError(error); - } - return e; - } - - private final List validationErrors = new ArrayList<>(); - - /** - * Add a new validation error to the accumulating validation errors - * @param error the error to add - */ - public void addValidationError(final String error) { - validationErrors.add(error); - } - - /** - * Adds validation errors from an existing {@link ValidationException} to - * the accumulating validation errors - * @param exception the {@link ValidationException} to add errors from - */ - public final void addValidationErrors(final @Nullable ValidationException exception) { - if (exception != null) { - for (String error : exception.validationErrors()) { - addValidationError(error); - } - } - } - - /** - * Returns the validation errors accumulated - */ - public final List validationErrors() { - return validationErrors; - } - - @Override - public final String getMessage() { - StringBuilder sb = new StringBuilder(); - sb.append("Validation Failed: "); - int index = 0; - for (String error : validationErrors) { - sb.append(++index).append(": ").append(error).append(";"); - } - return sb.toString(); - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MainResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MainResponse.java deleted file mode 100644 index bf7b1a665e098..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MainResponse.java +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
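The getMessage() implementation above renders the accumulated errors with a running index. A tiny standalone reproduction of that formatting, using hypothetical error strings:

```java
import java.util.List;

final class ValidationMessageSketch {
    public static void main(String[] args) {
        List<String> errors = List.of("index is missing", "id is missing");
        StringBuilder sb = new StringBuilder("Validation Failed: ");
        int index = 0;
        for (String error : errors) {
            sb.append(++index).append(": ").append(error).append(";");
        }
        // Validation Failed: 1: index is missing;2: id is missing;
        System.out.println(sb);
    }
}
```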
- */ - -package org.elasticsearch.client.core; - -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentParser; - -import java.util.Objects; - -public class MainResponse { - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - MainResponse.class.getName(), - true, - args -> { - return new MainResponse((String) args[0], (Version) args[1], (String) args[2], (String) args[3], (String) args[4]); - } - ); - - static { - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("name")); - PARSER.declareObject(ConstructingObjectParser.constructorArg(), Version.PARSER, new ParseField("version")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("cluster_name")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("cluster_uuid")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("tagline")); - - } - - private final String nodeName; - private final Version version; - private final String clusterName; - private final String clusterUuid; - private final String tagline; - - public MainResponse(String nodeName, Version version, String clusterName, String clusterUuid, String tagline) { - this.nodeName = nodeName; - this.version = version; - this.clusterName = clusterName; - this.clusterUuid = clusterUuid; - this.tagline = tagline; - } - - public String getNodeName() { - return nodeName; - } - - public Version getVersion() { - return version; - } - - public String getClusterName() { - return clusterName; - } - - public String getClusterUuid() { - return clusterUuid; - } - - public String getTagline() { - return tagline; - } - - public static MainResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - MainResponse that = (MainResponse) o; - return nodeName.equals(that.nodeName) - && version.equals(that.version) - && clusterName.equals(that.clusterName) - && clusterUuid.equals(that.clusterUuid) - && tagline.equals(that.tagline); - } - - @Override - public int hashCode() { - return Objects.hash(nodeName, version, clusterName, clusterUuid, tagline); - } - - public static class Version { - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - Version.class.getName(), - true, - args -> { - return new Version( - (String) args[0], - (String) args[1], - (String) args[2], - (String) args[3], - (String) args[4], - (Boolean) args[5], - (String) args[6], - (String) args[7], - (String) args[8] - ); - } - ); - - static { - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("number")); - PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("build_flavor")); - PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("build_type")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("build_hash")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("build_date")); - PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), new ParseField("build_snapshot")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("lucene_version")); - 
PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("minimum_wire_compatibility_version")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("minimum_index_compatibility_version")); - } - private final String number; - private final String buildFlavor; - private final String buildType; - private final String buildHash; - private final String buildDate; - private final boolean isSnapshot; - private final String luceneVersion; - private final String minimumWireCompatibilityVersion; - private final String minimumIndexCompatibilityVersion; - - public Version( - String number, - String buildFlavor, - String buildType, - String buildHash, - String buildDate, - boolean isSnapshot, - String luceneVersion, - String minimumWireCompatibilityVersion, - String minimumIndexCompatibilityVersion - ) { - this.number = number; - this.buildFlavor = buildFlavor; - this.buildType = buildType; - this.buildHash = buildHash; - this.buildDate = buildDate; - this.isSnapshot = isSnapshot; - this.luceneVersion = luceneVersion; - this.minimumWireCompatibilityVersion = minimumWireCompatibilityVersion; - this.minimumIndexCompatibilityVersion = minimumIndexCompatibilityVersion; - } - - public String getNumber() { - return number; - } - - public String getBuildFlavor() { - return buildFlavor; - } - - public String getBuildType() { - return buildType; - } - - public String getBuildHash() { - return buildHash; - } - - public String getBuildDate() { - return buildDate; - } - - public boolean isSnapshot() { - return isSnapshot; - } - - public String getLuceneVersion() { - return luceneVersion; - } - - public String getMinimumWireCompatibilityVersion() { - return minimumWireCompatibilityVersion; - } - - public String getMinimumIndexCompatibilityVersion() { - return minimumIndexCompatibilityVersion; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - Version version = (Version) o; - return isSnapshot == version.isSnapshot - && number.equals(version.number) - && Objects.equals(buildFlavor, version.buildFlavor) - && Objects.equals(buildType, version.buildType) - && buildHash.equals(version.buildHash) - && buildDate.equals(version.buildDate) - && luceneVersion.equals(version.luceneVersion) - && minimumWireCompatibilityVersion.equals(version.minimumWireCompatibilityVersion) - && minimumIndexCompatibilityVersion.equals(version.minimumIndexCompatibilityVersion); - } - - @Override - public int hashCode() { - return Objects.hash( - number, - buildFlavor, - buildType, - buildHash, - buildDate, - isSnapshot, - luceneVersion, - minimumWireCompatibilityVersion, - minimumIndexCompatibilityVersion - ); - } - } -} diff --git a/client/rest-high-level/testnode.crt b/client/rest-high-level/testnode.crt deleted file mode 100644 index 08c160bcea5ff..0000000000000 --- a/client/rest-high-level/testnode.crt +++ /dev/null @@ -1,23 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID0zCCArugAwIBAgIJALi5bDfjMszLMA0GCSqGSIb3DQEBCwUAMEgxDDAKBgNV -BAoTA29yZzEWMBQGA1UECxMNZWxhc3RpY3NlYXJjaDEgMB4GA1UEAxMXRWxhc3Rp -Y3NlYXJjaCBUZXN0IE5vZGUwHhcNMTUwOTIzMTg1MjU3WhcNMTkwOTIyMTg1MjU3 -WjBIMQwwCgYDVQQKEwNvcmcxFjAUBgNVBAsTDWVsYXN0aWNzZWFyY2gxIDAeBgNV -BAMTF0VsYXN0aWNzZWFyY2ggVGVzdCBOb2RlMIIBIjANBgkqhkiG9w0BAQEFAAOC -AQ8AMIIBCgKCAQEA3rGZ1QbsW0+MuyrSLmMfDFKtLBkIFW8V0gRuurFg1PUKKNR1 -Mq2tMVwjjYETAU/UY0iKZOzjgvYPKhDTYBTte/WHR1ZK4CYVv7TQX/gtFQG/ge/c -7u0sLch9p7fbd+/HZiLS/rBEZDIohvgUvzvnA8+OIYnw4kuxKo/5iboAIS41klMg 
-/lATm8V71LMY68inht71/ZkQoAHKgcR9z4yNYvQ1WqKG8DG8KROXltll3sTrKbl5 -zJhn660es/1ZnR6nvwt6xnSTl/mNHMjkfv1bs4rJ/py3qPxicdoSIn/KyojUcgHV -F38fuAy2CQTdjVG5fWj9iz+mQvLm3+qsIYQdFwIDAQABo4G/MIG8MAkGA1UdEwQC -MAAwHQYDVR0OBBYEFEMMWLWQi/g83PzlHYqAVnty5L7HMIGPBgNVHREEgYcwgYSC -CWxvY2FsaG9zdIIVbG9jYWxob3N0LmxvY2FsZG9tYWluggpsb2NhbGhvc3Q0ghds -b2NhbGhvc3Q0LmxvY2FsZG9tYWluNIIKbG9jYWxob3N0NoIXbG9jYWxob3N0Ni5s -b2NhbGRvbWFpbjaHBH8AAAGHEAAAAAAAAAAAAAAAAAAAAAEwDQYJKoZIhvcNAQEL -BQADggEBAMjGGXT8Nt1tbl2GkiKtmiuGE2Ej66YuZ37WSJViaRNDVHLlg87TCcHe -k2rdO+6sFqQbbzEfwQ05T7xGmVu7tm54HwKMRugoQ3wct0bQC5wEWYN+oMDvSyO6 -M28mZwWb4VtR2IRyWP+ve5DHwTM9mxWa6rBlGzsQqH6YkJpZojzqk/mQTug+Y8aE -mVoqRIPMHq9ob+S9qd5lp09+MtYpwPfTPx/NN+xMEooXWW/ARfpGhWPkg/FuCu4z -1tFmCqHgNcWirzMm3dQpF78muE9ng6OB2MXQwL4VgnVkxmlZNHbkR2v/t8MyZJxC -y4g6cTMM3S/UMt5/+aIB2JAuMKyuD+A= ------END CERTIFICATE----- diff --git a/client/rest-high-level/testnode.jks b/client/rest-high-level/testnode.jks deleted file mode 100644 index ebe6146124e8f..0000000000000 Binary files a/client/rest-high-level/testnode.jks and /dev/null differ diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 7154a2be5bbd8..ed087bef0ac76 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -87,6 +87,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static java.util.Collections.singletonList; +import static org.elasticsearch.client.RestClient.IGNORE_RESPONSE_CODES_PARAM; /** * Client that connects to an Elasticsearch cluster through HTTP. @@ -106,6 +107,9 @@ * Requests can be traced by enabling trace logging for "tracer". The trace logger outputs requests and responses in curl format. 
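The RestClient hunk below promotes the previously string-ly typed "ignore" parameter to the IGNORE_RESPONSE_CODES_PARAM constant; client-side behaviour is unchanged. A hedged usage example (the endpoint and index name are illustrative, and a cluster on localhost:9200 is assumed):

```java
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class IgnoreParamExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
            Request request = new Request("GET", "/missing-index/_doc/1");
            // "ignore" is stripped before the request is sent to Elasticsearch and only
            // affects client-side handling: a 404 comes back as a Response rather than
            // being thrown as a ResponseException.
            request.addParameter("ignore", "404");
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}
```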
*/ public class RestClient implements Closeable { + + public static final String IGNORE_RESPONSE_CODES_PARAM = "ignore"; + private static final Log logger = LogFactory.getLog(RestClient.class); private final CloseableHttpAsyncClient client; @@ -780,8 +784,8 @@ private class InternalRequest { this.request = request; Map params = new HashMap<>(request.getParameters()); params.putAll(request.getOptions().getParameters()); - // ignore is a special parameter supported by the clients, shouldn't be sent to es - String ignoreString = params.remove("ignore"); + // IGNORE_RESPONSE_CODES_PARAM is a special parameter supported by the clients, shouldn't be sent to es + String ignoreString = params.remove(IGNORE_RESPONSE_CODES_PARAM); this.ignoreErrorCodes = getIgnoreErrorCodes(ignoreString, request.getMethod()); URI uri = buildUri(pathPrefix, request.getEndpoint(), params); this.httpRequest = createHttpRequest(request.getMethod(), uri, request.getEntity(), compressionEnabled); diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index a1c4d3fab076a..10d24242ae620 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -275,6 +275,7 @@ public void testErrorStatusCodes() throws Exception { try { Request request = new Request(method, "/" + errorStatusCode); if (false == ignoreParam.isEmpty()) { + // literal "ignore" rather than IGNORE_RESPONSE_CODES_PARAM since this is something on which callers might rely request.addParameter("ignore", ignoreParam); } Response response = restClient.performRequest(request); @@ -568,6 +569,7 @@ private HttpUriRequest performRandomRequest(String method) throws Exception { if (randomBoolean()) { ignore += "," + randomFrom(RestClientTestUtil.getAllErrorStatusCodes()); } + // literal "ignore" rather than IGNORE_RESPONSE_CODES_PARAM since this is something on which callers might rely request.addParameter("ignore", ignore); } URI uri = uriBuilder.build(); diff --git a/distribution/build.gradle b/distribution/build.gradle index 90af1472deb2e..e45f1d09625d6 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -14,6 +14,7 @@ import org.elasticsearch.gradle.internal.ConcatFilesTask import org.elasticsearch.gradle.internal.DependenciesInfoPlugin import org.elasticsearch.gradle.internal.NoticeTask import org.elasticsearch.gradle.internal.info.BuildParams +import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin import java.nio.file.Files import java.nio.file.Path @@ -30,6 +31,15 @@ configurations { attribute(Category.CATEGORY_ATTRIBUTE, project.getObjects().named(Category.class, Category.DOCUMENTATION)) } } + featuresMetadata { + attributes { + attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, HistoricalFeaturesMetadataPlugin.FEATURES_METADATA_TYPE) + } + } +} + +dependencies { + featuresMetadata project(':server') } def thisProj = project @@ -196,6 +206,7 @@ project.rootProject.subprojects.findAll { it.parent.path == ':modules' }.each { } distro.copyModule(processDefaultOutputsTaskProvider, module) + dependencies.add('featuresMetadata', module) if (module.name.startsWith('transport-') || (BuildParams.snapshotBuild == false && module.name == 'apm')) { distro.copyModule(processIntegTestOutputsTaskProvider, module) } @@ -214,6 +225,7 @@ xpack.subprojects.findAll { it.parent == xpack 
}.each { Project xpackModule -> } } distro.copyModule(processDefaultOutputsTaskProvider, xpackModule) + dependencies.add('featuresMetadata', xpackModule) if (xpackModule.name.equals('core') || xpackModule.name.equals('security')) { distro.copyModule(processIntegTestOutputsTaskProvider, xpackModule) } diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index 25b5883166ccc..e0d1dd983c0de 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -24,8 +24,8 @@ dependencies { implementation 'org.ow2.asm:asm:9.5' implementation 'org.ow2.asm:asm-tree:9.5' - api "org.bouncycastle:bcpg-fips:1.0.4" - api "org.bouncycastle:bc-fips:1.0.2" + api "org.bouncycastle:bcpg-fips:1.0.7.1" + api "org.bouncycastle:bc-fips:1.0.2.4" testImplementation project(":test:framework") testImplementation "com.google.jimfs:jimfs:${versions.jimfs}" testRuntimeOnly "com.google.guava:guava:${versions.jimfs_guava}" diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java index e9adf9882b6db..c088e89338e74 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java @@ -88,7 +88,6 @@ import java.util.Arrays; import java.util.Date; import java.util.HashSet; -import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; @@ -1591,15 +1590,6 @@ public void testGetSemanticVersion() { assertThat(InstallPluginAction.getSemanticVersion("foo-1.2.3"), nullValue()); } - private Map> namedComponentsMap() { - Map> result = new LinkedHashMap<>(); - Map extensibles = new LinkedHashMap<>(); - extensibles.put("a_component", "p.A"); - extensibles.put("b_component", "p.B"); - result.put("org.elasticsearch.plugins.cli.test_model.ExtensibleInterface", extensibles); - return result; - } - private static String namedComponentsJSON() { return """ { diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/RemovePluginActionTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/RemovePluginActionTests.java index 7d900155488b2..73e89fc948029 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/RemovePluginActionTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/RemovePluginActionTests.java @@ -140,10 +140,6 @@ public void testRemoveMultiple() throws Exception { assertRemoveCleaned(env); } - private static Version minimumCompatibleVersion(Version v) { - return Version.fromString((v.major - 1) + ".0.0"); - } - public void testBin() throws Exception { createPlugin("fake"); Path binDir = env.binFile().resolve("fake"); diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java index b6cd680cb5816..9dcd630f52631 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java @@ -145,7 +145,7 @@ static List apmJvmOptions(Settings settings, @Nullable SecureSettings se // Configures a log 
file to write to. Don't disable writing to a log file, // as the agent will then require extra Security Manager permissions when // it tries to do something else, and it's just painful. - propertiesMap.put("log_file", logsDir.resolve("apm-agent.log").toString()); + propertiesMap.put("log_file", logsDir.resolve("apm-agent.json").toString()); // No point doing anything if we don't have a destination for the trace data, and it can't be configured dynamically if (propertiesMap.containsKey("server_url") == false && propertiesMap.containsKey("server_urls") == false) { diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java index 25c61c41638d1..ea2df72fb2c0b 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java @@ -107,6 +107,14 @@ public void execute(Terminal terminal, OptionSet options, Environment env, Proce // we are running in the foreground, so wait for the server to exit int exitCode = server.waitFor(); + onExit(exitCode); + } + + /** + * A post-exit hook to perform additional processing before the command terminates + * @param exitCode the server process exit code + */ + protected void onExit(int exitCode) throws UserException { if (exitCode != ExitCodes.OK) { throw new UserException(exitCode, "Elasticsearch exited unexpectedly"); } diff --git a/docs/build.gradle b/docs/build.gradle index 33e6cc6080a95..da3d83378e894 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -72,6 +72,9 @@ testClusters.matching { it.name == "yamlRestTest"}.configureEach { setting 'xpack.license.self_generated.type', 'trial' setting 'indices.lifecycle.history_index_enabled', 'false' keystorePassword 'keystore-password' + if (BuildParams.isSnapshotBuild() == false) { + requiresFeature 'es.failure_store_feature_flag_enabled', new Version(8, 12, 0) + } } // debug ccr test failures: diff --git a/docs/changelog/100408.yaml b/docs/changelog/100408.yaml new file mode 100644 index 0000000000000..275c3b4a0de48 --- /dev/null +++ b/docs/changelog/100408.yaml @@ -0,0 +1,5 @@ +pr: 100408 +summary: "ESQL: Make blocks ref counted" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/100570.yaml b/docs/changelog/100570.yaml new file mode 100644 index 0000000000000..b68a905b0e046 --- /dev/null +++ b/docs/changelog/100570.yaml @@ -0,0 +1,5 @@ +pr: 100570 +summary: Added metric for cache eviction of entries with non zero frequency +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/100921.yaml b/docs/changelog/100921.yaml new file mode 100644 index 0000000000000..e6e2caa93d465 --- /dev/null +++ b/docs/changelog/100921.yaml @@ -0,0 +1,5 @@ +pr: 100921 +summary: "Add support for Serbian Language Analyzer" +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/100938.yaml b/docs/changelog/100938.yaml new file mode 100644 index 0000000000000..b21f6955c992e --- /dev/null +++ b/docs/changelog/100938.yaml @@ -0,0 +1,5 @@ +pr: 100938 +summary: "Set includeShardsStats = false in NodesStatsRequest where the caller does not use shards-level statistics" +area: Stats +type: enhancement +issues: [] diff --git a/docs/changelog/101390.yaml b/docs/changelog/101390.yaml new file mode 100644 index 0000000000000..23bdef6e39dfe --- /dev/null +++ b/docs/changelog/101390.yaml @@ -0,0 +1,5 @@ +pr: 101390 +summary: Enable 
inter-segment concurrency for terms aggs +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/101409.yaml b/docs/changelog/101409.yaml new file mode 100644 index 0000000000000..82e7f339fdd89 --- /dev/null +++ b/docs/changelog/101409.yaml @@ -0,0 +1,5 @@ +pr: 101409 +summary: Adding a simulate ingest api +area: Ingest Node +type: feature +issues: [] diff --git a/docs/changelog/101423.yaml b/docs/changelog/101423.yaml new file mode 100644 index 0000000000000..a5497d444797f --- /dev/null +++ b/docs/changelog/101423.yaml @@ -0,0 +1,5 @@ +pr: 101423 +summary: Export circuit breaker trip count as a counter metric +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/101609.yaml b/docs/changelog/101609.yaml new file mode 100644 index 0000000000000..27993574743d2 --- /dev/null +++ b/docs/changelog/101609.yaml @@ -0,0 +1,9 @@ +pr: 101609 +summary: > + Add a node feature join barrier. This prevents nodes from joining clusters that do not have + all the features already present in the cluster. This ensures that once a features is supported + by all the nodes in a cluster, that feature will never then not be supported in the future. + This is the corresponding functionality for the version join barrier, but for features +area: "Cluster Coordination" +type: feature +issues: [] diff --git a/docs/changelog/101705.yaml b/docs/changelog/101705.yaml deleted file mode 100644 index baa7e69d48d88..0000000000000 --- a/docs/changelog/101705.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101705 -summary: Respect regional AWS STS endpoints -area: Snapshot/Restore -type: bug -issues: - - 89175 diff --git a/docs/changelog/101799.yaml b/docs/changelog/101799.yaml deleted file mode 100644 index a3ef5fb190177..0000000000000 --- a/docs/changelog/101799.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101799 -summary: Fix memory leak from JWT cache (and fix the usage of the JWT auth cache) -area: Authentication -type: bug -issues: [] diff --git a/docs/changelog/101826.yaml b/docs/changelog/101826.yaml new file mode 100644 index 0000000000000..87f3f8df1b0c2 --- /dev/null +++ b/docs/changelog/101826.yaml @@ -0,0 +1,6 @@ +pr: 101826 +summary: Support keyed histograms +area: Aggregations +type: enhancement +issues: + - 100242 diff --git a/docs/changelog/101845.yaml b/docs/changelog/101845.yaml new file mode 100644 index 0000000000000..0dd95bdabca57 --- /dev/null +++ b/docs/changelog/101845.yaml @@ -0,0 +1,5 @@ +pr: 101845 +summary: Introduce new endpoint to expose data stream lifecycle stats +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/101859.yaml b/docs/changelog/101859.yaml new file mode 100644 index 0000000000000..54f3fb12810ca --- /dev/null +++ b/docs/changelog/101859.yaml @@ -0,0 +1,6 @@ +pr: 101859 +summary: Cover head/tail commands edge cases and data types coverage +area: EQL +type: bug +issues: + - 101724 diff --git a/docs/changelog/101892.yaml b/docs/changelog/101892.yaml deleted file mode 100644 index 175871de83d1a..0000000000000 --- a/docs/changelog/101892.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101892 -summary: Dry up `AsyncTaskIndexService` memory management and fix inefficient circuit - breaker use -area: Search -type: bug -issues: [] diff --git a/docs/changelog/101904.yaml b/docs/changelog/101904.yaml new file mode 100644 index 0000000000000..cad422cc52e15 --- /dev/null +++ b/docs/changelog/101904.yaml @@ -0,0 +1,5 @@ +pr: 101904 +summary: Allow granting API keys with JWT as the access_token +area: Security +type: feature +issues: [] diff 
--git a/docs/changelog/101907.yaml b/docs/changelog/101907.yaml deleted file mode 100644 index 022c061555be1..0000000000000 --- a/docs/changelog/101907.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101907 -summary: Fail listener on exception in `TcpTransport#openConnection` -area: Network -type: bug -issues: - - 100510 diff --git a/docs/changelog/101967.yaml b/docs/changelog/101967.yaml deleted file mode 100644 index 84f188db1e30b..0000000000000 --- a/docs/changelog/101967.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101967 -summary: "Fix incorrect dynamic mapping for non-numeric-value arrays #101965" -area: Mapping -type: bug -issues: [] diff --git a/docs/changelog/101971.yaml b/docs/changelog/101971.yaml deleted file mode 100644 index 23fb5463bae79..0000000000000 --- a/docs/changelog/101971.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101971 -summary: Fix inference timeout from the Inference Ingest Processor -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/101979.yaml b/docs/changelog/101979.yaml new file mode 100644 index 0000000000000..ad119df24d36f --- /dev/null +++ b/docs/changelog/101979.yaml @@ -0,0 +1,5 @@ +pr: 101979 +summary: Calculate CO2 and emmission and costs +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/101998.yaml b/docs/changelog/101998.yaml deleted file mode 100644 index be0e2d8c61ba3..0000000000000 --- a/docs/changelog/101998.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101998 -summary: Avoid negative `DesiredBalanceStats#lastConvergedIndex` -area: Allocation -type: bug -issues: [] diff --git a/docs/changelog/102020.yaml b/docs/changelog/102020.yaml new file mode 100644 index 0000000000000..7c74e9676d342 --- /dev/null +++ b/docs/changelog/102020.yaml @@ -0,0 +1,5 @@ +pr: 102020 +summary: Retrieve stacktrace events from a custom index +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/102056.yaml b/docs/changelog/102056.yaml new file mode 100644 index 0000000000000..455f66ba90b03 --- /dev/null +++ b/docs/changelog/102056.yaml @@ -0,0 +1,5 @@ +pr: 102056 +summary: Use `BulkRequest` to store Application Privileges +area: Authorization +type: enhancement +issues: [] diff --git a/docs/changelog/102057.yaml b/docs/changelog/102057.yaml new file mode 100644 index 0000000000000..d5b664ba14c29 --- /dev/null +++ b/docs/changelog/102057.yaml @@ -0,0 +1,6 @@ +pr: 102057 +summary: Simplify `BlobStoreRepository` idle check +area: Snapshot/Restore +type: bug +issues: + - 101948 diff --git a/docs/changelog/102065.yaml b/docs/changelog/102065.yaml new file mode 100644 index 0000000000000..1a9a219df4502 --- /dev/null +++ b/docs/changelog/102065.yaml @@ -0,0 +1,5 @@ +pr: 102065 +summary: Add more desired balance stats +area: Allocation +type: enhancement +issues: [] diff --git a/docs/changelog/102075.yaml b/docs/changelog/102075.yaml new file mode 100644 index 0000000000000..54daae04169db --- /dev/null +++ b/docs/changelog/102075.yaml @@ -0,0 +1,5 @@ +pr: 102075 +summary: Accept a single or multiple inputs to `_inference` +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/102089.yaml b/docs/changelog/102089.yaml new file mode 100644 index 0000000000000..9f33c0648d09f --- /dev/null +++ b/docs/changelog/102089.yaml @@ -0,0 +1,5 @@ +pr: 102089 +summary: Add prefix strings option to trained models +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/102114.yaml b/docs/changelog/102114.yaml new file mode 100644 index 0000000000000..a08389da0351b --- /dev/null +++ 
b/docs/changelog/102114.yaml @@ -0,0 +1,6 @@ +pr: 102114 +summary: Fix double-completion in `SecurityUsageTransportAction` +area: Security +type: bug +issues: + - 102111 diff --git a/docs/changelog/102138.yaml b/docs/changelog/102138.yaml new file mode 100644 index 0000000000000..3819e3201150e --- /dev/null +++ b/docs/changelog/102138.yaml @@ -0,0 +1,5 @@ +pr: 102138 +summary: Skip shards that don't match the source query during checkpointing +area: Transform +type: enhancement +issues: [] diff --git a/docs/changelog/102140.yaml b/docs/changelog/102140.yaml new file mode 100644 index 0000000000000..0f086649b9710 --- /dev/null +++ b/docs/changelog/102140.yaml @@ -0,0 +1,6 @@ +pr: 102140 +summary: Collect data tiers usage stats more efficiently +area: ILM+SLM +type: bug +issues: + - 100230 \ No newline at end of file diff --git a/docs/changelog/102151.yaml b/docs/changelog/102151.yaml new file mode 100644 index 0000000000000..652ae555af97d --- /dev/null +++ b/docs/changelog/102151.yaml @@ -0,0 +1,5 @@ +pr: 102151 +summary: Default `run_ml_inference` should be true +area: Application +type: bug +issues: [] diff --git a/docs/changelog/102165.yaml b/docs/changelog/102165.yaml new file mode 100644 index 0000000000000..e1c4c76f1f6ff --- /dev/null +++ b/docs/changelog/102165.yaml @@ -0,0 +1,6 @@ +pr: 102165 +summary: Fix planning of duplicate aggs +area: ES|QL +type: bug +issues: + - 102083 diff --git a/docs/changelog/102172.yaml b/docs/changelog/102172.yaml new file mode 100644 index 0000000000000..485c2c4327e11 --- /dev/null +++ b/docs/changelog/102172.yaml @@ -0,0 +1,5 @@ +pr: 102172 +summary: Adjust Histogram's bucket accounting to be iterative +area: Aggregations +type: bug +issues: [] diff --git a/docs/changelog/102183.yaml b/docs/changelog/102183.yaml new file mode 100644 index 0000000000000..3daa1418ba5d0 --- /dev/null +++ b/docs/changelog/102183.yaml @@ -0,0 +1,13 @@ +pr: 102183 +summary: "[ES|QL] pow function always returns double" +area: ES|QL +type: "breaking" +issues: + - 99055 +breaking: + title: "[ES|QL] pow function always returns double" + area: REST API + details: "In ES|QL, the pow function no longer returns the type of its inputs, instead\ + \ always returning a double." + impact: Low. Most queries should continue to function with the change.
+ notable: false diff --git a/docs/changelog/102184.yaml b/docs/changelog/102184.yaml new file mode 100644 index 0000000000000..ba4d045b6b0aa --- /dev/null +++ b/docs/changelog/102184.yaml @@ -0,0 +1,5 @@ +pr: 102184 +summary: Track ESQL enrich memory +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/102188.yaml b/docs/changelog/102188.yaml new file mode 100644 index 0000000000000..595a8395fab5c --- /dev/null +++ b/docs/changelog/102188.yaml @@ -0,0 +1,5 @@ +pr: 102188 +summary: Track blocks in `AsyncOperator` +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/102190.yaml b/docs/changelog/102190.yaml new file mode 100644 index 0000000000000..cd04e041fca5e --- /dev/null +++ b/docs/changelog/102190.yaml @@ -0,0 +1,5 @@ +pr: 102190 +summary: Track pages in ESQL enrich request/response +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/102192.yaml b/docs/changelog/102192.yaml new file mode 100644 index 0000000000000..531aa943c9e36 --- /dev/null +++ b/docs/changelog/102192.yaml @@ -0,0 +1,5 @@ +pr: 102192 +summary: "ESQL: Load more than one field at once" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/102193.yaml b/docs/changelog/102193.yaml new file mode 100644 index 0000000000000..4d64493602ff2 --- /dev/null +++ b/docs/changelog/102193.yaml @@ -0,0 +1,5 @@ +pr: 102193 +summary: Fix cache invalidation on privilege modification +area: Authorization +type: bug +issues: [] diff --git a/docs/changelog/102208.yaml b/docs/changelog/102208.yaml new file mode 100644 index 0000000000000..b566a85753d82 --- /dev/null +++ b/docs/changelog/102208.yaml @@ -0,0 +1,5 @@ +pr: 102208 +summary: Add static node settings to set default values for max merged segment sizes +area: Engine +type: enhancement +issues: [] diff --git a/docs/changelog/102220.yaml b/docs/changelog/102220.yaml new file mode 100644 index 0000000000000..d24dab1f91b31 --- /dev/null +++ b/docs/changelog/102220.yaml @@ -0,0 +1,5 @@ +pr: 102220 +summary: Upgrade xmlsec to 2.3.4 +area: Security +type: enhancement +issues: [] diff --git a/docs/changelog/102230.yaml b/docs/changelog/102230.yaml new file mode 100644 index 0000000000000..20e8d8d1f10a6 --- /dev/null +++ b/docs/changelog/102230.yaml @@ -0,0 +1,6 @@ +pr: 102230 +summary: Set region for the STS client via privileged calls in AWS SDK +area: Snapshot/Restore +type: bug +issues: + - 102173 diff --git a/docs/changelog/102240.yaml b/docs/changelog/102240.yaml new file mode 100644 index 0000000000000..5df0046ee92fc --- /dev/null +++ b/docs/changelog/102240.yaml @@ -0,0 +1,5 @@ +pr: 102240 +summary: Exclude stack traces from transform audit messages and health +area: Transform +type: bug +issues: [] diff --git a/docs/changelog/102244.yaml b/docs/changelog/102244.yaml new file mode 100644 index 0000000000000..3b160e033b57e --- /dev/null +++ b/docs/changelog/102244.yaml @@ -0,0 +1,5 @@ +pr: 102244 +summary: Expose reconciliation metrics via APM +area: Allocation +type: enhancement +issues: [] diff --git a/docs/changelog/102245.yaml b/docs/changelog/102245.yaml new file mode 100644 index 0000000000000..387540d96290c --- /dev/null +++ b/docs/changelog/102245.yaml @@ -0,0 +1,5 @@ +pr: 102245 +summary: Add non-green indicator names to `HealthPeriodicLogger` message +area: Health +type: enhancement +issues: [] diff --git a/docs/changelog/102250.yaml b/docs/changelog/102250.yaml new file mode 100644 index 0000000000000..755341d9a3a64 --- /dev/null +++ b/docs/changelog/102250.yaml @@ -0,0 +1,6 @@ +pr: 102250 +summary: 
"[ILM] Fix downsample to skip already downsampled indices" +area: ILM+SLM +type: bug +issues: + - 102249 diff --git a/docs/changelog/102259.yaml b/docs/changelog/102259.yaml new file mode 100644 index 0000000000000..3d8a1c6381f6d --- /dev/null +++ b/docs/changelog/102259.yaml @@ -0,0 +1,5 @@ +pr: 102259 +summary: "[Usage API] Count all the data streams that have lifecycle" +area: Data streams +type: bug +issues: [] diff --git a/docs/changelog/102273.yaml b/docs/changelog/102273.yaml new file mode 100644 index 0000000000000..78ecc8b2d2734 --- /dev/null +++ b/docs/changelog/102273.yaml @@ -0,0 +1,5 @@ +pr: 102273 +summary: Improve analyzer reload log message +area: Mapping +type: enhancement +issues: [] diff --git a/docs/changelog/102281.yaml b/docs/changelog/102281.yaml new file mode 100644 index 0000000000000..ac6c17591e013 --- /dev/null +++ b/docs/changelog/102281.yaml @@ -0,0 +1,5 @@ +pr: 102281 +summary: Improve failure handling in `ContinuousComputation` +area: Allocation +type: bug +issues: [] diff --git a/docs/changelog/102282.yaml b/docs/changelog/102282.yaml new file mode 100644 index 0000000000000..4860d70f99ccc --- /dev/null +++ b/docs/changelog/102282.yaml @@ -0,0 +1,6 @@ +pr: 102282 +summary: "ES|QL: Fix drop of renamed grouping" +area: ES|QL +type: bug +issues: + - 102121 diff --git a/docs/changelog/102292.yaml b/docs/changelog/102292.yaml new file mode 100644 index 0000000000000..953c3ffdf6150 --- /dev/null +++ b/docs/changelog/102292.yaml @@ -0,0 +1,5 @@ +pr: 102292 +summary: Consider duplicate stacktraces in custom index +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/102311.yaml b/docs/changelog/102311.yaml new file mode 100644 index 0000000000000..bb1769527fdd4 --- /dev/null +++ b/docs/changelog/102311.yaml @@ -0,0 +1,5 @@ +pr: 102311 +summary: Upgrade reactor netty http version +area: Snapshot/Restore +type: upgrade +issues: [] diff --git a/docs/changelog/102317.yaml b/docs/changelog/102317.yaml new file mode 100644 index 0000000000000..89b2ae5432101 --- /dev/null +++ b/docs/changelog/102317.yaml @@ -0,0 +1,6 @@ +pr: 102317 +summary: "ESQL: Fix single value query" +area: ES|QL +type: bug +issues: + - 102298 diff --git a/docs/changelog/102350.yaml b/docs/changelog/102350.yaml new file mode 100644 index 0000000000000..00a311c5d99f8 --- /dev/null +++ b/docs/changelog/102350.yaml @@ -0,0 +1,6 @@ +pr: 102350 +summary: "ESQL: Fix rare bug with empty string" +area: ES|QL +type: bug +issues: + - 101969 diff --git a/docs/changelog/102379.yaml b/docs/changelog/102379.yaml new file mode 100644 index 0000000000000..0773b137779a5 --- /dev/null +++ b/docs/changelog/102379.yaml @@ -0,0 +1,6 @@ +pr: 102379 +summary: Pass source query to `_field_caps` (as `index_filter`) when deducing destination index mappings for better + performance +area: Transform +type: enhancement +issues: [] diff --git a/docs/changelog/102388.yaml b/docs/changelog/102388.yaml new file mode 100644 index 0000000000000..3e65e46949bda --- /dev/null +++ b/docs/changelog/102388.yaml @@ -0,0 +1,6 @@ +pr: 102388 +summary: Add support for `index_filter` to open pit +area: Search +type: enhancement +issues: + - 99740 diff --git a/docs/changelog/102391.yaml b/docs/changelog/102391.yaml new file mode 100644 index 0000000000000..5fcbb9e6d2858 --- /dev/null +++ b/docs/changelog/102391.yaml @@ -0,0 +1,5 @@ +pr: 102391 +summary: "ESQL: Support the `_source` metadata field" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/102396.yaml b/docs/changelog/102396.yaml new file mode 
100644 index 0000000000000..9ea53ca5b6840 --- /dev/null +++ b/docs/changelog/102396.yaml @@ -0,0 +1,5 @@ +pr: 102396 +summary: Add more logging to the real memory circuit breaker and lower minimum interval +area: "Infra/Circuit Breakers" +type: bug +issues: [] diff --git a/docs/changelog/102399.yaml b/docs/changelog/102399.yaml new file mode 100644 index 0000000000000..7a4e1ff7ddab6 --- /dev/null +++ b/docs/changelog/102399.yaml @@ -0,0 +1,6 @@ +pr: 102399 +summary: "ES|QL: Fix layout management for Project" +area: ES|QL +type: bug +issues: + - 102120 diff --git a/docs/changelog/102434.yaml b/docs/changelog/102434.yaml new file mode 100644 index 0000000000000..ab6aa886c13b1 --- /dev/null +++ b/docs/changelog/102434.yaml @@ -0,0 +1,5 @@ +pr: 102434 +summary: "ESQL: Short circuit loading empty doc values" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/102447.yaml b/docs/changelog/102447.yaml new file mode 100644 index 0000000000000..76823153670bd --- /dev/null +++ b/docs/changelog/102447.yaml @@ -0,0 +1,6 @@ +pr: 102447 +summary: Pass transform source query as `index_filter` to `open_point_in_time` request +area: Transform +type: enhancement +issues: + - 101049 diff --git a/docs/changelog/102456.yaml b/docs/changelog/102456.yaml new file mode 100644 index 0000000000000..6ef3b8f16f53c --- /dev/null +++ b/docs/changelog/102456.yaml @@ -0,0 +1,6 @@ +pr: 102456 +summary: Switch logs data streams to search all fields by default +area: Data streams +type: enhancement +issues: + - 99872 diff --git a/docs/changelog/102461.yaml b/docs/changelog/102461.yaml new file mode 100644 index 0000000000000..c0c07554ed21f --- /dev/null +++ b/docs/changelog/102461.yaml @@ -0,0 +1,5 @@ +pr: 102461 +summary: Enable concurrency for scripted metric agg +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/102462.yaml b/docs/changelog/102462.yaml new file mode 100644 index 0000000000000..d44ccc4cbbc5c --- /dev/null +++ b/docs/changelog/102462.yaml @@ -0,0 +1,5 @@ +pr: 102462 +summary: Check the real memory circuit breaker when building global ordinals +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/102467.yaml b/docs/changelog/102467.yaml new file mode 100644 index 0000000000000..580788a5aa2f9 --- /dev/null +++ b/docs/changelog/102467.yaml @@ -0,0 +1,6 @@ +pr: 102467 +summary: Fix dense_vector cluster stats indexed_vector_dim_min/max values +area: Mapping +type: bug +issues: + - 102416 diff --git a/docs/changelog/102472.yaml b/docs/changelog/102472.yaml new file mode 100644 index 0000000000000..b0f5bfc714643 --- /dev/null +++ b/docs/changelog/102472.yaml @@ -0,0 +1,5 @@ +pr: 102472 +summary: Expose the `invalidation` field in Get/Query `ApiKey` APIs +area: Security +type: enhancement +issues: [ ] diff --git a/docs/changelog/102476.yaml b/docs/changelog/102476.yaml new file mode 100644 index 0000000000000..a53a20ecfec20 --- /dev/null +++ b/docs/changelog/102476.yaml @@ -0,0 +1,5 @@ +pr: 102476 +summary: Unwrap `ExecutionException` when loading from cache in `AbstractIndexOrdinalsFieldData` +area: Aggregations +type: bug +issues: [] diff --git a/docs/changelog/102490.yaml b/docs/changelog/102490.yaml new file mode 100644 index 0000000000000..8ff554ab0f0fe --- /dev/null +++ b/docs/changelog/102490.yaml @@ -0,0 +1,6 @@ +pr: 102490 +summary: "ESQL: Load text field from parent keyword field" +area: ES|QL +type: enhancement +issues: + - 102473 diff --git a/docs/changelog/102492.yaml b/docs/changelog/102492.yaml new file mode 100644 index 
0000000000000..943d82873e0b6 --- /dev/null +++ b/docs/changelog/102492.yaml @@ -0,0 +1,5 @@ +pr: 102492 +summary: Ensure datafeed previews with no start or end time don't search the cold or frozen tiers +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/102495.yaml b/docs/changelog/102495.yaml new file mode 100644 index 0000000000000..77ae42f7eebcb --- /dev/null +++ b/docs/changelog/102495.yaml @@ -0,0 +1,6 @@ +pr: 102495 +summary: "Add support for configuring proxy scheme in S3 client settings and EC2 discovery plugin" +area: Distributed +type: enhancement +issues: + - 101873 diff --git a/docs/changelog/102510.yaml b/docs/changelog/102510.yaml new file mode 100644 index 0000000000000..2b654b5c85929 --- /dev/null +++ b/docs/changelog/102510.yaml @@ -0,0 +1,7 @@ +pr: 102510 +summary: "ESQL: Make fieldcaps calls lighter" +area: ES|QL +type: enhancement +issues: + - 101763 + - 102393 diff --git a/docs/changelog/102511.yaml b/docs/changelog/102511.yaml new file mode 100644 index 0000000000000..cf80ca03e197f --- /dev/null +++ b/docs/changelog/102511.yaml @@ -0,0 +1,5 @@ +pr: 102511 +summary: Trigger parent circuit breaker when building scorers in filters aggregation +area: Aggregations +type: bug +issues: [] diff --git a/docs/changelog/102512.yaml b/docs/changelog/102512.yaml new file mode 100644 index 0000000000000..d4bc765ecaf5f --- /dev/null +++ b/docs/changelog/102512.yaml @@ -0,0 +1,6 @@ +pr: 102512 +summary: Implement exponential backoff for transform state persistence retrying +area: Transform +type: enhancement +issues: + - 102528 diff --git a/docs/changelog/102562.yaml b/docs/changelog/102562.yaml new file mode 100644 index 0000000000000..a4b4f5a095118 --- /dev/null +++ b/docs/changelog/102562.yaml @@ -0,0 +1,5 @@ +pr: 102562 +summary: Track blocks of intermediate state of aggs +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/102570.yaml b/docs/changelog/102570.yaml new file mode 100644 index 0000000000000..2d3f878dbbb27 --- /dev/null +++ b/docs/changelog/102570.yaml @@ -0,0 +1,5 @@ +pr: 102570 +summary: Added `beat.stats.libbeat.pipeline.queue.max_events` +area: Monitoring +type: enhancement +issues: [] diff --git a/docs/changelog/102571.yaml b/docs/changelog/102571.yaml new file mode 100644 index 0000000000000..25272408161db --- /dev/null +++ b/docs/changelog/102571.yaml @@ -0,0 +1,5 @@ +pr: 102571 +summary: Allow executing multiple periodic flushes while they are being made durable +area: Store +type: enhancement +issues: [] diff --git a/docs/changelog/102580.yaml b/docs/changelog/102580.yaml new file mode 100644 index 0000000000000..50d315efd7071 --- /dev/null +++ b/docs/changelog/102580.yaml @@ -0,0 +1,6 @@ +pr: 102580 +summary: Fix DISSECT with empty patterns +area: ES|QL +type: bug +issues: + - 102577 diff --git a/docs/changelog/102598.yaml b/docs/changelog/102598.yaml new file mode 100644 index 0000000000000..c32519acdf6d1 --- /dev/null +++ b/docs/changelog/102598.yaml @@ -0,0 +1,5 @@ +pr: 102598 +summary: Add apm api for asynchronous counters (always increasing) +area: Infra/Core +type: enhancement +issues: [] diff --git a/docs/changelog/102599.yaml b/docs/changelog/102599.yaml new file mode 100644 index 0000000000000..74e3d89421463 --- /dev/null +++ b/docs/changelog/102599.yaml @@ -0,0 +1,5 @@ +pr: 102599 +summary: "Recreate the Elasticsearch private temporary directory if it doesn't exist when an ML job is opened" +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/102602.yaml b/docs/changelog/102602.yaml 
new file mode 100644 index 0000000000000..dd01eaa98b214 --- /dev/null +++ b/docs/changelog/102602.yaml @@ -0,0 +1,5 @@ +pr: 102602 +summary: Consider search context missing exceptions as recoverable +area: Transform +type: bug +issues: [] diff --git a/docs/changelog/102612.yaml b/docs/changelog/102612.yaml new file mode 100644 index 0000000000000..60808ae72801a --- /dev/null +++ b/docs/changelog/102612.yaml @@ -0,0 +1,5 @@ +pr: 102612 +summary: Track blocks when hashing single multi-valued field +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/102636.yaml b/docs/changelog/102636.yaml new file mode 100644 index 0000000000000..8b32e0568b0fb --- /dev/null +++ b/docs/changelog/102636.yaml @@ -0,0 +1,5 @@ +pr: 102636 +summary: Revert non-semantic `NodeInfo` +area: Infra/Core +type: regression +issues: [] diff --git a/docs/changelog/102637.yaml b/docs/changelog/102637.yaml new file mode 100644 index 0000000000000..4d5d689934bd6 --- /dev/null +++ b/docs/changelog/102637.yaml @@ -0,0 +1,5 @@ +pr: 102637 +summary: Improve stability of spike and dip detection for the change point aggregation +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/102644.yaml b/docs/changelog/102644.yaml new file mode 100644 index 0000000000000..17c5cbebed7cc --- /dev/null +++ b/docs/changelog/102644.yaml @@ -0,0 +1,5 @@ +pr: 102644 +summary: Disable parallelism for composite agg against high cardinality fields +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/102673.yaml b/docs/changelog/102673.yaml new file mode 100644 index 0000000000000..16546edb3cf3c --- /dev/null +++ b/docs/changelog/102673.yaml @@ -0,0 +1,5 @@ +pr: 102673 +summary: "ESQL: Share constant null Blocks" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/102680.yaml b/docs/changelog/102680.yaml new file mode 100644 index 0000000000000..8b32c5029ea2a --- /dev/null +++ b/docs/changelog/102680.yaml @@ -0,0 +1,5 @@ +pr: 102680 +summary: Make `api_key.delete.interval` a dynamic setting +area: Security +type: enhancement +issues: [] diff --git a/docs/changelog/102682.yaml b/docs/changelog/102682.yaml new file mode 100644 index 0000000000000..190ff3df5a7f6 --- /dev/null +++ b/docs/changelog/102682.yaml @@ -0,0 +1,5 @@ +pr: 102682 +summary: Introduce fielddata cache ttl +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/102710.yaml b/docs/changelog/102710.yaml new file mode 100644 index 0000000000000..ee805c70180a0 --- /dev/null +++ b/docs/changelog/102710.yaml @@ -0,0 +1,5 @@ +pr: 102710 +summary: Enable concurrency for multi terms agg +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/102715.yaml b/docs/changelog/102715.yaml new file mode 100644 index 0000000000000..7311db66ce151 --- /dev/null +++ b/docs/changelog/102715.yaml @@ -0,0 +1,6 @@ +pr: 102715 +summary: Fix leaking blocks in TopN +area: ES|QL +type: bug +issues: + - 102646 diff --git a/docs/changelog/102716.yaml b/docs/changelog/102716.yaml new file mode 100644 index 0000000000000..39317fdb38415 --- /dev/null +++ b/docs/changelog/102716.yaml @@ -0,0 +1,5 @@ +pr: 102716 +summary: Fix leaking blocks in `BlockUtils` +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/102727.yaml b/docs/changelog/102727.yaml new file mode 100644 index 0000000000000..4f4d4fbf48899 --- /dev/null +++ b/docs/changelog/102727.yaml @@ -0,0 +1,5 @@ +pr: 102727 +summary: "ESQL: Load stored fields sequentially" +area: ES|QL +type: enhancement +issues: [] diff 
--git a/docs/changelog/102734.yaml b/docs/changelog/102734.yaml new file mode 100644 index 0000000000000..c27846d7d8478 --- /dev/null +++ b/docs/changelog/102734.yaml @@ -0,0 +1,5 @@ +pr: 102734 +summary: Allow match field in enrich fields +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/102735.yaml b/docs/changelog/102735.yaml new file mode 100644 index 0000000000000..4726e08d1f314 --- /dev/null +++ b/docs/changelog/102735.yaml @@ -0,0 +1,5 @@ +pr: 102735 +summary: "[Profiling] Report in status API if docs exist" +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/102740.yaml b/docs/changelog/102740.yaml new file mode 100644 index 0000000000000..b7fc10eb19ddb --- /dev/null +++ b/docs/changelog/102740.yaml @@ -0,0 +1,5 @@ +pr: 102740 +summary: "[Profiling] Notify early about task cancellation" +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/102767.yaml b/docs/changelog/102767.yaml new file mode 100644 index 0000000000000..cf1edeeb51265 --- /dev/null +++ b/docs/changelog/102767.yaml @@ -0,0 +1,6 @@ +pr: 102767 +summary: "ESQL: remove `time_zone` request parameter" +area: ES|QL +type: bug +issues: + - 102159 diff --git a/docs/changelog/99134.yaml b/docs/changelog/99134.yaml new file mode 100644 index 0000000000000..10156b9b30066 --- /dev/null +++ b/docs/changelog/99134.yaml @@ -0,0 +1,5 @@ +pr: 99134 +summary: Add ability to create a data stream failure store +area: Data streams +type: feature +issues: [] diff --git a/docs/plugins/discovery-ec2.asciidoc b/docs/plugins/discovery-ec2.asciidoc index 3947ed71ea9ae..44acba4752aaa 100644 --- a/docs/plugins/discovery-ec2.asciidoc +++ b/docs/plugins/discovery-ec2.asciidoc @@ -97,6 +97,11 @@ The available settings for the EC2 discovery plugin are as follows. this setting determines the port to use to connect to the proxy. Defaults to `80`. +`discovery.ec2.proxy.scheme`:: + + The scheme to use when connecting to the EC2 service endpoint through the proxy specified + in `discovery.ec2.proxy.host`. Valid values are `http` or `https`. Defaults to `http`. + `discovery.ec2.proxy.username` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: When the address of an HTTP proxy is given in `discovery.ec2.proxy.host`, diff --git a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc index 2bedcd4698b42..d7d837b2f8364 100644 --- a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc @@ -127,8 +127,7 @@ init_script:: Executed prior to any collection of documents. Allows the ag + In the above example, the `init_script` creates an array `transactions` in the `state` object. -map_script:: Executed once per document collected. This is a required script. If no combine_script is specified, the resulting state - needs to be stored in the `state` object. +map_script:: Executed once per document collected. This is a required script. + In the above example, the `map_script` checks the value of the type field. If the value is 'sale' the value of the amount field is added to the transactions array.
If the value of the type field is not 'sale' the negated value of the amount field is added diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index 45cb725492f07..5273537389e3d 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -36,6 +36,7 @@ following types are supported: <>, <>, <>, +<>, <>, <>, <>, @@ -64,8 +65,8 @@ The following analyzers support setting custom `stem_exclusion` list: `arabic`, `armenian`, `basque`, `bengali`, `bulgarian`, `catalan`, `czech`, `dutch`, `english`, `finnish`, `french`, `galician`, `german`, `hindi`, `hungarian`, `indonesian`, `irish`, `italian`, `latvian`, -`lithuanian`, `norwegian`, `portuguese`, `romanian`, `russian`, `sorani`, -`spanish`, `swedish`, `turkish`. +`lithuanian`, `norwegian`, `portuguese`, `romanian`, `russian`, `serbian`, +`sorani`, `spanish`, `swedish`, `turkish`. ==== Reimplementing language analyzers @@ -1588,6 +1589,55 @@ PUT /russian_example <2> This filter should be removed unless there are words which should be excluded from stemming. +[[serbian-analyzer]] +===== `serbian` analyzer + +The `serbian` analyzer could be reimplemented as a `custom` analyzer as follows: + +[source,console] +---------------------------------------------------- +PUT /serbian_example +{ + "settings": { + "analysis": { + "filter": { + "serbian_stop": { + "type": "stop", + "stopwords": "_serbian_" <1> + }, + "serbian_keywords": { + "type": "keyword_marker", + "keywords": ["пример"] <2> + }, + "serbian_stemmer": { + "type": "stemmer", + "language": "serbian" + } + }, + "analyzer": { + "rebuilt_serbian": { + "tokenizer": "standard", + "filter": [ + "lowercase", + "serbian_stop", + "serbian_keywords", + "serbian_stemmer", + "serbian_normalization" + ] + } + } + } + } +} +---------------------------------------------------- +// TEST[s/"serbian_keywords",//] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: serbian_example, first: serbian, second: rebuilt_serbian}\nendyaml\n/] + +<1> The default stopwords can be overridden with the `stopwords` +or `stopwords_path` parameters. +<2> This filter should be removed unless there are words which should +be excluded from stemming. + [[sorani-analyzer]] ===== `sorani` analyzer diff --git a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc index a76bc6f6c5254..57e402988cc5a 100644 --- a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc @@ -9,7 +9,7 @@ A filter that stems words using a Snowball-generated stemmer. The values: `Arabic`, `Armenian`, `Basque`, `Catalan`, `Danish`, `Dutch`, `English`, `Estonian`, `Finnish`, `French`, `German`, `German2`, `Hungarian`, `Italian`, `Irish`, `Kp`, `Lithuanian`, `Lovins`, `Norwegian`, `Porter`, `Portuguese`, `Romanian`, -`Russian`, `Spanish`, `Swedish`, `Turkish`. +`Russian`, `Serbian`, `Spanish`, `Swedish`, `Turkish`. 
For example: diff --git a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc index 162164e12872d..b8d883b057823 100644 --- a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc @@ -230,6 +230,9 @@ Russian:: https://snowballstem.org/algorithms/russian/stemmer.html[*`russian`*], https://doc.rero.ch/lm.php?url=1000%2C43%2C4%2C20091209094227-CA%2FDolamic_Ljiljana_-_Indexing_and_Searching_Strategies_for_the_Russian_20091209.pdf[`light_russian`] +Serbian:: +https://snowballstem.org/algorithms/serbian/stemmer.html[*`serbian`*] + Spanish:: https://www.ercim.eu/publication/ws-proceedings/CLEF2/savoy.pdf[*`light_spanish`*], https://snowballstem.org/algorithms/spanish/stemmer.html[`spanish`] diff --git a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc index 12e0d76f9901b..abba633b643dc 100644 --- a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc @@ -356,6 +356,10 @@ parameter and a link to their predefined stop words in Lucene. `_russian_`:: {lucene-stop-word-link}/snowball/russian_stop.txt[Russian stop words] +[[serbian-stop-words]] +`_serbian_`:: +{lucene-stop-word-link}/sr/stopwords.txt[Serbian stop words] + [[sorani-stop-words]] `_sorani_`:: {lucene-stop-word-link}/ckb/stopwords.txt[Sorani stop words] diff --git a/docs/reference/data-streams/change-mappings-and-settings.asciidoc b/docs/reference/data-streams/change-mappings-and-settings.asciidoc index 3922ef018a713..86d72cf52c9e9 100644 --- a/docs/reference/data-streams/change-mappings-and-settings.asciidoc +++ b/docs/reference/data-streams/change-mappings-and-settings.asciidoc @@ -601,7 +601,7 @@ stream's oldest backing index. // TESTRESPONSE[s/"index_uuid": "_eEfRrFHS9OyhqWntkgHAQ"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-my-data-stream-2099.03.07-000001"/"index_name": $body.data_streams.0.indices.0.index_name/] // TESTRESPONSE[s/"index_name": ".ds-my-data-stream-2099.03.08-000002"/"index_name": $body.data_streams.0.indices.1.index_name/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW"/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_indices":[],"failure_store":false/] <1> First item in the `indices` array for `my-data-stream`. This item contains information about the stream's oldest backing index, @@ -704,4 +704,4 @@ Use the <> to update an existing data stream's aliases. Changing an existing data stream's aliases in its index pattern has no effect. 
-include::../alias.asciidoc[tag=alias-multiple-actions-example] \ No newline at end of file +include::../alias.asciidoc[tag=alias-multiple-actions-example] diff --git a/docs/reference/data-streams/data-stream-apis.asciidoc b/docs/reference/data-streams/data-stream-apis.asciidoc index d3580ca4448a7..3c2e703d264ff 100644 --- a/docs/reference/data-streams/data-stream-apis.asciidoc +++ b/docs/reference/data-streams/data-stream-apis.asciidoc @@ -25,6 +25,8 @@ preview:[] preview:[] * <> preview:[] +* <> +preview:[] The following API is available for <>: @@ -55,4 +57,6 @@ include::{es-repo-dir}/data-streams/lifecycle/apis/delete-lifecycle.asciidoc[] include::{es-repo-dir}/data-streams/lifecycle/apis/explain-lifecycle.asciidoc[] +include::{es-repo-dir}/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc[] + include::{es-repo-dir}/indices/downsample-data-stream.asciidoc[] diff --git a/docs/reference/data-streams/downsampling-manual.asciidoc b/docs/reference/data-streams/downsampling-manual.asciidoc index b7d46b6301884..5bdfaf428d169 100644 --- a/docs/reference/data-streams/downsampling-manual.asciidoc +++ b/docs/reference/data-streams/downsampling-manual.asciidoc @@ -17,7 +17,7 @@ DELETE _ingest/pipeline/my-timestamp-pipeline The recommended way to downsample a time series data stream (TSDS) is <>. However, if you're not using ILM, you can downsample a TSDS manually. This guide shows you -how, using typical Kubernetes cluster monitoring data. +how, using typical Kubernetes cluster monitoring data. To test out manual downsampling, follow these steps: @@ -32,13 +32,13 @@ To test out manual downsampling, follow these steps: ==== Prerequisites * Refer to the <>. -* It is not possible to downsample a data stream directly, nor +* It is not possible to downsample a data stream directly, nor multiple indices at once. It's only possible to downsample one time series index (TSDS backing index). * In order to downsample an index, it needs to be read-only. For a TSDS write index, this means it needs to be rolled over and made read-only first. * Downsampling uses UTC timestamps. -* Downsampling needs at least one metric field to exist in the time series +* Downsampling needs at least one metric field to exist in the time series index. [discrete] @@ -51,8 +51,8 @@ First, you'll create a TSDS. For simplicity, in the time series mapping all be used. The `time_series_metric` values determine the kind of statistical representations that are used during downsampling. -The index template includes a set of static -<>: `host`, `namespace`, +The index template includes a set of static +<>: `host`, `namespace`, `node`, and `pod`. The time series dimensions are not changed by the downsampling process. @@ -388,6 +388,7 @@ This returns: // TESTRESPONSE[s/"ltOJGmqgTVm4T-Buoe7Acg"/$body.data_streams.0.indices.0.index_uuid/] // TESTRESPONSE[s/"2023-07-26T09:26:42.000Z"/$body.data_streams.0.time_series.temporal_ranges.0.start/] // TESTRESPONSE[s/"2023-07-26T13:26:42.000Z"/$body.data_streams.0.time_series.temporal_ranges.0.end/] +// TESTRESPONSE[s/"replicated": false/"replicated": false,"failure_indices":[],"failure_store":false/] <1> The backing index for this data stream. 
Before a backing index can be downsampled, the TSDS needs to be rolled over and diff --git a/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc b/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc new file mode 100644 index 0000000000000..6fa82dc2a810c --- /dev/null +++ b/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc @@ -0,0 +1,93 @@ +[[data-streams-get-lifecycle-stats]] +=== Get data stream lifecycle stats +++++ +Get Data Stream Lifecycle Stats +++++ + +preview::[] + +Gets stats about the execution of the data stream lifecycle. + +[[get-lifecycle-stats-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the `monitor` or +`manage` <> to use this API. + +[[data-streams-get-lifecycle-stats-request]] +==== {api-request-title} + +`GET _lifecycle/stats` + +[[data-streams-get-lifecycle-stats-desc]] +==== {api-description-title} + +Gets stats about the execution of the data stream lifecycle. The data stream level stats include only stats about data streams +managed by the data stream lifecycle. + +[[get-lifecycle-stats-api-response-body]] +==== {api-response-body-title} + +`last_run_duration_in_millis`:: +(Optional, long) +The duration of the last data stream lifecycle execution. +`time_between_starts_in_millis`:: +(Optional, long) +The time passed between the start of the last two data stream lifecycle executions. This should be approximately equal to +<>. +`data_streams_count`:: +(integer) +The count of data streams currently being managed by the data stream lifecycle. +`data_streams`:: +(array of objects) +Contains information about the retrieved data stream lifecycles. ++ +.Properties of objects in `data_streams` +[%collapsible%open] +==== +`name`:: +(string) +The name of the data stream. +`backing_indices_in_total`:: +(integer) +The count of the backing indices of this data stream that are managed by the data stream lifecycle. +`backing_indices_in_error`:: +(integer) +The count of the backing indices of this data stream that are managed by the data stream lifecycle and have encountered an error.
+==== + +[[data-streams-get-lifecycle-stats-example]] +==== {api-examples-title} + +Let's retrieve the data stream lifecycle stats of a cluster that has already executed the lifecycle more than once: + +[source,console] +-------------------------------------------------- +GET _lifecycle/stats?human&pretty +-------------------------------------------------- +// TEST[skip:this is for demonstration purposes only, we cannot ensure that DSL has run] + +The response will look like the following: + +[source,console-result] +-------------------------------------------------- +{ + "last_run_duration_in_millis": 2, + "last_run_duration": "2ms", + "time_between_starts_in_millis": 9998, + "time_between_starts": "9.99s", + "data_streams_count": 2, + "data_streams": [ + { + "name": "my-data-stream", + "backing_indices_in_total": 2, + "backing_indices_in_error": 0 + }, + { + "name": "my-other-stream", + "backing_indices_in_total": 2, + "backing_indices_in_error": 1 + } + ] +} +-------------------------------------------------- \ No newline at end of file diff --git a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc index a6c13e5aae708..aa598b010badc 100644 --- a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc +++ b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc @@ -1,20 +1,36 @@ [role="xpack"] [[tutorial-migrate-data-stream-from-ilm-to-dsl]] -=== Tutorial: Migrate ILM managed data stream to Data stream lifecycle +=== Tutorial: Migrate ILM managed data stream to data stream lifecycle preview::[] -In this tutorial we'll look at migrating an existing data stream from {ilm-init} to -Data stream lifecycle. The existing {ilm-init} managed backing indices will continue +In this tutorial we'll look at migrating an existing data stream from Index Lifecycle Management ({ilm-init}) to +data stream lifecycle. The existing {ilm-init} managed backing indices will continue to be managed by {ilm-init} until they age out and get deleted by {ilm-init}; however, -the new backing indices will be managed by Data stream lifecycle. -This way, a data stream is gradually migrated away from being managed by {ilm-cap} to -being managed by Data stream lifecycle. As we'll see, {ilm-cap} and Data stream lifecycle +the new backing indices will be managed by data stream lifecycle. +This way, a data stream is gradually migrated away from being managed by {ilm-init} to +being managed by data stream lifecycle. As we'll see, {ilm-init} and data stream lifecycle can co-manage a data stream; however, an index can only be managed by one system at a time. -Let's first create a data stream with two backing indices managed by {ilm-cap}. -We first create an {ilm-cap} policy: +[discrete] +[[migrate-dsl-ilm-tldr]] +==== TL;DR +To migrate a data stream from {ilm-init} to data stream lifecycle we'll have to execute +two steps: + +1. Update the index template that's backing the data stream to set <> +to `false`, and to configure data stream lifecycle. +2. Configure the data stream lifecycle for the _existing_ data stream using +the <>. + +For more details see the <> section. + +[discrete] +[[setup-test-data]] +==== Setup ILM managed data stream +Let's first create a data stream with two backing indices managed by {ilm-init}. 
+We first create an {ilm-init} policy: [source,console] ---- @@ -40,7 +56,7 @@ PUT _ilm/policy/pre-dsl-ilm-policy } ---- -And let's create an index template that'll back the data stream and configures {ilm-cap}: +And let's create an index template that'll back the data stream and configures {ilm-init}: [source,console] ---- @@ -77,7 +93,7 @@ POST dsl-data-stream/_rollover ---- // TEST[continued] -We'll use the <> API to inspect the state of +We'll use the <> API to inspect the state of the data stream: [source,console] @@ -87,7 +103,7 @@ GET _data_stream/dsl-data-stream // TEST[continued] Inspecting the response we'll see that both backing indices are managed by {ilm-init} -and that the next generation index will also be managed by {ilm-init}: +and that the next generation index will also be managed by {ilm-init}: [source,console-result] ---- @@ -100,7 +116,7 @@ and that the next generation index will also be managed by {ilm-init}: }, "indices": [ { - "index_name": ".ds-dsl-data-stream-2023.10.19-000001", <1> + "index_name": ".ds-dsl-data-stream-2023.10.19-000001", <1> "index_uuid": "xCEhwsp8Tey0-FLNFYVwSg", "prefer_ilm": true, <2> "ilm_policy": "pre-dsl-ilm-policy", <3> @@ -132,37 +148,40 @@ and that the next generation index will also be managed by {ilm-init}: // TESTRESPONSE[s/"index_uuid": "xCEhwsp8Tey0-FLNFYVwSg"/"index_uuid": $body.data_streams.0.indices.0.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000002"/"index_name": $body.data_streams.0.indices.1.index_name/] // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW"/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_indices":[],"failure_store":false/] <1> The name of the backing index. -<2> For each backing index we display the value of the <> +<2> For each backing index we display the value of the <> configuration which will indicate if {ilm-init} takes precedence over data stream lifecycle in case both systems are configured for an index. <3> The {ilm-init} policy configured for this index. -<4> The system that manages this index (possible values are "Index Lifecycle Management", +<4> The system that manages this index (possible values are "Index Lifecycle Management", "Data stream lifecycle", or "Unmanaged") -<5> The system that will manage the next generation index (the new write index of this -data stream, once the data stream is rolled over). The possible values are +<5> The system that will manage the next generation index (the new write index of this +data stream, once the data stream is rolled over). The possible values are "Index Lifecycle Management", "Data stream lifecycle", or "Unmanaged". <6> The <> value configured in the index template that's backing the data stream. This value will be configured for all the new backing indices. If it's not configured in the index template the backing indices will receive the `true` -default value ({ilm-init} takes precedence over data stream lifecycle by default as it's +default value ({ilm-init} takes precedence over data stream lifecycle by default as it's currently richer in features). 
-<7> The {ilm-init} policy configured in the index template that's backing this data -stream (which will be configured on all the new backing indices, as long as it exists +<7> The {ilm-init} policy configured in the index template that's backing this data +stream (which will be configured on all the new backing indices, as long as it exists in the index template). +[discrete] +[[migrate-from-ilm-to-dsl]] +==== Migrate data stream to data stream lifecycle To migrate the `dsl-data-stream` to data stream lifecycle we'll have to execute two steps: -1. Update the index template that's backing the index template to configure <> +1. Update the index template that's backing the data stream to set <> to `false`, and to configure data stream lifecycle. 2. Configure the data stream lifecycle for the _existing_ `dsl-data-stream` using the <>. IMPORTANT: The data stream lifecycle configuration that's added to the index template, -being a data stream configuration, will only apply to **new** data streams. +being a data stream configuration, will only apply to **new** data streams. Our data stream exists already, so even though we added a data stream lifecycle configuration in the index template it will not be applied to `dsl-data-stream`. @@ -192,13 +211,13 @@ PUT _index_template/dsl-data-stream-template <1> The `prefer_ilm` setting will now be configured on the **new** backing indices (created by rolling over the data stream) such that {ilm-init} does _not_ take -precedence over Data stream lifecycle. +precedence over data stream lifecycle. <2> We're configuring the data stream lifecycle so _new_ data streams will be -managed by Data stream lifecycle. +managed by data stream lifecycle. -We've now made sure that new data streams will be managed by Data stream lifecycle. +We've now made sure that new data streams will be managed by data stream lifecycle. 
-Let's update our existing `dsl-data-stream` and configure Data stream lifecycle: +Let's update our existing `dsl-data-stream` and configure data stream lifecycle: [source,console] ---- @@ -210,7 +229,7 @@ PUT _data_stream/dsl-data-stream/_lifecycle // TEST[continued] We can inspect the data stream to check that the next generation will indeed be -managed by Data stream lifecycle: +managed by data stream lifecycle: [source,console] -------------------------------------------------- @@ -229,10 +248,10 @@ GET _data_stream/dsl-data-stream }, "indices": [ { - "index_name": ".ds-dsl-data-stream-2023.10.19-000001", + "index_name": ".ds-dsl-data-stream-2023.10.19-000001", "index_uuid": "xCEhwsp8Tey0-FLNFYVwSg", - "prefer_ilm": true, - "ilm_policy": "pre-dsl-ilm-policy", + "prefer_ilm": true, + "ilm_policy": "pre-dsl-ilm-policy", "managed_by": "Index Lifecycle Management" <1> }, { @@ -250,7 +269,7 @@ GET _data_stream/dsl-data-stream "enabled": true, "data_retention": "7d" }, - "ilm_policy": "pre-dsl-ilm-policy", + "ilm_policy": "pre-dsl-ilm-policy", "next_generation_managed_by": "Data stream lifecycle", <3> "prefer_ilm": false, <4> "hidden": false, @@ -265,7 +284,7 @@ GET _data_stream/dsl-data-stream // TESTRESPONSE[s/"index_uuid": "xCEhwsp8Tey0-FLNFYVwSg"/"index_uuid": $body.data_streams.0.indices.0.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000002"/"index_name": $body.data_streams.0.indices.1.index_name/] // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW"/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_indices":[],"failure_store":false/] <1> The existing backing index will continue to be managed by {ilm-init} <2> The existing backing index will continue to be managed by {ilm-init} @@ -274,7 +293,7 @@ GET _data_stream/dsl-data-stream and will be configured accordingly for new backing indices. 
We'll now rollover the data stream to see the new generation index being managed by -Data stream lifecycle: +data stream lifecycle: [source,console] ---- @@ -299,11 +318,11 @@ GET _data_stream/dsl-data-stream }, "indices": [ { - "index_name": ".ds-dsl-data-stream-2023.10.19-000001", + "index_name": ".ds-dsl-data-stream-2023.10.19-000001", "index_uuid": "xCEhwsp8Tey0-FLNFYVwSg", - "prefer_ilm": true, - "ilm_policy": "pre-dsl-ilm-policy", - "managed_by": "Index Lifecycle Management" <1> + "prefer_ilm": true, + "ilm_policy": "pre-dsl-ilm-policy", + "managed_by": "Index Lifecycle Management" <1> }, { "index_name": ".ds-dsl-data-stream-2023.10.19-000002", @@ -327,9 +346,9 @@ GET _data_stream/dsl-data-stream "enabled": true, "data_retention": "7d" }, - "ilm_policy": "pre-dsl-ilm-policy", - "next_generation_managed_by": "Data stream lifecycle", - "prefer_ilm": false, + "ilm_policy": "pre-dsl-ilm-policy", + "next_generation_managed_by": "Data stream lifecycle", + "prefer_ilm": false, "hidden": false, "system": false, "allow_custom_routing": false, @@ -344,7 +363,7 @@ GET _data_stream/dsl-data-stream // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000003"/"index_name": $body.data_streams.0.indices.2.index_name/] // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8abcd1"/"index_uuid": $body.data_streams.0.indices.2.index_uuid/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW"/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_indices":[],"failure_store":false/] <1> The backing indices that existed before rollover will continue to be managed by {ilm-init} <2> The backing indices that existed before rollover will continue to be managed by {ilm-init} @@ -352,27 +371,30 @@ GET _data_stream/dsl-data-stream in the index template <4> The new write index is managed by `Data stream lifecycle` -We can easily change this data stream to be managed by {ilm-cap} because we didn't remove -the {ilm-cap} policy when we <>. We can achieve this in two ways: 1. <> from the data streams -2. Disable Data stream lifecycle by configuring the `enabled` flag to `false`. +2. Disable data stream lifecycle by configuring the `enabled` flag to `false`. 
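+For reference, option 1 amounts to a single call to the delete lifecycle API. Here is a
+minimal sketch (for illustration only; in this tutorial we implement option 2 instead,
+so we don't actually run this request):
+
+[source,console]
+----
+DELETE _data_stream/dsl-data-stream/_lifecycle
+----
+// TEST[skip:for illustration only, this tutorial implements option 2 instead]
+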
-Let's implement option 2 and disable the data stream lifecycle: [source,console] ---- PUT _data_stream/dsl-data-stream/_lifecycle { "data_retention": "7d", - "enabled": false <1> + "enabled": false <1> } ---- // TEST[continued] -<1> The `enabled` flag can be ommitted and defaults to `true` however, here we +<1> The `enabled` flag can be omitted and defaults to `true`; however, here we explicitly configure it to `false` Let's check the state of the data stream: @@ -393,23 +415,23 @@ GET _data_stream/dsl-data-stream }, "indices": [ { - "index_name": ".ds-dsl-data-stream-2023.10.19-000001", + "index_name": ".ds-dsl-data-stream-2023.10.19-000001", "index_uuid": "xCEhwsp8Tey0-FLNFYVwSg", - "prefer_ilm": true, - "ilm_policy": "pre-dsl-ilm-policy", - "managed_by": "Index Lifecycle Management" + "prefer_ilm": true, + "ilm_policy": "pre-dsl-ilm-policy", + "managed_by": "Index Lifecycle Management" }, { "index_name": ".ds-dsl-data-stream-2023.10.19-000002", "index_uuid": "PA_JquKGSiKcAKBA8DJ5gw", "prefer_ilm": true, "ilm_policy": "pre-dsl-ilm-policy", - "managed_by": "Index Lifecycle Management" + "managed_by": "Index Lifecycle Management" }, { "index_name": ".ds-dsl-data-stream-2023.10.19-000003", "index_uuid": "PA_JquKGSiKcAKBA8abcd1", - "prefer_ilm": false, + "prefer_ilm": false, "ilm_policy": "pre-dsl-ilm-policy", "managed_by": "Index Lifecycle Management" <1> } @@ -421,9 +443,9 @@ GET _data_stream/dsl-data-stream "enabled": false, <2> "data_retention": "7d" }, - "ilm_policy": "pre-dsl-ilm-policy", - "next_generation_managed_by": "Index Lifecycle Management", <3> - "prefer_ilm": false, + "ilm_policy": "pre-dsl-ilm-policy", + "next_generation_managed_by": "Index Lifecycle Management", <3> + "prefer_ilm": false, "hidden": false, "system": false, "allow_custom_routing": false, @@ -438,14 +460,14 @@ GET _data_stream/dsl-data-stream // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000003"/"index_name": $body.data_streams.0.indices.2.index_name/] // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8abcd1"/"index_uuid": $body.data_streams.0.indices.2.index_uuid/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW"/] -<1> The write index is now managed by {ilm-cap} -<2> The `lifecycle` configured on the data stream is now disabled. -<3> The next write index will be managed by {ilm-cap} - -Had we removed the {ilm-cap} policy from the index template when we <> -it, the write index of the data stream will now be `Unmanaged` because the index -wouldn't have the {ilm-cap} policy configured to fallback onto. +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_indices":[],"failure_store":false/] +<1> The write index is now managed by {ilm-init} +<2> The `lifecycle` configured on the data stream is now disabled. +<3> The next write index will be managed by {ilm-init} + +Had we removed the {ilm-init} policy from the index template when we <> +it, the write index of the data stream would now be `Unmanaged` because the index +wouldn't have the {ilm-init} policy configured to fall back on.
////////////////////////// [source,console] diff --git a/docs/reference/docs/data-replication.asciidoc b/docs/reference/docs/data-replication.asciidoc index 9503b6b6bb29d..2c1a16c81d011 100644 --- a/docs/reference/docs/data-replication.asciidoc +++ b/docs/reference/docs/data-replication.asciidoc @@ -28,6 +28,8 @@ Every indexing operation in Elasticsearch is first resolved to a replication gro typically based on the document ID. Once the replication group has been determined, the operation is forwarded internally to the current _primary shard_ of the group. This stage of indexing is referred to as the _coordinating stage_. +image::images/data_processing_flow.png[An example of a basic write model.] + The next stage of indexing is the _primary stage_, performed on the primary shard. The primary shard is responsible for validating the operation and forwarding it to the other replicas. Since replicas can be offline, the primary is not required to replicate to all replicas. Instead, Elasticsearch maintains a list of shard copies that should diff --git a/docs/reference/esql/esql-get-started.asciidoc b/docs/reference/esql/esql-get-started.asciidoc index 82831ef943398..e54825406257f 100644 --- a/docs/reference/esql/esql-get-started.asciidoc +++ b/docs/reference/esql/esql-get-started.asciidoc @@ -7,50 +7,14 @@ This guide shows how you can use {esql} to query and aggregate your data. -TIP: To get started with {esql} without setting up your own deployment, visit -the public {esql} demo environment at -https://esql.demo.elastic.co/[esql.demo.elastic.co]. It comes with preloaded -data sets and sample queries. - [discrete] [[esql-getting-started-prerequisites]] === Prerequisites -To follow along with the queries in this getting started guide, first ingest -some sample data using the following requests: - -[source,console] ----- -PUT sample_data -{ - "mappings": { - "properties": { - "client.ip": { - "type": "ip" - }, - "message": { - "type": "keyword" - } - } - } -} - -PUT sample_data/_bulk -{"index": {}} -{"@timestamp": "2023-10-23T12:15:03.360Z", "client.ip": "172.21.2.162", "message": "Connected to 10.1.0.3", "event.duration": 3450233} -{"index": {}} -{"@timestamp": "2023-10-23T12:27:28.948Z", "client.ip": "172.21.2.113", "message": "Connected to 10.1.0.2", "event.duration": 2764889} -{"index": {}} -{"@timestamp": "2023-10-23T13:33:34.937Z", "client.ip": "172.21.0.5", "message": "Disconnected", "event.duration": 1232382} -{"index": {}} -{"@timestamp": "2023-10-23T13:51:54.732Z", "client.ip": "172.21.3.15", "message": "Connection error", "event.duration": 725448} -{"index": {}} -{"@timestamp": "2023-10-23T13:52:55.015Z", "client.ip": "172.21.3.15", "message": "Connection error", "event.duration": 8268153} -{"index": {}} -{"@timestamp": "2023-10-23T13:53:55.832Z", "client.ip": "172.21.3.15", "message": "Connection error", "event.duration": 5033755} -{"index": {}} -{"@timestamp": "2023-10-23T13:55:01.543Z", "client.ip": "172.21.3.15", "message": "Connected to 10.1.0.1", "event.duration": 1756467} ----- +To follow along with the queries in this guide, you can either set up your own +deployment, or use Elastic's public {esql} demo environment. 
+ +include::{es-repo-dir}/tab-widgets/esql/esql-getting-started-widget-sample-data.asciidoc[] [discrete] [[esql-getting-started-running-queries]] @@ -58,7 +22,7 @@ PUT sample_data/_bulk In {kib}, you can use Console or Discover to run {esql} queries: -include::{es-repo-dir}/tab-widgets/esql/esql-getting-started-widget.asciidoc[] +include::{es-repo-dir}/tab-widgets/esql/esql-getting-started-widget-discover-console.asciidoc[] [discrete] [[esql-getting-started-first-query]] @@ -300,57 +264,9 @@ image::images/esql/esql-enrich.png[align="center"] Before you can use `ENRICH`, you first need to <> and <> -an <>. The following requests create and -execute a policy that links an IP address to an environment ("Development", -"QA", or "Production"): - -[source,console] ----- -PUT clientips -{ - "mappings": { - "properties": { - "client.ip": { - "type": "keyword" - }, - "env": { - "type": "keyword" - } - } - } -} - -PUT clientips/_bulk -{ "index" : {}} -{ "client.ip": "172.21.0.5", "env": "Development" } -{ "index" : {}} -{ "client.ip": "172.21.2.113", "env": "QA" } -{ "index" : {}} -{ "client.ip": "172.21.2.162", "env": "QA" } -{ "index" : {}} -{ "client.ip": "172.21.3.15", "env": "Production" } -{ "index" : {}} -{ "client.ip": "172.21.3.16", "env": "Production" } - -PUT /_enrich/policy/clientip_policy -{ - "match": { - "indices": "clientips", - "match_field": "client.ip", - "enrich_fields": ["env"] - } -} - -PUT /_enrich/policy/clientip_policy/_execute ----- - -//// -[source,console] ----- -DELETE /_enrich/policy/clientip_policy ----- -// TEST[continued] -//// +an <>. + +include::{es-repo-dir}/tab-widgets/esql/esql-getting-started-widget-enrich-policy.asciidoc[] After creating and executing a policy, you can use it with the `ENRICH` command: diff --git a/docs/reference/esql/esql-limitations.asciidoc b/docs/reference/esql/esql-limitations.asciidoc index 3abe6a6df7e01..f1971fd409754 100644 --- a/docs/reference/esql/esql-limitations.asciidoc +++ b/docs/reference/esql/esql-limitations.asciidoc @@ -57,6 +57,7 @@ include::processing-commands/limit.asciidoc[tag=limitation] ** `completion` ** `dense_vector` ** `double_range` +** `flattened` ** `float_range` ** `histogram` ** `integer_range` @@ -72,6 +73,35 @@ unsupported type is not explicitly used in a query, it is returned with `null` values, with the exception of nested fields. Nested fields are not returned at all. +[discrete] +[[esql-limitations-full-text-search]] +=== Full-text search is not supported + +Because of <>, +full-text search is not yet supported. Queries on `text` fields are like queries +on `keyword` fields: they are case-sensitive and need to match the full string. + +For example, after indexing a field of type `text` with the value `Elasticsearch +query language`, the following `WHERE` clause does not match because the `LIKE` +operator is case-sensitive: +[source,esql] +---- +| WHERE field LIKE "elasticsearch query language" +---- + +The following `WHERE` clause does not match either, because the `LIKE` operator +tries to match the whole string: +[source,esql] +---- +| WHERE field LIKE "Elasticsearch" +---- + +As a workaround, use wildcards and regular expressions. For example: +[source,esql] +---- +| WHERE field RLIKE "[Ee]lasticsearch.*" +---- + [discrete] [[esql-limitations-text-fields]] === `text` fields behave like `keyword` fields @@ -157,6 +187,12 @@ return `null` when applied to a multivalued field, unless documented otherwise. Work around this limitation by converting the field to single value with one of the <>. 
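+For example, a minimal sketch using the `MV_MIN` function to reduce a multivalued
+field to a single value before comparing it (the `index` name and the numeric
+`versions` field are illustrative):
+
+[source,esql]
+----
+// versions is multivalued; take its minimum so the comparison returns non-null
+FROM index
+| EVAL min_version = MV_MIN(versions)
+| WHERE min_version > 2
+----
+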
+[discrete] +[[esql-limitations-timezone]] +=== Timezone support + +{esql} only supports the UTC timezone. + [discrete] [[esql-limitations-kibana]] === Kibana limitations diff --git a/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc b/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc index 8f235ed0b7add..a13633a9f8d92 100644 --- a/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc +++ b/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc @@ -62,8 +62,9 @@ clientip:keyword | @timestamp:keyword | status:keyword include::../ingest/processors/dissect.asciidoc[tag=intro-example-explanation] -An empty key `%{}` or a <> can be used to -match values, but exclude the value from the output. +A <> can be used to match values, but +exclude the value from the output. +// TODO: Change back to original text when https://github.com/elastic/elasticsearch/pull/102580 is merged All matched values are output as keyword string data types. Use the <> to convert to another data type. @@ -126,24 +127,79 @@ include::../ingest/processors/dissect.asciidoc[tag=dissect-key-modifiers] ====== Right padding modifier (`->`) include::../ingest/processors/dissect.asciidoc[tag=dissect-modifier-skip-right-padding] +For example: +[source.merge.styled,esql] +---- +include::{esql-specs}/docs.csv-spec[tag=dissectRightPaddingModifier] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/docs.csv-spec[tag=dissectRightPaddingModifier-result] +|=== + +//// +// TODO: Re-enable when https://github.com/elastic/elasticsearch/pull/102580 is merged +include::../ingest/processors/dissect.asciidoc[tag=dissect-modifier-empty-right-padding] + +For example: +[source.merge.styled,esql] +---- +include::{esql-specs}/docs.csv-spec[tag=dissectEmptyRightPaddingModifier] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/docs.csv-spec[tag=dissectEmptyRightPaddingModifier-result] +|=== +//// + [[esql-append-modifier]] ====== Append modifier (`+`) include::../ingest/processors/dissect.asciidoc[tag=append-modifier] +[source.merge.styled,esql] +---- +include::{esql-specs}/docs.csv-spec[tag=dissectAppendModifier] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/docs.csv-spec[tag=dissectAppendModifier-result] +|=== + [[esql-append-order-modifier]] ====== Append with order modifier (`+` and `/n`) include::../ingest/processors/dissect.asciidoc[tag=append-order-modifier] +[source.merge.styled,esql] +---- +include::{esql-specs}/docs.csv-spec[tag=dissectAppendWithOrderModifier] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/docs.csv-spec[tag=dissectAppendWithOrderModifier-result] +|=== + [[esql-named-skip-key]] ====== Named skip key (`?`) -include::../ingest/processors/dissect.asciidoc[tag=named-skip-key] +// include::../ingest/processors/dissect.asciidoc[tag=named-skip-key] +// TODO: Re-enable when https://github.com/elastic/elasticsearch/pull/102580 is merged + +Dissect supports ignoring matches in the final result. This can be done with a +named skip key using the `%{?name}` syntax: + +[source.merge.styled,esql] +---- +include::{esql-specs}/docs.csv-spec[tag=dissectNamedSkipKey] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/docs.csv-spec[tag=dissectNamedSkipKey-result] +|=== [[esql-dissect-limitations]] ===== Limitations // tag::dissect-limitations[] -The `DISSECT` command does not support -<>.
+The `DISSECT` command does not support reference keys or empty keys. // end::dissect-limitations[] [[esql-process-data-with-grok]] diff --git a/docs/reference/esql/esql-query-api.asciidoc b/docs/reference/esql/esql-query-api.asciidoc index 437871d31a88f..afa9ab7254cfa 100644 --- a/docs/reference/esql/esql-query-api.asciidoc +++ b/docs/reference/esql/esql-query-api.asciidoc @@ -68,11 +68,6 @@ responses. See <>. `query`:: (Required, object) {esql} query to run. For syntax, refer to <>. -[[esql-search-api-time-zone]] -`time_zone`:: -(Optional, string) ISO-8601 time zone ID for the search. Several {esql} -date/time functions use this time zone. Defaults to `Z` (UTC). - [discrete] [role="child_attributes"] [[esql-query-api-response-body]] diff --git a/docs/reference/esql/esql-syntax.asciidoc b/docs/reference/esql/esql-syntax.asciidoc index 725b1d3ff1e03..22c9b1f100827 100644 --- a/docs/reference/esql/esql-syntax.asciidoc +++ b/docs/reference/esql/esql-syntax.asciidoc @@ -9,7 +9,7 @@ [[esql-basic-syntax]] === Basic syntax -An {esql} query is composed of a <> followed +An {esql} query is composed of a <> followed by an optional series of <>, separated by a pipe character: `|`. For example: @@ -36,6 +36,101 @@ source-command | processing-command1 | processing-command2 ---- ==== +[discrete] +[[esql-identifiers]] +==== Identifiers + +Identifiers can be used as they are and don't require quoting, unless they +contain special characters, in which case they must be quoted with +backticks (+{backtick}+). What counts as a "special character" is command dependent. + +For <>, <>, <>, +<>, <> and +<> these are: `=`, +{backtick}+, `,`, ` ` (space), `|`, +`[`, `]`, `\t` (TAB), `\r` (CR), `\n` (LF); one `/` is allowed unquoted, but +a sequence of two or more requires quoting. + +The rest of the commands - those that allow identifiers to be used in +expressions - require quoting if the identifier contains characters other than +letters, numbers and `_`, or doesn't start with a letter, `_` or `@`. + +For instance: + +[source,esql] +---- +// Retain just one field +FROM index +| KEEP 1.field +---- + +is legal. However, if the same field is to be used with an <>, +it has to be quoted: + +[source,esql] +---- +// Copy one field +FROM index +| EVAL my_field = `1.field` +---- + +[discrete] +[[esql-literals]] +==== Literals + +{esql} currently supports numeric and string literals. + +[discrete] +[[esql-string-literals]] +===== String literals + +A string literal is a sequence of Unicode characters delimited by double +quotes (`"`). + +[source,esql] +---- +// Filter by a string value +FROM index +| WHERE first_name == "Georgi" +---- + +If the literal string itself contains quotes, these need to be escaped (`\\"`). +{esql} also supports the triple-quotes (`"""`) delimiter, for convenience: + +[source,esql] +---- +ROW name = """Indiana "Indy" Jones""" +---- + +The special characters CR, LF and TAB can be provided with the usual escaping: +`\r`, `\n`, `\t`, respectively.
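+For illustration, a small sketch combining the escapes above (the values are hypothetical): + +[source,esql] +---- +// Escaped quotes and a tab inside a regular string literal +ROW message = "He said \"hello\"\tand left" +----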
+ +[discrete] +[[esql-numeric-literals]] +===== Numerical literals + +Numeric literals are accepted in decimal and in scientific notation with the +exponent marker (`e` or `E`). A literal can start with a digit, a decimal +point `.` or the negative sign `-`: + +[source, sql] +---- +1969 -- integer notation +3.14 -- decimal notation +.1234 -- decimal notation starting with decimal point +4E5 -- scientific notation (with exponent marker) +1.2e-3 -- scientific notation with decimal point +-.1e2 -- scientific notation starting with the negative sign +---- + +Integer literals are implicitly converted to the `integer`, `long` +or the `double` type, whichever can first accommodate the literal's value. + +Floating point literals are implicitly converted to the `double` type. + +To obtain constant values of different types, use one of the numeric +<>. + + [discrete] [[esql-comments]] ==== Comments diff --git a/docs/reference/esql/functions/in.asciidoc b/docs/reference/esql/functions/in.asciidoc index be5688250ecc7..c64c64873f7cb 100644 --- a/docs/reference/esql/functions/in.asciidoc +++ b/docs/reference/esql/functions/in.asciidoc @@ -2,10 +2,16 @@ [[esql-in-operator]] === `IN` +//tag::body[] The `IN` operator allows testing whether a field or expression equals an element in a list of literals, fields or expressions: -[source,esql] +[source.merge.styled,esql] ---- include::{esql-specs}/row.csv-spec[tag=in-with-expressions] ----- \ No newline at end of file +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/row.csv-spec[tag=in-with-expressions-result] +|=== +//end::body[] \ No newline at end of file diff --git a/docs/reference/esql/functions/like.asciidoc b/docs/reference/esql/functions/like.asciidoc index 9d06a3d051b93..d89b6715f86eb 100644 --- a/docs/reference/esql/functions/like.asciidoc +++ b/docs/reference/esql/functions/like.asciidoc @@ -2,6 +2,7 @@ [[esql-like-operator]] === `LIKE` +// tag::body[] Use `LIKE` to filter data based on string patterns using wildcards. `LIKE` usually acts on a field placed on the left-hand side of the operator, but it can also act on a constant (literal) expression. The right-hand side of the operator @@ -12,9 +13,12 @@ The following wildcard characters are supported: * `*` matches zero or more characters. * `?` matches one character. -[source,esql] +[source.merge.styled,esql] ---- -FROM employees -| WHERE first_name LIKE "?b*" -| KEEP first_name, last_name +include::{esql-specs}/docs.csv-spec[tag=like] ---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/docs.csv-spec[tag=like-result] +|=== +// end::body[] \ No newline at end of file diff --git a/docs/reference/esql/functions/ltrim.asciidoc b/docs/reference/esql/functions/ltrim.asciidoc index 6e6d30a73b865..e5230e4edd41a 100644 --- a/docs/reference/esql/functions/ltrim.asciidoc +++ b/docs/reference/esql/functions/ltrim.asciidoc @@ -1,6 +1,9 @@ [discrete] [[esql-ltrim]] === `LTRIM` +[.text-center] +image::esql/functions/signature/ltrim.svg[Embedded,opts=inline] + Removes leading whitespaces from strings.
[source.merge.styled,esql] @@ -11,3 +14,7 @@ include::{esql-specs}/string.csv-spec[tag=ltrim] |=== include::{esql-specs}/string.csv-spec[tag=ltrim-result] |=== + +Supported types: + +include::types/ltrim.asciidoc[] diff --git a/docs/reference/esql/functions/pow.asciidoc b/docs/reference/esql/functions/pow.asciidoc index 9f7805bfd3eae..b13151c8cbd76 100644 --- a/docs/reference/esql/functions/pow.asciidoc +++ b/docs/reference/esql/functions/pow.asciidoc @@ -5,7 +5,8 @@ image::esql/functions/signature/pow.svg[Embedded,opts=inline] Returns the value of a base (first argument) raised to the power of an exponent (second argument). -Both arguments must be numeric. +Both arguments must be numeric. The output is always a double. Note that it is still possible to overflow +a double result here; in that case, `null` will be returned. [source.merge.styled,esql] ---- @@ -16,62 +17,6 @@ include::{esql-specs}/math.csv-spec[tag=powDI] include::{esql-specs}/math.csv-spec[tag=powDI-result] |=== -[discrete] -==== Type rules - -The type of the returned value is determined by the types of the base and exponent. -The following rules are applied to determine the result type: - -* If either of the base or exponent are of a floating point type, the result will be a double -* Otherwise, if either the base or the exponent are 64-bit (long or unsigned long), the result will be a long -* Otherwise, the result will be a 32-bit integer (this covers all other numeric types, including int, short and byte) - -For example, using simple integers as arguments will lead to an integer result: - -[source.merge.styled,esql] ----- -include::{esql-specs}/math.csv-spec[tag=powII] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/math.csv-spec[tag=powII-result] -|=== - -NOTE: The actual power function is performed using double precision values for all cases. -This means that for very large non-floating point values there is a small chance that the -operation can lead to slightly different answers than expected. -However, a more likely outcome of very large non-floating point values is numerical overflow. - -[discrete] -==== Arithmetic errors - -Arithmetic errors and numeric overflow do not result in an error. Instead, the result will be `null` -and a warning for the `ArithmeticException` added.
-For example: - -[source.merge.styled,esql] ----- -include::{esql-specs}/math.csv-spec[tag=powULOverrun] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/math.csv-spec[tag=powULOverrun-warning] -|=== -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/math.csv-spec[tag=powULOverrun-result] -|=== - -If it is desired to protect against numerical overruns, use `TO_DOUBLE` on either of the arguments: - -[source.merge.styled,esql] ----- -include::{esql-specs}/math.csv-spec[tag=pow2d] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/math.csv-spec[tag=pow2d-result] -|=== [discrete] ==== Fractional exponents diff --git a/docs/reference/esql/functions/predicates.asciidoc b/docs/reference/esql/functions/predicates.asciidoc index 9a3ea89e9aa73..16b461b40ebf7 100644 --- a/docs/reference/esql/functions/predicates.asciidoc +++ b/docs/reference/esql/functions/predicates.asciidoc @@ -2,6 +2,7 @@ [[esql-predicates]] === `IS NULL` and `IS NOT NULL` predicates +//tag::body[] For NULL comparison, use the `IS NULL` and `IS NOT NULL` predicates: [source.merge.styled,esql] ---- @@ -21,3 +22,4 @@ include::{esql-specs}/null.csv-spec[tag=is-not-null] |=== include::{esql-specs}/null.csv-spec[tag=is-not-null-result] |=== +//end::body[] \ No newline at end of file diff --git a/docs/reference/esql/functions/rlike.asciidoc b/docs/reference/esql/functions/rlike.asciidoc index 0fd8d8ab319da..1cdbbe6964123 100644 --- a/docs/reference/esql/functions/rlike.asciidoc +++ b/docs/reference/esql/functions/rlike.asciidoc @@ -2,14 +2,18 @@ [[esql-rlike-operator]] ==== `RLIKE` +// tag::body[] Use `RLIKE` to filter data based on string patterns using <>. `RLIKE` usually acts on a field placed on the left-hand side of the operator, but it can also act on a constant (literal) expression. The right-hand side of the operator represents the pattern. -[source,esql] +[source.merge.styled,esql] ---- -FROM employees -| WHERE first_name RLIKE ".leja.*" -| KEEP first_name, last_name ----- \ No newline at end of file +include::{esql-specs}/docs.csv-spec[tag=rlike] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/docs.csv-spec[tag=rlike-result] +|=== +// end::body[] \ No newline at end of file diff --git a/docs/reference/esql/functions/rtrim.asciidoc b/docs/reference/esql/functions/rtrim.asciidoc index 3224331e9ed6a..8eb0494e90d9e 100644 --- a/docs/reference/esql/functions/rtrim.asciidoc +++ b/docs/reference/esql/functions/rtrim.asciidoc @@ -1,6 +1,9 @@ [discrete] [[esql-rtrim]] === `RTRIM` +[.text-center] +image::esql/functions/signature/rtrim.svg[Embedded,opts=inline] + Removes trailing whitespaces from strings.
[source.merge.styled,esql] @@ -11,3 +14,7 @@ include::{esql-specs}/string.csv-spec[tag=rtrim] |=== include::{esql-specs}/string.csv-spec[tag=rtrim-result] |=== + +Supported types: + +include::types/rtrim.asciidoc[] diff --git a/docs/reference/esql/functions/signature/case.svg b/docs/reference/esql/functions/signature/case.svg deleted file mode 100644 index 09e8f7efa2835..0000000000000 --- a/docs/reference/esql/functions/signature/case.svg +++ /dev/null @@ -1 +0,0 @@ -CASE(arg1,arg2) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/ltrim.svg b/docs/reference/esql/functions/signature/ltrim.svg index ad7a4da0248e6..327e75b92ca19 100644 --- a/docs/reference/esql/functions/signature/ltrim.svg +++ b/docs/reference/esql/functions/signature/ltrim.svg @@ -1 +1 @@ -LTRIM(arg1) \ No newline at end of file +LTRIM(str) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/rtrim.svg b/docs/reference/esql/functions/signature/rtrim.svg index 3d95ddf5ef6ef..b830bb59c5c31 100644 --- a/docs/reference/esql/functions/signature/rtrim.svg +++ b/docs/reference/esql/functions/signature/rtrim.svg @@ -1 +1 @@ -RTRIM(arg1) \ No newline at end of file +RTRIM(str) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/trim.svg b/docs/reference/esql/functions/signature/trim.svg index 6f1273142fa51..5fc865d306f11 100644 --- a/docs/reference/esql/functions/signature/trim.svg +++ b/docs/reference/esql/functions/signature/trim.svg @@ -1 +1 @@ -TRIM(arg1) \ No newline at end of file +TRIM(str) \ No newline at end of file diff --git a/docs/reference/esql/functions/types/ltrim.asciidoc b/docs/reference/esql/functions/types/ltrim.asciidoc index 11c02c8f0c3bb..26f4e7633d8ae 100644 --- a/docs/reference/esql/functions/types/ltrim.asciidoc +++ b/docs/reference/esql/functions/types/ltrim.asciidoc @@ -1,6 +1,6 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== -arg1 | result +str | result keyword | keyword text | text |=== diff --git a/docs/reference/esql/functions/types/pow.asciidoc b/docs/reference/esql/functions/types/pow.asciidoc index 37bddc60c118f..0e22c123ebf53 100644 --- a/docs/reference/esql/functions/types/pow.asciidoc +++ b/docs/reference/esql/functions/types/pow.asciidoc @@ -3,8 +3,18 @@ base | exponent | result double | double | double double | integer | double +double | long | double +double | unsigned_long | double integer | double | double -integer | integer | integer +integer | integer | double +integer | long | double +integer | unsigned_long | double long | double | double -long | integer | long +long | integer | double +long | long | double +long | unsigned_long | double +unsigned_long | double | double +unsigned_long | integer | double +unsigned_long | long | double +unsigned_long | unsigned_long | double |=== diff --git a/docs/reference/esql/functions/types/rtrim.asciidoc b/docs/reference/esql/functions/types/rtrim.asciidoc index 11c02c8f0c3bb..26f4e7633d8ae 100644 --- a/docs/reference/esql/functions/types/rtrim.asciidoc +++ b/docs/reference/esql/functions/types/rtrim.asciidoc @@ -1,6 +1,6 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== -arg1 | result +str | result keyword | keyword text | text |=== diff --git a/docs/reference/esql/functions/types/trim.asciidoc b/docs/reference/esql/functions/types/trim.asciidoc index 11c02c8f0c3bb..26f4e7633d8ae 100644 --- a/docs/reference/esql/functions/types/trim.asciidoc +++ b/docs/reference/esql/functions/types/trim.asciidoc @@ -1,6 +1,6 @@ 
[%header.monospaced.styled,format=dsv,separator=|] |=== -arg1 | result +str | result keyword | keyword text | text |=== diff --git a/docs/reference/esql/processing-commands/where.asciidoc b/docs/reference/esql/processing-commands/where.asciidoc index e723a977bf99c..973b163b08b10 100644 --- a/docs/reference/esql/processing-commands/where.asciidoc +++ b/docs/reference/esql/processing-commands/where.asciidoc @@ -19,9 +19,6 @@ A boolean expression. The `WHERE` processing command produces a table that contains all the rows from the input table for which the provided condition evaluates to `true`. -`WHERE` supports various <> and -<>. - *Examples* [source,esql] ---- @@ -36,9 +33,22 @@ Which, if `still_hired` is a boolean field, can be simplified to: include::{esql-specs}/docs.csv-spec[tag=whereBoolean] ---- -Using a function: +`WHERE` supports various <>. For example, the +<> function: [source,esql] ---- include::{esql-specs}/docs.csv-spec[tag=whereFunction] ---- + +For a complete list of all functions, refer to <>. + +include::../functions/predicates.asciidoc[tag=body] + +include::../functions/like.asciidoc[tag=body] + +include::../functions/rlike.asciidoc[tag=body] + +include::../functions/in.asciidoc[tag=body] + +For a complete list of all operators, refer to <>. \ No newline at end of file diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc new file mode 100644 index 0000000000000..3e474953a72f9 --- /dev/null +++ b/docs/reference/getting-started.asciidoc @@ -0,0 +1,285 @@ +[chapter] +[[getting-started]] += Quick start + +This guide helps you learn how to: + +* install and run {es} and {kib} (using {ecloud} or Docker), +* add a simple (non-timestamped) dataset to {es}, +* run basic searches. + +[TIP] +==== +If you're interested in using {es} with Python, check out Elastic Search Labs. This is the best place to explore AI-powered search use cases, such as working with embeddings, vector search, and retrieval augmented generation (RAG). + +* https://www.elastic.co/search-labs/tutorials/search-tutorial/welcome[Tutorial]: this walks you through building a complete search solution with {es}, from the ground up. +* https://github.com/elastic/elasticsearch-labs[`elasticsearch-labs` repository]: it contains a range of Python https://github.com/elastic/elasticsearch-labs/tree/main/notebooks[notebooks] and https://github.com/elastic/elasticsearch-labs/tree/main/example-apps[example apps]. +==== + +[discrete] +[[run-elasticsearch]] +=== Run {es} + +The simplest way to set up {es} is to create a managed deployment with {ess} on +{ecloud}. If you prefer to manage your own test environment, install and +run {es} using Docker. + +include::{es-repo-dir}/tab-widgets/code.asciidoc[] +include::{es-repo-dir}/tab-widgets/quick-start-install-widget.asciidoc[] + +[discrete] +[[send-requests-to-elasticsearch]] +=== Send requests to {es} + +You send data and other requests to {es} using REST APIs. This lets you interact +with {es} using any client that sends HTTP requests, such as +https://curl.se[curl]. You can also use {kib}'s Console to send requests to +{es}. + +include::{es-repo-dir}/tab-widgets/api-call-widget.asciidoc[] + +[discrete] +[[add-data]] +=== Add data + +You add data to {es} as JSON objects called documents. {es} stores these +documents in searchable indices. + +[discrete] +[[add-single-document]] +==== Add a single document + +Submit the following indexing request to add a single document to the +`books` index. +The request automatically creates the index.
+ +//// +[source,console] +---- +PUT books +---- +// TESTSETUP +//// + +[source,console] +---- +POST books/_doc +{"name": "Snow Crash", "author": "Neal Stephenson", "release_date": "1992-06-01", "page_count": 470} +---- +// TEST[s/_doc/_doc?refresh=wait_for/] + +The response includes metadata that {es} generates for the document including a unique `_id` for the document within the index. + +.Expand to see example response +[%collapsible] +=============== +[source,console-result] +---- +{ + "_index": "books", + "_id": "O0lG2IsBaSa7VYx_rEia", + "_version": 1, + "result": "created", + "_shards": { + "total": 2, + "successful": 2, + "failed": 0 + }, + "_seq_no": 0, + "_primary_term": 1 +} +---- +// TEST[skip:TODO] +=============== + +[discrete] +[[add-multiple-documents]] +==== Add multiple documents + +Use the `_bulk` endpoint to add multiple documents in one request. Bulk data +must be newline-delimited JSON (NDJSON). Each line must end in a newline +character (`\n`), including the last line. + +[source,console] +---- +POST /_bulk +{ "index" : { "_index" : "books" } } +{"name": "Revelation Space", "author": "Alastair Reynolds", "release_date": "2000-03-15", "page_count": 585} +{ "index" : { "_index" : "books" } } +{"name": "1984", "author": "George Orwell", "release_date": "1985-06-01", "page_count": 328} +{ "index" : { "_index" : "books" } } +{"name": "Fahrenheit 451", "author": "Ray Bradbury", "release_date": "1953-10-15", "page_count": 227} +{ "index" : { "_index" : "books" } } +{"name": "Brave New World", "author": "Aldous Huxley", "release_date": "1932-06-01", "page_count": 268} +{ "index" : { "_index" : "books" } } +{"name": "The Handmaids Tale", "author": "Margaret Atwood", "release_date": "1985-06-01", "page_count": 311} +---- +// TEST[continued] + +You should receive a response indicating there were no errors. + +.Expand to see example response +[%collapsible] +=============== +[source,console-result] +---- +{ + "errors": false, + "took": 29, + "items": [ + { + "index": { + "_index": "books", + "_id": "QklI2IsBaSa7VYx_Qkh-", + "_version": 1, + "result": "created", + "_shards": { + "total": 2, + "successful": 2, + "failed": 0 + }, + "_seq_no": 1, + "_primary_term": 1, + "status": 201 + } + }, + { + "index": { + "_index": "books", + "_id": "Q0lI2IsBaSa7VYx_Qkh-", + "_version": 1, + "result": "created", + "_shards": { + "total": 2, + "successful": 2, + "failed": 0 + }, + "_seq_no": 2, + "_primary_term": 1, + "status": 201 + } + }, + { + "index": { + "_index": "books", + "_id": "RElI2IsBaSa7VYx_Qkh-", + "_version": 1, + "result": "created", + "_shards": { + "total": 2, + "successful": 2, + "failed": 0 + }, + "_seq_no": 3, + "_primary_term": 1, + "status": 201 + } + }, + { + "index": { + "_index": "books", + "_id": "RUlI2IsBaSa7VYx_Qkh-", + "_version": 1, + "result": "created", + "_shards": { + "total": 2, + "successful": 2, + "failed": 0 + }, + "_seq_no": 4, + "_primary_term": 1, + "status": 201 + } + }, + { + "index": { + "_index": "books", + "_id": "RklI2IsBaSa7VYx_Qkh-", + "_version": 1, + "result": "created", + "_shards": { + "total": 2, + "successful": 2, + "failed": 0 + }, + "_seq_no": 5, + "_primary_term": 1, + "status": 201 + } + } + ] +} +---- +// TEST[skip:TODO] +=============== + +[discrete] +[[qs-search-data]] +=== Search data + +Indexed documents are available for search in near real-time. 
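+"Near real-time" means there is a short delay (about one second by default) before newly indexed documents become visible to searches. When experimenting, you can force a refresh of the `books` index instead of waiting; this is a testing convenience, not something to rely on in production: + +[source,console] +---- +POST books/_refresh +----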
+ +[discrete] +[[search-all-documents]] +==== Search all documents + +Run the following command to search the `books` index for all documents: +[source,console] +---- +GET books/_search +---- +// TEST[continued] + +The `_source` of each hit contains the original +JSON object submitted during indexing. + +[discrete] +[[qs-match-query]] +==== `match` query + +You can use the `match` query to search for documents that contain a specific value in a specific field. +This is the standard query for performing full-text search, including fuzzy matching and phrase searches. + +Run the following command to search the `books` index for documents containing `brave` in the `name` field: +[source,console] +---- +GET books/_search +{ + "query": { + "match": { + "name": "brave" + } + } +} +---- +// TEST[continued] + +[discrete] +[[whats-next]] +=== Next steps + +Now that {es} is up and running and you've learned the basics, you'll probably want to test out larger datasets, or index your own data. + +[discrete] +[[whats-next-search-learn-more]] +==== Learn more about search queries + +* <>. Jump here to learn about exact value search, full-text search, vector search, and more, using the <>. + +[discrete] +[[whats-next-more-data]] +==== Add more data + +* Learn how to {kibana-ref}/sample-data.html[install sample data] using {kib}. This is a quick way to test out {es} on larger workloads. +* Learn how to use the {kibana-ref}/connect-to-elasticsearch.html#upload-data-kibana[upload data UI] in {kib} to add your own CSV, TSV, or JSON files. +* Use the https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[bulk API] to ingest your own datasets to {es}. + +[discrete] +[[whats-next-client-libraries]] +==== {es} programming language clients + +* Check out our https://www.elastic.co/guide/en/elasticsearch/client/index.html[client library] to work with your {es} instance in your preferred programming language. +* If you're using Python, check out https://www.elastic.co/search-labs[Elastic Search Labs] for a range of examples that use the {es} Python client. This is the best place to explore AI-powered search use cases, such as working with embeddings, vector search, and retrieval augmented generation (RAG). +** This extensive, hands-on https://www.elastic.co/search-labs/tutorials/search-tutorial/welcome[tutorial] +walks you through building a complete search solution with {es}, from the ground up. +** https://github.com/elastic/elasticsearch-labs[`elasticsearch-labs`] contains a range of executable Python https://github.com/elastic/elasticsearch-labs/tree/main/notebooks[notebooks] and https://github.com/elastic/elasticsearch-labs/tree/main/example-apps[example apps]. 
\ No newline at end of file diff --git a/docs/reference/images/data_processing_flow.png b/docs/reference/images/data_processing_flow.png new file mode 100644 index 0000000000000..9b2f58ad61166 Binary files /dev/null and b/docs/reference/images/data_processing_flow.png differ diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 828a3e4d1d01d..b09d67e990636 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -17,6 +17,8 @@ include::intro.asciidoc[] include::release-notes/highlights.asciidoc[] +include::getting-started.asciidoc[] + include::setup.asciidoc[] include::upgrade.asciidoc[] diff --git a/docs/reference/indices/get-data-stream.asciidoc b/docs/reference/indices/get-data-stream.asciidoc index 1faee74ae953c..7701aa9f64cfe 100644 --- a/docs/reference/indices/get-data-stream.asciidoc +++ b/docs/reference/indices/get-data-stream.asciidoc @@ -156,8 +156,8 @@ Universally unique identifier (UUID) for the index. `prefer_ilm`:: (boolean) -Functionality in preview:[]. Indicates if this index is configured to prefer {ilm} -when both {ilm-cap} and <> are configured to +Functionality in preview:[]. Indicates if this index is configured to prefer {ilm} +when both {ilm-cap} and <> are configured to manage this index. `managed_by`:: @@ -223,8 +223,8 @@ Functionality in preview:[]. Indicates the system that will managed the next gen `prefer_ilm`:: (boolean) -Functionality in preview:[]. Indicates if the index template used to create the data -stream's backing indices is configured to prefer {ilm-cap} when both {ilm-cap} and +Functionality in preview:[]. Indicates if the index template used to create the data +stream's backing indices is configured to prefer {ilm-cap} when both {ilm-cap} and <> are configured to manage this index. `hidden`:: @@ -351,3 +351,4 @@ The API returns the following response: // TESTRESPONSE[s/"index_name": ".ds-my-data-stream-two-2099.03.08-000001"/"index_name": $body.data_streams.1.indices.0.index_name/] // TESTRESPONSE[s/"index_uuid": "3liBu2SYS5axasRt6fUIpA"/"index_uuid": $body.data_streams.1.indices.0.index_uuid/] // TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW"/] +// TESTRESPONSE[s/"replicated": false/"replicated": false,"failure_indices":[],"failure_store":false/] diff --git a/docs/reference/inference/post-inference.asciidoc b/docs/reference/inference/post-inference.asciidoc index f26a73d093091..f8515a8b33c39 100644 --- a/docs/reference/inference/post-inference.asciidoc +++ b/docs/reference/inference/post-inference.asciidoc @@ -25,9 +25,9 @@ Performs an inference task on an input text by using an {infer} model. [[post-inference-api-desc]] ==== {api-description-title} -The perform {infer} API enables you to use {infer} models to perform specific -tasks on data that you provide as an input. The API returns a response with the -resutls of the tasks. The {infer} model you use can perform one specific task +The perform {infer} API enables you to use {infer} models to perform specific +tasks on data that you provide as an input. The API returns a response with the +results of the tasks. The {infer} model you use can perform one specific task that has been defined when the model was created with the <>. @@ -50,8 +50,9 @@ The type of {infer} task that the model performs. == {api-request-body-title} `input`:: -(Required, string) +(Required, array of strings) The text on which you want to perform the {infer} task. +`input` can be a single string or an array.
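+For example, a hypothetical request that passes an array of inputs to an existing sparse embedding model (the model ID `my-elser-model` is illustrative): + +[source,console] +---- +POST _inference/sparse_embedding/my-elser-model +{ +  "input": ["The first sentence.", "The second sentence."] +} +---- +// TEST[skip:model ID is illustrative]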
[discrete] @@ -77,23 +78,26 @@ The API returns the following response: [source,console-result] ------------------------------------------------------------ { - "sparse_embedding": { - "port": 2.1259406, - "sky": 1.7073475, - "color": 1.6922266, - "dead": 1.6247464, - "television": 1.3525393, - "above": 1.2425821, - "tuned": 1.1440028, - "colors": 1.1218185, - "tv": 1.0111054, - "ports": 1.0067928, - "poem": 1.0042328, - "channel": 0.99471164, - "tune": 0.96235967, - "scene": 0.9020516, + "sparse_embedding": [ + { + "port": 2.1259406, + "sky": 1.7073475, + "color": 1.6922266, + "dead": 1.6247464, + "television": 1.3525393, + "above": 1.2425821, + "tuned": 1.1440028, + "colors": 1.1218185, + "tv": 1.0111054, + "ports": 1.0067928, + "poem": 1.0042328, + "channel": 0.99471164, + "tune": 0.96235967, + "scene": 0.9020516, + (...) + }, (...) - } + ] } ------------------------------------------------------------ -// NOTCONSOLE \ No newline at end of file +// NOTCONSOLE diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc index 3b8cd19aded53..9f0539fb551cb 100644 --- a/docs/reference/inference/put-inference.asciidoc +++ b/docs/reference/inference/put-inference.asciidoc @@ -52,18 +52,67 @@ The type of the {infer} task that the model will perform. Available task types: (Required, string) The type of service supported for the specified task type. Available services: -* `elser` +* `elser`, +* `openai`. `service_settings`:: (Required, object) Settings used to install the {infer} model. These settings are specific to the `service` you specified. ++ +.`service_settings` for `elser` +[%collapsible%closed] +===== +`num_allocations`::: +(Required, integer) +The number of model allocations to create. + +`num_threads`::: +(Required, integer) +The number of threads to use by each model allocation. +===== ++ +.`service_settings` for `openai` +[%collapsible%closed] +===== +`api_key`::: +(Required, string) +A valid API key of your OpenAI account. You can find your OpenAI API keys in +your OpenAI account under the +https://platform.openai.com/api-keys[API keys section]. + +IMPORTANT: You need to provide the API key only once, during the {infer} model +creation. The <> does not retrieve your API key. After +creating the {infer} model, you cannot change the associated API key. If you +want to use a different API key, delete the {infer} model and recreate it with +the same name and the updated API key. + +`organization_id`::: +(Optional, string) +The unique identifier of your organization. You can find the Organization ID in +your OpenAI account under +https://platform.openai.com/account/organization[**Settings** > **Organizations**]. + +`url`::: +(Optional, string) +The URL endpoint to use for the requests. Can be changed for testing purposes. +Defaults to `https://api.openai.com/v1/embeddings`. +===== `task_settings`:: (Optional, object) Settings to configure the {infer} task. These settings are specific to the `` you specified. - ++ +.`task_settings` for `text_embedding` +[%collapsible%closed] +===== +`model`::: +(Optional, string) +The name of the model to use for the {infer} task. Refer to the +https://platform.openai.com/docs/guides/embeddings/what-are-embeddings[OpenAI documentation] +for the list of available text embedding models. 
+===== [discrete] [[put-inference-api-example]] ==== {api-examples-title} @@ -103,3 +152,22 @@ Example response: } ------------------------------------------------------------ // NOTCONSOLE + + +The following example shows how to create an {infer} model called +`openai_embeddings` to perform a `text_embedding` task type. + +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/openai_embeddings +{ + "service": "openai", + "service_settings": { + "api_key": "" + }, + "task_settings": { + "model": "text-embedding-ada-002" + } +} +------------------------------------------------------------ +// TEST[skip:TBD] \ No newline at end of file diff --git a/docs/reference/ingest/apis/enrich/execute-enrich-policy.asciidoc b/docs/reference/ingest/apis/enrich/execute-enrich-policy.asciidoc index ee3af9c21de8f..ebad9f09250d3 100644 --- a/docs/reference/ingest/apis/enrich/execute-enrich-policy.asciidoc +++ b/docs/reference/ingest/apis/enrich/execute-enrich-policy.asciidoc @@ -37,8 +37,9 @@ PUT /_enrich/policy/my-policy [source,console] -------------------------------------------------- -PUT /_enrich/policy/my-policy/_execute +PUT /_enrich/policy/my-policy/_execute?wait_for_completion=false -------------------------------------------------- +// TEST[s/\?wait_for_completion=false//] //// [source,console] @@ -93,8 +94,13 @@ The previous enrich index will be deleted by a delayed maintenance job. By default this is done every 15 minutes. // end::update-enrich-index[] -Because this API request performs several operations, -it may take a while to return a response. +By default, this API is synchronous: it returns when a policy has been executed. +Because executing a policy performs several operations, it may take a while to +return a response, especially when the source indices are large. This can lead +to timeouts. To prevent timeouts, set the `wait_for_completion` parameter to +`false`. This runs the request asynchronously in the background, and returns a +task ID. You can use the task ID to manage the request with the <>. [[execute-enrich-policy-api-path-params]] ==== {api-path-parms-title} @@ -107,6 +113,7 @@ Enrich policy to execute. ==== {api-query-parms-title} `wait_for_completion`:: -(Required, Boolean) -If `true`, the request blocks other enrich policy execution requests until -complete. Defaults to `true`. +(Optional, Boolean) +If `true`, the request blocks until execution is complete. If `false`, the +request returns immediately and execution runs asynchronously in the background. +Defaults to `true`. diff --git a/docs/reference/ingest/apis/index.asciidoc b/docs/reference/ingest/apis/index.asciidoc index 772c35d542c2f..04fcd500a9721 100644 --- a/docs/reference/ingest/apis/index.asciidoc +++ b/docs/reference/ingest/apis/index.asciidoc @@ -29,3 +29,4 @@ include::delete-pipeline.asciidoc[] include::geoip-stats-api.asciidoc[] include::get-pipeline.asciidoc[] include::simulate-pipeline.asciidoc[] +include::simulate-ingest.asciidoc[] diff --git a/docs/reference/ingest/apis/simulate-ingest.asciidoc b/docs/reference/ingest/apis/simulate-ingest.asciidoc new file mode 100644 index 0000000000000..36f1f089ce90e --- /dev/null +++ b/docs/reference/ingest/apis/simulate-ingest.asciidoc @@ -0,0 +1,361 @@ + +[[simulate-ingest-api]] +=== Simulate ingest API +++++ +Simulate ingest +++++ + +Executes ingest pipelines against a set of provided documents, optionally +with substitute pipeline definitions.
This API is meant to be used for +troubleshooting or pipeline development, as it does not actually index any +data into {es}. + +//// +[source,console] +---- +PUT /_ingest/pipeline/my-pipeline +{ + "description" : "example pipeline to simulate", + "processors": [ + { + "set" : { + "field" : "field1", + "value" : "value1" + } + } + ] +} + +PUT /_ingest/pipeline/my-final-pipeline +{ + "description" : "example final pipeline to simulate", + "processors": [ + { + "set" : { + "field" : "field2", + "value" : "value2" + } + } + ] +} + +PUT /my-index +{ + "settings": { + "index": { + "default_pipeline": "my-pipeline", + "final_pipeline": "my-final-pipeline" + } + } +} +---- +// TESTSETUP +//// + +[source,console] +---- +POST /_ingest/_simulate +{ + "docs": [ + { + "_index": "my-index", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "my-index", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ], + "pipeline_substitutions": { <1> + "my-pipeline": { + "processors": [ + { + "set": { + "field": "field3", + "value": "value3" + } + } + ] + } + } +} +---- + +<1> This replaces the existing `my-pipeline` pipeline with the contents given here for the duration of this request. + +[[simulate-ingest-api-request]] +==== {api-request-title} + +`POST /_ingest/_simulate` + +`GET /_ingest/_simulate` + +`POST /_ingest//_simulate` + +`GET /_ingest//_simulate` + +[[simulate-ingest-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the +`index` or `create` <> +to use this API. + +[[simulate-ingest-api-desc]] +==== {api-description-title} + +The simulate ingest API simulates ingesting data into an index. It +executes the default and final pipeline for that index against a set +of documents provided in the body of the request. If a pipeline +contains a <>, it follows that +reroute processor to the new index, executing that index's pipelines +as well, in the same way that a non-simulated ingest would. No data is +indexed into {es}. Instead, the transformed document is returned, +along with the list of pipelines that have been executed and the name +of the index where the document would have been indexed if this were +not a simulation. This differs from the +<> in that you specify a +single pipeline for that API, and it only runs that one pipeline. The +simulate pipeline API is more useful for developing a single pipeline, +while the simulate ingest API is more useful for troubleshooting the +interaction of the various pipelines that get applied when ingesting +into an index. + + +By default, the pipeline definitions that are currently in the system +are used. However, you can supply substitute pipeline definitions in the +body of the request. These will be used in place of the pipeline +definitions that are already in the system. This can be used to replace +existing pipeline definitions or to create new ones. The pipeline +substitutions are only used within this request. + +[[simulate-ingest-api-path-params]] +==== {api-path-parms-title} + +``:: +(Optional, string) +The index to simulate ingesting into. This can be overridden by specifying an index +on each document. If you provide an index in the request path, it is used for any +documents that don’t explicitly specify an index argument. + +[[simulate-ingest-api-query-params]] +==== {api-query-parms-title} + +`pipeline`:: +(Optional, string) +Pipeline to use as the default pipeline. This can be used to override the default pipeline +of the index being ingested into.
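+For example, a sketch that overrides the index's default pipeline with `my-final-pipeline` (defined in the setup above) for the duration of the simulation: + +[source,console] +---- +POST /_ingest/my-index/_simulate?pipeline=my-final-pipeline +{ +  "docs": [ +    { +      "_id": "123", +      "_source": { +        "foo": "bar" +      } +    } +  ] +} +----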
+ + +[role="child_attributes"] +[[simulate-ingest-api-request-body]] +==== {api-request-body-title} + +`docs`:: +(Required, array of objects) +Sample documents to test in the pipeline. ++ +.Properties of `docs` objects +[%collapsible%open] +==== +`_id`:: +(Optional, string) +Unique identifier for the document. + +`_index`:: +(Optional, string) +Name of the index that the document will be ingested into. + +`_source`:: +(Required, object) +JSON body for the document. +==== + +`pipeline_substitutions`:: +(Optional, map of strings to objects) +Map of pipeline IDs to substitute pipeline definition objects. ++ +.Properties of pipeline definition objects +[%collapsible%open] +==== +include::put-pipeline.asciidoc[tag=pipeline-object] +==== + +[[simulate-ingest-api-example]] +==== {api-examples-title} + + +[[simulate-ingest-api-pre-existing-pipelines-ex]] +===== Use pre-existing pipeline definitions +In this example the index `index` has a default pipeline called `my-pipeline` and a final +pipeline called `my-final-pipeline`. Since both documents are being ingested into `index`, +both pipelines are executed using the pipeline definitions that are already in the system. + +[source,console] +---- +POST /_ingest/_simulate +{ + "docs": [ + { + "_index": "my-index", + "_id": "123", + "_source": { + "foo": "bar" + } + }, + { + "_index": "my-index", + "_id": "456", + "_source": { + "foo": "rab" + } + } + ] +} +---- + +The API returns the following response: + +[source,console-result] +---- +{ + "docs": [ + { + "doc": { + "_id": "123", + "_index": "my-index", + "_version": -3, + "_source": { + "field1": "value1", + "field2": "value2", + "foo": "bar" + }, + "executed_pipelines": [ + "my-pipeline", + "my-final-pipeline" + ] + } + }, + { + "doc": { + "_id": "456", + "_index": "my-index", + "_version": -3, + "_source": { + "field1": "value1", + "field2": "value2", + "foo": "rab" + }, + "executed_pipelines": [ + "my-pipeline", + "my-final-pipeline" + ] + } + } + ] +} +---- + +[[simulate-ingest-api-request-body-ex]] +===== Specify a pipeline substitution in the request body +In this example the index `index` has a default pipeline called `my-pipeline` and a final +pipeline called `my-final-pipeline`. But a substitute definition of `my-pipeline` is +provided in `pipeline_substitutions`. The substitute `my-pipeline` will be used in place of +the `my-pipeline` that is in the system, and then the `my-final-pipeline` that is already +defined in the system will be executed. 
+ +[source,console] +---- +POST /_ingest/_simulate +{ + "docs": [ + { + "_index": "my-index", + "_id": "123", + "_source": { + "foo": "bar" + } + }, + { + "_index": "my-index", + "_id": "456", + "_source": { + "foo": "rab" + } + } + ], + "pipeline_substitutions": { + "my-pipeline": { + "processors": [ + { + "uppercase": { + "field": "foo" + } + } + ] + } + } +} +---- + +The API returns the following response: + +[source,console-result] +---- +{ + "docs": [ + { + "doc": { + "_id": "123", + "_index": "my-index", + "_version": -3, + "_source": { + "field2": "value2", + "foo": "BAR" + }, + "executed_pipelines": [ + "my-pipeline", + "my-final-pipeline" + ] + } + }, + { + "doc": { + "_id": "456", + "_index": "my-index", + "_version": -3, + "_source": { + "field2": "value2", + "foo": "RAB" + }, + "executed_pipelines": [ + "my-pipeline", + "my-final-pipeline" + ] + } + } + ] +} +---- + +//// +[source,console] +---- +DELETE /my-index + +DELETE /_ingest/pipeline/* +---- + +[source,console-result] +---- +{ + "acknowledged": true +} +---- +//// diff --git a/docs/reference/ingest/geo-match-enrich-policy-type-ex.asciidoc b/docs/reference/ingest/geo-match-enrich-policy-type-ex.asciidoc index 6ba601e55ebe0..38c695c0b0667 100644 --- a/docs/reference/ingest/geo-match-enrich-policy-type-ex.asciidoc +++ b/docs/reference/ingest/geo-match-enrich-policy-type-ex.asciidoc @@ -72,8 +72,9 @@ enrich index for the policy. [source,console] ---- -POST /_enrich/policy/postal_policy/_execute +POST /_enrich/policy/postal_policy/_execute?wait_for_completion=false ---- +// TEST[s/\?wait_for_completion=false//] // TEST[continued] Use the <> to create an ingest diff --git a/docs/reference/ingest/match-enrich-policy-type-ex.asciidoc b/docs/reference/ingest/match-enrich-policy-type-ex.asciidoc index 306e69577c426..ed75d15df853e 100644 --- a/docs/reference/ingest/match-enrich-policy-type-ex.asciidoc +++ b/docs/reference/ingest/match-enrich-policy-type-ex.asciidoc @@ -58,8 +58,9 @@ enrich index for the policy. [source,console] ---- -POST /_enrich/policy/users-policy/_execute +POST /_enrich/policy/users-policy/_execute?wait_for_completion=false ---- +// TEST[s/\?wait_for_completion=false//] // TEST[continued] diff --git a/docs/reference/ingest/processors/dissect.asciidoc b/docs/reference/ingest/processors/dissect.asciidoc index 9d408ea150644..8f25bd8c8b90e 100644 --- a/docs/reference/ingest/processors/dissect.asciidoc +++ b/docs/reference/ingest/processors/dissect.asciidoc @@ -122,6 +122,7 @@ Use the right padding modifier to allow for repetition of the characters after a The right padding modifier may be placed on any key with any other modifiers. It should always be the furthest right modifier. For example: `%{+keyname/1->}` and `%{->}` +// end::dissect-modifier-skip-right-padding[] Right padding modifier example |====== @@ -132,7 +133,9 @@ Right padding modifier example * level = WARN |====== +// tag::dissect-modifier-empty-right-padding[] The right padding modifier may be used with an empty key to help skip unwanted data. For example, the same input string, but wrapped with brackets requires the use of an empty right padded key to achieve the same result. 
+// end::dissect-modifier-empty-right-padding[] Right padding modifier with empty key example |====== @@ -142,7 +145,6 @@ Right padding modifier with empty key example * ts = 1998-08-10T17:15:42,466 * level = WARN |====== -// end::dissect-modifier-skip-right-padding[] [[append-modifier]] ===== Append modifier (`+`) @@ -151,6 +153,7 @@ Right padding modifier with empty key example Dissect supports appending two or more results together for the output. Values are appended left to right. An append separator can be specified. In this example the append_separator is defined as a space. +// end::append-modifier[] Append modifier example |====== @@ -159,7 +162,7 @@ Append modifier example | *Result* a| * name = john jacob jingleheimer schmidt |====== -// end::append-modifier[] + [[append-order-modifier]] ===== Append with order modifier (`+` and `/n`) @@ -168,6 +171,7 @@ Append modifier example Dissect supports appending two or more results together for the output. Values are appended based on the order defined (`/n`). An append separator can be specified. In this example the append_separator is defined as a comma. +// end::append-order-modifier[] Append with order modifier example |====== @@ -176,7 +180,6 @@ Append with order modifier example | *Result* a| * name = schmidt,john,jingleheimer,jacob |====== -// end::append-order-modifier[] [[named-skip-key]] ===== Named skip key (`?`) @@ -184,6 +187,7 @@ Append with order modifier example // tag::named-skip-key[] Dissect supports ignoring matches in the final result. This can be done with an empty key `%{}`, but for readability it may be desired to give that empty key a name. +// end::named-skip-key[] Named skip key modifier example |====== @@ -193,7 +197,6 @@ Named skip key modifier example * clientip = 1.2.3.4 * @timestamp = 30/Apr/1998:22:00:52 +0000 |====== -// end::named-skip-key[] [[reference-keys]] ===== Reference keys (`*` and `&`) diff --git a/docs/reference/ingest/range-enrich-policy-type-ex.asciidoc b/docs/reference/ingest/range-enrich-policy-type-ex.asciidoc index 390360a640ea3..f11a95a6c5fe4 100644 --- a/docs/reference/ingest/range-enrich-policy-type-ex.asciidoc +++ b/docs/reference/ingest/range-enrich-policy-type-ex.asciidoc @@ -70,8 +70,9 @@ enrich index for the policy. [source,console] ---- -POST /_enrich/policy/networks-policy/_execute +POST /_enrich/policy/networks-policy/_execute?wait_for_completion=false ---- +// TEST[s/\?wait_for_completion=false//] // TEST[continued] diff --git a/docs/reference/ingest/search-inference-processing.asciidoc b/docs/reference/ingest/search-inference-processing.asciidoc index fad11b28858b7..48505ab314c1e 100644 --- a/docs/reference/ingest/search-inference-processing.asciidoc +++ b/docs/reference/ingest/search-inference-processing.asciidoc @@ -54,7 +54,7 @@ A common use case is a user searching FAQs, or a support agent searching a knowl The diagram below shows how documents are processed during ingestion. // Original diagram: https://whimsical.com/ml-in-enterprise-search-ErCetPqrcCPu2QYHvAwrgP@2bsEvpTYSt1Hiuq6UBf68tUWvFiXdzLt6ao -image::../images/ingest/document-enrichment-diagram.png["ML inference pipeline diagram"] +image::images/ingest/document-enrichment-diagram.png["ML inference pipeline diagram"] * Documents are processed by the `my-index-0001` pipeline, which happens automatically when indexing through an Elastic connector or crawler. * The `_run_ml_inference` field is set to `true` to ensure the ML inference pipeline (`my-index-0001@ml-inference`) is executed.
@@ -95,7 +95,7 @@ Once your index-specific ML inference pipeline is ready, you can add inference p To add an inference processor to the ML inference pipeline, click the *Add Inference Pipeline* button in the *Machine Learning Inference Pipelines* card. [role="screenshot"] -image::../images/ingest/document-enrichment-add-inference-pipeline.png["Add Inference Pipeline"] +image::images/ingest/document-enrichment-add-inference-pipeline.png["Add Inference Pipeline"] Here, you'll be able to: diff --git a/docs/reference/ingest/search-ingest-pipelines.asciidoc b/docs/reference/ingest/search-ingest-pipelines.asciidoc index 049a74670581d..f37e07f632810 100644 --- a/docs/reference/ingest/search-ingest-pipelines.asciidoc +++ b/docs/reference/ingest/search-ingest-pipelines.asciidoc @@ -22,7 +22,7 @@ To find this tab in the Kibana UI: The tab is highlighted in this screenshot: [.screenshot] -image::../images/ingest/ingest-pipeline-ent-search-ui.png[align="center"] +image::images/ingest/ingest-pipeline-ent-search-ui.png[align="center"] [discrete#ingest-pipeline-search-in-enterprise-search] === Overview diff --git a/docs/reference/intro.asciidoc b/docs/reference/intro.asciidoc index 3ea2c96eeaf02..3fc23b44994a7 100644 --- a/docs/reference/intro.asciidoc +++ b/docs/reference/intro.asciidoc @@ -24,6 +24,7 @@ to handle data in a wide variety of use cases: * Store and analyze logs, metrics, and security event data * Use machine learning to automatically model the behavior of your data in real time +* Use {es} as a vector database to create, store, and search vector embeddings * Automate business workflows using {es} as a storage engine * Manage, integrate, and analyze spatial information using {es} as a geographic information system (GIS) diff --git a/docs/reference/landing-page.asciidoc b/docs/reference/landing-page.asciidoc index a53a5770fe030..6d6c257f0c594 100644 --- a/docs/reference/landing-page.asciidoc +++ b/docs/reference/landing-page.asciidoc @@ -62,7 +62,7 @@ Elasticsearch is the search and analytics engine that powers the Elastic Stack.

@@ -215,6 +215,12 @@ [HTML markup lost in extraction: this landing-page hunk adds "Search Labs" and "Notebook examples" link entries after the existing "Plugins and integrations" entry.]
diff --git a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc index 05e23d901d5d3..478a70e23b93f 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc @@ -26,8 +26,9 @@ Instantiates a {dfeed}. [[ml-put-datafeed-desc]] == {api-description-title} -{ml-docs}/ml-dfeeds.html[{dfeeds-cap}] retrieve data from {es} for analysis by -an {anomaly-job}. You can associate only one {dfeed} to each {anomaly-job}. +{ml-docs}/ml-ad-run-jobs.html#ml-ad-datafeeds[{dfeeds-cap}] retrieve data from +{es} for analysis by an {anomaly-job}. You can associate only one {dfeed} to +each {anomaly-job}. The {dfeed} contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) at diff --git a/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc b/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc index c16098910bbe3..bf98327807e70 100644 --- a/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc +++ b/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc @@ -146,6 +146,11 @@ When an alert occurs, it is always the same name as the job ID of the associated them from generating actions. For more details, refer to {kibana-ref}/create-and-manage-rules.html#controlling-rules[Snooze and disable rules]. +You can also review how the alerts that occurred correlate with the +{anomaly-detect} results in the **Anomaly explorer** by using the +**Anomaly timeline** swimlane and the **Alerts** panel. + + [[creating-anomaly-jobs-health-rules]] == {anomaly-jobs-cap} health rules diff --git a/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc b/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc index 5696a032b165c..45517b99c2177 100644 --- a/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc +++ b/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc @@ -443,7 +443,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] + -Refer to <> to review the properties of the +Refer to <> to review the properties of the `tokenization` object. ===== @@ -469,7 +469,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] + -Refer to <> to review the +Refer to <> to review the properties of the `tokenization` object. ===== @@ -488,7 +488,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] + -Refer to <> to review the properties of the +Refer to <> to review the properties of the `tokenization` object. ===== @@ -514,7 +514,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenizati Recommended to set `max_sentence_length` to `386` with `128` of `span` and set `truncate` to `none`. + -Refer to <> to review the properties of the +Refer to <> to review the properties of the `tokenization` object.
===== @@ -546,7 +546,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-classific `num_top_classes`:::: (Optional, integer) -Specifies the number of top class predictions to return. Defaults to all classes +Specifies the number of top class predictions to return. Defaults to all classes (-1). `results_field`:::: @@ -557,7 +557,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] + -Refer to <> to review the properties of the +Refer to <> to review the properties of the `tokenization` object. ===== @@ -580,7 +580,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] + -Refer to <> to review the properties of the +Refer to <> to review the properties of the `tokenization` object. ===== @@ -599,7 +599,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarit (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] + -Refer to <> to review the properties of the +Refer to <> to review the properties of the `tokenization` object. ===== @@ -634,7 +634,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] + -Refer to <> to review the properties of the +Refer to <> to review the properties of the `tokenization` object. ===== ==== @@ -701,6 +701,33 @@ the platform identifiers used by Elasticsearch, so one of, `linux-x86_64`, For portable models (those that work independent of processor architecture or OS features), leave this field unset. +//Begin prefix_strings +`prefix_strings`:: +(Optional, object) +Certain NLP models are trained in such a way that a prefix string should +be applied to the input text before the input is evaluated. The prefix +may be different depending on the intention. For asymmetric tasks such +as information retrieval, the prefix applied to a passage as it is indexed +can be different from the prefix applied when searching those passages. + +`prefix_strings` has two options: a prefix string that is always applied +in the search context, and one that is always applied when ingesting the +docs. Both are optional. ++ +.Properties of `prefix_strings` +[%collapsible%open] +==== +`search`::: +(Optional, string) +The prefix string to prepend to the input text for requests +originating from a search query. + +`ingest`::: +(Optional, string) +The prefix string to prepend to the input text for requests +at ingest where the {infer} ingest processor is used. // TODO is there a shortcut for Inference ingest processor? +==== +//End prefix_strings `tags`:: (Optional, string) diff --git a/docs/reference/modules/cluster/shards_allocation.asciidoc b/docs/reference/modules/cluster/shards_allocation.asciidoc index 1f7b83294651f..5a7aa43155c66 100644 --- a/docs/reference/modules/cluster/shards_allocation.asciidoc +++ b/docs/reference/modules/cluster/shards_allocation.asciidoc @@ -183,6 +183,13 @@ The minimum improvement in weight which triggers a rebalancing shard movement. Defaults to `1.0f`. Raising this value will cause {es} to stop rebalancing shards sooner, leaving the cluster in a more unbalanced state.
-NOTE: Regardless of the result of the balancing algorithm, rebalancing might +[NOTE] +==== +* It is not recommended to adjust the values of the heuristics settings. The +default values are generally good, and although different values may improve +the current balance, it is possible that they create problems in the future +if the cluster or workload changes. +* Regardless of the result of the balancing algorithm, rebalancing might not be allowed due to allocation rules such as forced awareness and allocation filtering. +==== diff --git a/docs/reference/modules/discovery/quorums.asciidoc b/docs/reference/modules/discovery/quorums.asciidoc index 6f6e978891096..f6f50b88b3190 100644 --- a/docs/reference/modules/discovery/quorums.asciidoc +++ b/docs/reference/modules/discovery/quorums.asciidoc @@ -15,7 +15,7 @@ those of the other piece. Elasticsearch allows you to add and remove master-eligible nodes to a running cluster. In many cases you can do this simply by starting or stopping the nodes -as required. See <>. +as required. See <> for more information. As nodes are added or removed Elasticsearch maintains an optimal level of fault tolerance by updating the cluster's <> for more information. +==== +// end::quorums-and-availability[] + +After a master-eligible node has joined or left the cluster the elected master +may issue a cluster-state update that adjusts the voting configuration to match, +and this can take a short time to complete. It is important to wait for this +adjustment to complete before removing more nodes from the cluster. See +<> for more information. [discrete] ==== Master elections diff --git a/docs/reference/modules/discovery/voting.asciidoc b/docs/reference/modules/discovery/voting.asciidoc index b249f9f38bfd4..04cae9d02ab66 100644 --- a/docs/reference/modules/discovery/voting.asciidoc +++ b/docs/reference/modules/discovery/voting.asciidoc @@ -11,12 +11,7 @@ Usually the voting configuration is the same as the set of all the master-eligible nodes that are currently in the cluster. However, there are some situations in which they may be different. -IMPORTANT: To ensure the cluster remains available, you **must not stop half or -more of the nodes in the voting configuration at the same time**. As long as more -than half of the voting nodes are available, the cluster can work normally. For -example, if there are three or four master-eligible nodes, the cluster -can tolerate one unavailable node. If there are two or fewer master-eligible -nodes, they must all remain available. +include::quorums.asciidoc[tag=quorums-and-availability] After a node joins or leaves the cluster, {es} reacts by automatically making corresponding changes to the voting configuration in order to ensure that the diff --git a/docs/reference/query-dsl/text-expansion-query.asciidoc b/docs/reference/query-dsl/text-expansion-query.asciidoc index d15fd40846529..e924cc05376d9 100644 --- a/docs/reference/query-dsl/text-expansion-query.asciidoc +++ b/docs/reference/query-dsl/text-expansion-query.asciidoc @@ -78,29 +78,7 @@ GET my-index/_search ---- // TEST[skip: TBD] -[discrete] -[[optimizing-text-expansion]] -=== Optimizing the search performance of the text_expansion query - -https://www.elastic.co/blog/faster-retrieval-of-top-hits-in-elasticsearch-with-block-max-wand[Max WAND] -is an optimization technique used by {es} to skip documents that cannot score -competitively against the current best matching documents. However, the tokens -generated by the ELSER model don't work well with the Max WAND optimization. 
-Consequently, enabling Max WAND can actually increase query latency for -`text_expansion`. For datasets of a significant size, disabling Max -WAND leads to lower query latencies. - -Max WAND is controlled by the -<> query parameter. Setting track_total_hits -to true forces {es} to consider all documents, resulting in lower query -latencies for the `text_expansion` query. However, other {es} queries run slower -when Max WAND is disabled. - -If you are combining the `text_expansion` query with standard text queries in a -compound search, it is recommended to measure the query performance before -deciding which setting to use. - -NOTE: The `track_total_hits` option applies to all queries in the search request -and may be optimal for some queries but not for others. Take into account the -characteristics of all your queries to determine the most suitable -configuration. +[NOTE] +==== +Depending on your data, the text expansion query may be faster with `track_total_hits: false`. +==== diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index f065c2deeae72..e0568f500f268 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -1720,11 +1720,6 @@ See <>. See <>. -[role="exclude",id="getting-started"] -=== Quick start - -See {estc-welcome}/getting-started-general-purpose.html[Set up a general purpose Elastic deployment]. - [role="exclude",id="getting-started-index"] === Index some documents diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index 65a5c741a83c5..011c44216cc0c 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -7,6 +7,7 @@ This section summarizes the changes in each release. * <> +* <> * <> * <> * <> @@ -55,6 +56,7 @@ This section summarizes the changes in each release. -- include::release-notes/8.12.0.asciidoc[] +include::release-notes/8.11.1.asciidoc[] include::release-notes/8.11.0.asciidoc[] include::release-notes/8.10.4.asciidoc[] include::release-notes/8.10.3.asciidoc[] diff --git a/docs/reference/release-notes/8.10.0.asciidoc b/docs/reference/release-notes/8.10.0.asciidoc index 9fbe7a2b1d099..34d1d26e5d69a 100644 --- a/docs/reference/release-notes/8.10.0.asciidoc +++ b/docs/reference/release-notes/8.10.0.asciidoc @@ -35,6 +35,8 @@ delete all the snapshots in the repository taken with version 8.10.0 or later using a cluster running version 8.10.4. // end::repositorydata-format-change[] +include::8.7.1.asciidoc[tag=no-preventive-gc-issue] + [[breaking-8.10.0]] [float] === Breaking changes diff --git a/docs/reference/release-notes/8.10.1.asciidoc b/docs/reference/release-notes/8.10.1.asciidoc index d049d5b33b1f7..0cb00699eeac7 100644 --- a/docs/reference/release-notes/8.10.1.asciidoc +++ b/docs/reference/release-notes/8.10.1.asciidoc @@ -9,6 +9,8 @@ Also see <>. include::8.10.0.asciidoc[tag=repositorydata-format-change] +include::8.7.1.asciidoc[tag=no-preventive-gc-issue] + [[bug-8.10.1]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.10.2.asciidoc b/docs/reference/release-notes/8.10.2.asciidoc index c428b4534fe79..911a410104a26 100644 --- a/docs/reference/release-notes/8.10.2.asciidoc +++ b/docs/reference/release-notes/8.10.2.asciidoc @@ -7,4 +7,6 @@ include::8.10.0.asciidoc[tag=repositorydata-format-change] +include::8.7.1.asciidoc[tag=no-preventive-gc-issue] + Also see <>. 
diff --git a/docs/reference/release-notes/8.10.3.asciidoc b/docs/reference/release-notes/8.10.3.asciidoc index b7828f52ad082..119930058a42e 100644 --- a/docs/reference/release-notes/8.10.3.asciidoc +++ b/docs/reference/release-notes/8.10.3.asciidoc @@ -7,6 +7,19 @@ include::8.10.0.asciidoc[tag=repositorydata-format-change] +// tag::no-preventive-gc-issue[] +* High Memory Pressure due to a GC change in JDK 21 ++ +This version of Elasticsearch is bundled with JDK 21. In JDK 21 +https://bugs.openjdk.org/browse/JDK-8297639[Preventive GC has been removed]. +This may lead to increased memory pressure and an increased number of CircuitBreakerExceptions when retrieving large +documents under some particular load. (issue: {es-issue}99592[#99592]) ++ +If you needed to explicitly <>, we recommend you avoid upgrading to this version, as the settings to enable Preventive GC have been removed +from JDK 21. +// end::no-preventive-gc-issue[] + Also see <>. [[bug-8.10.3]] diff --git a/docs/reference/release-notes/8.10.4.asciidoc b/docs/reference/release-notes/8.10.4.asciidoc index f2e95af71afcb..6c49bae1e2150 100644 --- a/docs/reference/release-notes/8.10.4.asciidoc +++ b/docs/reference/release-notes/8.10.4.asciidoc @@ -25,6 +25,8 @@ first. If you cannot repair the repository in this way, first delete all the snapshots in the repository taken with version 8.10.0 or later using a cluster running version 8.10.4. +include::8.10.3.asciidoc[tag=no-preventive-gc-issue] + Also see <>. [[bug-8.10.4]] diff --git a/docs/reference/release-notes/8.11.0.asciidoc b/docs/reference/release-notes/8.11.0.asciidoc index 16ff5edd6d91a..acb27dc180727 100644 --- a/docs/reference/release-notes/8.11.0.asciidoc +++ b/docs/reference/release-notes/8.11.0.asciidoc @@ -10,6 +10,11 @@ Also see <>. Infra/Core:: * Remove `transport_versions` from cluster state API {es-pull}99223[#99223] +[[known-issues-8.11.0]] +[float] +=== Known issues +include::8.10.3.asciidoc[tag=no-preventive-gc-issue] + [[bug-8.11.0]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.11.1.asciidoc b/docs/reference/release-notes/8.11.1.asciidoc new file mode 100644 index 0000000000000..b1dbc4a95c963 --- /dev/null +++ b/docs/reference/release-notes/8.11.1.asciidoc @@ -0,0 +1,43 @@ +[[release-notes-8.11.1]] +== {es} version 8.11.1 + +Also see <>.
+ +[[known-issues-8.11.1]] +[float] +=== Known issues +include::8.10.3.asciidoc[tag=no-preventive-gc-issue] + +[[bug-8.11.1]] +[float] +=== Bug fixes + +Allocation:: +* Avoid negative `DesiredBalanceStats#lastConvergedIndex` {es-pull}101998[#101998] + +Authentication:: +* Fix memory leak from JWT cache (and fix the usage of the JWT auth cache) {es-pull}101799[#101799] + +Machine Learning:: +* Fix inference timeout from the Inference Ingest Processor {es-pull}101971[#101971] + +Mapping:: +* Fix incorrect dynamic mapping for non-numeric-value arrays #101965 {es-pull}101967[#101967] + +Network:: +* Fail listener on exception in `TcpTransport#openConnection` {es-pull}101907[#101907] (issue: {es-issue}100510[#100510]) + +Search:: +* Dry up `AsyncTaskIndexService` memory management and fix inefficient circuit breaker use {es-pull}101892[#101892] + +Snapshot/Restore:: +* Respect regional AWS STS endpoints {es-pull}101705[#101705] (issue: {es-issue}89175[#89175]) + +[[enhancement-8.11.1]] +[float] +=== Enhancements + +Machine Learning:: +* Add inference counts by model to the machine learning usage stats {es-pull}101915[#101915] + + diff --git a/docs/reference/release-notes/8.7.1.asciidoc b/docs/reference/release-notes/8.7.1.asciidoc index a0513bc1a8f0e..70f5e4add88ca 100644 --- a/docs/reference/release-notes/8.7.1.asciidoc +++ b/docs/reference/release-notes/8.7.1.asciidoc @@ -18,6 +18,23 @@ This issue is fixed in 8.8.0. include::8.6.0.asciidoc[tag=reconciliation-imbalance-known-issue] +// tag::no-preventive-gc-issue[] +* High Memory Pressure due to a GC JVM setting change ++ +This version of Elasticsearch is bundled with JDK 20. In JDK 20 +https://bugs.openjdk.org/browse/JDK-8293861[Preventive GC is disabled by default]. +This may lead to increased memory pressure and an increased number of CircuitBreakerExceptions when retrieving large +documents under some load patterns. (issue: {es-issue}99592[#99592]) ++ +If this change affects your use of Elasticsearch, consider re-enabling the previous behaviour +by adding the JVM arguments `-XX:+UnlockDiagnosticVMOptions -XX:+G1UsePreventiveGC` (reference: +https://www.oracle.com/java/technologies/javase/20-relnote-issues.html#JDK-8293861[JDK 20 release notes]). It is +important to note that this workaround is temporary and works only with JDK 20, which is bundled with Elasticsearch up +to version 8.10.2 inclusive. Subsequent versions bundle JDK 21+, where this setting +https://bugs.openjdk.org/browse/JDK-8297639[has been removed]. Specifying those JVM arguments will prevent the +JVM (and therefore Elasticsearch nodes) from starting. +// end::no-preventive-gc-issue[] + [[bug-8.7.1]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.8.2.asciidoc b/docs/reference/release-notes/8.8.2.asciidoc index d7e6b9b1fcc76..8a24ae2e8d4ef 100644 --- a/docs/reference/release-notes/8.8.2.asciidoc +++ b/docs/reference/release-notes/8.8.2.asciidoc @@ -3,6 +3,11 @@ Also see <>. +[[known-issues-8.8.2]] +[float] +=== Known issues +include::8.7.1.asciidoc[tag=no-preventive-gc-issue] + [[bug-8.8.2]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.9.0.asciidoc b/docs/reference/release-notes/8.9.0.asciidoc index 2b7b143c268dc..c49eac9f0327c 100644 --- a/docs/reference/release-notes/8.9.0.asciidoc +++ b/docs/reference/release-notes/8.9.0.asciidoc @@ -12,6 +12,8 @@ task is longer than the model's max_sequence_length and truncate is set to none then inference fails with the message `question answering result has invalid dimension`.
(issue: {es-issue}97917[#97917]) +include::8.7.1.asciidoc[tag=no-preventive-gc-issue] + [[breaking-8.9.0]] [float] === Breaking changes diff --git a/docs/reference/release-notes/8.9.1.asciidoc b/docs/reference/release-notes/8.9.1.asciidoc index 18c226538c4b9..680860622c1bb 100644 --- a/docs/reference/release-notes/8.9.1.asciidoc +++ b/docs/reference/release-notes/8.9.1.asciidoc @@ -3,6 +3,11 @@ Also see <>. +[[known-issues-8.9.1]] +[float] +=== Known issues +include::8.7.1.asciidoc[tag=no-preventive-gc-issue] + [[bug-8.9.1]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.9.2.asciidoc b/docs/reference/release-notes/8.9.2.asciidoc index 6b00405261daf..8464d21e1ccc4 100644 --- a/docs/reference/release-notes/8.9.2.asciidoc +++ b/docs/reference/release-notes/8.9.2.asciidoc @@ -3,6 +3,11 @@ Also see <>. +[[known-issues-8.9.2]] +[float] +=== Known issues +include::8.7.1.asciidoc[tag=no-preventive-gc-issue] + [float] [[security-updates-8.9.2]] === Security updates diff --git a/docs/reference/rest-api/security/get-api-keys.asciidoc b/docs/reference/rest-api/security/get-api-keys.asciidoc index ddbe0612ec987..d75edda9296a5 100644 --- a/docs/reference/rest-api/security/get-api-keys.asciidoc +++ b/docs/reference/rest-api/security/get-api-keys.asciidoc @@ -175,7 +175,7 @@ A successful call returns a JSON structure that contains the information of the <4> Creation time for the API key in milliseconds <5> Optional expiration time for the API key in milliseconds <6> Invalidation status for the API key. If the key has been invalidated, it has -a value of `true`. Otherwise, it is `false`. +a value of `true` and an additional `invalidation` field that contains the invalidation time in milliseconds. Otherwise, it is `false`. <7> Principal for which this API key was created <8> Realm name of the principal for which this API key was created <9> Metadata of the API key diff --git a/docs/reference/rest-api/security/grant-api-keys.asciidoc b/docs/reference/rest-api/security/grant-api-keys.asciidoc index ad16f602d32c2..8feb6c3cd5f52 100644 --- a/docs/reference/rest-api/security/grant-api-keys.asciidoc +++ b/docs/reference/rest-api/security/grant-api-keys.asciidoc @@ -15,7 +15,7 @@ Creates an API key on behalf of another user. [[security-api-grant-api-key-prereqs]] ==== {api-prereq-title} -* To use this API, you must have the `grant_api_key` cluster privilege. +* To use this API, you must have the `grant_api_key` or the `manage_api_key` cluster privilege. [[security-api-grant-api-key-desc]] ==== {api-description-title} @@ -23,10 +23,13 @@ Creates an API key on behalf of another user. This API is similar to <>, however it creates the API key for a user that is different than the user that runs the API. -The caller must have authentication credentials (either an access token, -or a username and password) for the user on whose behalf the API key will be -created. It is not possible to use this API to create an API key without that -user's credentials. +The caller must have authentication credentials for the user on whose behalf +the API key will be created. It is not possible to use this API to create an +API key without that user's credentials. +The supported user authentication credential types are: + * username and password + * <> + * <> The user, for whom the authentication credentials is provided, can optionally <> (impersonate) another user. @@ -55,8 +58,11 @@ The following parameters can be specified in the body of a POST request: `access_token`:: (Required*, string) -The user's access token.
If you specify the `access_token` grant type, this -parameter is required. It is not valid with other grant types. +The user's <>, or JWT. Both <> and +<> JWT token types are supported, and they depend on the underlying JWT realm configuration. +The created API key will have a point-in-time snapshot of the permissions of the user authenticated with this token +(or even more restricted permissions, see the `role_descriptors` parameter). +If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types. `api_key`:: (Required, object) @@ -83,15 +89,32 @@ It supports nested data structure. Within the `metadata` object, keys beginning with `_` are reserved for system usage. +`client_authentication`:: +(Optional, object) When using the `access_token` grant type, and when supplying a +JWT, this specifies the client authentication for <> that +need it (i.e. what's normally specified by the `ES-Client-Authentication` request header). + +`scheme`::: +(Required, string) The scheme (case-sensitive) as it's supplied in the +`ES-Client-Authentication` request header. Currently, the only supported +value is <>. + +`value`::: +(Required, string) The value that follows the scheme for the client credentials +as it's supplied in the `ES-Client-Authentication` request header. For example, +if the client were to authenticate directly with a JWT and send the request header +`ES-Client-Authentication: SharedSecret myShar3dS3cret`, then `value` here should +be `myShar3dS3cret`. + `grant_type`:: (Required, string) The type of grant. Supported grant types are: `access_token`,`password`. `access_token`::: (Required*, string) -In this type of grant, you must supply an access token that was created by the -{es} token service. For more information, see -<> and <>. +In this type of grant, you must supply either an access token that was created by the +{es} token service (see <> and <>), +or a <> (either a JWT `access_token` or a JWT `id_token`).
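+For example, here is a minimal sketch of granting an API key with a JWT
+bearer token together with a shared-secret client credential (the JWT, the
+shared secret, and the key name below are placeholder values, not output from
+a real deployment):
+
+[source,console]
+----
+POST /_security/api_key/grant
+{
+  "grant_type": "access_token",
+  "access_token": "eyJ0eXAiOiJKV1QiLC...",
+  "client_authentication": {
+    "scheme": "SharedSecret",
+    "value": "myShar3dS3cret"
+  },
+  "api_key": {
+    "name": "jwt-user-key"
+  }
+}
+----
+// TEST[skip: illustrative sketch with placeholder credentials]
+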
`password`::: In this type of grant, you must supply the user ID and password for which you diff --git a/docs/reference/rest-api/security/has-privileges-user-profile.asciidoc b/docs/reference/rest-api/security/has-privileges-user-profile.asciidoc index 6f2d234395e95..afadf394aa43c 100644 --- a/docs/reference/rest-api/security/has-privileges-user-profile.asciidoc +++ b/docs/reference/rest-api/security/has-privileges-user-profile.asciidoc @@ -96,31 +96,33 @@ requested set of cluster, index, and application privileges: [source,console] -------------------------------------------------- -POST /_security/user/_has_privileges +POST /_security/profile/_has_privileges { "uids": [ "u_LQPnxDxEjIH0GOUoFkZr5Y57YUwSkL9Joiq-g4OCbPc_0", "u_rzRnxDgEHIH0GOUoFkZr5Y27YUwSk19Joiq=g4OCxxB_1", "u_does-not-exist_0" ], - "cluster": [ "monitor", "create_snapshot", "manage_ml" ], - "index" : [ - { - "names": [ "suppliers", "products" ], - "privileges": [ "create_doc"] - }, - { - "names": [ "inventory" ], - "privileges" : [ "read", "write" ] - } - ], - "application": [ - { - "application": "inventory_manager", - "privileges" : [ "read", "data:write/inventory" ], - "resources" : [ "product/1852563" ] - } - ] + "privileges": { + "cluster": [ "monitor", "create_snapshot", "manage_ml" ], + "index" : [ + { + "names": [ "suppliers", "products" ], + "privileges": [ "create_doc"] + }, + { + "names": [ "inventory" ], + "privileges" : [ "read", "write" ] + } + ], + "application": [ + { + "application": "inventory_manager", + "privileges" : [ "read", "data:write/inventory" ], + "resources" : [ "product/1852563" ] + } + ] + } } -------------------------------------------------- // TEST[skip:TODO setup and tests will be possible once the profile uid is predictable] diff --git a/docs/reference/rest-api/security/query-api-key.asciidoc b/docs/reference/rest-api/security/query-api-key.asciidoc index f7b315d5db904..0e5973a010a47 100644 --- a/docs/reference/rest-api/security/query-api-key.asciidoc +++ b/docs/reference/rest-api/security/query-api-key.asciidoc @@ -77,6 +77,9 @@ Expiration time of the API key in milliseconds. Indicates whether the API key is invalidated. If `true`, the key is invalidated. Defaults to `false`. +`invalidation`:: +Invalidation time of the API key in milliseconds. This field is only set for invalidated API keys. + `username`:: Username of the API key owner. diff --git a/docs/reference/search/point-in-time-api.asciidoc b/docs/reference/search/point-in-time-api.asciidoc index 0403f9b04b2d1..2e32324cb44d9 100644 --- a/docs/reference/search/point-in-time-api.asciidoc +++ b/docs/reference/search/point-in-time-api.asciidoc @@ -22,6 +22,13 @@ or alias. To search a <> for an alias, you must have the `read` index privilege for the alias's data streams or indices. +[[point-in-time-api-request-body]] +==== {api-request-body-title} + +`index_filter`:: +(Optional, <>) Allows you to filter indices if the provided +query rewrites to `match_none` on every shard. + [[point-in-time-api-example]] ==== {api-examples-title} @@ -60,7 +67,7 @@ POST /_search <1> or <> as these parameters are copied from the point in time. <2> Just like regular searches, you can <>, up to the first 10,000 hits. If you +`size` to page through search results>>, up to the first 10,000 hits. If you want to retrieve more hits, use PIT with <>. <3> The `id` parameter tells Elasticsearch to execute the request using contexts from this point in time.
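+For example, the following sketch opens a point in time that only includes
+shards of `my-index-000001` whose data can match the `index_filter` (the index
+name and the timestamp range are illustrative):
+
+[source,console]
+----
+POST /my-index-000001/_pit?keep_alive=1m
+{
+  "index_filter": {
+    "range": {
+      "@timestamp": {
+        "gte": "2023-10-01"
+      }
+    }
+  }
+}
+----
+// TEST[skip: illustrative sketch only]
+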
diff --git a/docs/reference/search/search-your-data/knn-search.asciidoc b/docs/reference/search/search-your-data/knn-search.asciidoc index 4bf1ceabe08d8..c39719f1a3b61 100644 --- a/docs/reference/search/search-your-data/knn-search.asciidoc +++ b/docs/reference/search/search-your-data/knn-search.asciidoc @@ -76,12 +76,10 @@ to search one or more `dense_vector` fields with indexing enabled. requires the following mapping options: + -- -* An `index` value of `true`. - * A `similarity` value. This value determines the similarity metric used to score documents based on similarity between the query and document vector. For a list of available metrics, see the <> -parameter documentation. +parameter documentation. The `similarity` setting defaults to `cosine`. [source,console] ---- @@ -92,13 +90,11 @@ PUT image-index "image-vector": { "type": "dense_vector", "dims": 3, - "index": true, "similarity": "l2_norm" }, "title-vector": { "type": "dense_vector", "dims": 5, - "index": true, "similarity": "l2_norm" }, "title": { @@ -158,7 +154,7 @@ NOTE: Support for approximate kNN search was added in version 8.0. Before this, `dense_vector` fields did not support enabling `index` in the mapping. If you created an index prior to 8.0 containing `dense_vector` fields, then to support approximate kNN search the data must be reindexed using a new field -mapping that sets `index: true`. +mapping that sets `index: true` which is the default option. [discrete] [[tune-approximate-knn-for-speed-accuracy]] @@ -199,9 +195,7 @@ PUT byte-image-index "byte-image-vector": { "type": "dense_vector", "element_type": "byte", - "dims": 2, - "index": true, - "similarity": "cosine" + "dims": 2 }, "title": { "type": "text" @@ -516,9 +510,7 @@ PUT passage_vectors "properties": { "vector": { "type": "dense_vector", - "dims": 2, - "index": true, - "similarity": "cosine" + "dims": 2 }, "text": { "type": "text", @@ -877,7 +869,6 @@ PUT image-index "image-vector": { "type": "dense_vector", "dims": 3, - "index": true, "similarity": "l2_norm", "index_options": { "type": "hnsw", @@ -912,8 +903,8 @@ the global top `k` matches across shards. You cannot set the To run an exact kNN search, use a `script_score` query with a vector function. . Explicitly map one or more `dense_vector` fields. If you don't intend to use -the field for approximate kNN, omit the `index` mapping option or set it to -`false`. This can significantly improve indexing speed. +the field for approximate kNN, set the `index` mapping option to `false`. This +can significantly improve indexing speed. + [source,console] ---- diff --git a/docs/reference/search/search-your-data/search-api.asciidoc b/docs/reference/search/search-your-data/search-api.asciidoc index f3e271918b9b2..496812a0cedb4 100644 --- a/docs/reference/search/search-your-data/search-api.asciidoc +++ b/docs/reference/search/search-your-data/search-api.asciidoc @@ -440,6 +440,17 @@ GET my-index-000001/_search Finally you can force an accurate count by setting `"track_total_hits"` to `true` in the request. +[TIP] +========================================= +The `track_total_hits` parameter allows you to trade hit count accuracy for performance. +In general the lower the value of `track_total_hits` the faster the query will be, +with `false` returning the fastest results. 
+Setting `track_total_hits` to `true` will cause {es} to return exact hit counts, which could +hurt query performance because it disables the +https://www.elastic.co/blog/faster-retrieval-of-top-hits-in-elasticsearch-with-block-max-wand[Max WAND] +optimization. +========================================= + [discrete] [[quickly-check-for-matching-docs]] === Quickly check for matching docs diff --git a/docs/reference/search/search-your-data/semantic-search-elser.asciidoc b/docs/reference/search/search-your-data/semantic-search-elser.asciidoc index 164beb221cd4f..0bee9533cd358 100644 --- a/docs/reference/search/search-your-data/semantic-search-elser.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search-elser.asciidoc @@ -45,7 +45,7 @@ you must provide suitably sized nodes yourself. First, the mapping of the destination index - the index that contains the tokens that the model created based on your text - must be created. The destination index must have a field with the -<> or <> field +<> or <> field type to index the ELSER output. NOTE: ELSER output must be ingested into a field with the `sparse_vector` or @@ -72,11 +72,11 @@ PUT my-index } ---- // TEST[skip:TBD] -<1> The name of the field to contain the generated tokens. It must be refrenced +<1> The name of the field to contain the generated tokens. It must be referenced in the {infer} pipeline configuration in the next step. <2> The field to contain the tokens is a `sparse_vector` field. -<3> The name of the field from which to create the sparse vector representation. -In this example, the name of the field is `content`. It must be referenced in the +<3> The name of the field from which to create the sparse vector representation. +In this example, the name of the field is `content`. It must be referenced in the {infer} pipeline configuration in the next step. <4> The field type which is text in this example. @@ -93,24 +93,24 @@ that is being ingested in the pipeline. [source,console] ---- -PUT _ingest/pipeline/elser-v2-test -{ - "processors": [ - { - "inference": { - "model_id": ".elser_model_2", - "input_output": [ <1> - { - "input_field": "content", - "output_field": "content_embedding" - } - ] - } - } - ] +PUT _ingest/pipeline/elser-v2-test +{ + "processors": [ + { + "inference": { + "model_id": ".elser_model_2", + "input_output": [ <1> + { + "input_field": "content", + "output_field": "content_embedding" + } + ] + } + } + ] } ---- -<1> Configuration object that defines the `input_field` for the {infer} process +<1> Configuration object that defines the `input_field` for the {infer} process and the `output_field` that will contain the {infer} results. //// @@ -137,8 +137,8 @@ https://github.com/elastic/stack-docs/blob/main/docs/en/stack/ml/nlp/data/msmarc Download the file and upload it to your cluster using the {kibana-ref}/connect-to-elasticsearch.html#upload-data-kibana[Data Visualizer] -in the {ml-app} UI. Assign the name `id` to the first column and `content` to -the second column. The index name is `test-data`. Once the upload is complete, +in the {ml-app} UI. Assign the name `id` to the first column and `content` to +the second column. The index name is `test-data`. Once the upload is complete, you can see an index named `test-data` with 182469 documents. @@ -184,9 +184,9 @@ follow the progress. [[text-expansion-query]] ==== Semantic search by using the `text_expansion` query -To perform semantic search, use the `text_expansion` query, and provide the -query text and the ELSER model ID.
The example below uses the query text "How to -avoid muscle soreness after running?", the `content_embedding` field contains +To perform semantic search, use the `text_expansion` query, and provide the +query text and the ELSER model ID. The example below uses the query text "How to +avoid muscle soreness after running?"; the `content_embedding` field contains the generated ELSER output: [source,console] ---- @@ -208,9 +208,9 @@ GET my-index/_search The result is the top 10 documents that are closest in meaning to your query text from the `my-index` index sorted by their relevancy. The result also contains the extracted tokens for each of the relevant search results with their -weights. Tokens are learned associations capturing relevance, they are not -synonyms. To learn more about what tokens are, refer to -{ml-docs}/ml-nlp-elser.html#elser-tokens[this page]. It is possible to exclude +weights. Tokens are learned associations capturing relevance; they are not +synonyms. To learn more about what tokens are, refer to +{ml-docs}/ml-nlp-elser.html#elser-tokens[this page]. It is possible to exclude tokens from source, refer to <> to learn more. [source,console-result] ---- // NOTCONSOLE -To learn about optimizing your `text_expansion` query, refer to -<>. - [discrete] [[text-expansion-compound-query]] ==== Combining semantic search with other queries wait GET my-index/_search "bool": { <1> "should": [ { - "text_expansion": { + "text_expansion": { "content_embedding": { "model_text": "How to avoid muscle soreness after running?", "model_id": ".elser_model_2", @@ -333,12 +330,12 @@ WARNING: Reindex uses the document source to populate the destination index. space-saving optimisation that should only be applied if you are certain that reindexing will not be required in the future! It's important to carefully consider this trade-off and make sure that excluding the ELSER terms from the -source aligns with your specific requirements and use case. Review the -<> and <> sections carefully to learn +source aligns with your specific requirements and use case. Review the +<> and <> sections carefully to learn more about the possible consequences of excluding the tokens from the `_source`. -The mapping that excludes `content_embedding` from the `_source` field can be -created by the following API call: +The mapping that excludes `content_embedding` from the `_source` field can be +created by the following API call: [source,console] ---- @@ -352,10 +349,10 @@ PUT my-index }, "properties": { "content_embedding": { - "type": "sparse_vector" + "type": "sparse_vector" }, - "content": { - "type": "text" + "content": { + "type": "text" } } } @@ -363,6 +360,10 @@ PUT my-index ---- // TEST[skip:TBD] +[NOTE] +==== +Depending on your data, the text expansion query may be faster with `track_total_hits: false`. +==== [discrete] [[further-reading]] diff --git a/docs/reference/security/authentication/jwt-realm.asciidoc b/docs/reference/security/authentication/jwt-realm.asciidoc index 142c93286c2e9..68e20380449a5 100644 --- a/docs/reference/security/authentication/jwt-realm.asciidoc +++ b/docs/reference/security/authentication/jwt-realm.asciidoc @@ -123,8 +123,9 @@ Instructs the realm to treat and validate incoming JWTs as ID Tokens (`id_token` Specifies the client authentication type as `shared_secret`, which means that the client is authenticated using an HTTP request header that must match a pre-configured secret value.
The client must provide this shared secret with -every request in the `ES-Client-Authentication` header. The header value must be a -case-sensitive match to the realm's `client_authentication.shared_secret`. +every request in the `ES-Client-Authentication` header and using the +`SharedSecret` scheme. The header value must be a case-sensitive match +to the realm's `client_authentication.shared_secret`. `allowed_issuer`:: Sets a verifiable identifier for your JWT issuer. This value is typically a @@ -519,6 +520,7 @@ After mapping the roles, you can make an <> to {es} using a JWT and include the `ES-Client-Authentication` header: +[[jwt-auth-shared-secret-scheme-example]] [source,sh] ---- curl -s -X GET -H "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJhdWQiOlsiZXMwMSIsImVzMDIiLCJlczAzIl0sInN1YiI6InVzZXIyIiwiaXNzIjoibXktaXNzdWVyIiwiZXhwIjo0MDcwOTA4ODAwLCJpYXQiOjk0NjY4NDgwMCwiZW1haWwiOiJ1c2VyMkBzb21ldGhpbmcuZXhhbXBsZS5jb20ifQ.UgO_9w--EoRyUKcWM5xh9SimTfMzl1aVu6ZBsRWhxQA" -H "ES-Client-Authentication: sharedsecret test-secret" https://localhost:9200/_security/_authenticate diff --git a/docs/reference/setup/important-settings/path-settings.asciidoc b/docs/reference/setup/important-settings/path-settings.asciidoc index 0b46a35db7262..3e87d504963a2 100644 --- a/docs/reference/setup/important-settings/path-settings.asciidoc +++ b/docs/reference/setup/important-settings/path-settings.asciidoc @@ -127,6 +127,6 @@ double the size of your cluster so it will only work if you have the capacity to expand your cluster like this. If you currently use multiple data paths but your cluster is not highly -available then the you can migrate to a non-deprecated configuration by taking +available then you can migrate to a non-deprecated configuration by taking a snapshot, creating a new cluster with the desired configuration and restoring the snapshot into it. diff --git a/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc b/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc index 252ef827649fa..2b2090405af60 100644 --- a/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc @@ -178,6 +178,20 @@ complete. In this case it still instructs some of the other nodes in the cluster to attempt to read the blob, but all of these reads must fail to find the blob. +Linearizable registers are special blobs that {es} manipulates using an atomic +compare-and-exchange operation. This operation ensures correct and +strongly-consistent behavior even when the blob is accessed by multiple nodes +at the same time. The detailed implementation of the compare-and-exchange +operation on linearizable registers varies by repository type. Repository +analysis verifies that uncontended compare-and-exchange operations on a +linearizable register blob always succeed. Repository analysis also verifies +that contended operations either succeed or report the contention but do not +return incorrect results. If an operation fails due to contention, {es} retries +the operation until it succeeds. Most of the compare-and-exchange operations +performed by repository analysis atomically increment a counter, which is +represented as an 8-byte blob. Some operations also verify the behavior on +small blobs with sizes other than 8 bytes.
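+For example, the following sketch triggers an analysis that exercises these
+register operations (the repository name is illustrative, and the
+`register_operation_count` parameter is assumed here to control how many
+register operations the analysis performs):
+
+[source,console]
+----
+POST /_snapshot/my_repository/_analyze?blob_count=10&max_blob_size=1mb&register_operation_count=20
+----
+// TEST[skip: illustrative sketch only]
+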
+ [[repo-analysis-api-path-params]] ==== {api-path-parms-title} diff --git a/docs/reference/snapshot-restore/repository-azure.asciidoc b/docs/reference/snapshot-restore/repository-azure.asciidoc index e848ec9620cb4..35cf454906050 100644 --- a/docs/reference/snapshot-restore/repository-azure.asciidoc +++ b/docs/reference/snapshot-restore/repository-azure.asciidoc @@ -257,3 +257,15 @@ following naming rules: permitted in container names. * All letters in a container name must be lowercase. * Container names must be from 3 through 63 characters long. + +[[repository-azure-linearizable-registers]] +==== Linearizable register implementation + +The linearizable register implementation for Azure repositories is based on +Azure's support for strongly consistent leases. Each lease may only be held by +a single node at any time. The node presents its lease when performing a read +or write operation on a protected blob. Lease-protected operations fail if the +lease is invalid or expired. To perform a compare-and-exchange operation on a +register, {es} first obtains a lease on the blob, then reads the blob contents +under the lease, and finally uploads the updated blob under the same lease. +This process ensures that the read and write operations happen atomically. diff --git a/docs/reference/snapshot-restore/repository-gcs.asciidoc b/docs/reference/snapshot-restore/repository-gcs.asciidoc index d99b9bc81567f..b359952715a73 100644 --- a/docs/reference/snapshot-restore/repository-gcs.asciidoc +++ b/docs/reference/snapshot-restore/repository-gcs.asciidoc @@ -275,3 +275,13 @@ The service account used to access the bucket must have the "Writer" access to t 3. Go to the https://console.cloud.google.com/storage/browser[Storage Browser]. 4. Select the bucket and "Edit bucket permission". 5. The service account must be configured as a "User" with "Writer" access. + +[[repository-gcs-linearizable-registers]] +==== Linearizable register implementation + +The linearizable register implementation for GCS repositories is based on GCS's +support for strongly consistent preconditions on put-blob operations. To +perform a compare-and-exchange operation on a register, {es} retrieves the +register blob and its current generation, and then uploads the updated blob +using the observed generation as its precondition. The precondition ensures +that the generation has not changed in the meantime. diff --git a/docs/reference/snapshot-restore/repository-s3.asciidoc b/docs/reference/snapshot-restore/repository-s3.asciidoc index 70993f5b515b3..032d4f47bf678 100644 --- a/docs/reference/snapshot-restore/repository-s3.asciidoc +++ b/docs/reference/snapshot-restore/repository-s3.asciidoc @@ -12,7 +12,7 @@ https://www.elastic.co/cloud/.* To register an S3 repository, specify the type as `s3` when creating the repository. The repository defaults to using https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html[ECS -IAM Role] credentials for authentication. You can also use <> Kubernetes service accounts. +IAM Role] credentials for authentication. You can also use <> for authentication. The only mandatory setting is the bucket name: @@ -133,6 +133,12 @@ settings belong in the `elasticsearch.yml` file. The port of a proxy to connect to S3 through. +`proxy.scheme`:: + + The scheme to use for the proxy connection to S3. Valid values are either `http` or `https`. + Defaults to `http`. 
This setting allows you to specify the protocol used for communication with the + proxy server. + `proxy.username` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: The username to connect to the `proxy.host` with. @@ -198,75 +204,6 @@ pattern then you should set this setting to `true` when upgrading. https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/ClientConfiguration.html#setSignerOverride-java.lang.String-[AWS Java SDK documentation] for details. Defaults to empty string which means that no signing algorithm override will be used. -[discrete] -[[repository-s3-compatible-services]] -===== S3-compatible services - -There are a number of storage systems that provide an S3-compatible API, and -the `repository-s3` type allows you to use these systems in place of AWS S3. -To do so, you should set the `s3.client.CLIENT_NAME.endpoint` setting to the -system's endpoint. This setting accepts IP addresses and hostnames and may -include a port. For example, the endpoint may be `172.17.0.2` or -`172.17.0.2:9000`. - -By default {es} communicates with your storage system using HTTPS, and -validates the repository's certificate chain using the JVM-wide truststore. -Ensure that the JVM-wide truststore includes an entry for your repository. If -you wish to use unsecured HTTP communication instead of HTTPS, set -`s3.client.CLIENT_NAME.protocol` to `http`. - -https://minio.io[MinIO] is an example of a storage system that provides an -S3-compatible API. The `repository-s3` type allows {es} to work with -MinIO-backed repositories as well as repositories stored on AWS S3. Other -S3-compatible storage systems may also work with {es}, but these are not -covered by the {es} test suite. - -Note that some storage systems claim to be S3-compatible but do not faithfully -emulate S3's behaviour in full. The `repository-s3` type requires full -compatibility with S3. In particular it must support the same set of API -endpoints, return the same errors in case of failures, and offer consistency and -performance at least as good as S3 even when accessed concurrently by multiple -nodes. You will need to work with the supplier of your storage system to address -any incompatibilities you encounter. Please do not report {es} issues involving -storage systems which claim to be S3-compatible unless you can demonstrate that -the same issue exists when using a genuine AWS S3 repository. - -You can perform some basic checks of the suitability of your storage system -using the {ref}/repo-analysis-api.html[repository analysis API]. If this API -does not complete successfully, or indicates poor performance, then your -storage system is not fully compatible with AWS S3 and therefore unsuitable for -use as a snapshot repository. However, these checks do not guarantee full -compatibility. - -Most storage systems can be configured to log the details of their interaction -with {es}. If you are investigating a suspected incompatibility with AWS S3, it -is usually simplest to collect these logs and provide them to the supplier of -your storage system for further analysis.
If the incompatibility is not clear -from the logs emitted by the storage system, configure {es} to log every -request it makes to the S3 API by <> of the `com.amazonaws.request` logger to `DEBUG`: - -[source,console] ----- -PUT /_cluster/settings -{ - "persistent": { - "logger.com.amazonaws.request": "DEBUG" - } -} ----- -// TEST[skip:we don't really want to change this logger] - -Collect the Elasticsearch logs covering the time period of the failed analysis -from all nodes in your cluster and share them with the supplier of your storage -system along with the analysis response so they can use them to determine the -problem. See the -https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/java-dg-logging.html[AWS Java SDK] -documentation for further information, including details about other loggers -that can be used to obtain even more verbose logs. When you have finished -collecting the logs needed by your supplier, set the logger settings back to -`null` to return to the default logging configuration. See <> -and <> for more information. [[repository-s3-repository]] ==== Repository settings @@ -401,7 +338,7 @@ This sets up a repository that uses all client settings from the client `my.s3.endpoint` by the repository settings. [[repository-s3-permissions]] -===== Recommended S3 permissions +==== Recommended S3 permissions In order to restrict the Elasticsearch snapshot process to the minimum required resources, we recommend using Amazon IAM in conjunction with pre-existing S3 @@ -493,7 +430,28 @@ bucket, in this example, named "foo". The bucket needs to exist to register a repository for snapshots. If you did not create the bucket then the repository registration will fail. -===== Cleaning up multi-part uploads +[[iam-kubernetes-service-accounts]] +[discrete] +===== Using IAM roles for Kubernetes service accounts for authentication + +If you want to use https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/[Kubernetes service accounts] +for authentication, you need to add a symlink to the `$AWS_WEB_IDENTITY_TOKEN_FILE` environment variable +(which should be automatically set by a Kubernetes pod) in the S3 repository config directory, so the repository +can have read access for the service account (a repository can't read any files outside its config directory). +For example: + +[source,bash] +---- +mkdir -p "${ES_PATH_CONF}/repository-s3" +ln -s $AWS_WEB_IDENTITY_TOKEN_FILE "${ES_PATH_CONF}/repository-s3/aws-web-identity-token-file" +---- + +IMPORTANT: The symlink must be created on all data and master-eligible nodes and be readable +by the `elasticsearch` user. By default, {es} runs as user `elasticsearch` using uid:gid `1000:0`. + +If the symlink exists, it will be used by default by all S3 repositories that don't have explicit `client` credentials. + +==== Cleaning up multi-part uploads {es} uses S3's multi-part upload process to upload larger blobs to the repository. The multi-part upload process works by dividing each blob into @@ -521,7 +479,6 @@ a bucket lifecycle policy] to automatically abort incomplete uploads once they reach a certain age. [[repository-s3-aws-vpc]] -[discrete] ==== AWS VPC bandwidth settings AWS instances resolve S3 endpoints to a public IP. If the Elasticsearch @@ -537,23 +494,81 @@ bandwidth of your VPC's NAT instance. Instances residing in a public subnet in an AWS VPC will connect to S3 via the VPC's internet gateway and not be bandwidth limited by the VPC's NAT instance.
+[[repository-s3-compatible-services]] +==== S3-compatible services -[[iam-kubernetes-service-accounts]] -[discrete] -==== Using IAM roles for Kubernetes service accounts for authentication -If you want to use https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/[Kubernetes service accounts] -for authentication, you need to add a symlink to the `$AWS_WEB_IDENTITY_TOKEN_FILE` environment variable -(which should be automatically set by a Kubernetes pod) in the S3 repository config directory, so the repository -can have the read access for the service account (a repository can't read any files outside its config directory). -For example: +There are a number of storage systems that provide an S3-compatible API, and +the `repository-s3` type allows you to use these systems in place of AWS S3. +To do so, you should set the `s3.client.CLIENT_NAME.endpoint` setting to the +system's endpoint. This setting accepts IP addresses and hostnames and may +include a port. For example, the endpoint may be `172.17.0.2` or +`172.17.0.2:9000`. -[source,bash] +By default {es} communicates with your storage system using HTTPS, and +validates the repository's certificate chain using the JVM-wide truststore. +Ensure that the JVM-wide truststore includes an entry for your repository. If +you wish to use unsecured HTTP communication instead of HTTPS, set +`s3.client.CLIENT_NAME.protocol` to `http`. + +https://minio.io[MinIO] is an example of a storage system that provides an +S3-compatible API. The `repository-s3` type allows {es} to work with +MinIO-backed repositories as well as repositories stored on AWS S3. Other +S3-compatible storage systems may also work with {es}, but these are not +covered by the {es} test suite. + +Note that some storage systems claim to be S3-compatible but do not faithfully +emulate S3's behaviour in full. The `repository-s3` type requires full +compatibility with S3. In particular it must support the same set of API +endpoints, return the same errors in case of failures, and offer consistency and +performance at least as good as S3 even when accessed concurrently by multiple +nodes. You will need to work with the supplier of your storage system to address +any incompatibilities you encounter. Please do not report {es} issues involving +storage systems which claim to be S3-compatible unless you can demonstrate that +the same issue exists when using a genuine AWS S3 repository. + +You can perform some basic checks of the suitability of your storage system +using the {ref}/repo-analysis-api.html[repository analysis API]. If this API +does not complete successfully, or indicates poor performance, then your +storage system is not fully compatible with AWS S3 and therefore unsuitable for +use as a snapshot repository. However, these checks do not guarantee full +compatibility. + +Most storage systems can be configured to log the details of their interaction +with {es}. If you are investigating a suspected incompatibility with AWS S3, it +is usually simplest to collect these logs and provide them to the supplier of +your storage system for further analysis. 
If the incompatibility is not clear +from the logs emitted by the storage system, configure {es} to log every +request it makes to the S3 API by <> of the `com.amazonaws.request` logger to `DEBUG`: + +[source,console] ---- -mkdir -p "${ES_PATH_CONF}/repository-s3" -ln -s $AWS_WEB_IDENTITY_TOKEN_FILE "${ES_PATH_CONF}/repository-s3/aws-web-identity-token-file" +PUT /_cluster/settings +{ + "persistent": { + "logger.com.amazonaws.request": "DEBUG" + } +} ---- +// TEST[skip:we don't really want to change this logger] -IMPORTANT: The symlink must be created on all data and master eligible nodes and be readable -by the `elasticsearch` user. By default, {es} runs as user `elasticsearch` using uid:gid `1000:0`. +Collect the Elasticsearch logs covering the time period of the failed analysis +from all nodes in your cluster and share them with the supplier of your storage +system along with the analysis response so they can use them to determine the +problem. See the +https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/java-dg-logging.html[AWS Java SDK] +documentation for further information, including details about other loggers +that can be used to obtain even more verbose logs. When you have finished +collecting the logs needed by your supplier, set the logger settings back to +`null` to return to the default logging configuration. See <> +and <> for more information. -If the symlink exists, it will be used by default by all S3 repositories that don't have explicit `client` credentials. +[[repository-s3-linearizable-registers]] +==== Linearizable register implementation + +The linearizable register implementation for S3 repositories is based on the +strongly consistent semantics of the multipart upload API. {es} first creates a +multipart upload to indicate its intention to perform a linearizable register +operation. {es} then lists and cancels all other multipart uploads for the same +register. {es} then attempts to complete the upload. If the upload completes +successfully then the compare-and-exchange operation was atomic. diff --git a/docs/reference/snapshot-restore/repository-shared-file-system.asciidoc b/docs/reference/snapshot-restore/repository-shared-file-system.asciidoc index 0bd64d43f1381..6be49d9d4422f 100644 --- a/docs/reference/snapshot-restore/repository-shared-file-system.asciidoc +++ b/docs/reference/snapshot-restore/repository-shared-file-system.asciidoc @@ -84,3 +84,12 @@ each node, but for these accounts to have different numeric user or group IDs. If your shared file system uses NFS then ensure that every node is running with the same numeric UID and GID, or else update your NFS configuration to account for the variance in numeric IDs across nodes. + +[[repository-fs-linearizable-registers]] +==== Linearizable register implementation + +The linearizable register implementation for shared filesystem repositories is +based around file locking. To perform a compare-and-exchange operation on a +register, {es} first locks the underlying file and then writes the updated +contents under the same lock. This ensures that the file has not changed in the +meantime.
diff --git a/docs/reference/sql/language/indices.asciidoc b/docs/reference/sql/language/indices.asciidoc index 1f986bfea8c70..1dee7f0840ade 100644 --- a/docs/reference/sql/language/indices.asciidoc +++ b/docs/reference/sql/language/indices.asciidoc @@ -111,13 +111,13 @@ Explicitly perform the inclusion through the dedicated `FROZEN` keyword in the ` [source, sql] ---- -include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesIncludeFrozen] +include-tagged::{sql-specs}/docs/docs-frozen.csv-spec[showTablesIncludeFrozen] ---- [source, sql] ---- -include-tagged::{sql-specs}/docs/docs.csv-spec[fromTableIncludeFrozen] +include-tagged::{sql-specs}/docs/docs-frozen.csv-spec[fromTableIncludeFrozen] ---- Unless enabled, frozen indices are completely ignored; it is as if they do not exist and as such, queries ran against them are likely to fail. diff --git a/docs/reference/tab-widgets/api-call-widget.asciidoc b/docs/reference/tab-widgets/api-call-widget.asciidoc new file mode 100644 index 0000000000000..adc2aa86f1c0e --- /dev/null +++ b/docs/reference/tab-widgets/api-call-widget.asciidoc @@ -0,0 +1,40 @@ +++++ +
    +
    + + +
    +
    +++++ + +include::api-call.asciidoc[tag=cloud] + +++++ +
    + +
    +++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/api-call.asciidoc b/docs/reference/tab-widgets/api-call.asciidoc new file mode 100644 index 0000000000000..ecbd49eae7f8f --- /dev/null +++ b/docs/reference/tab-widgets/api-call.asciidoc @@ -0,0 +1,57 @@ +// tag::cloud[] +**Use {kib}** + +//tag::kibana-api-ex[] +. Open {kib}'s main menu ("*☰*" near Elastic logo) and go to **Dev Tools > Console**. ++ +[role="screenshot"] +image::images/kibana-console.png[{kib} Console,align="center"] + +. Run the following test API request in Console: ++ +[source,console] +---- +GET / +---- + +//end::kibana-api-ex[] + +**Use curl** + +To communicate with {es} using curl or another client, you need your cluster's +endpoint. + +. Open {kib}'s main menu and click **Manage this deployment**. + +. From your deployment menu, go to the **Elasticsearch** page. Click **Copy +endpoint**. + +. To submit an example API request, run the following curl command in a new +terminal session. Replace `` with the password for the `elastic` user. +Replace `` with your endpoint. ++ +[source,sh] +---- +curl -u elastic: / +---- +// NOTCONSOLE + +// end::cloud[] + +// tag::self-managed[] +**Use {kib}** + +include::api-call.asciidoc[tag=kibana-api-ex] + +**Use curl** + +To submit an example API request, run the following curl command in a new +terminal session. + +[source,sh] +---- +curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200 +---- +// NOTCONSOLE + +// end::self-managed[] \ No newline at end of file diff --git a/docs/reference/tab-widgets/code.asciidoc b/docs/reference/tab-widgets/code.asciidoc new file mode 100644 index 0000000000000..a6949b681edc6 --- /dev/null +++ b/docs/reference/tab-widgets/code.asciidoc @@ -0,0 +1,163 @@ +// Defining styles and script here for simplicity. +++++ + + +++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/esql/esql-getting-started.asciidoc b/docs/reference/tab-widgets/esql/esql-getting-started-discover-console.asciidoc similarity index 89% rename from docs/reference/tab-widgets/esql/esql-getting-started.asciidoc rename to docs/reference/tab-widgets/esql/esql-getting-started-discover-console.asciidoc index 0ebcb7c92e59f..b8998ef199c99 100644 --- a/docs/reference/tab-widgets/esql/esql-getting-started.asciidoc +++ b/docs/reference/tab-widgets/esql/esql-getting-started-discover-console.asciidoc @@ -34,6 +34,9 @@ FROM sample_data include::../../esql/esql-kibana.asciidoc[tag=esql-mode] +Adjust the time filter so it includes the timestamps in the sample data (October +23rd, 2023). + After switching to {esql} mode, the query bar shows a sample query. You can replace this query with the queries in this getting started guide. diff --git a/docs/reference/tab-widgets/esql/esql-getting-started-enrich-policy.asciidoc b/docs/reference/tab-widgets/esql/esql-getting-started-enrich-policy.asciidoc new file mode 100644 index 0000000000000..a1898dffda684 --- /dev/null +++ b/docs/reference/tab-widgets/esql/esql-getting-started-enrich-policy.asciidoc @@ -0,0 +1,66 @@ +// tag::own-deployment[] + +The following requests create and execute a policy called `clientip_policy`. 
The +policy links an IP address to an environment ("Development", "QA", or +"Production"): + +[source,console] +---- +PUT clientips +{ + "mappings": { + "properties": { + "client.ip": { + "type": "keyword" + }, + "env": { + "type": "keyword" + } + } + } +} + +PUT clientips/_bulk +{ "index" : {}} +{ "client.ip": "172.21.0.5", "env": "Development" } +{ "index" : {}} +{ "client.ip": "172.21.2.113", "env": "QA" } +{ "index" : {}} +{ "client.ip": "172.21.2.162", "env": "QA" } +{ "index" : {}} +{ "client.ip": "172.21.3.15", "env": "Production" } +{ "index" : {}} +{ "client.ip": "172.21.3.16", "env": "Production" } + +PUT /_enrich/policy/clientip_policy +{ + "match": { + "indices": "clientips", + "match_field": "client.ip", + "enrich_fields": ["env"] + } +} + +PUT /_enrich/policy/clientip_policy/_execute?wait_for_completion=false +---- +// TEST[s/\?wait_for_completion=false//] + +//// +[source,console] +---- +DELETE /_enrich/policy/clientip_policy +---- +// TEST[continued] +//// + +// end::own-deployment[] + + +// tag::demo-env[] + +On the demo environment at https://esql.demo.elastic.co/[esql.demo.elastic.co], +an enrich policy called `clientip_policy` has already been created and executed. +The policy links an IP address to an environment ("Development", "QA", or +"Production"). + +// end::demo-env[] diff --git a/docs/reference/tab-widgets/esql/esql-getting-started-sample-data.asciidoc b/docs/reference/tab-widgets/esql/esql-getting-started-sample-data.asciidoc new file mode 100644 index 0000000000000..434954d8d400a --- /dev/null +++ b/docs/reference/tab-widgets/esql/esql-getting-started-sample-data.asciidoc @@ -0,0 +1,48 @@ +// tag::own-deployment[] + +First ingest some sample data. In {kib}, open the main menu and select *Dev +Tools*. Run the following two requests: + +[source,console] +---- +PUT sample_data +{ + "mappings": { + "properties": { + "client.ip": { + "type": "ip" + }, + "message": { + "type": "keyword" + } + } + } +} + +PUT sample_data/_bulk +{"index": {}} +{"@timestamp": "2023-10-23T12:15:03.360Z", "client.ip": "172.21.2.162", "message": "Connected to 10.1.0.3", "event.duration": 3450233} +{"index": {}} +{"@timestamp": "2023-10-23T12:27:28.948Z", "client.ip": "172.21.2.113", "message": "Connected to 10.1.0.2", "event.duration": 2764889} +{"index": {}} +{"@timestamp": "2023-10-23T13:33:34.937Z", "client.ip": "172.21.0.5", "message": "Disconnected", "event.duration": 1232382} +{"index": {}} +{"@timestamp": "2023-10-23T13:51:54.732Z", "client.ip": "172.21.3.15", "message": "Connection error", "event.duration": 725448} +{"index": {}} +{"@timestamp": "2023-10-23T13:52:55.015Z", "client.ip": "172.21.3.15", "message": "Connection error", "event.duration": 8268153} +{"index": {}} +{"@timestamp": "2023-10-23T13:53:55.832Z", "client.ip": "172.21.3.15", "message": "Connection error", "event.duration": 5033755} +{"index": {}} +{"@timestamp": "2023-10-23T13:55:01.543Z", "client.ip": "172.21.3.15", "message": "Connected to 10.1.0.1", "event.duration": 1756467} +---- + +// end::own-deployment[] + + +// tag::demo-env[] + +The data set used in this guide has been preloaded into the Elastic {esql} +public demo environment. Visit +https://esql.demo.elastic.co/[esql.demo.elastic.co] to start using it.
+ +// end::demo-env[] diff --git a/docs/reference/tab-widgets/esql/esql-getting-started-widget.asciidoc b/docs/reference/tab-widgets/esql/esql-getting-started-widget-discover-console.asciidoc similarity index 72% rename from docs/reference/tab-widgets/esql/esql-getting-started-widget.asciidoc rename to docs/reference/tab-widgets/esql/esql-getting-started-widget-discover-console.asciidoc index 49dc573f3b0bb..dff80e25812c3 100644 --- a/docs/reference/tab-widgets/esql/esql-getting-started-widget.asciidoc +++ b/docs/reference/tab-widgets/esql/esql-getting-started-widget-discover-console.asciidoc @@ -1,6 +1,6 @@ ++++ -
    -
    +
    +
    @@ -31,7 +31,7 @@ include::esql-getting-started.asciidoc[tag=console] hidden=""> ++++ -include::esql-getting-started.asciidoc[tag=discover] +include::esql-getting-started-discover-console.asciidoc[tag=discover] ++++
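The Discover and Console instructions above cover running the getting-started queries from {kib}. For readers who prefer to drive the same queries from code, here is a minimal sketch that submits one over the {esql} HTTP endpoint (`POST /_query`) using only the JDK. It assumes a local test cluster at `http://localhost:9200` with security disabled; a real deployment needs TLS and authentication, and the `EsqlQuerySketch` class name is invented for this example.

[source,java]
----
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Hypothetical demo class, not part of the repository.
public class EsqlQuerySketch {
    public static void main(String[] args) throws Exception {
        // The same kind of query shown in the getting started guide.
        String body = """
            {"query": "FROM sample_data | LIMIT 10"}""";
        HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:9200/_query?format=txt")) // format=txt returns a text table
            .header("Content-Type", "application/json")
            .POST(HttpRequest.BodyPublishers.ofString(body))
            .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());
    }
}
----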
    diff --git a/docs/reference/tab-widgets/esql/esql-getting-started-widget-enrich-policy.asciidoc b/docs/reference/tab-widgets/esql/esql-getting-started-widget-enrich-policy.asciidoc new file mode 100644 index 0000000000000..cafefeb2652e4 --- /dev/null +++ b/docs/reference/tab-widgets/esql/esql-getting-started-widget-enrich-policy.asciidoc @@ -0,0 +1,39 @@ +++++ +
    +
    + + +
    +
    +++++ + +include::esql-getting-started-enrich-policy.asciidoc[tag=own-deployment] + +++++ +
    + +
    +++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/esql/esql-getting-started-widget-sample-data.asciidoc b/docs/reference/tab-widgets/esql/esql-getting-started-widget-sample-data.asciidoc new file mode 100644 index 0000000000000..4a33cf3f08866 --- /dev/null +++ b/docs/reference/tab-widgets/esql/esql-getting-started-widget-sample-data.asciidoc @@ -0,0 +1,39 @@ +++++ +
    +
    + + +
    +
    +++++ + +include::esql-getting-started-sample-data.asciidoc[tag=own-deployment] + +++++ +
    + +
    +++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/quick-start-install-widget.asciidoc b/docs/reference/tab-widgets/quick-start-install-widget.asciidoc new file mode 100644 index 0000000000000..f3ff804ade255 --- /dev/null +++ b/docs/reference/tab-widgets/quick-start-install-widget.asciidoc @@ -0,0 +1,40 @@ +++++ +
    +
    + + +
    +
    +++++ + +include::quick-start-install.asciidoc[tag=cloud] + +++++ +
    + +
    +++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/quick-start-install.asciidoc b/docs/reference/tab-widgets/quick-start-install.asciidoc new file mode 100644 index 0000000000000..b8daf62dad63b --- /dev/null +++ b/docs/reference/tab-widgets/quick-start-install.asciidoc @@ -0,0 +1,71 @@ + +// tag::cloud[] +include::{docs-root}/shared/cloud/ess-getting-started.asciidoc[tag=generic] + +. Click **Continue** to open {kib}, the user interface for {ecloud}. + +. Click **Explore on my own**. +// end::cloud[] + +// tag::self-managed[] +*Start a single-node cluster* + +We'll use a single-node {es} cluster in this quick start, which makes sense for testing and development. +Refer to <> for advanced Docker documentation. + +. Run the following Docker commands: ++ +[source,sh,subs="attributes"] +---- +docker network create elastic +docker pull {docker-image} +docker run --name es01 --net elastic -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -t {docker-image} +---- + +. Copy the generated `elastic` password and enrollment token, which are output to your terminal. +You'll use these to enroll {kib} with your {es} cluster and log in. +These credentials are only shown when you start {es} for the first time. ++ +We recommend storing the `elastic` password as an environment variable in your shell. Example: ++ +[source,sh] +---- +export ELASTIC_PASSWORD="your_password" +---- ++ +. Copy the `http_ca.crt` SSL certificate from the container to your local machine. ++ +[source,sh] +---- +docker cp es01:/usr/share/elasticsearch/config/certs/http_ca.crt . +---- ++ +. Make a REST API call to {es} to ensure the {es} container is running. ++ +[source,sh] +---- +curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200 +---- +// NOTCONSOLE + +*Run {kib}* + +{kib} is the user interface for Elastic. +It's great for getting started with {es} and exploring your data. +We'll be using the Dev Tools *Console* in {kib} to make REST API calls to {es}. + +In a new terminal session, start {kib} and connect it to your {es} container: + +[source,sh,subs="attributes"] +---- +docker pull {kib-docker-image} +docker run --name kibana --net elastic -p 5601:5601 {kib-docker-image} +---- + +When you start {kib}, a unique URL is output to your terminal. +To access {kib}: + +. Open the generated URL in your browser. +. Paste the enrollment token that you copied earlier, to connect your {kib} instance with {es}. +. Log in to {kib} as the `elastic` user with the password that was generated when you started {es}. 
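As a companion to the curl check above, the same request can be made from Java using only JDK classes, which spells out what curl is doing: trusting the cluster's generated CA via `http_ca.crt`, and authenticating as the `elastic` user with HTTP Basic. A sketch, assuming `http_ca.crt` sits in the working directory and `ELASTIC_PASSWORD` is exported as described earlier; the `EsHealthCheckSketch` class name is invented for this example.

[source,java]
----
import java.io.FileInputStream;
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.charset.StandardCharsets;
import java.security.KeyStore;
import java.security.cert.CertificateFactory;
import java.util.Base64;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManagerFactory;

// Hypothetical demo class, not part of the repository.
public class EsHealthCheckSketch {
    public static void main(String[] args) throws Exception {
        // Trust only the cluster's generated CA, mirroring curl --cacert http_ca.crt.
        KeyStore trustStore = KeyStore.getInstance(KeyStore.getDefaultType());
        trustStore.load(null, null);
        try (FileInputStream ca = new FileInputStream("http_ca.crt")) {
            trustStore.setCertificateEntry("es-http-ca",
                CertificateFactory.getInstance("X.509").generateCertificate(ca));
        }
        TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
        tmf.init(trustStore);
        SSLContext ssl = SSLContext.getInstance("TLS");
        ssl.init(null, tmf.getTrustManagers(), null);

        // HTTP Basic auth as elastic, mirroring curl -u elastic:$ELASTIC_PASSWORD.
        // Assumes ELASTIC_PASSWORD is set in the environment, as in the steps above.
        String auth = "elastic:" + System.getenv("ELASTIC_PASSWORD");
        String header = "Basic " + Base64.getEncoder().encodeToString(auth.getBytes(StandardCharsets.UTF_8));

        HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create("https://localhost:9200/"))
            .header("Authorization", header)
            .build();
        HttpResponse<String> response = HttpClient.newBuilder().sslContext(ssl).build()
            .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + "\n" + response.body());
    }
}
----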
+// end::self-managed[] \ No newline at end of file diff --git a/docs/reference/tab-widgets/semantic-search/field-mappings.asciidoc b/docs/reference/tab-widgets/semantic-search/field-mappings.asciidoc index 2fe2f9cea83f9..b702a1fc8f426 100644 --- a/docs/reference/tab-widgets/semantic-search/field-mappings.asciidoc +++ b/docs/reference/tab-widgets/semantic-search/field-mappings.asciidoc @@ -63,9 +63,7 @@ PUT my-index "properties": { "my_embeddings.predicted_value": { <1> "type": "dense_vector", <2> - "dims": 384,<3> - "index": true, - "similarity": "cosine" + "dims": 384 <3> }, "my_text_field": { <4> "type": "text" <5> diff --git a/gradle/build.versions.toml b/gradle/build.versions.toml index 94ed94df43818..e8d94ce624dbb 100644 --- a/gradle/build.versions.toml +++ b/gradle/build.versions.toml @@ -17,6 +17,7 @@ commons-codec = "commons-codec:commons-codec:1.11" commmons-io = "commons-io:commons-io:2.2" docker-compose = "com.avast.gradle:gradle-docker-compose-plugin:0.17.5" forbiddenApis = "de.thetaphi:forbiddenapis:3.6" +gradle-enterprise = "com.gradle:gradle-enterprise-gradle-plugin:3.14.1" hamcrest = "org.hamcrest:hamcrest:2.1" httpcore = "org.apache.httpcomponents:httpcore:4.4.12" httpclient = "org.apache.httpcomponents:httpclient:4.5.10" diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index e340efb0c6987..ed7ae1b5b5638 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -69,9 +69,9 @@ - - - + + + @@ -381,6 +381,26 @@ + + + + + + + + + + + + + + + + + + + + @@ -1401,19 +1421,19 @@ - - - + + + - - - + + + - - - + + + @@ -2894,14 +2914,9 @@ - - - - - - - - + + + @@ -3002,14 +3017,19 @@ - - - + + + + + + + + - - - + + + @@ -3022,9 +3042,9 @@ - - - + + + @@ -3032,19 +3052,24 @@ - - - - - - - - + + + + + + + + + + + + + @@ -4067,6 +4092,11 @@ + + + + + @@ -4127,6 +4157,11 @@ + + + + + @@ -4157,6 +4192,11 @@ + + + + + diff --git a/libs/core/src/main/java/org/elasticsearch/core/AbstractRefCounted.java b/libs/core/src/main/java/org/elasticsearch/core/AbstractRefCounted.java index 04cd4375a42be..ca5704fa9866d 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/AbstractRefCounted.java +++ b/libs/core/src/main/java/org/elasticsearch/core/AbstractRefCounted.java @@ -19,6 +19,7 @@ public abstract class AbstractRefCounted implements RefCounted { public static final String ALREADY_CLOSED_MESSAGE = "already closed, can't increment ref count"; + public static final String INVALID_DECREF_MESSAGE = "invalid decRef call: already closed"; private static final VarHandle VH_REFCOUNT_FIELD; @@ -63,7 +64,7 @@ public final boolean tryIncRef() { public final boolean decRef() { touch(); int i = (int) VH_REFCOUNT_FIELD.getAndAdd(this, -1); - assert i > 0 : "invalid decRef call: already closed"; + assert i > 0 : INVALID_DECREF_MESSAGE; if (i == 1) { try { closeInternal(); diff --git a/libs/core/src/main/java/org/elasticsearch/core/CheckedConsumer.java b/libs/core/src/main/java/org/elasticsearch/core/CheckedConsumer.java index 6698b47f62f3c..56325dc21bb4a 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/CheckedConsumer.java +++ b/libs/core/src/main/java/org/elasticsearch/core/CheckedConsumer.java @@ -8,12 +8,20 @@ package org.elasticsearch.core; -import java.util.function.Consumer; +import java.util.Objects; /** - * A {@link Consumer}-like interface which allows throwing checked exceptions. + * A {@link java.util.function.Consumer}-like interface which allows throwing checked exceptions. 
*/ @FunctionalInterface public interface CheckedConsumer { void accept(T t) throws E; + + default CheckedConsumer andThen(CheckedConsumer after) throws E { + Objects.requireNonNull(after); + return (T t) -> { + accept(t); + after.accept(t); + }; + } } diff --git a/libs/core/src/main/java/org/elasticsearch/core/RefCounted.java b/libs/core/src/main/java/org/elasticsearch/core/RefCounted.java index 0f7dec4968ba7..1f725ac48a16f 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/RefCounted.java +++ b/libs/core/src/main/java/org/elasticsearch/core/RefCounted.java @@ -38,7 +38,7 @@ public interface RefCounted { void incRef(); /** - * Tries to increment the refCount of this instance. This method will return {@code true} iff the refCount was + * Tries to increment the refCount of this instance. This method will return {@code true} iff the refCount was successfully incremented. * * @see #decRef() * @see #incRef() @@ -62,4 +62,16 @@ public interface RefCounted { * @return whether there are currently any active references to this object. */ boolean hasReferences(); + + /** + * Similar to {@link #incRef()} except that it also asserts that it managed to acquire the ref, for use in situations where it is a bug + * if all refs have been released. + */ + default void mustIncRef() { + if (tryIncRef()) { + return; + } + assert false : AbstractRefCounted.ALREADY_CLOSED_MESSAGE; + incRef(); // throws an ISE + } } diff --git a/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java b/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java index 7d25b5a6163c1..5153ba688d6a9 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java +++ b/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java @@ -18,6 +18,8 @@ public enum RestApiVersion { V_8(8), + + @UpdateForV9 // v9 will not need to support the v7 REST API V_7(7); public final byte major; diff --git a/libs/core/src/main/java/org/elasticsearch/core/UpdateForV9.java b/libs/core/src/main/java/org/elasticsearch/core/UpdateForV9.java new file mode 100644 index 0000000000000..2a31e2ccde222 --- /dev/null +++ b/libs/core/src/main/java/org/elasticsearch/core/UpdateForV9.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.core; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Annotation to identify a block of code (a whole class, a method, or a field) that needs to be reviewed (for cleanup, removal, or change) + * before releasing 9.0 + */ +@Retention(RetentionPolicy.SOURCE) +@Target({ ElementType.LOCAL_VARIABLE, ElementType.CONSTRUCTOR, ElementType.FIELD, ElementType.METHOD, ElementType.TYPE }) +public @interface UpdateForV9 { +} diff --git a/libs/lz4/src/test/java/org/elasticsearch/lz4/ESLZ4Tests.java b/libs/lz4/src/test/java/org/elasticsearch/lz4/ESLZ4Tests.java index 3f7de0bfd4e0e..65a126920cdb9 100644 --- a/libs/lz4/src/test/java/org/elasticsearch/lz4/ESLZ4Tests.java +++ b/libs/lz4/src/test/java/org/elasticsearch/lz4/ESLZ4Tests.java @@ -1898,80 +1898,4 @@ public void testRoundtripIssue12() { testRoundTrip(data, 9, data.length - 9); } - private static void assertCompressedArrayEquals(String message, byte[] expected, byte[] actual) { - int off = 0; - int decompressedOff = 0; - while (true) { - if (off == expected.length) { - break; - } - final Sequence sequence1 = readSequence(expected, off); - final Sequence sequence2 = readSequence(actual, off); - assertEquals(message + ", off=" + off + ", decompressedOff=" + decompressedOff, sequence1, sequence2); - off += sequence1.length; - decompressedOff += sequence1.literalLen + sequence1.matchLen; - } - } - - private static Sequence readSequence(byte[] buf, int off) { - final int start = off; - final int token = buf[off++] & 0xFF; - int literalLen = token >>> 4; - if (literalLen >= 0x0F) { - int len; - while ((len = buf[off++] & 0xFF) == 0xFF) { - literalLen += 0xFF; - } - literalLen += len; - } - off += literalLen; - if (off == buf.length) { - return new Sequence(literalLen, -1, -1, off - start); - } - int matchDec = (buf[off++] & 0xFF) | ((buf[off++] & 0xFF) << 8); - int matchLen = token & 0x0F; - if (matchLen >= 0x0F) { - int len; - while ((len = buf[off++] & 0xFF) == 0xFF) { - matchLen += 0xFF; - } - matchLen += len; - } - matchLen += 4; - return new Sequence(literalLen, matchDec, matchLen, off - start); - } - - private static class Sequence { - final int literalLen, matchDec, matchLen, length; - - private Sequence(int literalLen, int matchDec, int matchLen, int length) { - this.literalLen = literalLen; - this.matchDec = matchDec; - this.matchLen = matchLen; - this.length = length; - } - - @Override - public String toString() { - return "Sequence [literalLen=" + literalLen + ", matchDec=" + matchDec + ", matchLen=" + matchLen + "]"; - } - - @Override - public int hashCode() { - return 42; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - Sequence other = (Sequence) obj; - if (literalLen != other.literalLen) return false; - if (matchDec != other.matchDec) return false; - if (matchLen != other.matchLen) return false; - return true; - } - - } } diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfiguration.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfiguration.java index 6e517f731843b..610fb444e0a93 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfiguration.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfiguration.java @@ -10,7 +10,6 @@ import java.nio.file.Path; import
java.security.GeneralSecurityException; -import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -53,12 +52,7 @@ public record SslConfiguration( static { LinkedHashMap protocolAlgorithmMap = new LinkedHashMap<>(); - try { - SSLContext.getInstance("TLSv1.3"); - protocolAlgorithmMap.put("TLSv1.3", "TLSv1.3"); - } catch (NoSuchAlgorithmException e) { - // ignore since we support JVMs using BCJSSE in FIPS mode which doesn't support TLSv1.3 - } + protocolAlgorithmMap.put("TLSv1.3", "TLSv1.3"); protocolAlgorithmMap.put("TLSv1.2", "TLSv1.2"); protocolAlgorithmMap.put("TLSv1.1", "TLSv1.1"); protocolAlgorithmMap.put("TLSv1", "TLSv1"); diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/AdjacencyMatrixIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/AdjacencyMatrixIT.java index 37e782cd7c611..44e708e00d4d5 100644 --- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/AdjacencyMatrixIT.java +++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/AdjacencyMatrixIT.java @@ -61,19 +61,19 @@ public void setupSuiteScopeCluster() throws Exception { for (int i = 0; i < numTag1Docs; i++) { numSingleTag1Docs++; XContentBuilder source = jsonBuilder().startObject().field("value", i + 1).field("tag", "tag1").endObject(); - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); if (randomBoolean()) { // randomly index the document twice so that we have deleted // docs that match the filter - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); } } for (int i = numTag1Docs; i < (numTag1Docs + numTag2Docs); i++) { numSingleTag2Docs++; XContentBuilder source = jsonBuilder().startObject().field("value", i + 1).field("tag", "tag2").endObject(); - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); if (randomBoolean()) { - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); } } for (int i = numTag1Docs + numTag2Docs; i < numDocs; i++) { @@ -81,17 +81,15 @@ public void setupSuiteScopeCluster() throws Exception { numTag1Docs++; numTag2Docs++; XContentBuilder source = jsonBuilder().startObject().field("value", i + 1).array("tag", "tag1", "tag2").endObject(); - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); if (randomBoolean()) { - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); } } prepareCreate("empty_bucket_idx").setMapping("value", "type=integer").get(); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) - .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) + prepareIndex("empty_bucket_idx").setId("" + i).setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) ); } indexRandom(true, builders); diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/SearchCancellationIT.java 
b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/SearchCancellationIT.java index 4d64ad1030136..a6e530a9d66cf 100644 --- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/SearchCancellationIT.java +++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/SearchCancellationIT.java @@ -12,9 +12,9 @@ import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkRequestBuilder; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.aggregations.AggregationsPlugin; import org.elasticsearch.aggregations.bucket.timeseries.TimeSeriesAggregationBuilder; @@ -80,8 +80,7 @@ public void testCancellationDuringTimeSeriesAggregation() throws Exception { BulkRequestBuilder bulkRequestBuilder = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); for (int j = 0; j < numberOfDocsPerRefresh; j++) { bulkRequestBuilder.add( - client().prepareIndex("test") - .setOpType(DocWriteRequest.OpType.CREATE) + prepareIndex("test").setOpType(DocWriteRequest.OpType.CREATE) .setSource( "@timestamp", now + (long) i * numberOfDocsPerRefresh + j, @@ -116,7 +115,7 @@ public void testCancellationDuringTimeSeriesAggregation() throws Exception { ) .execute(); awaitForBlock(plugins); - cancelSearch(SearchAction.NAME); + cancelSearch(TransportSearchAction.TYPE.name()); disableBlocks(plugins); SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, searchResponse::actionGet); diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesAggregationsIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesAggregationsIT.java index 3f7d52c32e8df..2050ce20b1aee 100644 --- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesAggregationsIT.java +++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesAggregationsIT.java @@ -170,7 +170,7 @@ public void setupSuiteScopeCluster() throws Exception { tsValues.put(timestamp, metrics); docSource.field("@timestamp", timestamp); docSource.endObject(); - docs.add(client().prepareIndex("index" + findIndex(timestamp)).setOpType(DocWriteRequest.OpType.CREATE).setSource(docSource)); + docs.add(prepareIndex("index" + findIndex(timestamp)).setOpType(DocWriteRequest.OpType.CREATE).setSource(docSource)); } indexRandom(true, false, docs); } @@ -503,20 +503,20 @@ public void testGetHitsFailure() throws Exception { ); client().prepareBulk() - .add(client().prepareIndex("test").setId("2").setSource("key", "bar", "val", 2, "@timestamp", "2021-01-01T00:00:10Z")) - .add(client().prepareIndex("test").setId("1").setSource("key", "bar", "val", 10, "@timestamp", "2021-01-01T00:00:00Z")) + .add(prepareIndex("test").setId("2").setSource("key", "bar", "val", 2, "@timestamp", "2021-01-01T00:00:10Z")) + .add(prepareIndex("test").setId("1").setSource("key", "bar", "val", 10, "@timestamp", "2021-01-01T00:00:00Z")) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); client().prepareBulk() - 
.add(client().prepareIndex("test").setId("4").setSource("key", "bar", "val", 50, "@timestamp", "2021-01-01T00:00:30Z")) - .add(client().prepareIndex("test").setId("3").setSource("key", "bar", "val", 40, "@timestamp", "2021-01-01T00:00:20Z")) + .add(prepareIndex("test").setId("4").setSource("key", "bar", "val", 50, "@timestamp", "2021-01-01T00:00:30Z")) + .add(prepareIndex("test").setId("3").setSource("key", "bar", "val", 40, "@timestamp", "2021-01-01T00:00:20Z")) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); client().prepareBulk() - .add(client().prepareIndex("test").setId("7").setSource("key", "foo", "val", 20, "@timestamp", "2021-01-01T00:00:00Z")) - .add(client().prepareIndex("test").setId("8").setSource("key", "foo", "val", 30, "@timestamp", "2021-01-01T00:10:00Z")) - .add(client().prepareIndex("test").setId("5").setSource("key", "baz", "val", 20, "@timestamp", "2021-01-01T00:00:00Z")) - .add(client().prepareIndex("test").setId("6").setSource("key", "baz", "val", 30, "@timestamp", "2021-01-01T00:10:00Z")) + .add(prepareIndex("test").setId("7").setSource("key", "foo", "val", 20, "@timestamp", "2021-01-01T00:00:00Z")) + .add(prepareIndex("test").setId("8").setSource("key", "foo", "val", 30, "@timestamp", "2021-01-01T00:10:00Z")) + .add(prepareIndex("test").setId("5").setSource("key", "baz", "val", 20, "@timestamp", "2021-01-01T00:00:00Z")) + .add(prepareIndex("test").setId("6").setSource("key", "baz", "val", 30, "@timestamp", "2021-01-01T00:10:00Z")) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesNestedAggregationsIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesNestedAggregationsIT.java index 7fddc65ac3e03..5c58b7f7bff5a 100644 --- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesNestedAggregationsIT.java +++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesNestedAggregationsIT.java @@ -67,7 +67,7 @@ public void setup() throws Exception { final BulkRequestBuilder bulkIndexRequest = client().prepareBulk(); for (int docId = 0; docId < numberOfDocuments; docId++) { final XContentBuilder document = timeSeriesDocument(FOO_DIM_VALUE, BAR_DIM_VALUE, BAZ_DIM_VALUE, docId, timestamps::next); - bulkIndexRequest.add(client().prepareIndex("index").setOpType(DocWriteRequest.OpType.CREATE).setSource(document)); + bulkIndexRequest.add(prepareIndex("index").setOpType(DocWriteRequest.OpType.CREATE).setSource(document)); } final BulkResponse bulkIndexResponse = bulkIndexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/DateDerivativeIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/DateDerivativeIT.java index 14bae46e1e00f..ce7e4c63dc69c 100644 --- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/DateDerivativeIT.java +++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/DateDerivativeIT.java @@ -64,21 +64,20 @@ protected Collection> nodePlugins() { } private static IndexRequestBuilder indexDoc(String idx, ZonedDateTime date, int value) throws Exception { - return client().prepareIndex(idx).setSource(jsonBuilder().startObject().timeField("date", 
date).field("value", value).endObject()); + return prepareIndex(idx).setSource(jsonBuilder().startObject().timeField("date", date).field("value", value).endObject()); } private IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception { - return client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field("value", value) - .timeField("date", date(month, day)) - .startArray("dates") - .timeValue(date(month, day)) - .timeValue(date(month + 1, day + 1)) - .endArray() - .endObject() - ); + return prepareIndex("idx").setSource( + jsonBuilder().startObject() + .field("value", value) + .timeField("date", date(month, day)) + .startArray("dates") + .timeValue(date(month, day)) + .timeValue(date(month + 1, day + 1)) + .endArray() + .endObject() + ); } @Override @@ -90,9 +89,7 @@ public void setupSuiteScopeCluster() throws Exception { List builders = new ArrayList<>(); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) - .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) + prepareIndex("empty_bucket_idx").setId("" + i).setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) ); } builders.addAll( diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/SerialDiffIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/SerialDiffIT.java index e0c91689b333d..7cbb298f49931 100644 --- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/SerialDiffIT.java +++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/SerialDiffIT.java @@ -142,8 +142,9 @@ public void setupSuiteScopeCluster() throws Exception { for (PipelineAggregationHelperTests.MockBucket mockBucket : mockHisto) { for (double value : mockBucket.docValues) { builders.add( - client().prepareIndex("idx") - .setSource(jsonBuilder().startObject().field(INTERVAL_FIELD, mockBucket.key).field(VALUE_FIELD, value).endObject()) + prepareIndex("idx").setSource( + jsonBuilder().startObject().field(INTERVAL_FIELD, mockBucket.key).field(VALUE_FIELD, value).endObject() + ) ); } } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java index a25bbe0a6d0be..5a036a59b4bca 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java @@ -30,7 +30,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Map; @@ -152,14 +151,14 @@ private void setFiltersAsMap(Map filters) { } // internally we want to have a fixed order of filters, regardless of // the order of the filters in the request - Collections.sort(this.filters, Comparator.comparing(KeyedFilter::key)); + this.filters.sort(Comparator.comparing(KeyedFilter::key)); } private AdjacencyMatrixAggregationBuilder setFiltersAsList(List filters) { this.filters = new ArrayList<>(filters); // internally we want to have a fixed order of filters, regardless of // the order of the filters in the request - 
Collections.sort(this.filters, Comparator.comparing(KeyedFilter::key)); + this.filters.sort(Comparator.comparing(KeyedFilter::key)); return this; } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java index 84765a1432210..c17cc004e25b5 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.List; @@ -181,11 +180,7 @@ public InternalAggregation reduce(List aggregations, Aggreg for (InternalAggregation aggregation : aggregations) { InternalAdjacencyMatrix filters = (InternalAdjacencyMatrix) aggregation; for (InternalBucket bucket : filters.buckets) { - List sameRangeList = bucketsMap.get(bucket.key); - if (sameRangeList == null) { - sameRangeList = new ArrayList<>(aggregations.size()); - bucketsMap.put(bucket.key, sameRangeList); - } + List sameRangeList = bucketsMap.computeIfAbsent(bucket.key, k -> new ArrayList<>(aggregations.size())); sameRangeList.add(bucket); } } @@ -198,11 +193,9 @@ public InternalAggregation reduce(List aggregations, Aggreg } } reduceContext.consumeBucketsAndMaybeBreak(reducedBuckets.size()); - Collections.sort(reducedBuckets, Comparator.comparing(InternalBucket::getKey)); + reducedBuckets.sort(Comparator.comparing(InternalBucket::getKey)); - InternalAdjacencyMatrix reduced = new InternalAdjacencyMatrix(name, reducedBuckets, getMetadata()); - - return reduced; + return new InternalAdjacencyMatrix(name, reducedBuckets, getMetadata()); } @Override diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/ParsedAdjacencyMatrix.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/ParsedAdjacencyMatrix.java index efd5e498c9a91..1c558db86e8eb 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/ParsedAdjacencyMatrix.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/ParsedAdjacencyMatrix.java @@ -48,11 +48,7 @@ public ParsedBucket getBucketByKey(String key) { ParsedAdjacencyMatrix::new ); static { - declareMultiBucketAggregationFields( - PARSER, - parser -> ParsedBucket.fromXContent(parser), - parser -> ParsedBucket.fromXContent(parser) - ); + declareMultiBucketAggregationFields(PARSER, ParsedBucket::fromXContent, ParsedBucket::fromXContent); } public static ParsedAdjacencyMatrix fromXContent(XContentParser parser, String name) throws IOException { diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java index dd497e8ca5478..d096012c3d634 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java @@ -173,7 +173,7 @@ public String getMinimumIntervalExpression() { public 
AutoDateHistogramAggregationBuilder setMinimumIntervalExpression(String minimumIntervalExpression) { if (minimumIntervalExpression != null && ALLOWED_INTERVALS.containsValue(minimumIntervalExpression) == false) { throw new IllegalArgumentException( - MINIMUM_INTERVAL_FIELD.getPreferredName() + " must be one of [" + ALLOWED_INTERVALS.values().toString() + "]" + MINIMUM_INTERVAL_FIELD.getPreferredName() + " must be one of [" + ALLOWED_INTERVALS.values() + "]" ); } this.minimumIntervalExpression = minimumIntervalExpression; @@ -210,9 +210,8 @@ protected ValuesSourceAggregatorFactory innerBuild( int maxRoundingInterval = Arrays.stream(roundings, 0, roundings.length - 1) .map(rounding -> rounding.innerIntervals) .flatMapToInt(Arrays::stream) - .boxed() .reduce(Integer::max) - .get(); + .getAsInt(); Settings settings = context.getIndexSettings().getNodeSettings(); int maxBuckets = MultiBucketConsumerService.MAX_BUCKET_SETTING.get(settings); int bucketCeiling = maxBuckets / maxRoundingInterval; @@ -287,7 +286,7 @@ public RoundingInfo( this.innerIntervals = innerIntervals; Objects.requireNonNull(dateTimeUnit, "dateTimeUnit cannot be null"); if (ALLOWED_INTERVALS.containsKey(dateTimeUnit) == false) { - throw new IllegalArgumentException("dateTimeUnit must be one of " + ALLOWED_INTERVALS.keySet().toString()); + throw new IllegalArgumentException("dateTimeUnit must be one of " + ALLOWED_INTERVALS.keySet()); } this.dateTimeUnit = ALLOWED_INTERVALS.get(dateTimeUnit); } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregator.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregator.java index c0b89b915229d..491ec3fe6f95d 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregator.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregator.java @@ -479,7 +479,7 @@ private int collectValue(long owningBucketOrd, int roundingIdx, int doc, long ro /** * Increase the rounding of {@code owningBucketOrd} using - * estimated, bucket counts, {@link #rebucket() rebucketing} the all + * estimated, bucket counts, {@link FromMany#rebucket() rebucketing} the all * buckets if the estimated number of wasted buckets is too high. 
*/ private int increaseRoundingIfNeeded(long owningBucketOrd, int oldEstimatedBucketCount, long newKey, int oldRounding) { diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java index f517291d96d90..be244a2c62da3 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java @@ -74,7 +74,7 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) { private final AutoDateHistogramAggregatorSupplier aggregatorSupplier; private final int numBuckets; - private RoundingInfo[] roundingInfos; + private final RoundingInfo[] roundingInfos; public AutoDateHistogramAggregatorFactory( String name, diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java index 4593d6901513a..c058fb5743369 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java @@ -422,30 +422,14 @@ protected Bucket reduceBucket(List buckets, AggregationReduceContext con return new InternalAutoDateHistogram.Bucket(buckets.get(0).key, docCount, format, aggs); } - private static class BucketReduceResult { - final List buckets; - final int roundingIdx; - final long innerInterval; - final Rounding.Prepared preparedRounding; - final long min; - final long max; - - BucketReduceResult( - List buckets, - int roundingIdx, - long innerInterval, - Rounding.Prepared preparedRounding, - long min, - long max - ) { - this.buckets = buckets; - this.roundingIdx = roundingIdx; - this.innerInterval = innerInterval; - this.preparedRounding = preparedRounding; - this.min = min; - this.max = max; - } - } + private record BucketReduceResult( + List buckets, + int roundingIdx, + long innerInterval, + Rounding.Prepared preparedRounding, + long min, + long max + ) {} private BucketReduceResult addEmptyBuckets(BucketReduceResult current, AggregationReduceContext reduceContext) { List list = current.buckets; diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSource.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSource.java index 9a46d71205012..05c2e928fd84f 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSource.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSource.java @@ -19,7 +19,7 @@ * Class to encapsulate a set of ValuesSource objects labeled by field name */ public abstract class ArrayValuesSource { - protected MultiValueMode multiValueMode; + protected final MultiValueMode multiValueMode; protected String[] names; protected VS[] values; diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ParsedMatrixStats.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ParsedMatrixStats.java index 0c44946ac96a0..2866a08e8608e 100644 
--- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ParsedMatrixStats.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ParsedMatrixStats.java @@ -86,7 +86,7 @@ public double getCorrelation(String fieldX, String fieldY) { @Override protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount()); - if (counts != null && counts.isEmpty() == false) { + if (counts.isEmpty() == false) { builder.startArray(InternalMatrixStats.Fields.FIELDS); for (String fieldName : counts.keySet()) { builder.startObject(); diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/RunningStats.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/RunningStats.java index e4b8e15cd5e1b..9b8ea7321582b 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/RunningStats.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/RunningStats.java @@ -178,10 +178,8 @@ public void add(final String[] fieldNames, final double[] fieldVals) { private void updateCovariance(final String[] fieldNames, final Map deltas) { // deep copy of hash keys (field names) ArrayList cFieldNames = new ArrayList<>(Arrays.asList(fieldNames)); - String fieldName; double dR, newVal; - for (int i = 0; i < fieldNames.length; ++i) { - fieldName = fieldNames[i]; + for (String fieldName : fieldNames) { cFieldNames.remove(fieldName); // update running covariances dR = deltas.get(fieldName); @@ -231,12 +229,12 @@ public void merge(final RunningStats other) { } else if (this.docCount == 0) { for (Map.Entry fs : other.means.entrySet()) { final String fieldName = fs.getKey(); - this.means.put(fieldName, fs.getValue().doubleValue()); - this.counts.put(fieldName, other.counts.get(fieldName).longValue()); - this.fieldSum.put(fieldName, other.fieldSum.get(fieldName).doubleValue()); - this.variances.put(fieldName, other.variances.get(fieldName).doubleValue()); - this.skewness.put(fieldName, other.skewness.get(fieldName).doubleValue()); - this.kurtosis.put(fieldName, other.kurtosis.get(fieldName).doubleValue()); + this.means.put(fieldName, fs.getValue()); + this.counts.put(fieldName, other.counts.get(fieldName)); + this.fieldSum.put(fieldName, other.fieldSum.get(fieldName)); + this.variances.put(fieldName, other.variances.get(fieldName)); + this.skewness.put(fieldName, other.skewness.get(fieldName)); + this.kurtosis.put(fieldName, other.kurtosis.get(fieldName)); if (other.covariances.containsKey(fieldName)) { this.covariances.put(fieldName, other.covariances.get(fieldName)); } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSelectorPipelineAggregator.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSelectorPipelineAggregator.java index 23abc8a328601..3b8559c77fddc 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSelectorPipelineAggregator.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSelectorPipelineAggregator.java @@ -24,9 +24,9 @@ import static org.elasticsearch.search.aggregations.pipeline.BucketHelpers.resolveBucketValue; public class BucketSelectorPipelineAggregator extends PipelineAggregator { - private GapPolicy gapPolicy; - private Script script; - private Map bucketsPathsMap; + private final GapPolicy 
gapPolicy; + private final Script script; + private final Map bucketsPathsMap; BucketSelectorPipelineAggregator( String name, diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilder.java index ad26d8ed59438..57b60df785673 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilder.java @@ -84,7 +84,7 @@ public class BucketSortPipelineAggregationBuilder extends AbstractPipelineAggreg private GapPolicy gapPolicy = GapPolicy.SKIP; public BucketSortPipelineAggregationBuilder(String name, List sorts) { - super(name, NAME, sorts == null ? new String[0] : sorts.stream().map(s -> s.getFieldName()).toArray(String[]::new)); + super(name, NAME, sorts == null ? new String[0] : sorts.stream().map(FieldSortBuilder::getFieldName).toArray(String[]::new)); this.sorts = sorts == null ? Collections.emptyList() : sorts; } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregationBuilder.java index 35d5a97aa854f..1132507d520f4 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregationBuilder.java @@ -69,7 +69,7 @@ public class MovFnPipelineAggregationBuilder extends AbstractPipelineAggregation } throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); }, GAP_POLICY, ObjectParser.ValueType.STRING); - }; + } public MovFnPipelineAggregationBuilder(String name, String bucketsPath, Script script, int window) { super(name, NAME, new String[] { bucketsPath }); diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregator.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregator.java index 7431e806d96e4..c9debf89e8162 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregator.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregator.java @@ -50,7 +50,6 @@ public class MovFnPipelineAggregator extends PipelineAggregator { private final DocValueFormat formatter; private final BucketHelpers.GapPolicy gapPolicy; private final Script script; - private final String bucketsPath; private final int window; private final int shift; @@ -65,7 +64,6 @@ public class MovFnPipelineAggregator extends PipelineAggregator { Map metadata ) { super(name, new String[] { bucketsPath }, metadata); - this.bucketsPath = bucketsPath; this.script = script; this.formatter = formatter; this.gapPolicy = gapPolicy; @@ -136,9 +134,6 @@ private static int clamp(int index, List list) { if (index < 0) { return 0; } - if (index > list.size()) { - return list.size(); - } - return index; + return Math.min(index, list.size()); } } diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrixTests.java 
b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrixTests.java index f8cf953c4caf3..22877c5bbc32b 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrixTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrixTests.java @@ -67,8 +67,7 @@ public void setUp() throws Exception { @Override protected InternalAdjacencyMatrix createTestInstance(String name, Map metadata, InternalAggregations aggregations) { final List buckets = new ArrayList<>(); - for (int i = 0; i < keys.size(); ++i) { - String key = keys.get(i); + for (String key : keys) { int docCount = randomIntBetween(0, 1000); buckets.add(new InternalAdjacencyMatrix.InternalBucket(key, docCount, aggregations)); } diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java index 8a3a061750a87..880d223442e29 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java @@ -183,7 +183,7 @@ public void testMultiBucketAggregationAsSubAggregation() throws IOException { public void testAggregationSize() throws IOException { CheckedConsumer buildIndex = multiTsWriter(); - List> verifiers = new ArrayList>(); + List> verifiers = new ArrayList<>(); verifiers.add(ts -> assertThat(ts.getBucketByKey("{dim1=aaa, dim2=xxx}").docCount, equalTo(2L))); verifiers.add(ts -> assertThat(ts.getBucketByKey("{dim1=aaa, dim2=yyy}").docCount, equalTo(2L))); diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/MultiPassStats.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/MultiPassStats.java index 6a43f02697e26..7275cd26cae65 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/MultiPassStats.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/MultiPassStats.java @@ -22,12 +22,12 @@ class MultiPassStats { private final String fieldBKey; private long count; - private Map means = new HashMap<>(); - private Map variances = new HashMap<>(); - private Map skewness = new HashMap<>(); - private Map kurtosis = new HashMap<>(); - private Map> covariances = new HashMap<>(); - private Map> correlations = new HashMap<>(); + private final Map means = new HashMap<>(); + private final Map variances = new HashMap<>(); + private final Map skewness = new HashMap<>(); + private final Map kurtosis = new HashMap<>(); + private final Map> covariances = new HashMap<>(); + private final Map> correlations = new HashMap<>(); MultiPassStats(String fieldAName, String fieldBName) { this.fieldAKey = fieldAName; diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilderTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilderTests.java index bb67c8da7eca4..a025f03d0eafc 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilderTests.java +++ 
b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilderTests.java @@ -40,7 +40,7 @@ protected BucketSortPipelineAggregationBuilder createTestAggregatorFactory() { sorts.add(fieldSortBuilder); } BucketSortPipelineAggregationBuilder factory = new BucketSortPipelineAggregationBuilder(randomAlphaOfLengthBetween(3, 20), sorts); - Integer from = randomIntBetween(0, 20); + int from = randomIntBetween(0, 20); Integer size = randomBoolean() ? randomIntBetween(1, 1000) : null; if (randomBoolean()) { factory.from(from); diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/pipeline/DerivativeAggregatorTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/pipeline/DerivativeAggregatorTests.java index 70eb63c5e61da..68245d31c14b6 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/pipeline/DerivativeAggregatorTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/pipeline/DerivativeAggregatorTests.java @@ -218,7 +218,7 @@ public void testSingleValueAggDerivative() throws IOException { Object[] propertiesDocCounts = (Object[]) histogram.getProperty("_count"); Object[] propertiesSumCounts = (Object[]) histogram.getProperty("sum.value"); - Long expectedSumPreviousBucket = Long.MIN_VALUE; // start value, gets + long expectedSumPreviousBucket = Long.MIN_VALUE; // start value, gets // overwritten for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); @@ -270,7 +270,7 @@ public void testMultiValueAggDerivative() throws IOException { Object[] propertiesDocCounts = (Object[]) histogram.getProperty("_count"); Object[] propertiesSumCounts = (Object[]) histogram.getProperty("stats.sum"); - Long expectedSumPreviousBucket = Long.MIN_VALUE; // start value, gets + long expectedSumPreviousBucket = Long.MIN_VALUE; // start value, gets // overwritten for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); @@ -670,8 +670,8 @@ public void testDerivDerivNPE() throws IOException { } } - private Long getTotalDocCountAcrossBuckets(List buckets) { - Long count = 0L; + private long getTotalDocCountAcrossBuckets(List buckets) { + long count = 0L; for (Histogram.Bucket bucket : buckets) { count += bucket.getDocCount(); } diff --git a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/top_hits.yml b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/top_hits.yml index 4d4848e8aebc3..5cf0265374b08 100644 --- a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/top_hits.yml +++ b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/top_hits.yml @@ -600,10 +600,13 @@ synthetic _source: - do: bulk: index: test2 + refresh: true body: - { index: { } } - { gender: 3 } - do: + # The script can't process a bucket without a salary value for gender '3'. + catch: /path not supported for \[top_salary_hits\]:\ \[_source.salary\]./ search: index: test2 size: 0 @@ -630,13 +633,6 @@ synthetic _source: ts: top_salary_hits[_source.salary] script: "params.ts < 8000" - # Empty bucket for gender '3' affects nothing. 
- - length: { aggregations.genders.buckets: 1} - - match: { aggregations.genders.buckets.0.top_salary_hits.hits.total.value: 4} - - match: { aggregations.genders.buckets.0.top_salary_hits.hits.hits.0._source.gender: 1} - - match: { aggregations.genders.buckets.0.top_salary_hits.hits.hits.0._source.salary: 4000} - - match: { aggregations.genders.buckets.0.top_salary_hits.hits.hits.0._source.birth_date: 1982} - - do: catch: /path not supported for \[top_salary_hits\]:\ \[_source.nosuchfield\]./ search: diff --git a/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/QueryStringWithAnalyzersIT.java b/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/QueryStringWithAnalyzersIT.java index 32e20aea3c2e1..bb450f1cc43ee 100644 --- a/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/QueryStringWithAnalyzersIT.java +++ b/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/QueryStringWithAnalyzersIT.java @@ -49,7 +49,7 @@ public void testCustomWordDelimiterQueryString() { .setMapping("field1", "type=text,analyzer=my_analyzer", "field2", "type=text,analyzer=my_analyzer") ); - client().prepareIndex("test").setId("1").setSource("field1", "foo bar baz", "field2", "not needed").get(); + prepareIndex("test").setId("1").setSource("field1", "foo bar baz", "field2", "not needed").get(); refresh(); assertHitCount( diff --git a/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadAnalyzerTests.java b/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadAnalyzerTests.java index da8b431b4eda3..2ef1a7639e597 100644 --- a/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadAnalyzerTests.java +++ b/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadAnalyzerTests.java @@ -91,8 +91,8 @@ private Path setupSynonyms() throws IOException { .setMapping("field", "type=text,analyzer=standard,search_analyzer=" + SYNONYM_ANALYZER_NAME) ); - client().prepareIndex(INDEX_NAME).setId("1").setSource("field", "Foo").get(); - assertNoFailures(indicesAdmin().prepareRefresh(INDEX_NAME).execute().actionGet()); + prepareIndex(INDEX_NAME).setId("1").setSource("field", "Foo").get(); + assertNoFailures(indicesAdmin().prepareRefresh(INDEX_NAME).get()); assertHitCount(client().prepareSearch(INDEX_NAME).setQuery(QueryBuilders.matchQuery("field", "baz")), 1L); assertHitCount(client().prepareSearch(INDEX_NAME).setQuery(QueryBuilders.matchQuery("field", "buzz")), 0L); @@ -161,8 +161,8 @@ public void testSynonymsInMultiplexerUpdateable() throws FileNotFoundException, .setMapping("field", "type=text,analyzer=standard,search_analyzer=" + SYNONYM_ANALYZER_NAME) ); - client().prepareIndex(INDEX_NAME).setId("1").setSource("field", "foo").get(); - assertNoFailures(indicesAdmin().prepareRefresh(INDEX_NAME).execute().actionGet()); + prepareIndex(INDEX_NAME).setId("1").setSource("field", "foo").get(); + assertNoFailures(indicesAdmin().prepareRefresh(INDEX_NAME).get()); assertHitCount(client().prepareSearch(INDEX_NAME).setQuery(QueryBuilders.matchQuery("field", "baz")), 1L); assertHitCount(client().prepareSearch(INDEX_NAME).setQuery(QueryBuilders.matchQuery("field", "buzz")), 0L); diff --git a/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadSynonymAnalyzerIT.java 
b/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadSynonymAnalyzerIT.java index f0063f663142d..d55dbd0f1d783 100644 --- a/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadSynonymAnalyzerIT.java +++ b/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadSynonymAnalyzerIT.java @@ -70,8 +70,8 @@ private void testSynonymsUpdate(boolean preview) throws FileNotFoundException, I .setMapping("field", "type=text,analyzer=standard,search_analyzer=my_synonym_analyzer") ); - client().prepareIndex("test").setId("1").setSource("field", "foo").get(); - assertNoFailures(indicesAdmin().prepareRefresh("test").execute().actionGet()); + prepareIndex("test").setId("1").setSource("field", "foo").get(); + assertNoFailures(indicesAdmin().prepareRefresh("test").get()); assertHitCount(prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "baz")), 1L); assertHitCount(prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "buzz")), 0L); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index 90a8d3379775f..35face57b8294 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -91,6 +91,7 @@ import org.apache.lucene.analysis.ru.RussianAnalyzer; import org.apache.lucene.analysis.shingle.ShingleFilter; import org.apache.lucene.analysis.snowball.SnowballFilter; +import org.apache.lucene.analysis.sr.SerbianAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.sv.SwedishAnalyzer; import org.apache.lucene.analysis.th.ThaiAnalyzer; @@ -197,6 +198,7 @@ public Map>> getAn analyzers.put("portuguese", PortugueseAnalyzerProvider::new); analyzers.put("romanian", RomanianAnalyzerProvider::new); analyzers.put("russian", RussianAnalyzerProvider::new); + analyzers.put("serbian", SerbianAnalyzerProvider::new); analyzers.put("sorani", SoraniAnalyzerProvider::new); analyzers.put("spanish", SpanishAnalyzerProvider::new); analyzers.put("swedish", SwedishAnalyzerProvider::new); @@ -447,6 +449,7 @@ public List getPreBuiltAnalyzerProviderFactorie analyzers.add(new PreBuiltAnalyzerProviderFactory("portuguese", CachingStrategy.LUCENE, PortugueseAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("romanian", CachingStrategy.LUCENE, RomanianAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("russian", CachingStrategy.LUCENE, RussianAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("serbian", CachingStrategy.LUCENE, SerbianAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("sorani", CachingStrategy.LUCENE, SoraniAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("spanish", CachingStrategy.LUCENE, SpanishAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("swedish", CachingStrategy.LUCENE, SwedishAnalyzer::new)); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SerbianAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SerbianAnalyzerProvider.java new file mode 100644 index 0000000000000..567502b75bced --- /dev/null +++ 
b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SerbianAnalyzerProvider.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.analysis.common;
+
+import org.apache.lucene.analysis.CharArraySet;
+import org.apache.lucene.analysis.sr.SerbianAnalyzer;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider;
+import org.elasticsearch.index.analysis.Analysis;
+
+public class SerbianAnalyzerProvider extends AbstractIndexAnalyzerProvider<SerbianAnalyzer> {
+
+    private final SerbianAnalyzer analyzer;
+
+    SerbianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
+        super(name, settings);
+        analyzer = new SerbianAnalyzer(
+            Analysis.parseStopWords(env, settings, SerbianAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
+    }
+
+    @Override
+    public SerbianAnalyzer get() {
+        return this.analyzer;
+    }
+}
diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java
index 8f9a882e29d2a..7385987567fb0 100644
--- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java
+++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java
@@ -70,6 +70,7 @@
 import org.tartarus.snowball.ext.PortugueseStemmer;
 import org.tartarus.snowball.ext.RomanianStemmer;
 import org.tartarus.snowball.ext.RussianStemmer;
+import org.tartarus.snowball.ext.SerbianStemmer;
 import org.tartarus.snowball.ext.SpanishStemmer;
 import org.tartarus.snowball.ext.SwedishStemmer;
 import org.tartarus.snowball.ext.TurkishStemmer;
@@ -237,6 +238,9 @@ public TokenStream create(TokenStream tokenStream) {
         } else if ("light_russian".equalsIgnoreCase(language) || "lightRussian".equalsIgnoreCase(language)) {
             return new RussianLightStemFilter(tokenStream);
 
+        } else if ("serbian".equalsIgnoreCase(language)) {
+            return new SnowballFilter(tokenStream, new SerbianStemmer());
+
             // Spanish stemmers
         } else if ("spanish".equalsIgnoreCase(language)) {
             return new SnowballFilter(tokenStream, new SpanishStemmer());
diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java
index 9c46e8830647b..2693245ac2757 100644
--- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java
+++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java
@@ -109,7 +109,7 @@ public void testNgramHighlightingWithBrokenPositions() throws IOException {
                     .putList("analysis.analyzer.search_autocomplete.filter", "lowercase", "wordDelimiter")
             )
         );
-        client().prepareIndex("test").setId("1").setSource("name", "ARCOTEL Hotels Deutschland").get();
+        prepareIndex("test").setId("1").setSource("name", "ARCOTEL Hotels Deutschland").get();
         refresh();
         assertResponse(
             prepareSearch("test").setQuery(matchQuery("name.autocomplete", "deut tel").operator(Operator.OR))
@@ -151,8 +151,7 @@ public void testMultiPhraseCutoff() throws IOException {
         );
         ensureGreen();
 
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
             .setSource(
                 "body",
                 "Test: http://www.facebook.com http://elasticsearch.org "
@@ -212,7 +211,7 @@ public void testSynonyms() throws IOException {
         );
         ensureGreen();
 
-        client().prepareIndex("test").setId("0").setSource("field1", "The quick brown fox jumps over the lazy dog").get();
+        prepareIndex("test").setId("0").setSource("field1", "The quick brown fox jumps over the lazy dog").get();
         refresh();
         for (String highlighterType : new String[] { "plain", "fvh", "unified" }) {
             logger.info("--> highlighting (type=" + highlighterType + ") and searching on field1");
@@ -249,14 +248,10 @@ public void testPhrasePrefix() throws IOException {
 
         ensureGreen();
 
-        client().prepareIndex("first_test_index")
-            .setId("0")
+        prepareIndex("first_test_index").setId("0")
            .setSource("field0", "The quick brown fox jumps over the lazy dog", "field1", "The quick brown fox jumps over the lazy dog")
            .get();
-        client().prepareIndex("first_test_index")
-            .setId("1")
-            .setSource("field1", "The quick browse button is a fancy thing, right bro?")
-            .get();
+        prepareIndex("first_test_index").setId("1").setSource("field1", "The quick browse button is a fancy thing, right bro?").get();
         refresh();
         logger.info("--> highlighting and searching on field0");
@@ -329,8 +324,7 @@ public void testPhrasePrefix() throws IOException {
                 "type=text,analyzer=synonym"
             )
         );
-        client().prepareIndex("second_test_index")
-            .setId("0")
+        prepareIndex("second_test_index").setId("0")
             .setSource(
                 "type",
                 "type2",
@@ -340,11 +334,10 @@ public void testPhrasePrefix() throws IOException {
                 "The quick brown fox jumps over the lazy dog"
             )
             .get();
-        client().prepareIndex("second_test_index")
-            .setId("1")
+        prepareIndex("second_test_index").setId("1")
             .setSource("type", "type2", "field4", "The quick browse button is a fancy thing, right bro?")
             .get();
-        client().prepareIndex("second_test_index").setId("2").setSource("type", "type2", "field4", "a quick fast blue car").get();
+        prepareIndex("second_test_index").setId("2").setSource("type", "type2", "field4", "a quick fast blue car").get();
         refresh();
 
         assertResponse(
diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/20_analyzers.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/20_analyzers.yml
index dcec02729a44e..c03bdb3111050 100644
--- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/20_analyzers.yml
+++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/20_analyzers.yml
@@ -988,6 +988,35 @@
   - length: { tokens: 1 }
   - match: { tokens.0.token: вмест }
 
+---
+"serbian":
+  - do:
+      indices.create:
+        index: test
+        body:
+          settings:
+            analysis:
+              analyzer:
+                my_analyzer:
+                  type: serbian
+
+  - do:
+      indices.analyze:
+        body:
+          text: будите шампиони
+          analyzer: serbian
+  - length: { tokens: 1 }
+  - match: { tokens.0.token: sampion }
+
+  - do:
+      indices.analyze:
+        index: test
+        body:
+          text: будите шампиони
+          analyzer: my_analyzer
+  - length: { tokens: 1 }
+  - match: { tokens.0.token: sampion }
+
 ---
 "sorani":
   - do:
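The "serbian" YAML test above pins down the new analyzer's behavior: Cyrillic input is normalized to Latin and stemmed. The same result can be reproduced standalone against Lucene, which is what the `serbian` name now resolves to (a sketch assuming `lucene-analysis-common` is on the classpath):

    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.sr.SerbianAnalyzer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    public class SerbianAnalyzerDemo {
        public static void main(String[] args) throws Exception {
            // Default stopwords, no stem exclusions: the same configuration the
            // pre-built "serbian" analyzer registered in CommonAnalysisPlugin uses.
            try (SerbianAnalyzer analyzer = new SerbianAnalyzer()) {
                TokenStream ts = analyzer.tokenStream("field", "будите шампиони");
                CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
                ts.reset();
                while (ts.incrementToken()) {
                    System.out.println(term); // the YAML test expects the single token "sampion"
                }
                ts.end();
                ts.close();
            }
        }
    }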
diff --git a/modules/apm/build.gradle b/modules/apm/build.gradle
index c9002a71bf746..13f1ac4a4cd3e 100644
--- a/modules/apm/build.gradle
+++ b/modules/apm/build.gradle
@@ -18,7 +18,7 @@ dependencies {
   implementation "io.opentelemetry:opentelemetry-api:${otelVersion}"
   implementation "io.opentelemetry:opentelemetry-context:${otelVersion}"
   implementation "io.opentelemetry:opentelemetry-semconv:${otelVersion}-alpha"
-  runtimeOnly "co.elastic.apm:elastic-apm-agent:1.43.0"
+  runtimeOnly "co.elastic.apm:elastic-apm-agent:1.44.0"
 }
 
 tasks.named("dependencyLicenses").configure {
diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java
index 07bbc5c55f7cd..51f008db646fa 100644
--- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java
+++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java
@@ -12,19 +12,23 @@
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.common.util.concurrent.ReleasableLock;
+import org.elasticsearch.telemetry.apm.internal.metrics.DoubleAsyncCounterAdapter;
 import org.elasticsearch.telemetry.apm.internal.metrics.DoubleCounterAdapter;
 import org.elasticsearch.telemetry.apm.internal.metrics.DoubleGaugeAdapter;
 import org.elasticsearch.telemetry.apm.internal.metrics.DoubleHistogramAdapter;
 import org.elasticsearch.telemetry.apm.internal.metrics.DoubleUpDownCounterAdapter;
+import org.elasticsearch.telemetry.apm.internal.metrics.LongAsyncCounterAdapter;
 import org.elasticsearch.telemetry.apm.internal.metrics.LongCounterAdapter;
 import org.elasticsearch.telemetry.apm.internal.metrics.LongGaugeAdapter;
 import org.elasticsearch.telemetry.apm.internal.metrics.LongHistogramAdapter;
 import org.elasticsearch.telemetry.apm.internal.metrics.LongUpDownCounterAdapter;
+import org.elasticsearch.telemetry.metric.DoubleAsyncCounter;
 import org.elasticsearch.telemetry.metric.DoubleCounter;
 import org.elasticsearch.telemetry.metric.DoubleGauge;
 import org.elasticsearch.telemetry.metric.DoubleHistogram;
 import org.elasticsearch.telemetry.metric.DoubleUpDownCounter;
 import org.elasticsearch.telemetry.metric.DoubleWithAttributes;
+import org.elasticsearch.telemetry.metric.LongAsyncCounter;
 import org.elasticsearch.telemetry.metric.LongCounter;
 import org.elasticsearch.telemetry.metric.LongGauge;
 import org.elasticsearch.telemetry.metric.LongHistogram;
@@ -48,6 +52,8 @@ public class APMMeterRegistry implements MeterRegistry {
     private final Registrar<DoubleGaugeAdapter> doubleGauges = new Registrar<>();
     private final Registrar<DoubleHistogramAdapter> doubleHistograms = new Registrar<>();
     private final Registrar<LongCounterAdapter> longCounters = new Registrar<>();
+    private final Registrar<LongAsyncCounterAdapter> longAsynchronousCounters = new Registrar<>();
+    private final Registrar<DoubleAsyncCounterAdapter> doubleAsynchronousCounters = new Registrar<>();
     private final Registrar<LongUpDownCounterAdapter> longUpDownCounters = new Registrar<>();
     private final Registrar<LongGaugeAdapter> longGauges = new Registrar<>();
     private final Registrar<LongHistogramAdapter> longHistograms = new Registrar<>();
@@ -127,6 +133,35 @@ public LongCounter registerLongCounter(String name, String description, String u
         }
     }
 
+    @Override
+    public LongAsyncCounter registerLongAsyncCounter(String name, String description, String unit, Supplier<LongWithAttributes> observer) {
+        try (ReleasableLock lock = registerLock.acquire()) {
+            return longAsynchronousCounters.register(new LongAsyncCounterAdapter(meter, name, description, unit, observer));
+        }
+    }
+
+    @Override
+    public LongAsyncCounter getLongAsyncCounter(String name) {
+        return longAsynchronousCounters.get(name);
+    }
+
+    @Override
+    public DoubleAsyncCounter registerDoubleAsyncCounter(
+        String name,
+        String description,
+        String unit,
+        Supplier<DoubleWithAttributes> observer
+    ) {
+        try (ReleasableLock lock = registerLock.acquire()) {
+            return doubleAsynchronousCounters.register(new DoubleAsyncCounterAdapter(meter, name, description, unit, observer));
+        }
+    }
+
+    @Override
+    public DoubleAsyncCounter getDoubleAsyncCounter(String name) {
+        return doubleAsynchronousCounters.get(name);
+    }
+
     @Override
     public LongCounter getLongCounter(String name) {
         return longCounters.get(name);
diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java
index 2a806ca19a4e0..bbeaba0f6f088 100644
--- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java
+++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java
@@ -25,7 +25,7 @@
  * @param <T> delegated instrument
  */
 public abstract class AbstractInstrument<T> implements Instrument {
-    private static final int MAX_NAME_LENGTH = 63; // TODO(stu): change to 255 when we upgrade to otel 1.30+, see #101679
+    private static final int MAX_NAME_LENGTH = 255;
     private final AtomicReference<T> delegate;
     private final String name;
     private final String description;
diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleAsyncCounterAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleAsyncCounterAdapter.java
new file mode 100644
index 0000000000000..a1ea9f33e31fb
--- /dev/null
+++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleAsyncCounterAdapter.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.telemetry.apm.internal.metrics;
+
+import io.opentelemetry.api.metrics.Meter;
+import io.opentelemetry.api.metrics.ObservableDoubleCounter;
+
+import org.elasticsearch.common.util.concurrent.ReleasableLock;
+import org.elasticsearch.telemetry.apm.AbstractInstrument;
+import org.elasticsearch.telemetry.metric.DoubleAsyncCounter;
+import org.elasticsearch.telemetry.metric.DoubleWithAttributes;
+
+import java.util.Objects;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Supplier;
+
+public class DoubleAsyncCounterAdapter extends AbstractInstrument<ObservableDoubleCounter> implements DoubleAsyncCounter {
+    private final Supplier<DoubleWithAttributes> observer;
+    private final ReleasableLock closedLock = new ReleasableLock(new ReentrantLock());
+    private boolean closed = false;
+
+    public DoubleAsyncCounterAdapter(Meter meter, String name, String description, String unit, Supplier<DoubleWithAttributes> observer) {
+        super(meter, name, description, unit);
+        this.observer = observer;
+    }
+
+    @Override
+    protected ObservableDoubleCounter buildInstrument(Meter meter) {
+        var builder = Objects.requireNonNull(meter).counterBuilder(getName());
+        return builder.setDescription(getDescription()).setUnit(getUnit()).ofDoubles().buildWithCallback(measurement -> {
+            DoubleWithAttributes observation;
+            try {
+                observation = observer.get();
+            } catch (RuntimeException err) {
+                assert false : "observer must not throw [" + err.getMessage() + "]";
+                return;
+            }
+            measurement.record(observation.value(), OtelHelper.fromMap(observation.attributes()));
+        });
+    }
+
+    @Override
+    public void close() throws Exception {
+        try (ReleasableLock lock = closedLock.acquire()) {
+            if (closed == false) {
+                getInstrument().close();
+            }
+            closed = true;
+        }
+    }
+}
diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongAsyncCounterAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongAsyncCounterAdapter.java
new file mode 100644
index 0000000000000..126cca1964283
--- /dev/null
+++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongAsyncCounterAdapter.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.telemetry.apm.internal.metrics;
+
+import io.opentelemetry.api.metrics.Meter;
+import io.opentelemetry.api.metrics.ObservableLongCounter;
+
+import org.elasticsearch.common.util.concurrent.ReleasableLock;
+import org.elasticsearch.telemetry.apm.AbstractInstrument;
+import org.elasticsearch.telemetry.metric.LongAsyncCounter;
+import org.elasticsearch.telemetry.metric.LongWithAttributes;
+
+import java.util.Objects;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Supplier;
+
+public class LongAsyncCounterAdapter extends AbstractInstrument<ObservableLongCounter> implements LongAsyncCounter {
+    private final Supplier<LongWithAttributes> observer;
+    private final ReleasableLock closedLock = new ReleasableLock(new ReentrantLock());
+    private boolean closed = false;
+
+    public LongAsyncCounterAdapter(Meter meter, String name, String description, String unit, Supplier<LongWithAttributes> observer) {
+        super(meter, name, description, unit);
+        this.observer = observer;
+    }
+
+    @Override
+    protected ObservableLongCounter buildInstrument(Meter meter) {
+        var builder = Objects.requireNonNull(meter).counterBuilder(getName());
+        return builder.setDescription(getDescription()).setUnit(getUnit()).buildWithCallback(measurement -> {
+            LongWithAttributes observation;
+            try {
+                observation = observer.get();
+            } catch (RuntimeException err) {
+                assert false : "observer must not throw [" + err.getMessage() + "]";
+                return;
+            }
+            measurement.record(observation.value(), OtelHelper.fromMap(observation.attributes()));
+        });
+    }
+
+    @Override
+    public void close() throws Exception {
+        try (ReleasableLock lock = closedLock.acquire()) {
+            if (closed == false) {
+                getInstrument().close();
+            }
+            closed = true;
+        }
+    }
+}
diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java
index b393edd6e58e3..82dd911d1b821 100644
--- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java
+++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java
@@ -88,13 +88,13 @@ public void testNoopIsSetOnStop() {
     public void testMaxNameLength() {
         APMMeterService apmMeter = new APMMeterService(TELEMETRY_ENABLED, () -> testOtel, () -> noopOtel);
         apmMeter.start();
-        int max_length = 63;
+        int max_length = 255;
         var counter = apmMeter.getMeterRegistry().registerLongCounter("a".repeat(max_length), "desc", "count");
         assertThat(counter, instanceOf(LongCounter.class));
         IllegalArgumentException iae = expectThrows(
             IllegalArgumentException.class,
             () -> apmMeter.getMeterRegistry().registerLongCounter("a".repeat(max_length + 1), "desc", "count")
         );
-        assertThat(iae.getMessage(), containsString("exceeds maximum length [63]"));
+        assertThat(iae.getMessage(), containsString("exceeds maximum length [255]"));
     }
 }
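The two new adapter classes above give the metrics API its first asynchronous instruments: nothing is recorded at call sites, the registered supplier is polled whenever OTel collects. A hedged usage fragment (the metric name, description, and attributes are invented; the types and methods are the ones added in this diff, and the enclosing method is assumed to declare `throws Exception` for `close()`):

    AtomicLong totalBytes = new AtomicLong();
    LongAsyncCounter counter = meterRegistry.registerLongAsyncCounter(
        "es.example.bytes.total",                 // hypothetical metric name
        "total bytes processed",                  // hypothetical description
        "bytes",
        () -> new LongWithAttributes(totalBytes.get(), Map.of("source", "example"))
    );
    totalBytes.addAndGet(1024);  // collections from now on observe 1024
    counter.close();             // unregisters the OTel callback; later collections record nothing

Note that the observer deliberately must not throw: `buildInstrument` swallows `RuntimeException` behind an assert so a misbehaving callback cannot break the collection cycle.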
diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/RecordingOtelMeter.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/RecordingOtelMeter.java
index 6661653499f63..94627c1e53813 100644
--- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/RecordingOtelMeter.java
+++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/RecordingOtelMeter.java
@@ -110,8 +110,10 @@ public LongCounter build() {
 
         @Override
         public ObservableLongCounter buildWithCallback(Consumer<ObservableLongMeasurement> callback) {
-            unimplemented();
-            return null;
+            LongAsyncCounterRecorder longAsyncCounter = new LongAsyncCounterRecorder(name, callback);
+            recorder.register(longAsyncCounter, longAsyncCounter.getInstrument(), name, description, unit);
+            callbacks.add(longAsyncCounter);
+            return longAsyncCounter;
         }
 
         @Override
@@ -121,6 +123,24 @@ public ObservableLongMeasurement buildObserver() {
         }
     }
 
+    private class LongAsyncCounterRecorder extends AbstractInstrument implements ObservableLongCounter, Callback, OtelInstrument {
+        final Consumer<ObservableLongMeasurement> callback;
+
+        LongAsyncCounterRecorder(String name, Consumer<ObservableLongMeasurement> callback) {
+            super(name, InstrumentType.LONG_ASYNC_COUNTER);
+            this.callback = callback;
+        }
+
+        @Override
+        public void close() {
+            callbacks.remove(this);
+        }
+
+        public void doCall() {
+            callback.accept(new LongMeasurementRecorder(name, instrument));
+        }
+    }
+
     private class LongRecorder extends LongUpDownRecorder implements LongCounter, OtelInstrument {
         LongRecorder(String name) {
             super(name, InstrumentType.LONG_COUNTER);
@@ -172,8 +192,10 @@ public DoubleCounter build() {
 
         @Override
         public ObservableDoubleCounter buildWithCallback(Consumer<ObservableDoubleMeasurement> callback) {
-            unimplemented();
-            return null;
+            DoubleAsyncCounterRecorder doubleAsyncCounter = new DoubleAsyncCounterRecorder(name, callback);
+            recorder.register(doubleAsyncCounter, doubleAsyncCounter.getInstrument(), name, description, unit);
+            callbacks.add(doubleAsyncCounter);
+            return doubleAsyncCounter;
         }
 
         @Override
@@ -183,6 +205,24 @@ public ObservableDoubleMeasurement buildObserver() {
         }
     }
 
+    private class DoubleAsyncCounterRecorder extends AbstractInstrument implements ObservableDoubleCounter, Callback, OtelInstrument {
+        final Consumer<ObservableDoubleMeasurement> callback;
+
+        DoubleAsyncCounterRecorder(String name, Consumer<ObservableDoubleMeasurement> callback) {
+            super(name, InstrumentType.DOUBLE_ASYNC_COUNTER);
+            this.callback = callback;
+        }
+
+        @Override
+        public void close() {
+            callbacks.remove(this);
+        }
+
+        public void doCall() {
+            callback.accept(new DoubleMeasurementRecorder(name, instrument));
+        }
+    }
+
     private class DoubleRecorder extends DoubleUpDownRecorder implements DoubleCounter, OtelInstrument {
         DoubleRecorder(String name) {
             super(name, InstrumentType.DOUBLE_COUNTER);
diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/AsyncCountersAdapterTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/AsyncCountersAdapterTests.java
new file mode 100644
index 0000000000000..fa8706deee870
--- /dev/null
+++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/AsyncCountersAdapterTests.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */ + +package org.elasticsearch.telemetry.apm.internal.metrics; + +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.apm.APMMeterRegistry; +import org.elasticsearch.telemetry.apm.RecordingOtelMeter; +import org.elasticsearch.telemetry.metric.DoubleAsyncCounter; +import org.elasticsearch.telemetry.metric.DoubleWithAttributes; +import org.elasticsearch.telemetry.metric.LongAsyncCounter; +import org.elasticsearch.telemetry.metric.LongWithAttributes; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class AsyncCountersAdapterTests extends ESTestCase { + RecordingOtelMeter otelMeter; + APMMeterRegistry registry; + + @Before + public void init() { + otelMeter = new RecordingOtelMeter(); + registry = new APMMeterRegistry(otelMeter); + } + + // testing that a value reported is then used in a callback + public void testLongAsyncCounter() throws Exception { + AtomicReference attrs = new AtomicReference<>(); + LongAsyncCounter longAsyncCounter = registry.registerLongAsyncCounter("name", "desc", "unit", attrs::get); + + attrs.set(new LongWithAttributes(1L, Map.of("k", 1L))); + + otelMeter.collectMetrics(); + + List metrics = otelMeter.getRecorder().getMeasurements(longAsyncCounter); + assertThat(metrics, hasSize(1)); + assertThat(metrics.get(0).attributes(), equalTo(Map.of("k", 1L))); + assertThat(metrics.get(0).getLong(), equalTo(1L)); + + attrs.set(new LongWithAttributes(2L, Map.of("k", 5L))); + + otelMeter.getRecorder().resetCalls(); + otelMeter.collectMetrics(); + + metrics = otelMeter.getRecorder().getMeasurements(longAsyncCounter); + assertThat(metrics, hasSize(1)); + assertThat(metrics.get(0).attributes(), equalTo(Map.of("k", 5L))); + assertThat(metrics.get(0).getLong(), equalTo(2L)); + + longAsyncCounter.close(); + + otelMeter.getRecorder().resetCalls(); + otelMeter.collectMetrics(); + + metrics = otelMeter.getRecorder().getMeasurements(longAsyncCounter); + assertThat(metrics, hasSize(0)); + } + + public void testDoubleAsyncAdapter() throws Exception { + AtomicReference attrs = new AtomicReference<>(); + DoubleAsyncCounter doubleAsyncCounter = registry.registerDoubleAsyncCounter("name", "desc", "unit", attrs::get); + + attrs.set(new DoubleWithAttributes(1.0, Map.of("k", 1.0))); + + otelMeter.collectMetrics(); + + List metrics = otelMeter.getRecorder().getMeasurements(doubleAsyncCounter); + assertThat(metrics, hasSize(1)); + assertThat(metrics.get(0).attributes(), equalTo(Map.of("k", 1.0))); + assertThat(metrics.get(0).getDouble(), equalTo(1.0)); + + attrs.set(new DoubleWithAttributes(2.0, Map.of("k", 5.0))); + + otelMeter.getRecorder().resetCalls(); + otelMeter.collectMetrics(); + + metrics = otelMeter.getRecorder().getMeasurements(doubleAsyncCounter); + assertThat(metrics, hasSize(1)); + assertThat(metrics.get(0).attributes(), equalTo(Map.of("k", 5.0))); + assertThat(metrics.get(0).getDouble(), equalTo(2.0)); + + doubleAsyncCounter.close(); + + otelMeter.getRecorder().resetCalls(); + otelMeter.collectMetrics(); + + metrics = otelMeter.getRecorder().getMeasurements(doubleAsyncCounter); + assertThat(metrics, hasSize(0)); + } +} diff --git a/modules/build.gradle b/modules/build.gradle index ad7049a9905f0..7707b60b38b25 100644 --- a/modules/build.gradle +++ b/modules/build.gradle @@ -8,7 +8,7 @@ configure(subprojects.findAll { 
it.parent.path == project.path }) { group = 'org.elasticsearch.plugin' // for modules which publish client jars - apply plugin: 'elasticsearch.internal-testclusters' + // apply plugin: 'elasticsearch.internal-testclusters' apply plugin: 'elasticsearch.internal-es-plugin' apply plugin: 'elasticsearch.internal-test-artifact' diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java index 188d0d282ffb4..61b53ea10a786 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java @@ -49,7 +49,6 @@ import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.MultiSearchRequestBuilder; -import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.update.UpdateRequest; @@ -435,16 +434,12 @@ public void testComposableTemplateOnlyMatchingWithDataStreamName() throws Except }"""; PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("id_1"); request.indexTemplate( - new ComposableIndexTemplate( - List.of(dataStreamName), // use no wildcard, so that backing indices don't match just by name - new Template(null, new CompressedXContent(mapping), null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + // use no wildcard, so that backing indices don't match just by name + .indexPatterns(List.of(dataStreamName)) + .template(new Template(null, new CompressedXContent(mapping), null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); @@ -517,16 +512,11 @@ public void testTimeStampValidationInvalidFieldMapping() throws Exception { }"""; PutComposableIndexTemplateAction.Request createTemplateRequest = new PutComposableIndexTemplateAction.Request("logs-foo"); createTemplateRequest.indexTemplate( - new ComposableIndexTemplate( - List.of("logs-*"), - new Template(null, new CompressedXContent(mapping), null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs-*")) + .template(new Template(null, new CompressedXContent(mapping), null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); Exception e = expectThrows( @@ -551,8 +541,7 @@ public void testResolvabilityOfDataStreamsInAPIs() throws Exception { verifyResolvability( dataStreamName, - client().prepareIndex(dataStreamName) - .setSource("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON) + prepareIndex(dataStreamName).setSource("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON) .setOpType(DocWriteRequest.OpType.CREATE), false ); @@ -595,8 +584,7 @@ public void testResolvabilityOfDataStreamsInAPIs() throws Exception { client().execute(CreateDataStreamAction.INSTANCE, request).actionGet(); verifyResolvability( "logs-barbaz", - client().prepareIndex("logs-barbaz") - .setSource("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON) + 
prepareIndex("logs-barbaz").setSource("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON) .setOpType(DocWriteRequest.OpType.CREATE), false ); @@ -672,16 +660,14 @@ public void testCannotDeleteComposableTemplateUsedByDataStream() throws Exceptio // Now replace it with a higher-priority template and delete the old one PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("id2"); request.indexTemplate( - new ComposableIndexTemplate( - Collections.singletonList("metrics-foobar*"), // Match the other data stream with a slightly different pattern - new Template(null, null, null), - null, - 2L, // Higher priority than the other composable template - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + // Match the other data stream with a slightly different pattern + .indexPatterns(Collections.singletonList("metrics-foobar*")) + .template(new Template(null, null, null)) + // Higher priority than the other composable template + .priority(2L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); @@ -714,13 +700,11 @@ public void testAliasActionsOnDataStreams() throws Exception { public void testDataSteamAliasWithFilter() throws Exception { putComposableIndexTemplate("id1", List.of("logs-*")); String dataStreamName = "logs-foobar"; - client().prepareIndex(dataStreamName) - .setId("1") + prepareIndex(dataStreamName).setId("1") .setSource("{\"@timestamp\": \"2022-12-12\", \"type\": \"x\"}", XContentType.JSON) .setOpType(DocWriteRequest.OpType.CREATE) .get(); - client().prepareIndex(dataStreamName) - .setId("2") + prepareIndex(dataStreamName).setId("2") .setSource("{\"@timestamp\": \"2022-12-12\", \"type\": \"y\"}", XContentType.JSON) .setOpType(DocWriteRequest.OpType.CREATE) .get(); @@ -789,13 +773,11 @@ public void testDataSteamAliasWithFilter() throws Exception { public void testSearchFilteredAndUnfilteredAlias() throws Exception { putComposableIndexTemplate("id1", List.of("logs-*")); String dataStreamName = "logs-foobar"; - client().prepareIndex(dataStreamName) - .setId("1") + prepareIndex(dataStreamName).setId("1") .setSource("{\"@timestamp\": \"2022-12-12\", \"type\": \"x\"}", XContentType.JSON) .setOpType(DocWriteRequest.OpType.CREATE) .get(); - client().prepareIndex(dataStreamName) - .setId("2") + prepareIndex(dataStreamName).setId("2") .setSource("{\"@timestamp\": \"2022-12-12\", \"type\": \"y\"}", XContentType.JSON) .setOpType(DocWriteRequest.OpType.CREATE) .get(); @@ -1211,15 +1193,11 @@ public void testIndexDocsWithCustomRoutingTargetingDataStreamIsNotAllowed() thro } public void testIndexDocsWithCustomRoutingAllowed() throws Exception { - ComposableIndexTemplate template = new ComposableIndexTemplate( - List.of("logs-foobar*"), - new Template(null, null, null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, true) - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs-foobar*")) + .template(new Template(null, null, null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, true)) + .build(); client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("id1").indexTemplate(template) @@ -1357,16 +1335,11 @@ public void testMultipleTimestampValuesInDocument() throws Exception { public void testMixedAutoCreate() throws 
Exception { PutComposableIndexTemplateAction.Request createTemplateRequest = new PutComposableIndexTemplateAction.Request("logs-foo"); createTemplateRequest.indexTemplate( - new ComposableIndexTemplate( - List.of("logs-foo*"), - new Template(null, new CompressedXContent(generateMapping("@timestamp")), null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs-foo*")) + .template(new Template(null, new CompressedXContent(generateMapping("@timestamp")), null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, createTemplateRequest).actionGet(); @@ -1815,7 +1788,9 @@ public ClusterState execute(ClusterState currentState) throws Exception { original.isSystem(), original.isAllowCustomRouting(), original.getIndexMode(), - original.getLifecycle() + original.getLifecycle(), + original.isFailureStore(), + original.getFailureIndices() ); brokenDataStreamHolder.set(broken); return ClusterState.builder(currentState) @@ -1869,11 +1844,12 @@ private static void verifyResolvability( if (fail) { String expectedErrorMessage = "no such index [" + dataStream + "]"; if (requestBuilder instanceof MultiSearchRequestBuilder) { - MultiSearchResponse multiSearchResponse = ((MultiSearchRequestBuilder) requestBuilder).get(); - assertThat(multiSearchResponse.getResponses().length, equalTo(1)); - assertThat(multiSearchResponse.getResponses()[0].isFailure(), is(true)); - assertThat(multiSearchResponse.getResponses()[0].getFailure(), instanceOf(IllegalArgumentException.class)); - assertThat(multiSearchResponse.getResponses()[0].getFailure().getMessage(), equalTo(expectedErrorMessage)); + assertResponse((MultiSearchRequestBuilder) requestBuilder, multiSearchResponse -> { + assertThat(multiSearchResponse.getResponses().length, equalTo(1)); + assertThat(multiSearchResponse.getResponses()[0].isFailure(), is(true)); + assertThat(multiSearchResponse.getResponses()[0].getFailure(), instanceOf(IllegalArgumentException.class)); + assertThat(multiSearchResponse.getResponses()[0].getFailure().getMessage(), equalTo(expectedErrorMessage)); + }); } else if (requestBuilder instanceof ValidateQueryRequestBuilder) { Exception e = expectThrows(IndexNotFoundException.class, requestBuilder::get); assertThat(e.getMessage(), equalTo(expectedErrorMessage)); @@ -1885,8 +1861,10 @@ private static void verifyResolvability( if (requestBuilder instanceof SearchRequestBuilder searchRequestBuilder) { assertHitCount(searchRequestBuilder, expectedCount); } else if (requestBuilder instanceof MultiSearchRequestBuilder) { - MultiSearchResponse multiSearchResponse = ((MultiSearchRequestBuilder) requestBuilder).get(); - assertThat(multiSearchResponse.getResponses()[0].isFailure(), is(false)); + assertResponse( + (MultiSearchRequestBuilder) requestBuilder, + multiSearchResponse -> assertThat(multiSearchResponse.getResponses()[0].isFailure(), is(false)) + ); } else { requestBuilder.get(); } @@ -1936,19 +1914,17 @@ public void testPartitionedTemplate() throws IOException { /** * partition size with no routing required */ - ComposableIndexTemplate template = new ComposableIndexTemplate( - List.of("logs"), - new Template( - Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), - null, - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, true) - ); + 
ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs")) + .template( + new Template( + Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), + null, + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, true)) + .build(); ComposableIndexTemplate finalTemplate = template; client().execute( PutComposableIndexTemplateAction.INSTANCE, @@ -1957,24 +1933,22 @@ public void testPartitionedTemplate() throws IOException { /** * partition size with routing required */ - template = new ComposableIndexTemplate( - List.of("logs"), - new Template( - Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), - new CompressedXContent(""" - { - "_routing": { - "required": true - } - }"""), - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, true) - ); + template = ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs")) + .template( + new Template( + Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), + new CompressedXContent(""" + { + "_routing": { + "required": true + } + }"""), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, true)) + .build(); client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(template) @@ -1983,19 +1957,17 @@ public void testPartitionedTemplate() throws IOException { /** * routing settings with allow custom routing false */ - template = new ComposableIndexTemplate( - List.of("logs"), - new Template( - Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), - null, - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false) - ); + template = ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs")) + .template( + new Template( + Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), + null, + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); ComposableIndexTemplate finalTemplate1 = template; Exception e = expectThrows( IllegalArgumentException.class, @@ -2013,24 +1985,22 @@ public void testPartitionedTemplate() throws IOException { } public void testRoutingEnabledInMappingDisabledInDataStreamTemplate() throws IOException { - ComposableIndexTemplate template = new ComposableIndexTemplate( - List.of("logs"), - new Template( - Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), - new CompressedXContent(""" - { - "_routing": { - "required": true - } - }"""), - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false) - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs")) + .template( + new Template( + Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), + new CompressedXContent(""" + { + "_routing": { + "required": true + } + }"""), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); Exception e = expectThrows( IllegalArgumentException.class, () -> client().execute( @@ -2046,28 +2016,26 @@ public void 
testSearchWithRouting() throws IOException, ExecutionException, Inte /** * partition size with routing required */ - ComposableIndexTemplate template = new ComposableIndexTemplate( - List.of("my-logs"), - new Template( - Settings.builder() - .put("index.number_of_shards", "10") - .put("index.number_of_routing_shards", "10") - .put("index.routing_partition_size", "4") - .build(), - new CompressedXContent(""" - { - "_routing": { - "required": true - } - }"""), - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, true) - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of("my-logs")) + .template( + new Template( + Settings.builder() + .put("index.number_of_shards", "10") + .put("index.number_of_routing_shards", "10") + .put("index.routing_partition_size", "4") + .build(), + new CompressedXContent(""" + { + "_routing": { + "required": true + } + }"""), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, true)) + .build(); client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(template) @@ -2328,16 +2296,12 @@ static void putComposableIndexTemplate( ) throws IOException { PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); request.indexTemplate( - new ComposableIndexTemplate( - patterns, - new Template(settings, mappings == null ? null : CompressedXContent.fromJSON(mappings), aliases, lifecycle), - null, - null, - null, - metadata, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(patterns) + .template(new Template(settings, mappings == null ? 
null : CompressedXContent.fromJSON(mappings), aliases, lifecycle)) + .metadata(metadata) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java index ceac7423b0b72..c3e59be54cc7f 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java @@ -842,8 +842,7 @@ public void testDeleteDataStreamDuringSnapshot() throws Exception { .setOpType(DocWriteRequest.OpType.CREATE) .setId(Integer.toString(i)) .setSource(Collections.singletonMap("@timestamp", "2020-12-12")) - .execute() - .actionGet(); + .get(); } refresh(); assertDocCount(dataStream, 100L); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java index aeb7516c35816..672d2d21d73a5 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java @@ -33,23 +33,17 @@ public void testDefaultDataStreamAllocateToHot() { startHotOnlyNode(); ensureGreen(); - ComposableIndexTemplate template = new ComposableIndexTemplate( - Collections.singletonList(index), - null, - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList(index)) + + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(); client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("template").indexTemplate(template) ).actionGet(); - var dsIndexName = client().prepareIndex(index) - .setCreate(true) + var dsIndexName = prepareIndex(index).setCreate(true) .setId("1") .setSource("@timestamp", "2020-09-09") .setWaitForActiveShards(0) diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamIT.java index 922b58e3920e1..734e2d7273d19 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamIT.java @@ -317,15 +317,11 @@ public Collection getSystemDataStreamDescriptors() { ".test-data-stream", "system data stream test", Type.EXTERNAL, - new ComposableIndexTemplate( - List.of(".test-data-stream"), - new Template(Settings.EMPTY, mappings, null), - null, - null, - null, - null, - new DataStreamTemplate() - ), + ComposableIndexTemplate.builder() + .indexPatterns(List.of(".test-data-stream")) + .template(new Template(Settings.EMPTY, mappings, null)) + .dataStreamTemplate(new DataStreamTemplate()) + .build(), Map.of(), List.of("product"), ExecutorNames.DEFAULT_SYSTEM_DATA_STREAM_THREAD_POOLS diff --git 
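The data-streams hunks in this stretch of the diff all make the same mechanical change: the positional eight-argument `ComposableIndexTemplate` constructor, called with mostly `null`s, gives way to `ComposableIndexTemplate.builder()`, which names only the non-null pieces. The shape in one place (a condensed sketch; `patterns`, `settings`, and `mappings` stand in for the values each test supplies):

    // All names here appear in the surrounding hunks; only the assembly is condensed.
    ComposableIndexTemplate template = ComposableIndexTemplate.builder()
        .indexPatterns(patterns)                                              // was: positional argument 1
        .template(new Template(settings, mappings, null))                     // was: positional argument 2
        .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) // was: buried among nulls
        .build();

Unset builder properties default to the `null` the old constructor calls spelled out by hand, which is what lets the call sites shrink so much.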
a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java index 0f60cbba0a4ff..698656dfa7406 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java @@ -64,8 +64,7 @@ public void testSystemDataStreamInGlobalState() throws Exception { } // Index a doc so that a concrete backing index will be created - DocWriteResponse indexRepsonse = client().prepareIndex(SYSTEM_DATA_STREAM_NAME) - .setId("42") + DocWriteResponse indexRepsonse = prepareIndex(SYSTEM_DATA_STREAM_NAME).setId("42") .setSource("{ \"@timestamp\": \"2099-03-08T11:06:07.000Z\", \"name\": \"my-name\" }", XContentType.JSON) .setOpType(DocWriteRequest.OpType.CREATE) .get(); @@ -162,20 +161,16 @@ public void testSystemDataStreamInFeatureState() throws Exception { } // Index a doc so that a concrete backing index will be created - DocWriteResponse indexToDataStreamResponse = client().prepareIndex(SYSTEM_DATA_STREAM_NAME) - .setId("42") + DocWriteResponse indexToDataStreamResponse = prepareIndex(SYSTEM_DATA_STREAM_NAME).setId("42") .setSource("{ \"@timestamp\": \"2099-03-08T11:06:07.000Z\", \"name\": \"my-name\" }", XContentType.JSON) .setOpType(DocWriteRequest.OpType.CREATE) - .execute() - .actionGet(); + .get(); assertThat(indexToDataStreamResponse.status().getStatus(), oneOf(200, 201)); // Index a doc so that a concrete backing index will be created - DocWriteResponse indexResponse = client().prepareIndex("my-index") - .setId("42") + DocWriteResponse indexResponse = prepareIndex("my-index").setId("42") .setSource("{ \"name\": \"my-name\" }", XContentType.JSON) .setOpType(DocWriteRequest.OpType.CREATE) - .execute() .get(); assertThat(indexResponse.status().getStatus(), oneOf(200, 201)); @@ -238,15 +233,10 @@ public Collection getSystemDataStreamDescriptors() { SYSTEM_DATA_STREAM_NAME, "a system data stream for testing", SystemDataStreamDescriptor.Type.EXTERNAL, - new ComposableIndexTemplate( - List.of(".system-data-stream"), - null, - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate() - ), + ComposableIndexTemplate.builder() + .indexPatterns(List.of(".system-data-stream")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(), Map.of(), Collections.singletonList("test"), new ExecutorNames(ThreadPool.Names.SYSTEM_CRITICAL_READ, ThreadPool.Names.SYSTEM_READ, ThreadPool.Names.SYSTEM_WRITE) diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java index 5dbf52f33d7da..ab42d831c6545 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java @@ -115,16 +115,11 @@ public void testTimeRanges() throws Exception { if (randomBoolean()) { var request = new PutComposableIndexTemplateAction.Request("id"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("k8s*"), - new Template(templateSettings.build(), mapping, null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - 
null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("k8s*")) + .template(new Template(templateSettings.build(), mapping, null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); } else { @@ -134,16 +129,12 @@ public void testTimeRanges() throws Exception { var putTemplateRequest = new PutComposableIndexTemplateAction.Request("id"); putTemplateRequest.indexTemplate( - new ComposableIndexTemplate( - List.of("k8s*"), - new Template(templateSettings.build(), null, null), - List.of("1"), - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("k8s*")) + .template(new Template(templateSettings.build(), null, null)) + .componentTemplates(List.of("1")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, putTemplateRequest).actionGet(); } @@ -249,20 +240,17 @@ public void testInvalidTsdbTemplatesNoTimeSeriesDimensionAttribute() throws Exce { var request = new PutComposableIndexTemplateAction.Request("id"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("k8s*"), - new Template( - Settings.builder().put("index.mode", "time_series").put("index.routing_path", "metricset").build(), - new CompressedXContent(mappingTemplate), - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("k8s*")) + .template( + new Template( + Settings.builder().put("index.mode", "time_series").put("index.routing_path", "metricset").build(), + new CompressedXContent(mappingTemplate), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); var e = expectThrows( IllegalArgumentException.class, @@ -280,20 +268,17 @@ public void testInvalidTsdbTemplatesNoTimeSeriesDimensionAttribute() throws Exce { var request = new PutComposableIndexTemplateAction.Request("id"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("k8s*"), - new Template( - Settings.builder().put("index.mode", "time_series").build(), - new CompressedXContent(mappingTemplate), - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("k8s*")) + .template( + new Template( + Settings.builder().put("index.mode", "time_series").build(), + new CompressedXContent(mappingTemplate), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); var e = expectThrows( InvalidIndexTemplateException.class, @@ -317,20 +302,17 @@ public void testInvalidTsdbTemplatesNoKeywordFieldType() throws Exception { }"""; var request = new PutComposableIndexTemplateAction.Request("id"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("k8s*"), - new Template( - Settings.builder().put("index.mode", "time_series").put("index.routing_path", "metricset").build(), - new CompressedXContent(mappingTemplate), - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("k8s*")) + .template( + new Template( + 
Settings.builder().put("index.mode", "time_series").put("index.routing_path", "metricset").build(), + new CompressedXContent(mappingTemplate), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); Exception e = expectThrows( IllegalArgumentException.class, @@ -360,20 +342,17 @@ public void testInvalidTsdbTemplatesMissingSettings() throws Exception { }"""; var request = new PutComposableIndexTemplateAction.Request("id"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("k8s*"), - new Template( - Settings.builder().put("index.routing_path", "metricset").build(), - new CompressedXContent(mappingTemplate), - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("k8s*")) + .template( + new Template( + Settings.builder().put("index.routing_path", "metricset").build(), + new CompressedXContent(mappingTemplate), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); var e = expectThrows( IllegalArgumentException.class, @@ -389,16 +368,11 @@ public void testSkippingShards() throws Exception { var templateSettings = Settings.builder().put("index.mode", "time_series").put("index.routing_path", "metricset").build(); var request = new PutComposableIndexTemplateAction.Request("id1"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("pattern-1"), - new Template(templateSettings, mapping, null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("pattern-1")) + .template(new Template(templateSettings, mapping, null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); var indexRequest = new IndexRequest("pattern-1").opType(DocWriteRequest.OpType.CREATE).setRefreshPolicy("true"); @@ -408,16 +382,11 @@ public void testSkippingShards() throws Exception { { var request = new PutComposableIndexTemplateAction.Request("id2"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("pattern-2"), - new Template(null, mapping, null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("pattern-2")) + .template(new Template(null, mapping, null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); var indexRequest = new IndexRequest("pattern-2").opType(DocWriteRequest.OpType.CREATE).setRefreshPolicy("true"); @@ -457,26 +426,23 @@ public void testTrimId() throws Exception { String dataStreamName = "k8s"; var putTemplateRequest = new PutComposableIndexTemplateAction.Request("id"); putTemplateRequest.indexTemplate( - new ComposableIndexTemplate( - List.of(dataStreamName + "*"), - new Template( - Settings.builder() - .put("index.mode", "time_series") - .put("index.number_of_replicas", 0) - // Reduce sync interval to speedup this integraton test, - // otherwise by default it will take 30 seconds before minimum retained seqno is updated: - .put("index.soft_deletes.retention_lease.sync_interval", "100ms") - .build(), - new CompressedXContent(MAPPING_TEMPLATE), - null - 
), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStreamName + "*")) + .template( + new Template( + Settings.builder() + .put("index.mode", "time_series") + .put("index.number_of_replicas", 0) + // Reduce sync interval to speedup this integraton test, + // otherwise by default it will take 30 seconds before minimum retained seqno is updated: + .put("index.soft_deletes.retention_lease.sync_interval", "100ms") + .build(), + new CompressedXContent(MAPPING_TEMPLATE), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, putTemplateRequest).actionGet(); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudSystemDataStreamLifecycleIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudSystemDataStreamLifecycleIT.java index d2baec3150392..8e590d3f28346 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudSystemDataStreamLifecycleIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudSystemDataStreamLifecycleIT.java @@ -199,20 +199,18 @@ public Collection getSystemDataStreamDescriptors() { ".test-data-stream", "system data stream test", Type.EXTERNAL, - new ComposableIndexTemplate( - List.of(".test-data-stream"), - new Template( - Settings.EMPTY, - mappings, - null, - DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build() - ), - null, - null, - null, - null, - new DataStreamTemplate() - ), + ComposableIndexTemplate.builder() + .indexPatterns(List.of(".test-data-stream")) + .template( + new Template( + Settings.EMPTY, + mappings, + null, + DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build() + ) + ) + .dataStreamTemplate(new DataStreamTemplate()) + .build(), Map.of(), List.of("product"), ExecutorNames.DEFAULT_SYSTEM_DATA_STREAM_THREAD_POOLS diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java index 0d3588ba20b9a..7ac86c8aee614 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java @@ -182,16 +182,10 @@ public void testOriginationDate() throws Exception { }"""; PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("id2"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("index_*"), - new Template(null, CompressedXContent.fromJSON(mapping), null, null), - null, - null, - null, - null, - null, - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("index_*")) + .template(new Template(null, CompressedXContent.fromJSON(mapping), null, null)) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); @@ -628,35 +622,6 @@ public void testDataLifecycleServiceConfiguresTheMergePolicy() throws Exception }); } - private static List getBackingIndices(String dataStreamName) { - 
GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); - GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) - .actionGet(); - assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); - assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getName(), equalTo(dataStreamName)); - return getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().stream().map(Index::getName).toList(); - } - - static void indexDocs(String dataStream, int numDocs) { - BulkRequest bulkRequest = new BulkRequest(); - for (int i = 0; i < numDocs; i++) { - String value = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(System.currentTimeMillis()); - bulkRequest.add( - new IndexRequest(dataStream).opType(DocWriteRequest.OpType.CREATE) - .source(String.format(Locale.ROOT, "{\"%s\":\"%s\"}", DEFAULT_TIMESTAMP_FIELD, value), XContentType.JSON) - ); - } - BulkResponse bulkResponse = client().bulk(bulkRequest).actionGet(); - assertThat(bulkResponse.getItems().length, equalTo(numDocs)); - String backingIndexPrefix = DataStream.BACKING_INDEX_PREFIX + dataStream; - for (BulkItemResponse itemResponse : bulkResponse) { - assertThat(itemResponse.getFailureMessage(), nullValue()); - assertThat(itemResponse.status(), equalTo(RestStatus.CREATED)); - assertThat(itemResponse.getIndex(), startsWith(backingIndexPrefix)); - } - indicesAdmin().refresh(new RefreshRequest(dataStream)).actionGet(); - } - public void testReenableDataStreamLifecycle() throws Exception { // start with a lifecycle that's not enabled DataStreamLifecycle lifecycle = new DataStreamLifecycle(null, null, false); @@ -706,6 +671,35 @@ public void testReenableDataStreamLifecycle() throws Exception { }); } + private static List getBackingIndices(String dataStreamName) { + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) + .actionGet(); + assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); + assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getName(), equalTo(dataStreamName)); + return getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().stream().map(Index::getName).toList(); + } + + static void indexDocs(String dataStream, int numDocs) { + BulkRequest bulkRequest = new BulkRequest(); + for (int i = 0; i < numDocs; i++) { + String value = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(System.currentTimeMillis()); + bulkRequest.add( + new IndexRequest(dataStream).opType(DocWriteRequest.OpType.CREATE) + .source(String.format(Locale.ROOT, "{\"%s\":\"%s\"}", DEFAULT_TIMESTAMP_FIELD, value), XContentType.JSON) + ); + } + BulkResponse bulkResponse = client().bulk(bulkRequest).actionGet(); + assertThat(bulkResponse.getItems().length, equalTo(numDocs)); + String backingIndexPrefix = DataStream.BACKING_INDEX_PREFIX + dataStream; + for (BulkItemResponse itemResponse : bulkResponse) { + assertThat(itemResponse.getFailureMessage(), nullValue()); + assertThat(itemResponse.status(), equalTo(RestStatus.CREATED)); + assertThat(itemResponse.getIndex(), startsWith(backingIndexPrefix)); + } + indicesAdmin().refresh(new RefreshRequest(dataStream)).actionGet(); + } + static void putComposableIndexTemplate( String id, @Nullable String mappings, 
@@ -716,16 +710,12 @@ static void putComposableIndexTemplate( ) throws IOException { PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); request.indexTemplate( - new ComposableIndexTemplate( - patterns, - new Template(settings, mappings == null ? null : CompressedXContent.fromJSON(mappings), null, lifecycle), - null, - null, - null, - metadata, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(patterns) + .template(new Template(settings, mappings == null ? null : CompressedXContent.fromJSON(mappings), null, lifecycle)) + .metadata(metadata) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java index c9968a545cb7d..57febae28bb4d 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java @@ -351,16 +351,12 @@ static void putComposableIndexTemplate( ) throws IOException { PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); request.indexTemplate( - new ComposableIndexTemplate( - patterns, - new Template(settings, mappings == null ? null : CompressedXContent.fromJSON(mappings), null, lifecycle), - null, - null, - null, - metadata, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(patterns) + .template(new Template(settings, mappings == null ? 
null : CompressedXContent.fromJSON(mappings), null, lifecycle)) + .metadata(metadata) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); } diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java index b150c71c86122..b8e79d2fec7cd 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java @@ -12,6 +12,7 @@ import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.util.List; @@ -25,6 +26,14 @@ public class LogsDataStreamIT extends DisabledSecurityDataStreamTestCase { + private RestClient client; + + @Before + public void setup() throws Exception { + client = client(); + waitForLogs(client); + } + @After public void cleanUp() throws IOException { adminClient().performRequest(new Request("DELETE", "_data_stream/*")); @@ -32,9 +41,6 @@ public void cleanUp() throws IOException { @SuppressWarnings("unchecked") public void testDefaultLogsSettingAndMapping() throws Exception { - RestClient client = client(); - waitForLogs(client); - String dataStreamName = "logs-generic-default"; createDataStream(client, dataStreamName); String backingIndex = getWriteBackingIndex(client, dataStreamName); @@ -104,9 +110,6 @@ public void testDefaultLogsSettingAndMapping() throws Exception { @SuppressWarnings("unchecked") public void testCustomMapping() throws Exception { - RestClient client = client(); - waitForLogs(client); - { Request request = new Request("POST", "/_component_template/logs@custom"); request.setJsonEntity(""" @@ -182,9 +185,6 @@ public void testCustomMapping() throws Exception { @SuppressWarnings("unchecked") public void testLogsDefaultPipeline() throws Exception { - RestClient client = client(); - waitForLogs(client); - { Request request = new Request("POST", "/_component_template/logs@custom"); request.setJsonEntity(""" @@ -284,9 +284,6 @@ public void testLogsDefaultPipeline() throws Exception { @SuppressWarnings("unchecked") public void testLogsMessagePipeline() throws Exception { - RestClient client = client(); - waitForLogs(client); - { Request request = new Request("PUT", "/_ingest/pipeline/logs@custom"); request.setJsonEntity(""" @@ -412,8 +409,6 @@ public void testLogsMessagePipeline() throws Exception { @SuppressWarnings("unchecked") public void testNoSubobjects() throws Exception { - RestClient client = client(); - waitForLogs(client); { Request request = new Request("POST", "/_component_template/logs-test-subobjects-mappings"); request.setJsonEntity(""" @@ -633,6 +628,94 @@ public void testNoSubobjects() throws Exception { } + public void testAllFieldsAreSearchableByDefault() throws Exception { + final String dataStreamName = "logs-generic-default"; + createDataStream(client, dataStreamName); + + // index a doc with "message" field and an additional one that will be mapped to a "match_only_text" type + indexDoc(client, dataStreamName, """ + { + "@timestamp": "2023-04-18", + "message": "Hello world", + "another.message": "Hi world" + } + """); + + // verify that both fields are searchable when not querying specific fields + List results = 
searchDocs(client, dataStreamName, """ + { + "query": { + "simple_query_string": { + "query": "Hello" + } + } + } + """); + assertEquals(1, results.size()); + + results = searchDocs(client, dataStreamName, """ + { + "query": { + "simple_query_string": { + "query": "Hi" + } + } + } + """); + assertEquals(1, results.size()); + } + + public void testDefaultFieldCustomization() throws Exception { + Request request = new Request("POST", "/_component_template/logs@custom"); + request.setJsonEntity(""" + { + "template": { + "settings": { + "index": { + "query": { + "default_field": ["message"] + } + } + } + } + } + """); + assertOK(client.performRequest(request)); + + final String dataStreamName = "logs-generic-default"; + createDataStream(client, dataStreamName); + + indexDoc(client, dataStreamName, """ + { + "@timestamp": "2023-04-18", + "message": "Hello world", + "another.message": "Hi world" + } + """); + + List results = searchDocs(client, dataStreamName, """ + { + "query": { + "simple_query_string": { + "query": "Hello" + } + } + } + """); + assertEquals(1, results.size()); + + results = searchDocs(client, dataStreamName, """ + { + "query": { + "simple_query_string": { + "query": "Hi" + } + } + } + """); + assertEquals(0, results.size()); + } + static void waitForLogs(RestClient client) throws Exception { assertBusy(() -> { try { diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleStatsIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleStatsIT.java new file mode 100644 index 0000000000000..cce9132d99d19 --- /dev/null +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleStatsIT.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.datastreams.lifecycle; + +import org.elasticsearch.client.Request; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.datastreams.DisabledSecurityDataStreamTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; + +public class DataStreamLifecycleStatsIT extends DisabledSecurityDataStreamTestCase { + + @Before + public void updateClusterSettings() throws IOException { + updateClusterSettings( + Settings.builder() + .put("data_streams.lifecycle.poll_interval", "1s") + .put("cluster.lifecycle.default.rollover", "min_docs=1,max_docs=1") + .build() + ); + } + + @After + public void cleanUp() throws IOException { + adminClient().performRequest(new Request("DELETE", "_data_stream/*?expand_wildcards=hidden")); + } + + @SuppressWarnings("unchecked") + public void testStats() throws Exception { + // Check empty stats and wait until we have 2 executions + assertBusy(() -> { + Request request = new Request("GET", "/_lifecycle/stats"); + Map<String, Object> response = entityAsMap(client().performRequest(request)); + assertThat(response.get("data_stream_count"), is(0)); + assertThat(response.get("data_streams"), is(List.of())); + assertThat(response.containsKey("last_run_duration_in_millis"), is(true)); + assertThat(response.containsKey("time_between_starts_in_millis"), is(true)); + }); + + // Create a template + Request putComposableIndexTemplateRequest = new Request("POST", "/_index_template/1"); + putComposableIndexTemplateRequest.setJsonEntity(""" + { + "index_patterns": ["my-data-stream-*"], + "data_stream": {}, + "template": { + "lifecycle": {} + } + } + """); + assertOK(client().performRequest(putComposableIndexTemplateRequest)); + + // Create two data streams with one doc each + Request createDocRequest = new Request("POST", "/my-data-stream-1/_doc?refresh=true"); + createDocRequest.setJsonEntity("{ \"@timestamp\": \"2022-12-12\"}"); + assertOK(client().performRequest(createDocRequest)); + createDocRequest = new Request("POST", "/my-data-stream-2/_doc?refresh=true"); + createDocRequest.setJsonEntity("{ \"@timestamp\": \"2022-12-12\"}"); + assertOK(client().performRequest(createDocRequest)); + + Request request = new Request("GET", "/_lifecycle/stats"); + Map<String, Object> response = entityAsMap(client().performRequest(request)); + assertThat(response.get("data_stream_count"), is(2)); + List<Map<String, Object>> dataStreams = (List<Map<String, Object>>) response.get("data_streams"); + assertThat(dataStreams.get(0).get("name"), is("my-data-stream-1")); + assertThat((Integer) dataStreams.get(0).get("backing_indices_in_total"), greaterThanOrEqualTo(1)); + assertThat((Integer) dataStreams.get(0).get("backing_indices_in_error"), is(0)); + assertThat(dataStreams.get(1).get("name"), is("my-data-stream-2")); + assertThat((Integer) dataStreams.get(1).get("backing_indices_in_total"), greaterThanOrEqualTo(1)); + assertThat((Integer) dataStreams.get(1).get("backing_indices_in_error"), is(0)); + assertThat(response.containsKey("last_run_duration_in_millis"), is(true)); + assertThat(response.containsKey("time_between_starts_in_millis"), is(true)); + } +} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java index 2cf44dc0e3218..dd8e13cf18408 ---
a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java @@ -40,11 +40,14 @@ import org.elasticsearch.datastreams.lifecycle.action.DeleteDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.ExplainDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.GetDataStreamLifecycleAction; +import org.elasticsearch.datastreams.lifecycle.action.GetDataStreamLifecycleStatsAction; import org.elasticsearch.datastreams.lifecycle.action.PutDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.TransportDeleteDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.TransportExplainDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.TransportGetDataStreamLifecycleAction; +import org.elasticsearch.datastreams.lifecycle.action.TransportGetDataStreamLifecycleStatsAction; import org.elasticsearch.datastreams.lifecycle.action.TransportPutDataStreamLifecycleAction; +import org.elasticsearch.datastreams.lifecycle.rest.RestDataStreamLifecycleStatsAction; import org.elasticsearch.datastreams.lifecycle.rest.RestDeleteDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.rest.RestExplainDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.rest.RestGetDataStreamLifecycleAction; @@ -189,6 +192,7 @@ public Collection createComponents(PluginServices services) { actions.add(new ActionHandler<>(GetDataStreamLifecycleAction.INSTANCE, TransportGetDataStreamLifecycleAction.class)); actions.add(new ActionHandler<>(DeleteDataStreamLifecycleAction.INSTANCE, TransportDeleteDataStreamLifecycleAction.class)); actions.add(new ActionHandler<>(ExplainDataStreamLifecycleAction.INSTANCE, TransportExplainDataStreamLifecycleAction.class)); + actions.add(new ActionHandler<>(GetDataStreamLifecycleStatsAction.INSTANCE, TransportGetDataStreamLifecycleStatsAction.class)); return actions; } @@ -218,6 +222,7 @@ public List getRestHandlers( handlers.add(new RestGetDataStreamLifecycleAction()); handlers.add(new RestDeleteDataStreamLifecycleAction()); handlers.add(new RestExplainDataStreamLifecycleAction()); + handlers.add(new RestDataStreamLifecycleStatsAction()); return handlers; } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java index de81ca9bef18c..e44ee5107711f 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java @@ -136,19 +136,9 @@ static GetDataStreamAction.Response innerOperation( Map backingIndicesSettingsValues = new HashMap<>(); Metadata metadata = state.getMetadata(); - for (Index index : dataStream.getIndices()) { - IndexMetadata indexMetadata = metadata.index(index); - Boolean preferIlm = PREFER_ILM_SETTING.get(indexMetadata.getSettings()); - assert preferIlm != null : "must use the default prefer ilm setting value, if nothing else"; - ManagedBy managedBy; - if (metadata.isIndexManagedByILM(indexMetadata)) { - managedBy = ManagedBy.ILM; - } else if (dataStream.isIndexManagedByDataStreamLifecycle(index, metadata::index)) { - managedBy = ManagedBy.LIFECYCLE; - } else { - managedBy = 
ManagedBy.UNMANAGED; - } - backingIndicesSettingsValues.put(index, new IndexProperties(preferIlm, indexMetadata.getLifecyclePolicyName(), managedBy)); + collectIndexSettingsValues(dataStream, backingIndicesSettingsValues, metadata, dataStream.getIndices()); + if (DataStream.isFailureStoreEnabled() && dataStream.getFailureIndices().isEmpty() == false) { + collectIndexSettingsValues(dataStream, backingIndicesSettingsValues, metadata, dataStream.getFailureIndices()); } GetDataStreamAction.Response.TimeSeries timeSeries = null; @@ -213,6 +203,28 @@ static GetDataStreamAction.Response innerOperation( ); } + private static void collectIndexSettingsValues( + DataStream dataStream, + Map<Index, IndexProperties> backingIndicesSettingsValues, + Metadata metadata, + List<Index> backingIndices + ) { + for (Index index : backingIndices) { + IndexMetadata indexMetadata = metadata.index(index); + Boolean preferIlm = PREFER_ILM_SETTING.get(indexMetadata.getSettings()); + assert preferIlm != null : "must use the default prefer ilm setting value, if nothing else"; + ManagedBy managedBy; + if (metadata.isIndexManagedByILM(indexMetadata)) { + managedBy = ManagedBy.ILM; + } else if (dataStream.isIndexManagedByDataStreamLifecycle(index, metadata::index)) { + managedBy = ManagedBy.LIFECYCLE; + } else { + managedBy = ManagedBy.UNMANAGED; + } + backingIndicesSettingsValues.put(index, new IndexProperties(preferIlm, indexMetadata.getLifecyclePolicyName(), managedBy)); + } + } + static List<DataStream> getDataStreams( ClusterState clusterState, IndexNameExpressionResolver iner, diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStore.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStore.java index 47589fd7276f4..01ccbdbe3ffec 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStore.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStore.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.core.Nullable; -import java.util.List; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.function.LongSupplier; @@ -87,7 +87,7 @@ public ErrorEntry getError(String indexName) { /** * Return an immutable view (a snapshot) of the tracked indices at the moment this method is called.
*/ - public List<String> getAllIndices() { - return List.copyOf(indexNameToError.keySet()); + public Set<String> getAllIndices() { + return Set.copyOf(indexNameToError.keySet()); } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 03d1340c14dbb..9f9a90704167d 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -175,6 +175,13 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeab */ private volatile int signallingErrorRetryInterval; + /** + * The following stats track how the data stream lifecycle runs are performing time-wise. + */ + private volatile Long lastRunStartedAt = null; + private volatile Long lastRunDuration = null; + private volatile Long timeBetweenStarts = null; + private static final SimpleBatchedExecutor FORCE_MERGE_STATE_UPDATE_TASK_EXECUTOR = new SimpleBatchedExecutor<>() { @Override @@ -299,6 +306,11 @@ public void triggered(SchedulerEngine.Event event) { */ // default visibility for testing purposes void run(ClusterState state) { + long startTime = nowSupplier.getAsLong(); + if (lastRunStartedAt != null) { + timeBetweenStarts = startTime - lastRunStartedAt; + } + lastRunStartedAt = startTime; int affectedIndices = 0; int affectedDataStreams = 0; for (DataStream dataStream : state.metadata().dataStreams().values()) { @@ -396,8 +408,10 @@ void run(ClusterState state) { affectedIndices += indicesToExcludeForRemainingRun.size(); affectedDataStreams++; } + lastRunDuration = nowSupplier.getAsLong() - lastRunStartedAt; logger.trace( - "Data stream lifecycle service performed operations on [{}] indices, part of [{}] data streams", + "Data stream lifecycle service ran for {} and performed operations on [{}] indices, part of [{}] data streams", + TimeValue.timeValueMillis(lastRunDuration).toHumanReadableString(2), affectedIndices, affectedDataStreams ); @@ -1193,6 +1207,22 @@ static TimeValue getRetentionConfiguration(DataStream dataStream) { return dataStream.getLifecycle().getEffectiveDataRetention(); } + /** + * @return the duration of the last run in millis or null if the service hasn't completed a run yet. + */ + @Nullable + public Long getLastRunDuration() { + return lastRunDuration; + } + + /** + * @return the time passed between the start times of the last two consecutive runs or null if the service hasn't started twice yet. + */ + @Nullable + public Long getTimeBetweenStarts() { + return timeBetweenStarts; + } + /** * Action listener that records the encountered failure using the provided recordError callback for the * provided target index. If the listener is notified of success it will clear the recorded entry for the provided diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java new file mode 100644 index 0000000000000..c3444a67b847c --- /dev/null +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java @@ -0,0 +1,154 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V.
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.datastreams.lifecycle.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xcontent.ToXContent; + +import java.io.IOException; +import java.util.Iterator; +import java.util.List; +import java.util.Objects; + +/** + * This action retrieves the data stream lifecycle stats from the master node. + */ +public class GetDataStreamLifecycleStatsAction extends ActionType { + + public static final GetDataStreamLifecycleStatsAction INSTANCE = new GetDataStreamLifecycleStatsAction(); + public static final String NAME = "cluster:monitor/data_stream/lifecycle/stats"; + + private GetDataStreamLifecycleStatsAction() { + super(NAME, Response::new); + } + + public static class Request extends MasterNodeReadRequest { + + public Request(StreamInput in) throws IOException { + super(in); + } + + public Request() {} + + @Override + public ActionRequestValidationException validate() { + return null; + } + } + + public static class Response extends ActionResponse implements ChunkedToXContentObject { + + private final Long runDuration; + private final Long timeBetweenStarts; + private final List dataStreamStats; + + public Response(@Nullable Long runDuration, @Nullable Long timeBetweenStarts, List dataStreamStats) { + this.runDuration = runDuration; + this.timeBetweenStarts = timeBetweenStarts; + this.dataStreamStats = dataStreamStats; + } + + public Response(StreamInput in) throws IOException { + super(in); + this.runDuration = in.readOptionalVLong(); + this.timeBetweenStarts = in.readOptionalVLong(); + this.dataStreamStats = in.readCollectionAsImmutableList(DataStreamStats::read); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalVLong(runDuration); + out.writeOptionalVLong(timeBetweenStarts); + out.writeCollection(dataStreamStats, (o, v) -> v.writeTo(o)); + } + + public Long getRunDuration() { + return runDuration; + } + + public Long getTimeBetweenStarts() { + return timeBetweenStarts; + } + + public List getDataStreamStats() { + return dataStreamStats; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response other = (Response) o; + return Objects.equals(runDuration, other.runDuration) + && Objects.equals(timeBetweenStarts, other.timeBetweenStarts) + && Objects.equals(dataStreamStats, other.dataStreamStats); + } + + @Override + public int hashCode() { + return Objects.hash(runDuration, timeBetweenStarts, dataStreamStats); + } + + @Override + public Iterator toXContentChunked(ToXContent.Params outerParams) { + return Iterators.concat(Iterators.single((builder, params) -> { + 
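+ // The first chunk opens the response object, emits the optional timing fields and the data stream count, and starts the "data_streams" array; each data stream stat is then emitted as its own chunk, and a final chunk closes the array and the object.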
builder.startObject(); + if (runDuration != null) { + builder.field("last_run_duration_in_millis", runDuration); + if (builder.humanReadable()) { + builder.field("last_run_duration", TimeValue.timeValueMillis(runDuration).toHumanReadableString(2)); + } + } + if (timeBetweenStarts != null) { + builder.field("time_between_starts_in_millis", timeBetweenStarts); + if (builder.humanReadable()) { + builder.field("time_between_starts", TimeValue.timeValueMillis(timeBetweenStarts).toHumanReadableString(2)); + } + } + builder.field("data_stream_count", dataStreamStats.size()); + builder.startArray("data_streams"); + return builder; + }), Iterators.map(dataStreamStats.iterator(), stat -> (builder, params) -> { + builder.startObject(); + builder.field("name", stat.dataStreamName); + builder.field("backing_indices_in_total", stat.backingIndicesInTotal); + builder.field("backing_indices_in_error", stat.backingIndicesInError); + builder.endObject(); + return builder; + }), Iterators.single((builder, params) -> { + builder.endArray(); + builder.endObject(); + return builder; + })); + } + + public record DataStreamStats(String dataStreamName, int backingIndicesInTotal, int backingIndicesInError) implements Writeable { + + public static DataStreamStats read(StreamInput in) throws IOException { + return new DataStreamStats(in.readString(), in.readVInt(), in.readVInt()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(dataStreamName); + out.writeVInt(backingIndicesInTotal); + out.writeVInt(backingIndicesInError); + } + } + } +} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsAction.java new file mode 100644 index 0000000000000..03bc1d129eaba --- /dev/null +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsAction.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.datastreams.lifecycle.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService; +import org.elasticsearch.index.Index; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.Set; + +/** + * Exposes stats about the latest lifecycle run and the error store. + */ +public class TransportGetDataStreamLifecycleStatsAction extends TransportMasterNodeReadAction< + GetDataStreamLifecycleStatsAction.Request, + GetDataStreamLifecycleStatsAction.Response> { + + private final DataStreamLifecycleService lifecycleService; + + @Inject + public TransportGetDataStreamLifecycleStatsAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + DataStreamLifecycleService lifecycleService + ) { + super( + GetDataStreamLifecycleStatsAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + GetDataStreamLifecycleStatsAction.Request::new, + indexNameExpressionResolver, + GetDataStreamLifecycleStatsAction.Response::new, + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + this.lifecycleService = lifecycleService; + } + + @Override + protected void masterOperation( + Task task, + GetDataStreamLifecycleStatsAction.Request request, + ClusterState state, + ActionListener listener + ) throws Exception { + listener.onResponse(collectStats(state)); + } + + // Visible for testing + GetDataStreamLifecycleStatsAction.Response collectStats(ClusterState state) { + Metadata metadata = state.metadata(); + Set indicesInErrorStore = lifecycleService.getErrorStore().getAllIndices(); + List dataStreamStats = new ArrayList<>(); + for (DataStream dataStream : state.metadata().dataStreams().values()) { + if (dataStream.getLifecycle() != null && dataStream.getLifecycle().isEnabled()) { + int total = 0; + int inError = 0; + for (Index index : dataStream.getIndices()) { + if (dataStream.isIndexManagedByDataStreamLifecycle(index, metadata::index)) { + total++; + if (indicesInErrorStore.contains(index.getName())) { + inError++; + } + } + } + dataStreamStats.add(new GetDataStreamLifecycleStatsAction.Response.DataStreamStats(dataStream.getName(), total, inError)); + } + } + return new GetDataStreamLifecycleStatsAction.Response( + lifecycleService.getLastRunDuration(), + lifecycleService.getTimeBetweenStarts(), + dataStreamStats.isEmpty() + ? 
dataStreamStats + : dataStreamStats.stream() + .sorted(Comparator.comparing(GetDataStreamLifecycleStatsAction.Response.DataStreamStats::dataStreamName)) + .toList() + ); + } + + @Override + protected ClusterBlockException checkBlock(GetDataStreamLifecycleStatsAction.Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } +} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java new file mode 100644 index 0000000000000..2daff2a05940c --- /dev/null +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams.lifecycle.rest; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.datastreams.lifecycle.action.GetDataStreamLifecycleStatsAction; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; +import org.elasticsearch.rest.action.RestChunkedToXContentListener; + +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +@ServerlessScope(Scope.PUBLIC) +public class RestDataStreamLifecycleStatsAction extends BaseRestHandler { + + @Override + public String getName() { + return "data_stream_lifecycle_stats_action"; + } + + @Override + public List routes() { + return List.of(new Route(GET, "/_lifecycle/stats")); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + String masterNodeTimeout = restRequest.param("master_timeout"); + GetDataStreamLifecycleStatsAction.Request request = new GetDataStreamLifecycleStatsAction.Request(); + if (masterNodeTimeout != null) { + request.masterNodeTimeout(masterNodeTimeout); + } + return channel -> client.execute(GetDataStreamLifecycleStatsAction.INSTANCE, request, new RestChunkedToXContentListener<>(channel)); + } +} diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java index 23a86b657b82d..e622d16b5d4c9 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java @@ -315,7 +315,9 @@ public void testGetAdditionalIndexSettingsDataStreamAlreadyCreatedTimeSettingsMi ds.isSystem(), ds.isAllowCustomRouting(), IndexMode.TIME_SERIES, - ds.getLifecycle() + ds.getLifecycle(), + ds.isFailureStore(), + ds.getFailureIndices() ) ); Metadata metadata = mb.build(); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java 
b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java index da0caff9e591d..928512f659039 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java @@ -226,16 +226,11 @@ private String createDataStream(boolean hidden) throws Exception { Template idxTemplate = new Template(null, new CompressedXContent(""" {"properties":{"@timestamp":{"type":"date"},"data":{"type":"keyword"}}} """), null); - ComposableIndexTemplate template = new ComposableIndexTemplate( - List.of(dataStreamName + "*"), - idxTemplate, - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(hidden, false), - null - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStreamName + "*")) + .template(idxTemplate) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(hidden, false)) + .build(); assertAcked( client().execute( PutComposableIndexTemplateAction.INSTANCE, diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java index 4f36feba17c89..e7339cc3f334a 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java @@ -59,16 +59,13 @@ public void testRequireRoutingPath() throws Exception { // Missing routing path should fail validation var componentTemplate = new ComponentTemplate(new Template(null, new CompressedXContent("{}"), null), null, null); var state = service.addComponentTemplate(ClusterState.EMPTY_STATE, true, "1", componentTemplate); - var indexTemplate = new ComposableIndexTemplate( - Collections.singletonList("logs-*-*"), - new Template(builder().put("index.mode", "time_series").build(), null, null), - List.of("1"), - 100L, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ); + var indexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-*-*")) + .template(new Template(builder().put("index.mode", "time_series").build(), null, null)) + .componentTemplates(List.of("1")) + .priority(100L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); var e = expectThrows(InvalidIndexTemplateException.class, () -> service.addIndexTemplateV2(state, false, "1", indexTemplate)); assertThat(e.getMessage(), containsString("[index.mode=time_series] requires a non-empty [index.routing_path]")); } @@ -81,16 +78,13 @@ public void testRequireRoutingPath() throws Exception { null ); state = service.addComponentTemplate(state, true, "1", componentTemplate); - var indexTemplate = new ComposableIndexTemplate( - Collections.singletonList("logs-*-*"), - new Template(builder().put("index.mode", "time_series").build(), null, null), - List.of("1"), - 100L, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ); + var indexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-*-*")) + .template(new Template(builder().put("index.mode", "time_series").build(), null, null)) + .componentTemplates(List.of("1")) + .priority(100L) + .dataStreamTemplate(new 
ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); state = service.addIndexTemplateV2(state, false, "1", indexTemplate); assertThat(state.getMetadata().templatesV2().get("1"), equalTo(indexTemplate)); } @@ -103,46 +97,39 @@ public void testRequireRoutingPath() throws Exception { null ); state = service.addComponentTemplate(state, true, "1", componentTemplate); - var indexTemplate = new ComposableIndexTemplate( - Collections.singletonList("logs-*-*"), - new Template(null, null, null), - List.of("1"), - 100L, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ); + var indexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-*-*")) + .template(new Template(null, null, null)) + .componentTemplates(List.of("1")) + .priority(100L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); state = service.addIndexTemplateV2(state, false, "1", indexTemplate); assertThat(state.getMetadata().templatesV2().get("1"), equalTo(indexTemplate)); } { // Routing path defined in index template - var indexTemplate = new ComposableIndexTemplate( - Collections.singletonList("logs-*-*"), - new Template(builder().put("index.mode", "time_series").put("index.routing_path", "uid").build(), null, null), - List.of("1"), - 100L, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ); + var indexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-*-*")) + .template(new Template(builder().put("index.mode", "time_series").put("index.routing_path", "uid").build(), null, null)) + .componentTemplates(List.of("1")) + .priority(100L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); var state = service.addIndexTemplateV2(ClusterState.EMPTY_STATE, false, "1", indexTemplate); assertThat(state.getMetadata().templatesV2().get("1"), equalTo(indexTemplate)); } { // Routing fetched from mapping in index template - var indexTemplate = new ComposableIndexTemplate( - Collections.singletonList("logs-*-*"), - new Template(builder().put("index.mode", "time_series").build(), new CompressedXContent(generateTsdbMapping()), null), - List.of("1"), - 100L, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ); + var indexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-*-*")) + .template( + new Template(builder().put("index.mode", "time_series").build(), new CompressedXContent(generateTsdbMapping()), null) + ) + .componentTemplates(List.of("1")) + .priority(100L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); var state = service.addIndexTemplateV2(ClusterState.EMPTY_STATE, false, "1", indexTemplate); assertThat(state.getMetadata().templatesV2().get("1"), equalTo(indexTemplate)); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java index 803f5c8661f17..1a9287c1d5ee8 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java @@ -96,16 +96,11 @@ private void createTemplate(boolean tsdb) throws IOException { var templateSettings = 
Settings.builder().put("index.mode", tsdb ? "time_series" : "standard"); var request = new PutComposableIndexTemplateAction.Request("id"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("k8s*"), - new Template(templateSettings.build(), new CompressedXContent(mappingTemplate), null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("k8s*")) + .template(new Template(templateSettings.build(), new CompressedXContent(mappingTemplate), null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java index 989bebc68061d..c383991dba19c 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java @@ -151,7 +151,9 @@ public void testUpdateTimeSeriesTemporalRange_NoUpdateBecauseReplicated() { d.isSystem(), d.isAllowCustomRouting(), d.getIndexMode(), - d.getLifecycle() + d.getLifecycle(), + d.isFailureStore(), + d.getFailureIndices() ) ) .build(); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java index 12e1604d10c1f..5ebea62fc596a 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java @@ -33,6 +33,7 @@ import java.util.Map; import static org.elasticsearch.cluster.metadata.DataStream.getDefaultBackingIndexName; +import static org.elasticsearch.cluster.metadata.DataStream.getDefaultFailureStoreName; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -63,14 +64,16 @@ protected Response mutateInstance(Response instance) { @SuppressWarnings("unchecked") public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Exception { - // we'll test a data stream with 3 backing indices - two managed by ILM (having the ILM policy configured for them) - // and one without any ILM policy configured + // we'll test a data stream with 3 backing indices and a failure store - two backing indices managed by ILM (having the ILM policy + // configured for them) and the remainder without any ILM policy configured String dataStreamName = "logs"; Index firstGenerationIndex = new Index(getDefaultBackingIndexName(dataStreamName, 1), UUIDs.base64UUID()); Index secondGenerationIndex = new Index(getDefaultBackingIndexName(dataStreamName, 2), UUIDs.base64UUID()); Index writeIndex = new Index(getDefaultBackingIndexName(dataStreamName, 3), UUIDs.base64UUID()); + Index failureStoreIndex = new Index(getDefaultFailureStoreName(dataStreamName, 1, System.currentTimeMillis()), UUIDs.base64UUID()); List indices = List.of(firstGenerationIndex, secondGenerationIndex, writeIndex); + List failureStores = List.of(failureStoreIndex); { // data stream has an enabled lifecycle DataStream logs = new DataStream( @@ -83,7 
+86,9 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti false, true, IndexMode.STANDARD, - new DataStreamLifecycle() + new DataStreamLifecycle(), + true, + failureStores ); String ilmPolicyName = "rollover-30days"; @@ -93,6 +98,8 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti secondGenerationIndex, new Response.IndexProperties(false, ilmPolicyName, ManagedBy.LIFECYCLE), writeIndex, + new Response.IndexProperties(false, null, ManagedBy.LIFECYCLE), + failureStoreIndex, new Response.IndexProperties(false, null, ManagedBy.LIFECYCLE) ); @@ -156,6 +163,18 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti writeIndexRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()), is(ManagedBy.LIFECYCLE.displayValue) ); + + List failureStoresRepresentation = (List) dataStreamMap.get( + DataStream.FAILURE_INDICES_FIELD.getPreferredName() + ); + Map failureStoreRepresentation = (Map) failureStoresRepresentation.get(0); + assertThat(failureStoreRepresentation.get("index_name"), is(failureStoreIndex.getName())); + assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(false)); + assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()), is(nullValue())); + assertThat( + failureStoreRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()), + is(ManagedBy.LIFECYCLE.displayValue) + ); } } @@ -171,7 +190,9 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti false, true, IndexMode.STANDARD, - new DataStreamLifecycle(null, null, false) + new DataStreamLifecycle(null, null, false), + true, + failureStores ); String ilmPolicyName = "rollover-30days"; @@ -181,6 +202,8 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti secondGenerationIndex, new Response.IndexProperties(true, ilmPolicyName, ManagedBy.ILM), writeIndex, + new Response.IndexProperties(false, null, ManagedBy.UNMANAGED), + failureStoreIndex, new Response.IndexProperties(false, null, ManagedBy.UNMANAGED) ); @@ -233,6 +256,18 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti writeIndexRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()), is(ManagedBy.UNMANAGED.displayValue) ); + + List failureStoresRepresentation = (List) dataStreamMap.get( + DataStream.FAILURE_INDICES_FIELD.getPreferredName() + ); + Map failureStoreRepresentation = (Map) failureStoresRepresentation.get(0); + assertThat(failureStoreRepresentation.get("index_name"), is(failureStoreIndex.getName())); + assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(false)); + assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()), is(nullValue())); + assertThat( + failureStoreRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()), + is(ManagedBy.UNMANAGED.displayValue) + ); } } } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStoreTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStoreTests.java index c1255cc9e3a72..9f1928374eb5f 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStoreTests.java +++ 
b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStoreTests.java @@ -12,12 +12,13 @@ import org.elasticsearch.test.ESTestCase; import org.junit.Before; -import java.util.List; +import java.util.Set; import java.util.stream.Stream; import static org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore.MAX_ERROR_MESSAGE_LENGTH; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -36,7 +37,7 @@ public void testRecordAndRetrieveError() { assertThat(existingRecordedError, is(nullValue())); assertThat(errorStore.getError("test"), is(notNullValue())); assertThat(errorStore.getAllIndices().size(), is(1)); - assertThat(errorStore.getAllIndices().get(0), is("test")); + assertThat(errorStore.getAllIndices(), hasItem("test")); existingRecordedError = errorStore.recordError("test", new IllegalStateException("bad state")); assertThat(existingRecordedError, is(notNullValue())); @@ -51,7 +52,7 @@ public void testRetrieveAfterClear() { public void testGetAllIndicesIsASnapshotViewOfTheStore() { Stream.iterate(0, i -> i + 1).limit(5).forEach(i -> errorStore.recordError("test" + i, new NullPointerException("testing"))); - List initialAllIndices = errorStore.getAllIndices(); + Set initialAllIndices = errorStore.getAllIndices(); assertThat(initialAllIndices.size(), is(5)); assertThat( initialAllIndices, diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleFixtures.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleFixtures.java index 5a15e831f5ad6..6833f2222b585 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleFixtures.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleFixtures.java @@ -83,16 +83,12 @@ static void putComposableIndexTemplate( ) throws IOException { PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); request.indexTemplate( - new ComposableIndexTemplate( - patterns, - new Template(settings, mappings == null ? null : CompressedXContent.fromJSON(mappings), null, lifecycle), - null, - null, - null, - metadata, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(patterns) + .template(new Template(settings, mappings == null ? 
null : CompressedXContent.fromJSON(mappings), null, lifecycle)) + .metadata(metadata) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); assertTrue(client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet().isAcknowledged()); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index 0ee168d130986..2445e6b0d72df 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -94,6 +94,7 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.cluster.metadata.IndexMetadata.APIBlock.WRITE; @@ -119,6 +120,7 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; public class DataStreamLifecycleServiceTests extends ESTestCase { @@ -280,7 +282,9 @@ public void testRetentionNotExecutedForTSIndicesWithinTimeBounds() { dataStream.isSystem(), dataStream.isAllowCustomRouting(), dataStream.getIndexMode(), - DataStreamLifecycle.newBuilder().dataRetention(0L).build() + DataStreamLifecycle.newBuilder().dataRetention(0L).build(), + dataStream.isFailureStore(), + dataStream.getFailureIndices() ) ); clusterState = ClusterState.builder(clusterState).metadata(builder).build(); @@ -1376,6 +1380,31 @@ public void testTimeSeriesIndicesStillWithinTimeBounds() { } } + public void testTrackingTimeStats() { + AtomicLong now = new AtomicLong(0); + long delta = randomLongBetween(10, 10000); + DataStreamLifecycleService service = new DataStreamLifecycleService( + Settings.EMPTY, + getTransportRequestsRecordingClient(), + clusterService, + Clock.systemUTC(), + threadPool, + () -> now.getAndAdd(delta), + new DataStreamLifecycleErrorStore(() -> Clock.systemUTC().millis()), + mock(AllocationService.class) + ); + assertThat(service.getLastRunDuration(), is(nullValue())); + assertThat(service.getTimeBetweenStarts(), is(nullValue())); + + service.run(ClusterState.EMPTY_STATE); + assertThat(service.getLastRunDuration(), is(delta)); + assertThat(service.getTimeBetweenStarts(), is(nullValue())); + + service.run(ClusterState.EMPTY_STATE); + assertThat(service.getLastRunDuration(), is(delta)); + assertThat(service.getTimeBetweenStarts(), is(2 * delta)); + } + /* * Creates a test cluster state with the given indexName. If customDataStreamLifecycleMetadata is not null, it is added as the value * of the index's custom metadata named "data_stream_lifecycle". 
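As a quick illustration of the stats plumbing above, here is a minimal usage sketch of the new endpoint; the LifecycleStatsExample class, the printLifecycleStats helper, and the pre-configured restClient are illustrative assumptions, not part of the change:

import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

import java.io.IOException;

final class LifecycleStatsExample {
    // Fetch and print the stats document served by RestDataStreamLifecycleStatsAction.
    // Passing human=true makes the response include the human-readable last_run_duration
    // and time_between_starts fields next to their *_in_millis counterparts.
    static void printLifecycleStats(RestClient restClient) throws IOException {
        Request request = new Request("GET", "/_lifecycle/stats");
        request.addParameter("human", "true");
        Response response = restClient.performRequest(request);
        System.out.println(EntityUtils.toString(response.getEntity()));
    }
}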
diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/DataStreamLifecycleStatsResponseTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/DataStreamLifecycleStatsResponseTests.java new file mode 100644 index 0000000000000..111d1b61da8c9 --- /dev/null +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/DataStreamLifecycleStatsResponseTests.java @@ -0,0 +1,166 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams.lifecycle.action; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.IntStream; + +import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class DataStreamLifecycleStatsResponseTests extends AbstractWireSerializingTestCase<GetDataStreamLifecycleStatsAction.Response> { + + @Override + protected GetDataStreamLifecycleStatsAction.Response createTestInstance() { + boolean hasRun = usually(); + var runDuration = hasRun ? randomLongBetween(10, 100000000) : null; + var timeBetweenStarts = hasRun && usually() ? randomLongBetween(10, 100000000) : null; + var dataStreams = IntStream.range(0, randomInt(10)) + .mapToObj( + ignored -> new GetDataStreamLifecycleStatsAction.Response.DataStreamStats( + randomAlphaOfLength(10), + randomIntBetween(1, 1000), + randomIntBetween(0, 100) + ) + ) + .toList(); + return new GetDataStreamLifecycleStatsAction.Response(runDuration, timeBetweenStarts, dataStreams); + } + + @Override + protected GetDataStreamLifecycleStatsAction.Response mutateInstance(GetDataStreamLifecycleStatsAction.Response instance) { + var runDuration = instance.getRunDuration(); + var timeBetweenStarts = instance.getTimeBetweenStarts(); + var dataStreams = instance.getDataStreamStats(); + switch (randomInt(2)) { + case 0 -> runDuration = runDuration != null && randomBoolean() + ? null + : randomValueOtherThan(runDuration, () -> randomLongBetween(10, 100000000)); + case 1 -> timeBetweenStarts = timeBetweenStarts != null && randomBoolean() + ?
null + : randomValueOtherThan(timeBetweenStarts, () -> randomLongBetween(10, 100000000)); + default -> dataStreams = mutateDataStreamStats(dataStreams); + } + return new GetDataStreamLifecycleStatsAction.Response(runDuration, timeBetweenStarts, dataStreams); + } + + private List<GetDataStreamLifecycleStatsAction.Response.DataStreamStats> mutateDataStreamStats( + List<GetDataStreamLifecycleStatsAction.Response.DataStreamStats> dataStreamStats + ) { + // change the stats of a data stream + List<GetDataStreamLifecycleStatsAction.Response.DataStreamStats> mutated = new ArrayList<>(dataStreamStats); + if (randomBoolean() && dataStreamStats.isEmpty() == false) { + int i = randomInt(dataStreamStats.size() - 1); + GetDataStreamLifecycleStatsAction.Response.DataStreamStats instance = dataStreamStats.get(i); + mutated.set(i, switch (randomInt(2)) { + case 0 -> new GetDataStreamLifecycleStatsAction.Response.DataStreamStats( + instance.dataStreamName() + randomAlphaOfLength(2), + instance.backingIndicesInTotal(), + instance.backingIndicesInError() + ); + case 1 -> new GetDataStreamLifecycleStatsAction.Response.DataStreamStats( + instance.dataStreamName(), + instance.backingIndicesInTotal() + randomIntBetween(1, 10), + instance.backingIndicesInError() + ); + default -> new GetDataStreamLifecycleStatsAction.Response.DataStreamStats( + instance.dataStreamName(), + instance.backingIndicesInTotal(), + instance.backingIndicesInError() + randomIntBetween(1, 10) + ); + + }); + } else if (dataStreamStats.isEmpty() || randomBoolean()) { + mutated.add( + new GetDataStreamLifecycleStatsAction.Response.DataStreamStats( + randomAlphaOfLength(10), + randomIntBetween(1, 1000), + randomIntBetween(0, 100) + ) + ); + } else { + mutated.remove(randomInt(dataStreamStats.size() - 1)); + } + return mutated; + } + + @SuppressWarnings("unchecked") + public void testXContentSerialization() throws IOException { + GetDataStreamLifecycleStatsAction.Response testInstance = createTestInstance(); + try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + builder.humanReadable(true); + testInstance.toXContentChunked(ToXContent.EMPTY_PARAMS).forEachRemaining(xcontent -> { + try { + xcontent.toXContent(builder, EMPTY_PARAMS); + } catch (IOException e) { + logger.error(e.getMessage(), e); + fail(e.getMessage()); + } + }); + Map<String, Object> xContentMap = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); + if (testInstance.getRunDuration() == null) { + assertThat(xContentMap.get("last_run_duration_in_millis"), nullValue()); + assertThat(xContentMap.get("last_run_duration"), nullValue()); + } else { + assertThat(xContentMap.get("last_run_duration_in_millis"), is(testInstance.getRunDuration().intValue())); + assertThat( + xContentMap.get("last_run_duration"), + is(TimeValue.timeValueMillis(testInstance.getRunDuration()).toHumanReadableString(2)) + ); + } + + if (testInstance.getTimeBetweenStarts() == null) { + assertThat(xContentMap.get("time_between_starts_in_millis"), nullValue()); + assertThat(xContentMap.get("time_between_starts"), nullValue()); + } else { + assertThat(xContentMap.get("time_between_starts_in_millis"), is(testInstance.getTimeBetweenStarts().intValue())); + assertThat( + xContentMap.get("time_between_starts"), + is(TimeValue.timeValueMillis(testInstance.getTimeBetweenStarts()).toHumanReadableString(2)) + ); + } + assertThat(xContentMap.get("data_stream_count"), is(testInstance.getDataStreamStats().size())); + List<Map<String, Object>> dataStreams = (List<Map<String, Object>>) xContentMap.get("data_streams"); + if (testInstance.getDataStreamStats().isEmpty()) { + assertThat(dataStreams.isEmpty(), is(true)); + } else { + assertThat(dataStreams.size(),
is(testInstance.getDataStreamStats().size())); + for (int i = 0; i < dataStreams.size(); i++) { + assertThat(dataStreams.get(i).get("name"), is(testInstance.getDataStreamStats().get(i).dataStreamName())); + assertThat( + dataStreams.get(i).get("backing_indices_in_total"), + is(testInstance.getDataStreamStats().get(i).backingIndicesInTotal()) + ); + assertThat( + dataStreams.get(i).get("backing_indices_in_error"), + is(testInstance.getDataStreamStats().get(i).backingIndicesInError()) + ); + } + } + } + } + + @Override + protected Writeable.Reader<GetDataStreamLifecycleStatsAction.Response> instanceReader() { + return GetDataStreamLifecycleStatsAction.Response::new; + } +} diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsActionTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsActionTests.java new file mode 100644 index 0000000000000..8c423107ea2f4 --- /dev/null +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsActionTests.java @@ -0,0 +1,153 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams.lifecycle.action; + +import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; +import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore; +import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.junit.Before; + +import java.time.Clock; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.newInstance; +import static org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleFixtures.createDataStream; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TransportGetDataStreamLifecycleStatsActionTests extends ESTestCase { + + private final DataStreamLifecycleService dataStreamLifecycleService = mock(DataStreamLifecycleService.class); + private final DataStreamLifecycleErrorStore errorStore = mock(DataStreamLifecycleErrorStore.class); + private final TransportGetDataStreamLifecycleStatsAction action = new
TransportGetDataStreamLifecycleStatsAction( + mock(TransportService.class), + mock(ClusterService.class), + mock(ThreadPool.class), + mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), + dataStreamLifecycleService + ); + private Long lastRunDuration; + private Long timeBetweenStarts; + + @Before + public void setUp() throws Exception { + super.setUp(); + lastRunDuration = randomBoolean() ? randomLongBetween(0, 100000) : null; + timeBetweenStarts = randomBoolean() ? randomLongBetween(0, 100000) : null; + when(dataStreamLifecycleService.getLastRunDuration()).thenReturn(lastRunDuration); + when(dataStreamLifecycleService.getTimeBetweenStarts()).thenReturn(timeBetweenStarts); + when(dataStreamLifecycleService.getErrorStore()).thenReturn(errorStore); + when(errorStore.getAllIndices()).thenReturn(Set.of()); + } + + public void testEmptyClusterState() { + GetDataStreamLifecycleStatsAction.Response response = action.collectStats(ClusterState.EMPTY_STATE); + assertThat(response.getRunDuration(), is(lastRunDuration)); + assertThat(response.getTimeBetweenStarts(), is(timeBetweenStarts)); + assertThat(response.getDataStreamStats().isEmpty(), is(true)); + } + + public void testMixedDataStreams() { + Set<String> indicesInError = new HashSet<>(); + int numBackingIndices = 3; + Metadata.Builder builder = Metadata.builder(); + DataStream ilmDataStream = createDataStream( + builder, + "ilm-managed-index", + numBackingIndices, + Settings.builder() + .put(IndexMetadata.LIFECYCLE_NAME, "ILM_policy") + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()), + null, + Clock.systemUTC().millis() + ); + builder.put(ilmDataStream); + DataStream dslDataStream = createDataStream( + builder, + "dsl-managed-index", + numBackingIndices, + settings(IndexVersion.current()), + DataStreamLifecycle.newBuilder().dataRetention(TimeValue.timeValueDays(10)).build(), + Clock.systemUTC().millis() + ); + indicesInError.add(dslDataStream.getIndices().get(randomInt(numBackingIndices - 1)).getName()); + builder.put(dslDataStream); + { + String dataStreamName = "mixed"; + final List<Index> backingIndices = new ArrayList<>(); + for (int k = 1; k <= 2; k++) { + IndexMetadata.Builder indexMetaBuilder = IndexMetadata.builder(DataStream.getDefaultBackingIndexName(dataStreamName, k)) + .settings( + Settings.builder() + .put(IndexMetadata.LIFECYCLE_NAME, "ILM_policy") + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + ) + .numberOfShards(1) + .numberOfReplicas(1) + .creationDate(Clock.systemUTC().millis()); + + IndexMetadata indexMetadata = indexMetaBuilder.build(); + builder.put(indexMetadata, false); + backingIndices.add(indexMetadata.getIndex()); + } + // DSL managed write index + IndexMetadata.Builder indexMetaBuilder = IndexMetadata.builder(DataStream.getDefaultBackingIndexName(dataStreamName, 3)) + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())) + .numberOfShards(1) + .numberOfReplicas(1) + .creationDate(Clock.systemUTC().millis()); + MaxAgeCondition rolloverCondition = new MaxAgeCondition(TimeValue.timeValueMillis(Clock.systemUTC().millis() - 2000L)); + indexMetaBuilder.putRolloverInfo( + new RolloverInfo(dataStreamName, List.of(rolloverCondition), Clock.systemUTC().millis() - 2000L) + ); + IndexMetadata indexMetadata = indexMetaBuilder.build(); + builder.put(indexMetadata, false); + backingIndices.add(indexMetadata.getIndex()); + builder.put(newInstance(dataStreamName, backingIndices, 3, null, false, DataStreamLifecycle.newBuilder().build())); + }
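// Note (editorial sketch of the expected accounting, inferred from the assertions below and
// not part of the original test): only data streams with a lifecycle configured produce a
// stats row, so "ilm-managed-index" contributes none, and "mixed" reports 1 of its 3 backing
// indices because only the DSL-managed write index is counted while the two ILM-managed
// indices are skipped.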
ClusterState state = ClusterState.builder(ClusterName.DEFAULT).metadata(builder).build(); + when(errorStore.getAllIndices()).thenReturn(indicesInError); + GetDataStreamLifecycleStatsAction.Response response = action.collectStats(state); + assertThat(response.getRunDuration(), is(lastRunDuration)); + assertThat(response.getTimeBetweenStarts(), is(timeBetweenStarts)); + assertThat(response.getDataStreamStats().size(), is(2)); + for (GetDataStreamLifecycleStatsAction.Response.DataStreamStats stats : response.getDataStreamStats()) { + if (stats.dataStreamName().equals("dsl-managed-index")) { + assertThat(stats.backingIndicesInTotal(), is(3)); + assertThat(stats.backingIndicesInError(), is(1)); + } + if (stats.dataStreamName().equals("mixed")) { + assertThat(stats.backingIndicesInTotal(), is(1)); + assertThat(stats.backingIndicesInError(), is(0)); + } + } + } +} diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml index b420e8421bfba..6496930764ab8 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml @@ -206,6 +206,103 @@ setup: - do: indices.delete_index_template: name: my-template3 + +--- +"Create data stream with failure store": + - skip: + version: " - 8.10.99" + reason: "data stream failure stores only creatable in 8.11+" + + - do: + allowed_warnings: + - "index template [my-template4] has index patterns [failure-data-stream1, failure-data-stream2] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template4] will take precedence during new index creation" + indices.put_index_template: + name: my-template4 + body: + index_patterns: [ failure-data-stream1, failure-data-stream2 ] + data_stream: + failure_store: true + + - do: + indices.create_data_stream: + name: failure-data-stream1 + - is_true: acknowledged + + - do: + indices.create_data_stream: + name: failure-data-stream2 + - is_true: acknowledged + + - do: + cluster.health: + wait_for_status: green + + - do: + indices.get_data_stream: + name: "*" + - match: { data_streams.0.name: failure-data-stream1 } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + - match: { data_streams.0.generation: 1 } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-failure-data-stream1-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.status: 'GREEN' } + - match: { data_streams.0.template: 'my-template4' } + - match: { data_streams.0.hidden: false } + - match: { data_streams.0.failure_store: true } + - length: { data_streams.0.failure_indices: 1 } + - match: { data_streams.0.failure_indices.0.index_name: '/\.fs-failure-data-stream1-(\d{4}\.\d{2}\.\d{2}-)?000001/'} + + - match: { data_streams.1.name: failure-data-stream2 } + - match: { data_streams.1.timestamp_field.name: '@timestamp' } + - match: { data_streams.1.generation: 1 } + - length: { data_streams.1.indices: 1 } + - match: { data_streams.1.indices.0.index_name: '/\.ds-failure-data-stream2-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.1.template: 'my-template4' } + - match: { data_streams.1.hidden: false } + - match: { data_streams.1.failure_store: true } + - length: { data_streams.1.failure_indices: 1 } + - match: { data_streams.1.failure_indices.0.index_name: 
'/\.fs-failure-data-stream2-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + + # save the backing index names for later use + - set: { data_streams.0.indices.0.index_name: idx0name } + - set: { data_streams.0.failure_indices.0.index_name: fsidx0name } + - set: { data_streams.1.indices.0.index_name: idx1name } + - set: { data_streams.1.failure_indices.0.index_name: fsidx1name } + + - do: + indices.get_mapping: + index: $idx0name + expand_wildcards: hidden + - match: { .$idx0name.mappings.properties.@timestamp.type: 'date' } + + - do: + indices.get_mapping: + index: $fsidx0name + expand_wildcards: hidden + - match: { .$fsidx0name.mappings.properties.@timestamp.type: 'date' } + + - do: + indices.get_mapping: + index: $idx1name + expand_wildcards: hidden + - match: { .$idx1name.mappings.properties.@timestamp.type: 'date' } + + - do: + indices.get_mapping: + index: $fsidx1name + expand_wildcards: hidden + - match: { .$fsidx1name.mappings.properties.@timestamp.type: 'date' } + + - do: + indices.delete_data_stream: + name: failure-data-stream1 + - is_true: acknowledged + + - do: + indices.delete_data_stream: + name: failure-data-stream2 + - is_true: acknowledged + --- "Create data stream with invalid name": - skip: diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml index 2a6beb4330e68..303a584555f8f 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml @@ -46,3 +46,56 @@ indices.delete_data_stream: name: logs-foobar - is_true: acknowledged + +--- +"Put index template with failure store": + - skip: + version: " - 8.10.99" + reason: "data stream failure stores only creatable in 8.11+" + features: allowed_warnings + + - do: + allowed_warnings: + - "index template [generic_logs_template] has index patterns [logs-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [generic_logs_template] will take precedence during new index creation" + indices.put_index_template: + name: generic_logs_template + body: + index_patterns: logs-* + data_stream: + failure_store: true + template: + settings: + number_of_shards: 1 + number_of_replicas: 1 + + - do: + index: + index: logs-foobar + refresh: true + body: + '@timestamp': '2020-12-12' + foo: bar + + - do: + search: + index: logs-foobar + body: { query: { match_all: {} } } + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: "/\\.ds-logs-foobar-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000001/" } + - match: { hits.hits.0._source.foo: 'bar' } + + - do: + indices.get_data_stream: + name: logs-foobar + - match: { data_streams.0.name: logs-foobar } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store: true } + - length: { data_streams.0.failure_indices: 1 } + - match: { data_streams.0.failure_indices.0.index_name: '/\.fs-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + + - do: + indices.delete_data_stream: + name: logs-foobar + - is_true: acknowledged diff --git a/modules/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java 
b/modules/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java index 362c0c1887261..48cb155ac2970 100644 --- a/modules/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java +++ b/modules/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java @@ -14,10 +14,10 @@ import org.apache.tika.metadata.Office; import org.apache.tika.metadata.TikaCoreProperties; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Processor; @@ -226,15 +226,6 @@ public static final class Factory implements Processor.Factory { static final Set<Property> DEFAULT_PROPERTIES = EnumSet.allOf(Property.class); - static { - if (Version.CURRENT.major >= 9) { - throw new IllegalStateException( - "[poison pill] update the [remove_binary] default to be 'true' assuming " - + "enough time has passed. Deprecated in September 2022." - ); - } - } - @Override public AttachmentProcessor create( Map<String, Processor.Factory> registry, @@ -249,6 +240,7 @@ public AttachmentProcessor create( int indexedChars = readIntProperty(TYPE, processorTag, config, "indexed_chars", NUMBER_OF_CHARS_INDEXED); boolean ignoreMissing = readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); String indexedCharsField = readOptionalStringProperty(TYPE, processorTag, config, "indexed_chars_field"); + @UpdateForV9 // update the [remove_binary] default to be 'true' assuming enough time has passed. Deprecated in September 2022.
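// Editorial note, not part of the patch: the static block removed above was a load-time
// "poison pill" that would have thrown once Version.CURRENT.major reached 9, forcing someone
// to flip the [remove_binary] default during the v9 bump. The @UpdateForV9 marker annotation
// carries the same reminder, to be audited before the 9.0 release, without the runtime check,
// which is also why the org.elasticsearch.Version import could be dropped.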
Boolean removeBinary = readOptionalBooleanProperty(TYPE, processorTag, config, "remove_binary"); if (removeBinary == null) { DEPRECATION_LOGGER.warn( diff --git a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java index 5709fbd9d8bfc..0ff34cf687500 100644 --- a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java +++ b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java @@ -88,8 +88,7 @@ public void testFailureInConditionalProcessor() { Exception e = expectThrows( Exception.class, - () -> client().prepareIndex("index") - .setId("1") + () -> prepareIndex("index").setId("1") .setSource("x", 0) .setPipeline(pipelineId) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) @@ -144,8 +143,7 @@ public Settings onNodeStopped(String nodeName) { checkPipelineExists.accept(pipelineIdWithoutScript); checkPipelineExists.accept(pipelineIdWithScript); - client().prepareIndex("index") - .setId("1") + prepareIndex("index").setId("1") .setSource("x", 0) .setPipeline(pipelineIdWithoutScript) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) @@ -153,8 +151,7 @@ public Settings onNodeStopped(String nodeName) { IllegalStateException exception = expectThrows( IllegalStateException.class, - () -> client().prepareIndex("index") - .setId("2") + () -> prepareIndex("index").setId("2") .setSource("x", 0) .setPipeline(pipelineIdWithScript) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) @@ -194,12 +191,7 @@ public void testPipelineWithScriptProcessorThatHasStoredScript() throws Exceptio }"""); clusterAdmin().preparePutPipeline("_id", pipeline, XContentType.JSON).get(); - client().prepareIndex("index") - .setId("1") - .setSource("x", 0) - .setPipeline("_id") - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get(); + prepareIndex("index").setId("1").setSource("x", 0).setPipeline("_id").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); Map<String, Object> source = client().prepareGet("index", "1").get().getSource(); assertThat(source.get("x"), equalTo(0)); @@ -213,12 +205,7 @@ public void testPipelineWithScriptProcessorThatHasStoredScript() throws Exceptio internalCluster().fullRestart(); ensureYellow("index"); - client().prepareIndex("index") - .setId("2") - .setSource("x", 0) - .setPipeline("_id") - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get(); + prepareIndex("index").setId("2").setSource("x", 0).setPipeline("_id").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); source = client().prepareGet("index", "2").get().getSource(); assertThat(source.get("x"), equalTo(0)); @@ -238,12 +225,7 @@ public void testWithDedicatedIngestNode() throws Exception { }"""); clusterAdmin().preparePutPipeline("_id", pipeline, XContentType.JSON).get(); - client().prepareIndex("index") - .setId("1") - .setSource("x", 0) - .setPipeline("_id") - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get(); + prepareIndex("index").setId("1").setSource("x", 0).setPipeline("_id").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); Map<String, Object> source = client().prepareGet("index", "1").get().getSource(); assertThat(source.get("x"), equalTo(0)); @@ -304,8 +286,7 @@ public boolean validateClusterForming() { assertThat( expectThrows( ClusterBlockException.class, - () -> client().prepareIndex("index") - .setId("fails") + () ->
prepareIndex("index").setId("fails") .setSource("x", 1) .setTimeout(TimeValue.timeValueMillis(100)) // 100ms, to fail quickly .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) @@ -316,8 +297,7 @@ public boolean validateClusterForming() { // but this one should pass since it has a longer timeout final PlainActionFuture future = new PlainActionFuture<>(); - client().prepareIndex("index") - .setId("passes1") + prepareIndex("index").setId("passes1") .setSource("x", 2) .setTimeout(TimeValue.timeValueSeconds(60)) // wait for second node to start in below .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) @@ -331,7 +311,7 @@ public boolean validateClusterForming() { assertThat(indexResponse.status(), equalTo(RestStatus.CREATED)); assertThat(indexResponse.getResult(), equalTo(DocWriteResponse.Result.CREATED)); - client().prepareIndex("index").setId("passes2").setSource("x", 3).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + prepareIndex("index").setId("passes2").setSource("x", 3).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); // successfully indexed documents should have the value field set by the pipeline Map source = client().prepareGet("index", "passes1").get(timeout).getSource(); diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml index 191b92806b6ce..e2f4e32777a1f 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml @@ -23,6 +23,9 @@ teardown: --- "Test first matching router terminates pipeline": + - skip: + version: all + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/102144" - do: ingest.put_pipeline: id: "pipeline-with-two-data-stream-processors" diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java index 76c0e6e494a74..3e04f7bfea2de 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java @@ -215,7 +215,7 @@ protected void updateTimestamp(String name, Metadata old) { } void updateTaskState() { - PlainActionFuture> future = PlainActionFuture.newFuture(); + PlainActionFuture> future = new PlainActionFuture<>(); updatePersistentTaskState(state, future); state = ((GeoIpTaskState) future.actionGet().getState()); } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java index 8534749cace61..1f170e0f796ff 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java @@ -12,7 +12,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; @@ -197,8 +196,8 
@@ public void clusterChanged(ClusterChangedEvent event) { } DiscoveryNode masterNode = event.state().nodes().getMasterNode(); - if (masterNode == null || masterNode.getVersion().before(Version.V_7_14_0)) { - // wait for master to be upgraded so it understands geoip task + if (masterNode == null) { + // no master yet return; } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java index 26ddbaa7ba854..30ecc96a3171c 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java @@ -9,7 +9,6 @@ package org.elasticsearch.ingest.geoip; import org.apache.lucene.util.SetOnce; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.client.internal.Client; @@ -65,6 +64,14 @@ public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, SystemIndexPlugin, Closeable, PersistentTaskPlugin, ActionPlugin { public static final Setting<Long> CACHE_SIZE = Setting.longSetting("ingest.geoip.cache_size", 1000, 0, Setting.Property.NodeScope); private static final int GEOIP_INDEX_MAPPINGS_VERSION = 1; + /** + * No longer used for determining the age of mappings, but system index descriptor + * code requires something be set. We use a value that can be parsed by + * old nodes in mixed-version clusters, just in case any old code exists that + * tries to parse version from index metadata, and that will indicate + * to these old nodes that the mappings are newer than they are. + */ + private static final String LEGACY_VERSION_FIELD_VALUE = "8.12.0"; private final SetOnce<IngestService> ingestService = new SetOnce<>(); private final SetOnce<DatabaseNodeService> databaseRegistry = new SetOnce<>(); @@ -204,7 +211,7 @@ private static XContentBuilder mappings() { return jsonBuilder().startObject() .startObject(SINGLE_MAPPING_NAME) .startObject("_meta") - .field("version", Version.CURRENT) + .field("version", LEGACY_VERSION_FIELD_VALUE) .field(SystemIndexDescriptor.VERSION_META_KEY, GEOIP_INDEX_MAPPINGS_VERSION) .endObject() .field("dynamic", "strict") diff --git a/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java index 23e5fcd312dcc..69e33863b0f2b 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java @@ -78,7 +78,7 @@ private SearchRequestBuilder buildRequest(String script, Object...
params) { public void testBasic() throws Exception { createIndex("test"); ensureGreen("test"); - client().prepareIndex("test").setId("1").setSource("foo", 4).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("foo", 4).setRefreshPolicy(IMMEDIATE).get(); assertResponse(buildRequest("doc['foo'] + 1"), rsp -> { assertEquals(1, rsp.getHits().getTotalHits().value); assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); @@ -88,7 +88,7 @@ public void testBasic() throws Exception { public void testFunction() throws Exception { createIndex("test"); ensureGreen("test"); - client().prepareIndex("test").setId("1").setSource("foo", 4).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("foo", 4).setRefreshPolicy(IMMEDIATE).get(); assertNoFailuresAndResponse(buildRequest("doc['foo'] + abs(1)"), rsp -> { assertEquals(1, rsp.getHits().getTotalHits().value); assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); @@ -98,7 +98,8 @@ public void testFunction() throws Exception { public void testBasicUsingDotValue() throws Exception { createIndex("test"); ensureGreen("test"); - client().prepareIndex("test").setId("1").setSource("foo", 4).setRefreshPolicy(IMMEDIATE).get(); + + prepareIndex("test").setId("1").setSource("foo", 4).setRefreshPolicy(IMMEDIATE).get(); assertResponse(buildRequest("doc['foo'].value + 1"), rsp -> { assertEquals(1, rsp.getHits().getTotalHits().value); assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); @@ -110,9 +111,9 @@ public void testScore() throws Exception { ensureGreen("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("text", "hello goodbye"), - client().prepareIndex("test").setId("2").setSource("text", "hello hello hello goodbye"), - client().prepareIndex("test").setId("3").setSource("text", "hello hello goodebye") + prepareIndex("test").setId("1").setSource("text", "hello goodbye"), + prepareIndex("test").setId("2").setSource("text", "hello hello hello goodbye"), + prepareIndex("test").setId("3").setSource("text", "hello hello goodebye") ); ScriptScoreFunctionBuilder score = ScoreFunctionBuilders.scriptFunction( new Script(ScriptType.INLINE, "expression", "1 / _score", Collections.emptyMap()) @@ -142,8 +143,8 @@ public void testDateMethods() throws Exception { ensureGreen("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("id", 1, "date0", "2015-04-28T04:02:07Z", "date1", "1985-09-01T23:11:01Z"), - client().prepareIndex("test").setId("2").setSource("id", 2, "date0", "2013-12-25T11:56:45Z", "date1", "1983-10-13T23:15:00Z") + prepareIndex("test").setId("1").setSource("id", 1, "date0", "2015-04-28T04:02:07Z", "date1", "1985-09-01T23:11:01Z"), + prepareIndex("test").setId("2").setSource("id", 2, "date0", "2013-12-25T11:56:45Z", "date1", "1983-10-13T23:15:00Z") ); assertResponse(buildRequest("doc['date0'].getSeconds() - doc['date0'].getMinutes()"), rsp -> { assertEquals(2, rsp.getHits().getTotalHits().value); @@ -176,8 +177,8 @@ public void testDateObjectMethods() throws Exception { ensureGreen("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("id", 1, "date0", "2015-04-28T04:02:07Z", "date1", "1985-09-01T23:11:01Z"), - client().prepareIndex("test").setId("2").setSource("id", 2, "date0", "2013-12-25T11:56:45Z", "date1", "1983-10-13T23:15:00Z") + prepareIndex("test").setId("1").setSource("id", 1, "date0", "2015-04-28T04:02:07Z", "date1", "1985-09-01T23:11:01Z"), + 
prepareIndex("test").setId("2").setSource("id", 2, "date0", "2013-12-25T11:56:45Z", "date1", "1983-10-13T23:15:00Z") ); assertResponse(buildRequest("doc['date0'].date.secondOfMinute - doc['date0'].date.minuteOfHour"), rsp -> { assertEquals(2, rsp.getHits().getTotalHits().value); @@ -229,9 +230,9 @@ public void testMultiValueMethods() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource(doc1), - client().prepareIndex("test").setId("2").setSource(doc2), - client().prepareIndex("test").setId("3").setSource(doc3) + prepareIndex("test").setId("1").setSource(doc1), + prepareIndex("test").setId("2").setSource(doc2), + prepareIndex("test").setId("3").setSource(doc3) ); assertNoFailuresAndResponse(buildRequest("doc['double0'].count() + doc['double1'].count()"), rsp -> { @@ -312,7 +313,7 @@ public void testMultiValueMethods() throws Exception { public void testInvalidDateMethodCall() throws Exception { ElasticsearchAssertions.assertAcked(prepareCreate("test").setMapping("double", "type=double")); ensureGreen("test"); - indexRandom(true, client().prepareIndex("test").setId("1").setSource("double", "178000000.0")); + indexRandom(true, prepareIndex("test").setId("1").setSource("double", "178000000.0")); try { buildRequest("doc['double'].getYear()").get(); fail(); @@ -335,8 +336,8 @@ public void testSparseField() throws Exception { ensureGreen("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("id", 1, "x", 4), - client().prepareIndex("test").setId("2").setSource("id", 2, "y", 2) + prepareIndex("test").setId("1").setSource("id", 1, "x", 4), + prepareIndex("test").setId("2").setSource("id", 2, "y", 2) ); assertNoFailuresAndResponse(buildRequest("doc['x'] + 1"), rsp -> { SearchHits hits = rsp.getHits(); @@ -349,7 +350,7 @@ public void testSparseField() throws Exception { public void testMissingField() throws Exception { createIndex("test"); ensureGreen("test"); - client().prepareIndex("test").setId("1").setSource("x", 4).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("x", 4).setRefreshPolicy(IMMEDIATE).get(); try { buildRequest("doc['bogus']").get(); fail("Expected missing field to cause failure"); @@ -368,9 +369,9 @@ public void testParams() throws Exception { ensureGreen("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("id", 1, "x", 10), - client().prepareIndex("test").setId("2").setSource("id", 2, "x", 3), - client().prepareIndex("test").setId("3").setSource("id", 3, "x", 5) + prepareIndex("test").setId("1").setSource("id", 1, "x", 10), + prepareIndex("test").setId("2").setSource("id", 2, "x", 3), + prepareIndex("test").setId("3").setSource("id", 3, "x", 5) ); // a = int, b = double, c = long String script = "doc['x'] * a + b + ((c + doc['x']) > 5000000009 ? 
1 : 0)"; @@ -384,7 +385,7 @@ public void testParams() throws Exception { } public void testCompileFailure() { - client().prepareIndex("test").setId("1").setSource("x", 1).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("x", 1).setRefreshPolicy(IMMEDIATE).get(); try { buildRequest("garbage%@#%@").get(); fail("Expected expression compilation failure"); @@ -395,7 +396,7 @@ public void testCompileFailure() { } public void testNonNumericParam() { - client().prepareIndex("test").setId("1").setSource("x", 1).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("x", 1).setRefreshPolicy(IMMEDIATE).get(); try { buildRequest("a", "a", "astring").get(); fail("Expected string parameter to cause failure"); @@ -410,7 +411,7 @@ public void testNonNumericParam() { } public void testNonNumericField() { - client().prepareIndex("test").setId("1").setSource("text", "this is not a number").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("text", "this is not a number").setRefreshPolicy(IMMEDIATE).get(); try { buildRequest("doc['text.keyword']").get(); fail("Expected text field to cause execution failure"); @@ -425,7 +426,7 @@ public void testNonNumericField() { } public void testInvalidGlobalVariable() { - client().prepareIndex("test").setId("1").setSource("foo", 5).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("foo", 5).setRefreshPolicy(IMMEDIATE).get(); try { buildRequest("bogus").get(); fail("Expected bogus variable to cause execution failure"); @@ -440,7 +441,7 @@ public void testInvalidGlobalVariable() { } public void testDocWithoutField() { - client().prepareIndex("test").setId("1").setSource("foo", 5).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("foo", 5).setRefreshPolicy(IMMEDIATE).get(); try { buildRequest("doc").get(); fail("Expected doc variable without field to cause execution failure"); @@ -455,7 +456,7 @@ public void testDocWithoutField() { } public void testInvalidFieldMember() { - client().prepareIndex("test").setId("1").setSource("foo", 5).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("foo", 5).setRefreshPolicy(IMMEDIATE).get(); try { buildRequest("doc['foo'].bogus").get(); fail("Expected bogus field member to cause execution failure"); @@ -475,9 +476,9 @@ public void testSpecialValueVariable() throws Exception { ensureGreen("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("x", 5, "y", 1.2), - client().prepareIndex("test").setId("2").setSource("x", 10, "y", 1.4), - client().prepareIndex("test").setId("3").setSource("x", 13, "y", 1.8) + prepareIndex("test").setId("1").setSource("x", 5, "y", 1.2), + prepareIndex("test").setId("2").setSource("x", 10, "y", 1.4), + prepareIndex("test").setId("3").setSource("x", 13, "y", 1.8) ); SearchRequestBuilder req = prepareSearch().setIndices("test"); @@ -522,9 +523,9 @@ public void testStringSpecialValueVariable() throws Exception { ensureGreen("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("text", "hello"), - client().prepareIndex("test").setId("2").setSource("text", "goodbye"), - client().prepareIndex("test").setId("3").setSource("text", "hello") + prepareIndex("test").setId("1").setSource("text", "hello"), + prepareIndex("test").setId("2").setSource("text", "goodbye"), + prepareIndex("test").setId("3").setSource("text", "hello") ); SearchRequestBuilder req = prepareSearch().setIndices("test"); @@ -555,7 
+556,7 @@ public void testInvalidUpdateScript() throws Exception { try { createIndex("test_index"); ensureGreen("test_index"); - indexRandom(true, client().prepareIndex("test_index").setId("1").setSource("text_field", "text")); + indexRandom(true, prepareIndex("test_index").setId("1").setSource("text_field", "text")); UpdateRequestBuilder urb = client().prepareUpdate().setIndex("test_index"); urb.setId("1"); urb.setScript(new Script(ScriptType.INLINE, ExpressionScriptEngine.NAME, "0", Collections.emptyMap())); @@ -575,11 +576,11 @@ public void testPipelineAggregationScript() throws Exception { ensureGreen("agg_index"); indexRandom( true, - client().prepareIndex("agg_index").setId("1").setSource("one", 1.0, "two", 2.0, "three", 3.0, "four", 4.0), - client().prepareIndex("agg_index").setId("2").setSource("one", 2.0, "two", 2.0, "three", 3.0, "four", 4.0), - client().prepareIndex("agg_index").setId("3").setSource("one", 3.0, "two", 2.0, "three", 3.0, "four", 4.0), - client().prepareIndex("agg_index").setId("4").setSource("one", 4.0, "two", 2.0, "three", 3.0, "four", 4.0), - client().prepareIndex("agg_index").setId("5").setSource("one", 5.0, "two", 2.0, "three", 3.0, "four", 4.0) + prepareIndex("agg_index").setId("1").setSource("one", 1.0, "two", 2.0, "three", 3.0, "four", 4.0), + prepareIndex("agg_index").setId("2").setSource("one", 2.0, "two", 2.0, "three", 3.0, "four", 4.0), + prepareIndex("agg_index").setId("3").setSource("one", 3.0, "two", 2.0, "three", 3.0, "four", 4.0), + prepareIndex("agg_index").setId("4").setSource("one", 4.0, "two", 2.0, "three", 3.0, "four", 4.0), + prepareIndex("agg_index").setId("5").setSource("one", 5.0, "two", 2.0, "three", 3.0, "four", 4.0) ); assertResponse( prepareSearch("agg_index").addAggregation( @@ -639,8 +640,7 @@ public void testGeo() throws Exception { xContentBuilder.endObject().endObject().endObject().endObject(); assertAcked(prepareCreate("test").setMapping(xContentBuilder)); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("name", "test") @@ -650,8 +650,7 @@ public void testGeo() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); refresh(); // access .lat assertNoFailuresAndResponse(buildRequest("doc['location'].lat"), rsp -> { @@ -687,9 +686,9 @@ public void testBoolean() throws Exception { ensureGreen(); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("id", 1, "price", 1.0, "vip", true), - client().prepareIndex("test").setId("2").setSource("id", 2, "price", 2.0, "vip", false), - client().prepareIndex("test").setId("3").setSource("id", 3, "price", 2.0, "vip", false) + prepareIndex("test").setId("1").setSource("id", 1, "price", 1.0, "vip", true), + prepareIndex("test").setId("2").setSource("id", 2, "price", 2.0, "vip", false), + prepareIndex("test").setId("3").setSource("id", 3, "price", 2.0, "vip", false) ); // access .value assertNoFailuresAndResponse(buildRequest("doc['vip'].value"), rsp -> { @@ -720,8 +719,8 @@ public void testFilterScript() throws Exception { ensureGreen("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("id", 1, "foo", 1.0), - client().prepareIndex("test").setId("2").setSource("id", 2, "foo", 0.0) + prepareIndex("test").setId("1").setSource("id", 1, "foo", 1.0), + prepareIndex("test").setId("2").setSource("id", 2, "foo", 0.0) ); SearchRequestBuilder builder = buildRequest("doc['foo'].value"); Script script = new Script(ScriptType.INLINE, 
"expression", "doc['foo'].value", Collections.emptyMap()); diff --git a/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/StoredExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/StoredExpressionIT.java index dcf380d338c14..121a6b01ea792 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/StoredExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/StoredExpressionIT.java @@ -41,7 +41,7 @@ protected Collection> nodePlugins() { public void testAllOpsDisabledIndexedScripts() throws IOException { clusterAdmin().preparePutStoredScript().setId("script1").setContent(new BytesArray(""" {"script": {"lang": "expression", "source": "2"} }"""), XContentType.JSON).get(); - client().prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON).get(); + prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON).get(); try { client().prepareUpdate("test", "1").setScript(new Script(ScriptType.STORED, null, "script1", Collections.emptyMap())).get(); fail("update script should have been rejected"); diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java index 000728209456f..d9e346454aefe 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java @@ -59,9 +59,7 @@ public void testBasic() throws Exception { final int numDocs = randomIntBetween(10, 100); IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - indexRequestBuilders[i] = client().prepareIndex("msearch") - .setId(String.valueOf(i)) - .setSource("odd", (i % 2 == 0), "group", (i % 3)); + indexRequestBuilders[i] = prepareIndex("msearch").setId(String.valueOf(i)).setSource("odd", (i % 2 == 0), "group", (i % 3)); } indexRandom(true, indexRequestBuilders); diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java index 517828cbeba3c..77480e6bc9e63 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java @@ -55,8 +55,8 @@ protected Settings nodeSettings() { @Before public void setup() throws IOException { createIndex("test"); - client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "value1").endObject()).get(); - client().prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("text", "value2").endObject()).get(); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "value1").endObject()).get(); + prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("text", "value2").endObject()).get(); indicesAdmin().prepareRefresh().get(); } @@ -166,11 +166,11 @@ public void testIndexedTemplateClient() throws Exception { 
assertNotNull(getResponse.getSource()); BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); - bulkRequestBuilder.add(client().prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("2").setSource("{\"theField\":\"foo 2\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("3").setSource("{\"theField\":\"foo 3\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("4").setSource("{\"theField\":\"foo 4\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("5").setSource("{\"theField\":\"bar\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("2").setSource("{\"theField\":\"foo 2\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("3").setSource("{\"theField\":\"foo 3\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("4").setSource("{\"theField\":\"foo 4\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("5").setSource("{\"theField\":\"bar\"}", XContentType.JSON)); bulkRequestBuilder.get(); indicesAdmin().prepareRefresh().get(); @@ -263,11 +263,11 @@ public void testIndexedTemplate() throws Exception { assertAcked(clusterAdmin().preparePutStoredScript().setId("3").setContent(new BytesArray(script), XContentType.JSON)); BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); - bulkRequestBuilder.add(client().prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("2").setSource("{\"theField\":\"foo 2\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("3").setSource("{\"theField\":\"foo 3\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("4").setSource("{\"theField\":\"foo 4\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("5").setSource("{\"theField\":\"bar\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("2").setSource("{\"theField\":\"foo 2\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("3").setSource("{\"theField\":\"foo 3\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("4").setSource("{\"theField\":\"foo 4\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("5").setSource("{\"theField\":\"bar\"}", XContentType.JSON)); bulkRequestBuilder.get(); indicesAdmin().prepareRefresh().get(); @@ -304,7 +304,7 @@ public void testIndexedTemplateOverwrite() throws Exception { createIndex("testindex"); ensureGreen("testindex"); - client().prepareIndex("testindex").setId("1").setSource(jsonBuilder().startObject().field("searchtext", "dev1").endObject()).get(); + prepareIndex("testindex").setId("1").setSource(jsonBuilder().startObject().field("searchtext", "dev1").endObject()).get(); indicesAdmin().prepareRefresh().get(); int iterations = randomIntBetween(2, 11); @@ -382,11 +382,11 @@ public void testIndexedTemplateWithArray() throws Exception { }"""; assertAcked(clusterAdmin().preparePutStoredScript().setId("4").setContent(new BytesArray(multiQuery), 
XContentType.JSON)); BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); - bulkRequestBuilder.add(client().prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("2").setSource("{\"theField\":\"foo 2\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("3").setSource("{\"theField\":\"foo 3\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("4").setSource("{\"theField\":\"foo 4\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("5").setSource("{\"theField\":\"bar\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("2").setSource("{\"theField\":\"foo 2\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("3").setSource("{\"theField\":\"foo 3\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("4").setSource("{\"theField\":\"foo 4\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("5").setSource("{\"theField\":\"bar\"}", XContentType.JSON)); bulkRequestBuilder.get(); indicesAdmin().prepareRefresh().get(); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java index ae4d3469f96c4..a26352eb3d8c7 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java @@ -170,18 +170,22 @@ static final class Fields { public static MultiSearchTemplateResponse fromXContext(XContentParser parser) { // The MultiSearchTemplateResponse is identical to the multi search response so we reuse the parsing logic in multi search response MultiSearchResponse mSearchResponse = MultiSearchResponse.fromXContext(parser); - org.elasticsearch.action.search.MultiSearchResponse.Item[] responses = mSearchResponse.getResponses(); - Item[] templateResponses = new Item[responses.length]; - int i = 0; - for (org.elasticsearch.action.search.MultiSearchResponse.Item item : responses) { - SearchTemplateResponse stResponse = null; - if (item.getResponse() != null) { - stResponse = new SearchTemplateResponse(); - stResponse.setResponse(item.getResponse()); + try { + org.elasticsearch.action.search.MultiSearchResponse.Item[] responses = mSearchResponse.getResponses(); + Item[] templateResponses = new Item[responses.length]; + int i = 0; + for (org.elasticsearch.action.search.MultiSearchResponse.Item item : responses) { + SearchTemplateResponse stResponse = null; + if (item.getResponse() != null) { + stResponse = new SearchTemplateResponse(); + stResponse.setResponse(item.getResponse()); + } + templateResponses[i++] = new Item(stResponse, item.getFailure()); } - templateResponses[i++] = new Item(stResponse, item.getFailure()); + return new MultiSearchTemplateResponse(templateResponses, mSearchResponse.getTook().millis()); + } finally { + mSearchResponse.decRef(); } - return new MultiSearchTemplateResponse(templateResponses, mSearchResponse.getTook().millis()); } @Override diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java 
b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java index 30937ebcbd773..1fcf776ac8428 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java @@ -8,7 +8,6 @@ package org.elasticsearch.painless.spi; -import java.util.Collections; import java.util.List; import java.util.Objects; @@ -47,11 +46,10 @@ public Whitelist( List<WhitelistClassBinding> whitelistClassBindings, List<WhitelistInstanceBinding> whitelistInstanceBindings ) { - this.classLoader = Objects.requireNonNull(classLoader); - this.whitelistClasses = Collections.unmodifiableList(Objects.requireNonNull(whitelistClasses)); - this.whitelistImportedMethods = Collections.unmodifiableList(Objects.requireNonNull(whitelistImportedMethods)); - this.whitelistClassBindings = Collections.unmodifiableList(Objects.requireNonNull(whitelistClassBindings)); - this.whitelistInstanceBindings = Collections.unmodifiableList(Objects.requireNonNull(whitelistInstanceBindings)); + this.whitelistClasses = List.copyOf(whitelistClasses); + this.whitelistImportedMethods = List.copyOf(whitelistImportedMethods); + this.whitelistClassBindings = List.copyOf(whitelistClassBindings); + this.whitelistInstanceBindings = List.copyOf(whitelistInstanceBindings); } } diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java index 2130f9343dfa3..1daad59768a15 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java @@ -8,11 +8,10 @@ package org.elasticsearch.painless.spi; -import java.util.AbstractMap; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.Function; import java.util.stream.Collectors; /** @@ -59,23 +58,12 @@ public WhitelistClass( List<WhitelistField> whitelistFields, List<Object> painlessAnnotations ) { - this.origin = Objects.requireNonNull(origin); this.javaClassName = Objects.requireNonNull(javaClassName); - - this.whitelistConstructors = Collections.unmodifiableList(Objects.requireNonNull(whitelistConstructors)); - this.whitelistMethods = Collections.unmodifiableList(Objects.requireNonNull(whitelistMethods)); - this.whitelistFields = Collections.unmodifiableList(Objects.requireNonNull(whitelistFields)); - - if (painlessAnnotations.isEmpty()) { - this.painlessAnnotations = Collections.emptyMap(); - } else { - this.painlessAnnotations = Collections.unmodifiableMap( - Objects.requireNonNull(painlessAnnotations) - .stream() - .map(painlessAnnotation -> new AbstractMap.SimpleEntry<>(painlessAnnotation.getClass(), painlessAnnotation)) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)) - ); - } + this.whitelistConstructors = List.copyOf(whitelistConstructors); + this.whitelistMethods = List.copyOf(whitelistMethods); + this.whitelistFields = List.copyOf(whitelistFields); + this.painlessAnnotations = painlessAnnotations.stream() + .collect(Collectors.toUnmodifiableMap(Object::getClass, Function.identity())); } } diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistField.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistField.java index c1a3c43196647..872482bcf6281 100644 ---
a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistField.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistField.java @@ -8,11 +8,10 @@ package org.elasticsearch.painless.spi; -import java.util.AbstractMap; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.Function; import java.util.stream.Collectors; /** @@ -39,16 +38,7 @@ public WhitelistField(String origin, String fieldName, String canonicalTypeNameP this.origin = Objects.requireNonNull(origin); this.fieldName = Objects.requireNonNull(fieldName); this.canonicalTypeNameParameter = Objects.requireNonNull(canonicalTypeNameParameter); - - if (painlessAnnotations.isEmpty()) { - this.painlessAnnotations = Collections.emptyMap(); - } else { - this.painlessAnnotations = Collections.unmodifiableMap( - Objects.requireNonNull(painlessAnnotations) - .stream() - .map(painlessAnnotation -> new AbstractMap.SimpleEntry<>(painlessAnnotation.getClass(), painlessAnnotation)) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)) - ); - } + this.painlessAnnotations = painlessAnnotations.stream() + .collect(Collectors.toUnmodifiableMap(Object::getClass, Function.identity())); } } diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java index 8451d1c9f3ef4..8927d290ecc77 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java @@ -8,11 +8,10 @@ package org.elasticsearch.painless.spi; -import java.util.AbstractMap; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.Function; import java.util.stream.Collectors; /** @@ -69,22 +68,12 @@ public WhitelistMethod( List canonicalTypeNameParameters, List painlessAnnotations ) { - this.origin = Objects.requireNonNull(origin); this.augmentedCanonicalClassName = augmentedCanonicalClassName; this.methodName = methodName; this.returnCanonicalTypeName = Objects.requireNonNull(returnCanonicalTypeName); - this.canonicalTypeNameParameters = Collections.unmodifiableList(Objects.requireNonNull(canonicalTypeNameParameters)); - - if (painlessAnnotations.isEmpty()) { - this.painlessAnnotations = Collections.emptyMap(); - } else { - this.painlessAnnotations = Collections.unmodifiableMap( - Objects.requireNonNull(painlessAnnotations) - .stream() - .map(painlessAnnotation -> new AbstractMap.SimpleEntry<>(painlessAnnotation.getClass(), painlessAnnotation)) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)) - ); - } + this.canonicalTypeNameParameters = List.copyOf(canonicalTypeNameParameters); + this.painlessAnnotations = painlessAnnotations.stream() + .collect(Collectors.toUnmodifiableMap(Object::getClass, Function.identity())); } } diff --git a/modules/lang-painless/src/internalClusterTest/java/org/elasticsearch/painless/search/SyntheticSourceIT.java b/modules/lang-painless/src/internalClusterTest/java/org/elasticsearch/painless/search/SyntheticSourceIT.java new file mode 100644 index 0000000000000..0383999d6f7e5 --- /dev/null +++ b/modules/lang-painless/src/internalClusterTest/java/org/elasticsearch/painless/search/SyntheticSourceIT.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.painless.search; + +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.painless.PainlessPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; + +public class SyntheticSourceIT extends ESIntegTestCase { + @Override + protected Collection> nodePlugins() { + return Collections.singleton(PainlessPlugin.class); + } + + public void testSearchUsingRuntimeField() throws Exception { + createIndex(); + + int numDocs = between(1000, 5000); + for (int i = 0; i < numDocs; i++) { + IndexRequestBuilder indexRequest = client().prepareIndex("test").setSource("id", "" + i); + if (randomInt(100) < 5) { + indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + } + indexRequest.get(); + } + client().admin().indices().prepareRefresh("test").get(); + assertNoFailures(client().prepareSearch("test").setQuery(QueryBuilders.rangeQuery("long_id").from(0))); + } + + private void createIndex() throws IOException { + XContentBuilder mapping = JsonXContent.contentBuilder(); + mapping.startObject(); + { + mapping.startObject("_source"); + mapping.field("mode", "synthetic"); + mapping.endObject(); + } + { + mapping.startObject("runtime"); + mapping.startObject("long_id"); + mapping.field("type", "long"); + mapping.field("script", "emit(Long.parseLong(params._source.id));"); + mapping.endObject(); + mapping.endObject(); + mapping.startObject("properties"); + mapping.startObject("id").field("type", "keyword").endObject(); + mapping.endObject(); + } + mapping.endObject(); + + assertAcked(client().admin().indices().prepareCreate("test").setMapping(mapping).get()); + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java index e9a3b2c1fd7f7..1f8b7b909909f 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java @@ -222,10 +222,6 @@ private Location location(ParserRuleContext ctx) { return new Location(sourceName, ctx.getStart().getStartIndex()); } - private Location location(TerminalNode tn) { - return new Location(sourceName, tn.getSymbol().getStartIndex()); - } - @Override public ANode visitSource(SourceContext ctx) { List functions = new ArrayList<>(); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java index 3fc572d8446bc..d32639bf3968f 100644 --- 
a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java @@ -45,7 +45,7 @@ public final class PainlessClass { this.staticFields = Map.copyOf(staticFields); this.fields = Map.copyOf(fields); this.functionalInterfaceMethod = functionalInterfaceMethod; - this.annotations = annotations; + this.annotations = Map.copyOf(annotations); this.getterMethodHandles = Map.copyOf(getterMethodHandles); this.setterMethodHandles = Map.copyOf(setterMethodHandles); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java index bf001c5e49db9..0c1497b541954 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java @@ -1680,6 +1680,7 @@ public PainlessLookup build() { ); } + classesToDirectSubClasses.replaceAll((key, set) -> Set.copyOf(set)); // save some memory, especially when set is empty return new PainlessLookup( javaClassNamesToClasses, canonicalClassNamesToClasses, diff --git a/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeIT.java b/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeIT.java index a7fe63eb34ce6..2b9c35429c328 100644 --- a/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeIT.java +++ b/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeIT.java @@ -59,7 +59,7 @@ public void testLegacyCircle() throws Exception { ); ensureGreen(); - indexRandom(true, client().prepareIndex("test").setId("0").setSource("shape", (ToXContent) (builder, params) -> { + indexRandom(true, prepareIndex("test").setId("0").setSource("shape", (ToXContent) (builder, params) -> { builder.startObject() .field("type", "circle") .startArray("coordinates") diff --git a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java index 3ae6e29802962..afd969cc17ad4 100644 --- a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java +++ b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java @@ -130,17 +130,6 @@ public static class PrefixTrees { public static final String GEOHASH = "geohash"; } - @Deprecated - public static class DeprecatedParameters { - - private static void checkPrefixTreeSupport(String fieldName) { - if (ShapesAvailability.JTS_AVAILABLE == false || ShapesAvailability.SPATIAL4J_AVAILABLE == false) { - throw new ElasticsearchParseException("Field parameter [{}] is not supported for [{}] field type", fieldName, CONTENT_TYPE); - } - - } - } - private static Builder builder(FieldMapper in) { return ((LegacyGeoShapeFieldMapper) in).builder; } diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeQueryTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeQueryTests.java index 6ef1f4c8a99b6..6ad4d2c06c6d4 100644 --- 
a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeQueryTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeQueryTests.java @@ -86,16 +86,14 @@ public void testPointsOnlyExplicit() throws Exception { // MULTIPOINT MultiPoint multiPoint = GeometryTestUtils.randomMultiPoint(false); - client().prepareIndex("geo_points_only") - .setId("1") + prepareIndex("geo_points_only").setId("1") .setSource(GeoJson.toXContent(multiPoint, jsonBuilder().startObject().field(defaultFieldName), null).endObject()) .setRefreshPolicy(IMMEDIATE) .get(); // POINT Point point = GeometryTestUtils.randomPoint(false); - client().prepareIndex("geo_points_only") - .setId("2") + prepareIndex("geo_points_only").setId("2") .setSource(GeoJson.toXContent(point, jsonBuilder().startObject().field(defaultFieldName), null).endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -125,8 +123,7 @@ public void testPointsOnly() throws Exception { Geometry geometry = GeometryTestUtils.randomGeometry(false); try { - client().prepareIndex("geo_points_only") - .setId("1") + prepareIndex("geo_points_only").setId("1") .setSource(GeoJson.toXContent(geometry, jsonBuilder().startObject().field(defaultFieldName), null).endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -161,8 +158,7 @@ public void testFieldAlias() throws IOException { ensureGreen(); MultiPoint multiPoint = GeometryTestUtils.randomMultiPoint(false); - client().prepareIndex(defaultIndexName) - .setId("1") + prepareIndex(defaultIndexName).setId("1") .setSource(GeoJson.toXContent(multiPoint, jsonBuilder().startObject().field(defaultFieldName), null).endObject()) .setRefreshPolicy(IMMEDIATE) .get(); diff --git a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/RankFeaturesMapperIntegrationIT.java b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/RankFeaturesMapperIntegrationIT.java index 3a7f9a1ca6eb5..c6544bac2b13c 100644 --- a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/RankFeaturesMapperIntegrationIT.java +++ b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/RankFeaturesMapperIntegrationIT.java @@ -106,14 +106,11 @@ private void init() throws IOException { BulkResponse bulk = client().prepareBulk() .add( - client().prepareIndex(INDEX_NAME) - .setId("all") + prepareIndex(INDEX_NAME).setId("all") .setSource(Map.of("all_rank_features", Map.of(LOWER_RANKED_FEATURE, 10, HIGHER_RANKED_FEATURE, 20))) ) - .add(client().prepareIndex(INDEX_NAME).setId("lower").setSource(Map.of("all_rank_features", Map.of(LOWER_RANKED_FEATURE, 10)))) - .add( - client().prepareIndex(INDEX_NAME).setId("higher").setSource(Map.of("all_rank_features", Map.of(HIGHER_RANKED_FEATURE, 20))) - ) + .add(prepareIndex(INDEX_NAME).setId("lower").setSource(Map.of("all_rank_features", Map.of(LOWER_RANKED_FEATURE, 10)))) + .add(prepareIndex(INDEX_NAME).setId("higher").setSource(Map.of("all_rank_features", Map.of(HIGHER_RANKED_FEATURE, 20)))) .get(); assertFalse(bulk.buildFailureMessage(), bulk.hasFailures()); assertThat(refresh().getFailedShards(), equalTo(0)); diff --git a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java index 0bdfbc3d90ead..08a3d046b00f7 100644 --- 
a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java +++ b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java @@ -176,7 +176,7 @@ private void init() throws IOException { } private IndexRequestBuilder prepareIndex(String id, String... texts) throws IOException { - return client().prepareIndex("test").setId(id).setSource("foo", texts); + return prepareIndex("test").setId(id).setSource("foo", texts); } private SearchRequestBuilder searchById(String id) { diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java index ee04346591009..161cb1674a7b9 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java @@ -324,9 +324,9 @@ public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions, @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { if (textFieldType.isSyntheticSource()) { - return BlockStoredFieldsReader.bytesRefsFromStrings(storedFieldNameForSyntheticSource()); + return new BlockStoredFieldsReader.BytesFromStringsBlockLoader(storedFieldNameForSyntheticSource()); } - return BlockSourceReader.bytesRefs(SourceValueFetcher.toString(blContext.sourcePaths(name()))); + return new BlockSourceReader.BytesRefsBlockLoader(SourceValueFetcher.toString(blContext.sourcePaths(name()))); } @Override diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java index abed23621d5e9..b35fb09c2d053 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java @@ -310,13 +310,13 @@ public Query rangeQuery( public BlockLoader blockLoader(BlockLoaderContext blContext) { if (indexMode == IndexMode.TIME_SERIES && metricType == TimeSeriesParams.MetricType.COUNTER) { // Counters are not supported by ESQL so we load them in null - return BlockDocValuesReader.nulls(); + return BlockLoader.CONSTANT_NULLS; } if (hasDocValues()) { double scalingFactorInverse = 1d / scalingFactor; - return BlockDocValuesReader.doubles(name(), l -> l * scalingFactorInverse); + return new BlockDocValuesReader.DoublesBlockLoader(name(), l -> l * scalingFactorInverse); } - return BlockSourceReader.doubles(sourceValueFetcher(blContext.sourcePaths(name()))); + return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher(blContext.sourcePaths(name()))); } @Override diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/BWCTemplateTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/BWCTemplateTests.java index 9ddbc72e8ff94..b4ee066a0e391 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/BWCTemplateTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/BWCTemplateTests.java @@ -34,8 +34,8 @@ public void testBeatsTemplatesBWC() throws Exception { 
indicesAdmin().preparePutTemplate("packetbeat").setSource(packetBeat, XContentType.JSON).get(); indicesAdmin().preparePutTemplate("filebeat").setSource(fileBeat, XContentType.JSON).get(); - client().prepareIndex("metricbeat-foo").setId("1").setSource("message", "foo").get(); - client().prepareIndex("packetbeat-foo").setId("1").setSource("message", "foo").get(); - client().prepareIndex("filebeat-foo").setId("1").setSource("message", "foo").get(); + prepareIndex("metricbeat-foo").setId("1").setSource("message", "foo").get(); + prepareIndex("packetbeat-foo").setId("1").setSource("message", "foo").get(); + prepareIndex("filebeat-foo").setId("1").setSource("message", "foo").get(); } } diff --git a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java index cc9a3a1a248db..e433ce0b60596 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java @@ -511,7 +511,7 @@ public void testHasChildAndHasParentFailWhenSomeSegmentsDontContainAnyParentOrCh createIndexRequest("test", "parent", "1", null, "p_field", 1).get(); createIndexRequest("test", "child", "2", "1", "c_field", 1).get(); - client().prepareIndex("test").setId("3").setSource("p_field", 1).get(); + prepareIndex("test").setId("3").setSource("p_field", 1).get(); refresh(); assertHitCountAndNoFailures( @@ -736,8 +736,7 @@ public void testParentChildQueriesCanHandleNoRelevantTypesInIndex() throws Excep 0L ); - client().prepareIndex("test") - .setSource(jsonBuilder().startObject().field("text", "value").endObject()) + prepareIndex("test").setSource(jsonBuilder().startObject().field("text", "value").endObject()) .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .get(); @@ -761,7 +760,7 @@ public void testHasChildAndHasParentFilter_withFilter() throws Exception { createIndexRequest("test", "child", "2", "1", "c_field", 1).get(); indicesAdmin().prepareFlush("test").get(); - client().prepareIndex("test").setId("3").setSource("p_field", 2).get(); + prepareIndex("test").setId("3").setSource("p_field", 2).get(); refresh(); assertNoFailuresAndResponse( @@ -1303,7 +1302,7 @@ public void testParentChildQueriesNoParentType() throws Exception { ensureGreen(); String parentId = "p1"; - client().prepareIndex("test").setId(parentId).setSource("p_field", "1").get(); + prepareIndex("test").setId(parentId).setSource("p_field", "1").get(); refresh(); try { @@ -1409,8 +1408,7 @@ public void testParentChildQueriesViaScrollApi() throws Exception { .setSize(1) .addStoredField("_id") .setQuery(query) - .execute() - .actionGet(); + .get(); assertNoFailures(scrollResponse); assertThat(scrollResponse.getHits().getTotalHits().value, equalTo(10L)); diff --git a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/InnerHitsIT.java b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/InnerHitsIT.java index 02eaacba0b1de..f851678b6c9d6 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/InnerHitsIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/InnerHitsIT.java @@ -626,7 +626,7 @@ public void testInnerHitsWithIgnoreUnmapped() { assertAcked(prepareCreate("index2")); createIndexRequest("index1", "parent_type", "1", null, "nested_type", 
Collections.singletonMap("key", "value")).get(); createIndexRequest("index1", "child_type", "2", "1").get(); - client().prepareIndex("index2").setId("3").setSource("key", "value").get(); + prepareIndex("index2").setId("3").setSource("key", "value").get(); refresh(); assertSearchHitsWithoutFailures( prepareSearch("index1", "index2").setQuery( diff --git a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ParentChildTestCase.java b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ParentChildTestCase.java index a67ebd4cbca22..02f24a67dda02 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ParentChildTestCase.java +++ b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ParentChildTestCase.java @@ -100,7 +100,7 @@ private IndexRequestBuilder createIndexRequest(String index, String type, String String name = type; type = "doc"; - IndexRequestBuilder indexRequestBuilder = client().prepareIndex(index).setId(id); + IndexRequestBuilder indexRequestBuilder = prepareIndex(index).setId(id); Map<String, Object> joinField = new HashMap<>(); if (parentId != null) { joinField.put("name", name); diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java index b130411e5e099..c7999f27834a9 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java @@ -31,6 +31,7 @@ import java.io.IOException; import java.util.Map; import java.util.Objects; +import java.util.function.ToLongFunction; public class ParentAggregationBuilder extends ValuesSourceAggregationBuilder<ParentAggregationBuilder> { @@ -90,7 +91,7 @@ public BucketCardinality bucketCardinality() { } @Override - public boolean supportsParallelCollection() { + public boolean supportsParallelCollection(ToLongFunction<String> fieldCardinalityResolver) { return false; } diff --git a/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java b/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java index cad976411b8da..88a39fe4aebc8 100644 --- a/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java +++ b/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java @@ -10,8 +10,7 @@ import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.MultiSearchResponse; -import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.MultiSearchResponse.Item; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -58,6 +57,7 @@ import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xcontent.XContentFactory.yamlBuilder; @@ -80,16 +80,13 @@ public void testPercolatorQuery() throws Exception { .setMapping("id", "type=keyword", "field1", "type=keyword", "field2", "type=keyword", "query", "type=percolator") ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("id", "1").field("query", matchAllQuery()).endObject()) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource(jsonBuilder().startObject().field("id", "2").field("query", matchQuery("field1", "value")).endObject()) .get(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .field("id", "3") @@ -101,52 +98,66 @@ public void testPercolatorQuery() throws Exception { BytesReference source = BytesReference.bytes(jsonBuilder().startObject().endObject()); logger.info("percolating empty doc"); - SearchResponse response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + }); source = BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").endObject()); logger.info("percolating doc with 1 field"); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(response, 2); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + } + ); source = BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").field("field2", "value").endObject()); logger.info("percolating doc with 2 fields"); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(response, 3); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); - 
assertThat(response.getHits().getAt(2).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); - + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + } + ); logger.info("percolating doc with 2 fields"); - response = prepareSearch().setQuery( - new PercolateQueryBuilder( - "query", - Arrays.asList( - BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").endObject()), - BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").field("field2", "value").endObject()) - ), - XContentType.JSON - ) - ).addSort("id", SortOrder.ASC).get(); - assertHitCount(response, 3); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0, 1))); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0, 1))); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); - assertThat(response.getHits().getAt(2).getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(1))); + assertResponse( + prepareSearch().setQuery( + new PercolateQueryBuilder( + "query", + Arrays.asList( + BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").endObject()), + BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").field("field2", "value").endObject()) + ), + XContentType.JSON + ) + ).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat( + response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValues(), + equalTo(Arrays.asList(0, 1)) + ); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat( + response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValues(), + equalTo(Arrays.asList(0, 1)) + ); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(1))); + } + ); } public void testPercolatorRangeQueries() throws Exception { @@ -166,16 +177,13 @@ public void testPercolatorRangeQueries() throws Exception { ) ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("query", rangeQuery("field1").from(10).to(12)).endObject()) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource(jsonBuilder().startObject().field("query", rangeQuery("field1").from(20).to(22)).endObject()) .get(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() 
.field("query", boolQuery().must(rangeQuery("field1").from(10).to(12)).must(rangeQuery("field1").from(12).to(14))) @@ -183,16 +191,13 @@ public void testPercolatorRangeQueries() throws Exception { ) .get(); indicesAdmin().prepareRefresh().get(); - client().prepareIndex("test") - .setId("4") + prepareIndex("test").setId("4") .setSource(jsonBuilder().startObject().field("query", rangeQuery("field2").from(10).to(12)).endObject()) .get(); - client().prepareIndex("test") - .setId("5") + prepareIndex("test").setId("5") .setSource(jsonBuilder().startObject().field("query", rangeQuery("field2").from(20).to(22)).endObject()) .get(); - client().prepareIndex("test") - .setId("6") + prepareIndex("test").setId("6") .setSource( jsonBuilder().startObject() .field("query", boolQuery().must(rangeQuery("field2").from(10).to(12)).must(rangeQuery("field2").from(12).to(14))) @@ -200,16 +205,13 @@ public void testPercolatorRangeQueries() throws Exception { ) .get(); indicesAdmin().prepareRefresh().get(); - client().prepareIndex("test") - .setId("7") + prepareIndex("test").setId("7") .setSource(jsonBuilder().startObject().field("query", rangeQuery("field3").from("192.168.1.0").to("192.168.1.5")).endObject()) .get(); - client().prepareIndex("test") - .setId("8") + prepareIndex("test").setId("8") .setSource(jsonBuilder().startObject().field("query", rangeQuery("field3").from("192.168.1.20").to("192.168.1.30")).endObject()) .get(); - client().prepareIndex("test") - .setId("9") + prepareIndex("test").setId("9") .setSource( jsonBuilder().startObject() .field( @@ -220,8 +222,7 @@ public void testPercolatorRangeQueries() throws Exception { .endObject() ) .get(); - client().prepareIndex("test") - .setId("10") + prepareIndex("test").setId("10") .setSource( jsonBuilder().startObject() .field( @@ -236,46 +237,52 @@ public void testPercolatorRangeQueries() throws Exception { // Test long range: BytesReference source = BytesReference.bytes(jsonBuilder().startObject().field("field1", 12).endObject()); - SearchResponse response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - logger.info("response={}", response); - assertHitCount(response, 2); - assertThat(response.getHits().getAt(0).getId(), equalTo("3")); - assertThat(response.getHits().getAt(1).getId(), equalTo("1")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + logger.info("response={}", response); + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("3")); + assertThat(response.getHits().getAt(1).getId(), equalTo("1")); + }); source = BytesReference.bytes(jsonBuilder().startObject().field("field1", 11).endObject()); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + }); // Test double range: source = BytesReference.bytes(jsonBuilder().startObject().field("field2", 12).endObject()); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 2); - assertThat(response.getHits().getAt(0).getId(), equalTo("6")); - assertThat(response.getHits().getAt(1).getId(), equalTo("4")); 
+ assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("6")); + assertThat(response.getHits().getAt(1).getId(), equalTo("4")); + }); source = BytesReference.bytes(jsonBuilder().startObject().field("field2", 11).endObject()); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("4")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("4")); + }); // Test IP range: source = BytesReference.bytes(jsonBuilder().startObject().field("field3", "192.168.1.5").endObject()); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 2); - assertThat(response.getHits().getAt(0).getId(), equalTo("9")); - assertThat(response.getHits().getAt(1).getId(), equalTo("7")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("9")); + assertThat(response.getHits().getAt(1).getId(), equalTo("7")); + }); source = BytesReference.bytes(jsonBuilder().startObject().field("field3", "192.168.1.4").endObject()); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("7")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("7")); + }); // Test date range: source = BytesReference.bytes(jsonBuilder().startObject().field("field4", "2016-05-15").endObject()); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("10")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("10")); + }); } public void testPercolatorGeoQueries() throws Exception { @@ -283,8 +290,7 @@ public void testPercolatorGeoQueries() throws Exception { indicesAdmin().prepareCreate("test").setMapping("id", "type=keyword", "field1", "type=geo_point", "query", "type=percolator") ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("query", geoDistanceQuery("field1").point(52.18, 4.38).distance(50, DistanceUnit.KILOMETERS)) @@ -293,8 +299,7 @@ public void testPercolatorGeoQueries() throws Exception { ) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("query", geoBoundingBoxQuery("field1").setCorners(52.3, 4.4, 52.1, 4.6)) @@ -303,8 +308,7 @@ public void testPercolatorGeoQueries() throws Exception { ) .get(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .field( 
@@ -323,13 +327,15 @@ public void testPercolatorGeoQueries() throws Exception { BytesReference source = BytesReference.bytes( jsonBuilder().startObject().startObject("field1").field("lat", 52.20).field("lon", 4.51).endObject().endObject() ); - SearchResponse response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(response, 3); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + } + ); } public void testPercolatorQueryExistingDocument() throws Exception { @@ -338,16 +344,13 @@ public void testPercolatorQueryExistingDocument() throws Exception { .setMapping("id", "type=keyword", "field1", "type=keyword", "field2", "type=keyword", "query", "type=percolator") ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("id", "1").field("query", matchAllQuery()).endObject()) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource(jsonBuilder().startObject().field("id", "2").field("query", matchQuery("field1", "value")).endObject()) .get(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .field("id", "3") @@ -356,32 +359,37 @@ public void testPercolatorQueryExistingDocument() throws Exception { ) .get(); - client().prepareIndex("test").setId("4").setSource("{\"id\": \"4\"}", XContentType.JSON).get(); - client().prepareIndex("test").setId("5").setSource(XContentType.JSON, "id", "5", "field1", "value").get(); - client().prepareIndex("test").setId("6").setSource(XContentType.JSON, "id", "6", "field1", "value", "field2", "value").get(); + prepareIndex("test").setId("4").setSource("{\"id\": \"4\"}", XContentType.JSON).get(); + prepareIndex("test").setId("5").setSource(XContentType.JSON, "id", "5", "field1", "value").get(); + prepareIndex("test").setId("6").setSource(XContentType.JSON, "id", "6", "field1", "value", "field2", "value").get(); indicesAdmin().prepareRefresh().get(); logger.info("percolating empty doc"); - SearchResponse response = prepareSearch().setQuery(new PercolateQueryBuilder("query", "test", "1", null, null, null)).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", "test", "1", null, null, null)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + }); logger.info("percolating doc with 1 field"); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", "test", "5", null, null, null)) - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(response, 2); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("query", "test", "5", null, null, 
null)).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + } + ); logger.info("percolating doc with 2 fields"); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", "test", "6", null, null, null)) - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(response, 3); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("query", "test", "6", null, null, null)).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + } + ); } public void testPercolatorQueryExistingDocumentSourceDisabled() throws Exception { @@ -390,9 +398,9 @@ public void testPercolatorQueryExistingDocumentSourceDisabled() throws Exception .setMapping("_source", "enabled=false", "field1", "type=keyword", "query", "type=percolator") ); - client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()).get(); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()).get(); - client().prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).get(); indicesAdmin().prepareRefresh().get(); logger.info("percolating empty doc with source disabled"); @@ -408,8 +416,7 @@ public void testPercolatorSpecificQueries() throws Exception { .setMapping("id", "type=keyword", "field1", "type=text", "field2", "type=text", "query", "type=percolator") ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("id", "1") @@ -417,8 +424,7 @@ public void testPercolatorSpecificQueries() throws Exception { .endObject() ) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("id", "2") @@ -433,8 +439,7 @@ public void testPercolatorSpecificQueries() throws Exception { .get(); indicesAdmin().prepareRefresh().get(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .field("id", "3") @@ -454,8 +459,7 @@ public void testPercolatorSpecificQueries() throws Exception { .get(); // doesn't match - client().prepareIndex("test") - .setId("4") + prepareIndex("test").setId("4") .setSource( jsonBuilder().startObject() .field("id", "4") @@ -481,16 +485,18 @@ public void testPercolatorSpecificQueries() throws Exception { .field("field2", "the quick brown fox falls down into the well") .endObject() ); - SearchResponse response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(response, 3); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getScore(), equalTo(Float.NaN)); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(1).getScore(), 
equalTo(Float.NaN)); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); - assertThat(response.getHits().getAt(2).getScore(), equalTo(Float.NaN)); + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getScore(), equalTo(Float.NaN)); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(1).getScore(), equalTo(Float.NaN)); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getScore(), equalTo(Float.NaN)); + } + ); } public void testPercolatorQueryWithHighlighting() throws Exception { @@ -504,228 +510,245 @@ public void testPercolatorQueryWithHighlighting() throws Exception { indicesAdmin().prepareCreate("test") .setMapping("id", "type=keyword", "field1", fieldMapping.toString(), "query", "type=percolator") ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("id", "1").field("query", matchQuery("field1", "brown fox")).endObject()) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("2") + .get(); + prepareIndex("test").setId("2") .setSource(jsonBuilder().startObject().field("id", "2").field("query", matchQuery("field1", "lazy dog")).endObject()) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("3") + .get(); + prepareIndex("test").setId("3") .setSource(jsonBuilder().startObject().field("id", "3").field("query", termQuery("field1", "jumps")).endObject()) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("4") + .get(); + prepareIndex("test").setId("4") .setSource(jsonBuilder().startObject().field("id", "4").field("query", termQuery("field1", "dog")).endObject()) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("5") + .get(); + prepareIndex("test").setId("5") .setSource(jsonBuilder().startObject().field("id", "5").field("query", termQuery("field1", "fox")).endObject()) - .execute() - .actionGet(); + .get(); indicesAdmin().prepareRefresh().get(); BytesReference document = BytesReference.bytes( jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject() ); - SearchResponse searchResponse = prepareSearch().setQuery(new PercolateQueryBuilder("query", document, XContentType.JSON)) - .highlighter(new HighlightBuilder().field("field1")) - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(searchResponse, 5); - - assertThat( - searchResponse.getHits().getAt(0).getHighlightFields().get("field1").fragments()[0].string(), - equalTo("The quick brown fox jumps over the lazy dog") - ); - assertThat( - searchResponse.getHits().getAt(1).getHighlightFields().get("field1").fragments()[0].string(), - equalTo("The quick brown fox jumps over the lazy dog") - ); - assertThat( - searchResponse.getHits().getAt(2).getHighlightFields().get("field1").fragments()[0].string(), - equalTo("The quick brown fox jumps over the lazy dog") - ); - assertThat( - searchResponse.getHits().getAt(3).getHighlightFields().get("field1").fragments()[0].string(), - equalTo("The quick brown fox jumps over the lazy dog") - ); - assertThat( - searchResponse.getHits().getAt(4).getHighlightFields().get("field1").fragments()[0].string(), - equalTo("The quick brown fox jumps over the lazy dog") + 
assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("query", document, XContentType.JSON)) + .highlighter(new HighlightBuilder().field("field1")) + .addSort("id", SortOrder.ASC), + searchResponse -> { + assertHitCount(searchResponse, 5); + + assertThat( + searchResponse.getHits().getAt(0).getHighlightFields().get("field1").fragments()[0].string(), + equalTo("The quick brown fox jumps over the lazy dog") + ); + assertThat( + searchResponse.getHits().getAt(1).getHighlightFields().get("field1").fragments()[0].string(), + equalTo("The quick brown fox jumps over the lazy dog") + ); + assertThat( + searchResponse.getHits().getAt(2).getHighlightFields().get("field1").fragments()[0].string(), + equalTo("The quick brown fox jumps over the lazy dog") + ); + assertThat( + searchResponse.getHits().getAt(3).getHighlightFields().get("field1").fragments()[0].string(), + equalTo("The quick brown fox jumps over the lazy dog") + ); + assertThat( + searchResponse.getHits().getAt(4).getHighlightFields().get("field1").fragments()[0].string(), + equalTo("The quick brown fox jumps over the lazy dog") + ); + } ); BytesReference document1 = BytesReference.bytes( jsonBuilder().startObject().field("field1", "The quick brown fox jumps").endObject() ); BytesReference document2 = BytesReference.bytes(jsonBuilder().startObject().field("field1", "over the lazy dog").endObject()); - searchResponse = prepareSearch().setQuery( - boolQuery().should(new PercolateQueryBuilder("query", document1, XContentType.JSON).setName("query1")) - .should(new PercolateQueryBuilder("query", document2, XContentType.JSON).setName("query2")) - ).highlighter(new HighlightBuilder().field("field1")).addSort("id", SortOrder.ASC).get(); - logger.info("searchResponse={}", searchResponse); - assertHitCount(searchResponse, 5); - - assertThat( - searchResponse.getHits().getAt(0).getHighlightFields().get("query1_field1").fragments()[0].string(), - equalTo("The quick brown fox jumps") - ); - assertThat( - searchResponse.getHits().getAt(1).getHighlightFields().get("query2_field1").fragments()[0].string(), - equalTo("over the lazy dog") - ); - assertThat( - searchResponse.getHits().getAt(2).getHighlightFields().get("query1_field1").fragments()[0].string(), - equalTo("The quick brown fox jumps") - ); - assertThat( - searchResponse.getHits().getAt(3).getHighlightFields().get("query2_field1").fragments()[0].string(), - equalTo("over the lazy dog") - ); - assertThat( - searchResponse.getHits().getAt(4).getHighlightFields().get("query1_field1").fragments()[0].string(), - equalTo("The quick brown fox jumps") - ); - - searchResponse = prepareSearch().setQuery( - new PercolateQueryBuilder( - "query", - Arrays.asList( - BytesReference.bytes(jsonBuilder().startObject().field("field1", "dog").endObject()), - BytesReference.bytes(jsonBuilder().startObject().field("field1", "fox").endObject()), - BytesReference.bytes(jsonBuilder().startObject().field("field1", "jumps").endObject()), - BytesReference.bytes(jsonBuilder().startObject().field("field1", "brown fox").endObject()) - ), - XContentType.JSON - ) - ).highlighter(new HighlightBuilder().field("field1")).addSort("id", SortOrder.ASC).get(); - assertHitCount(searchResponse, 5); - assertThat( - searchResponse.getHits().getAt(0).getFields().get("_percolator_document_slot").getValues(), - equalTo(Arrays.asList(1, 3)) - ); - assertThat(searchResponse.getHits().getAt(0).getHighlightFields().get("1_field1").fragments()[0].string(), equalTo("fox")); - assertThat( - 
searchResponse.getHits().getAt(0).getHighlightFields().get("3_field1").fragments()[0].string(), - equalTo("brown fox") - ); - assertThat( - searchResponse.getHits().getAt(1).getFields().get("_percolator_document_slot").getValues(), - equalTo(Collections.singletonList(0)) - ); - assertThat(searchResponse.getHits().getAt(1).getHighlightFields().get("0_field1").fragments()[0].string(), equalTo("dog")); - assertThat( - searchResponse.getHits().getAt(2).getFields().get("_percolator_document_slot").getValues(), - equalTo(Collections.singletonList(2)) - ); - assertThat( - searchResponse.getHits().getAt(2).getHighlightFields().get("2_field1").fragments()[0].string(), - equalTo("jumps") - ); - assertThat( - searchResponse.getHits().getAt(3).getFields().get("_percolator_document_slot").getValues(), - equalTo(Collections.singletonList(0)) - ); - assertThat(searchResponse.getHits().getAt(3).getHighlightFields().get("0_field1").fragments()[0].string(), equalTo("dog")); - assertThat( - searchResponse.getHits().getAt(4).getFields().get("_percolator_document_slot").getValues(), - equalTo(Arrays.asList(1, 3)) - ); - assertThat(searchResponse.getHits().getAt(4).getHighlightFields().get("1_field1").fragments()[0].string(), equalTo("fox")); - assertThat( - searchResponse.getHits().getAt(4).getHighlightFields().get("3_field1").fragments()[0].string(), - equalTo("brown fox") + assertResponse( + prepareSearch().setQuery( + boolQuery().should(new PercolateQueryBuilder("query", document1, XContentType.JSON).setName("query1")) + .should(new PercolateQueryBuilder("query", document2, XContentType.JSON).setName("query2")) + ).highlighter(new HighlightBuilder().field("field1")).addSort("id", SortOrder.ASC), + searchResponse -> { + logger.info("searchResponse={}", searchResponse); + assertHitCount(searchResponse, 5); + + assertThat( + searchResponse.getHits().getAt(0).getHighlightFields().get("query1_field1").fragments()[0].string(), + equalTo("The quick brown fox jumps") + ); + assertThat( + searchResponse.getHits().getAt(1).getHighlightFields().get("query2_field1").fragments()[0].string(), + equalTo("over the lazy dog") + ); + assertThat( + searchResponse.getHits().getAt(2).getHighlightFields().get("query1_field1").fragments()[0].string(), + equalTo("The quick brown fox jumps") + ); + assertThat( + searchResponse.getHits().getAt(3).getHighlightFields().get("query2_field1").fragments()[0].string(), + equalTo("over the lazy dog") + ); + assertThat( + searchResponse.getHits().getAt(4).getHighlightFields().get("query1_field1").fragments()[0].string(), + equalTo("The quick brown fox jumps") + ); + } ); - searchResponse = prepareSearch().setQuery( - boolQuery().should( + assertResponse( + prepareSearch().setQuery( new PercolateQueryBuilder( "query", Arrays.asList( BytesReference.bytes(jsonBuilder().startObject().field("field1", "dog").endObject()), - BytesReference.bytes(jsonBuilder().startObject().field("field1", "fox").endObject()) + BytesReference.bytes(jsonBuilder().startObject().field("field1", "fox").endObject()), + BytesReference.bytes(jsonBuilder().startObject().field("field1", "jumps").endObject()), + BytesReference.bytes(jsonBuilder().startObject().field("field1", "brown fox").endObject()) ), XContentType.JSON - ).setName("query1") - ) - .should( + ) + ).highlighter(new HighlightBuilder().field("field1")).addSort("id", SortOrder.ASC), + searchResponse -> { + assertHitCount(searchResponse, 5); + assertThat( + searchResponse.getHits().getAt(0).getFields().get("_percolator_document_slot").getValues(), + 
equalTo(Arrays.asList(1, 3)) + ); + assertThat( + searchResponse.getHits().getAt(0).getHighlightFields().get("1_field1").fragments()[0].string(), + equalTo("fox") + ); + assertThat( + searchResponse.getHits().getAt(0).getHighlightFields().get("3_field1").fragments()[0].string(), + equalTo("brown fox") + ); + assertThat( + searchResponse.getHits().getAt(1).getFields().get("_percolator_document_slot").getValues(), + equalTo(Collections.singletonList(0)) + ); + assertThat( + searchResponse.getHits().getAt(1).getHighlightFields().get("0_field1").fragments()[0].string(), + equalTo("dog") + ); + assertThat( + searchResponse.getHits().getAt(2).getFields().get("_percolator_document_slot").getValues(), + equalTo(Collections.singletonList(2)) + ); + assertThat( + searchResponse.getHits().getAt(2).getHighlightFields().get("2_field1").fragments()[0].string(), + equalTo("jumps") + ); + assertThat( + searchResponse.getHits().getAt(3).getFields().get("_percolator_document_slot").getValues(), + equalTo(Collections.singletonList(0)) + ); + assertThat( + searchResponse.getHits().getAt(3).getHighlightFields().get("0_field1").fragments()[0].string(), + equalTo("dog") + ); + assertThat( + searchResponse.getHits().getAt(4).getFields().get("_percolator_document_slot").getValues(), + equalTo(Arrays.asList(1, 3)) + ); + assertThat( + searchResponse.getHits().getAt(4).getHighlightFields().get("1_field1").fragments()[0].string(), + equalTo("fox") + ); + assertThat( + searchResponse.getHits().getAt(4).getHighlightFields().get("3_field1").fragments()[0].string(), + equalTo("brown fox") + ); + } + ); + + assertResponse( + prepareSearch().setQuery( + boolQuery().should( new PercolateQueryBuilder( "query", Arrays.asList( - BytesReference.bytes(jsonBuilder().startObject().field("field1", "jumps").endObject()), - BytesReference.bytes(jsonBuilder().startObject().field("field1", "brown fox").endObject()) + BytesReference.bytes(jsonBuilder().startObject().field("field1", "dog").endObject()), + BytesReference.bytes(jsonBuilder().startObject().field("field1", "fox").endObject()) ), XContentType.JSON - ).setName("query2") + ).setName("query1") ) - ).highlighter(new HighlightBuilder().field("field1")).addSort("id", SortOrder.ASC).get(); - logger.info("searchResponse={}", searchResponse); - assertHitCount(searchResponse, 5); - assertThat( - searchResponse.getHits().getAt(0).getFields().get("_percolator_document_slot_query1").getValues(), - equalTo(Collections.singletonList(1)) - ); - assertThat( - searchResponse.getHits().getAt(0).getFields().get("_percolator_document_slot_query2").getValues(), - equalTo(Collections.singletonList(1)) - ); - assertThat( - searchResponse.getHits().getAt(0).getHighlightFields().get("query1_1_field1").fragments()[0].string(), - equalTo("fox") - ); - assertThat( - searchResponse.getHits().getAt(0).getHighlightFields().get("query2_1_field1").fragments()[0].string(), - equalTo("brown fox") - ); - - assertThat( - searchResponse.getHits().getAt(1).getFields().get("_percolator_document_slot_query1").getValues(), - equalTo(Collections.singletonList(0)) - ); - assertThat( - searchResponse.getHits().getAt(1).getHighlightFields().get("query1_0_field1").fragments()[0].string(), - equalTo("dog") - ); - - assertThat( - searchResponse.getHits().getAt(2).getFields().get("_percolator_document_slot_query2").getValues(), - equalTo(Collections.singletonList(0)) - ); - assertThat( - searchResponse.getHits().getAt(2).getHighlightFields().get("query2_0_field1").fragments()[0].string(), - equalTo("jumps") - ); - - 
assertThat( - searchResponse.getHits().getAt(3).getFields().get("_percolator_document_slot_query1").getValues(), - equalTo(Collections.singletonList(0)) - ); - assertThat( - searchResponse.getHits().getAt(3).getHighlightFields().get("query1_0_field1").fragments()[0].string(), - equalTo("dog") - ); - - assertThat( - searchResponse.getHits().getAt(4).getFields().get("_percolator_document_slot_query1").getValues(), - equalTo(Collections.singletonList(1)) - ); - assertThat( - searchResponse.getHits().getAt(4).getFields().get("_percolator_document_slot_query2").getValues(), - equalTo(Collections.singletonList(1)) - ); - assertThat( - searchResponse.getHits().getAt(4).getHighlightFields().get("query1_1_field1").fragments()[0].string(), - equalTo("fox") - ); - assertThat( - searchResponse.getHits().getAt(4).getHighlightFields().get("query2_1_field1").fragments()[0].string(), - equalTo("brown fox") + .should( + new PercolateQueryBuilder( + "query", + Arrays.asList( + BytesReference.bytes(jsonBuilder().startObject().field("field1", "jumps").endObject()), + BytesReference.bytes(jsonBuilder().startObject().field("field1", "brown fox").endObject()) + ), + XContentType.JSON + ).setName("query2") + ) + ).highlighter(new HighlightBuilder().field("field1")).addSort("id", SortOrder.ASC), + searchResponse -> { + logger.info("searchResponse={}", searchResponse); + assertHitCount(searchResponse, 5); + assertThat( + searchResponse.getHits().getAt(0).getFields().get("_percolator_document_slot_query1").getValues(), + equalTo(Collections.singletonList(1)) + ); + assertThat( + searchResponse.getHits().getAt(0).getFields().get("_percolator_document_slot_query2").getValues(), + equalTo(Collections.singletonList(1)) + ); + assertThat( + searchResponse.getHits().getAt(0).getHighlightFields().get("query1_1_field1").fragments()[0].string(), + equalTo("fox") + ); + assertThat( + searchResponse.getHits().getAt(0).getHighlightFields().get("query2_1_field1").fragments()[0].string(), + equalTo("brown fox") + ); + + assertThat( + searchResponse.getHits().getAt(1).getFields().get("_percolator_document_slot_query1").getValues(), + equalTo(Collections.singletonList(0)) + ); + assertThat( + searchResponse.getHits().getAt(1).getHighlightFields().get("query1_0_field1").fragments()[0].string(), + equalTo("dog") + ); + + assertThat( + searchResponse.getHits().getAt(2).getFields().get("_percolator_document_slot_query2").getValues(), + equalTo(Collections.singletonList(0)) + ); + assertThat( + searchResponse.getHits().getAt(2).getHighlightFields().get("query2_0_field1").fragments()[0].string(), + equalTo("jumps") + ); + + assertThat( + searchResponse.getHits().getAt(3).getFields().get("_percolator_document_slot_query1").getValues(), + equalTo(Collections.singletonList(0)) + ); + assertThat( + searchResponse.getHits().getAt(3).getHighlightFields().get("query1_0_field1").fragments()[0].string(), + equalTo("dog") + ); + + assertThat( + searchResponse.getHits().getAt(4).getFields().get("_percolator_document_slot_query1").getValues(), + equalTo(Collections.singletonList(1)) + ); + assertThat( + searchResponse.getHits().getAt(4).getFields().get("_percolator_document_slot_query2").getValues(), + equalTo(Collections.singletonList(1)) + ); + assertThat( + searchResponse.getHits().getAt(4).getHighlightFields().get("query1_1_field1").fragments()[0].string(), + equalTo("fox") + ); + assertThat( + searchResponse.getHits().getAt(4).getHighlightFields().get("query2_1_field1").fragments()[0].string(), + equalTo("brown fox") + ); + } ); } @@ -733,21 
+756,23 @@ public void testTakePositionOffsetGapIntoAccount() throws Exception { assertAcked( indicesAdmin().prepareCreate("test").setMapping("field", "type=text,position_increment_gap=5", "query", "type=percolator") ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("query", new MatchPhraseQueryBuilder("field", "brown fox").slop(4)).endObject()) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource(jsonBuilder().startObject().field("query", new MatchPhraseQueryBuilder("field", "brown fox").slop(5)).endObject()) .get(); indicesAdmin().prepareRefresh().get(); - SearchResponse response = prepareSearch().setQuery( - new PercolateQueryBuilder("query", new BytesArray("{\"field\" : [\"brown\", \"fox\"]}"), XContentType.JSON) - ).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertResponse( + prepareSearch().setQuery( + new PercolateQueryBuilder("query", new BytesArray("{\"field\" : [\"brown\", \"fox\"]}"), XContentType.JSON) + ), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + } + ); } public void testManyPercolatorFields() throws Exception { @@ -808,12 +833,10 @@ public void testWithMultiplePercolatorFields() throws Exception { ); // Acceptable: - client().prepareIndex("test1") - .setId("1") + prepareIndex("test1").setId("1") .setSource(jsonBuilder().startObject().field(queryFieldName, matchQuery("field", "value")).endObject()) .get(); - client().prepareIndex("test2") - .setId("1") + prepareIndex("test2").setId("1") .setSource( jsonBuilder().startObject() .startObject("object_field") @@ -825,24 +848,28 @@ public void testWithMultiplePercolatorFields() throws Exception { indicesAdmin().prepareRefresh().get(); BytesReference source = BytesReference.bytes(jsonBuilder().startObject().field("field", "value").endObject()); - SearchResponse response = prepareSearch().setQuery(new PercolateQueryBuilder(queryFieldName, source, XContentType.JSON)) - .setIndices("test1") - .get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getIndex(), equalTo("test1")); + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder(queryFieldName, source, XContentType.JSON)).setIndices("test1"), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getIndex(), equalTo("test1")); + } + ); - response = prepareSearch().setQuery(new PercolateQueryBuilder("object_field." + queryFieldName, source, XContentType.JSON)) - .setIndices("test2") - .get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getIndex(), equalTo("test2")); + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("object_field." 
+ queryFieldName, source, XContentType.JSON)) + .setIndices("test2"), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getIndex(), equalTo("test2")); + } + ); // Unacceptable: DocumentParsingException e = expectThrows(DocumentParsingException.class, () -> { - client().prepareIndex("test2") - .setId("1") + prepareIndex("test2").setId("1") .setSource( jsonBuilder().startObject() .startArray("object_field") @@ -885,8 +912,7 @@ public void testPercolateQueryWithNestedDocuments() throws Exception { .endObject() .endObject(); assertAcked(indicesAdmin().prepareCreate("test").setMapping(mapping)); - client().prepareIndex("test") - .setId("q1") + prepareIndex("test").setId("q1") .setSource( jsonBuilder().startObject() .field("id", "q1") @@ -902,8 +928,7 @@ public void testPercolateQueryWithNestedDocuments() throws Exception { ) .get(); // this query should never match as it doesn't use nested query: - client().prepareIndex("test") - .setId("q2") + prepareIndex("test").setId("q2") .setSource( jsonBuilder().startObject() .field("id", "q2") @@ -913,73 +938,15 @@ public void testPercolateQueryWithNestedDocuments() throws Exception { .get(); indicesAdmin().prepareRefresh().get(); - client().prepareIndex("test") - .setId("q3") + prepareIndex("test").setId("q3") .setSource(jsonBuilder().startObject().field("id", "q3").field("query", QueryBuilders.matchAllQuery()).endObject()) .get(); indicesAdmin().prepareRefresh().get(); - SearchResponse response = prepareSearch().setQuery( - new PercolateQueryBuilder( - "query", - BytesReference.bytes( - XContentFactory.jsonBuilder() - .startObject() - .field("companyname", "stark") - .startArray("employee") - .startObject() - .field("name", "virginia potts") - .endObject() - .startObject() - .field("name", "tony stark") - .endObject() - .endArray() - .endObject() - ), - XContentType.JSON - ) - ).addSort("id", SortOrder.ASC).get(); - assertHitCount(response, 2); - assertThat(response.getHits().getAt(0).getId(), equalTo("q1")); - assertThat(response.getHits().getAt(1).getId(), equalTo("q3")); - - response = prepareSearch().setQuery( - new PercolateQueryBuilder( - "query", - BytesReference.bytes( - XContentFactory.jsonBuilder() - .startObject() - .field("companyname", "notstark") - .startArray("employee") - .startObject() - .field("name", "virginia stark") - .endObject() - .startObject() - .field("name", "tony stark") - .endObject() - .endArray() - .endObject() - ), - XContentType.JSON - ) - ).addSort("id", SortOrder.ASC).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("q3")); - - response = prepareSearch().setQuery( - new PercolateQueryBuilder( - "query", - BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("companyname", "notstark").endObject()), - XContentType.JSON - ) - ).addSort("id", SortOrder.ASC).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("q3")); - - response = prepareSearch().setQuery( - new PercolateQueryBuilder( - "query", - Arrays.asList( + assertResponse( + prepareSearch().setQuery( + new PercolateQueryBuilder( + "query", BytesReference.bytes( XContentFactory.jsonBuilder() .startObject() @@ -994,149 +961,214 @@ public void testPercolateQueryWithNestedDocuments() throws Exception { .endArray() .endObject() ), + XContentType.JSON + ) + ).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 2); + 
assertThat(response.getHits().getAt(0).getId(), equalTo("q1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("q3")); + } + ); + + assertResponse( + prepareSearch().setQuery( + new PercolateQueryBuilder( + "query", BytesReference.bytes( XContentFactory.jsonBuilder() .startObject() - .field("companyname", "stark") + .field("companyname", "notstark") .startArray("employee") .startObject() - .field("name", "peter parker") + .field("name", "virginia stark") .endObject() .startObject() - .field("name", "virginia potts") + .field("name", "tony stark") .endObject() .endArray() .endObject() ), - BytesReference.bytes( - XContentFactory.jsonBuilder() - .startObject() - .field("companyname", "stark") - .startArray("employee") - .startObject() - .field("name", "peter parker") - .endObject() - .endArray() - .endObject() - ) - ), - XContentType.JSON - ) - ).addSort("id", SortOrder.ASC).get(); - assertHitCount(response, 2); - assertThat(response.getHits().getAt(0).getId(), equalTo("q1")); - assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0, 1))); - assertThat(response.getHits().getAt(1).getId(), equalTo("q3")); - assertThat(response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0, 1, 2))); + XContentType.JSON + ) + ).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("q3")); + } + ); + + assertResponse( + prepareSearch().setQuery( + new PercolateQueryBuilder( + "query", + BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("companyname", "notstark").endObject()), + XContentType.JSON + ) + ).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("q3")); + } + ); + + assertResponse( + prepareSearch().setQuery( + new PercolateQueryBuilder( + "query", + Arrays.asList( + BytesReference.bytes( + XContentFactory.jsonBuilder() + .startObject() + .field("companyname", "stark") + .startArray("employee") + .startObject() + .field("name", "virginia potts") + .endObject() + .startObject() + .field("name", "tony stark") + .endObject() + .endArray() + .endObject() + ), + BytesReference.bytes( + XContentFactory.jsonBuilder() + .startObject() + .field("companyname", "stark") + .startArray("employee") + .startObject() + .field("name", "peter parker") + .endObject() + .startObject() + .field("name", "virginia potts") + .endObject() + .endArray() + .endObject() + ), + BytesReference.bytes( + XContentFactory.jsonBuilder() + .startObject() + .field("companyname", "stark") + .startArray("employee") + .startObject() + .field("name", "peter parker") + .endObject() + .endArray() + .endObject() + ) + ), + XContentType.JSON + ) + ).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("q1")); + assertThat( + response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValues(), + equalTo(Arrays.asList(0, 1)) + ); + assertThat(response.getHits().getAt(1).getId(), equalTo("q3")); + assertThat( + response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValues(), + equalTo(Arrays.asList(0, 1, 2)) + ); + } + ); } public void testPercolatorQueryViaMultiSearch() throws Exception { assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=text", "query", "type=percolator")); - 
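For context on the recurring one-line change in the hunks that follow: the patch swaps client().prepareIndex(index) for a prepareIndex(index) convenience method on the test base class, and collapses the long-hand .execute().actionGet() spelling into the equivalent .get(). A minimal sketch of what such a delegate presumably looks like (an assumption for illustration, not the actual ESIntegTestCase source):

    import org.elasticsearch.action.index.IndexRequestBuilder;
    import org.elasticsearch.client.internal.Client;

    public abstract class IndexHelperSketch {
        protected abstract Client client();

        // Lets tests read prepareIndex("idx").setId("1").setSource(...).get()
        // instead of repeating client().prepareIndex("idx") everywhere.
        protected IndexRequestBuilder prepareIndex(String index) {
            return client().prepareIndex(index);
        }
    }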
client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject()) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("2") - .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("3") + .get(); + prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()).get(); + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .field("query", boolQuery().must(matchQuery("field1", "b")).must(matchQuery("field1", "c"))) .endObject() ) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("4") - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("5") - .setSource(jsonBuilder().startObject().field("field1", "c").endObject()) - .execute() - .actionGet(); + .get(); + prepareIndex("test").setId("4").setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()).get(); + prepareIndex("test").setId("5").setSource(jsonBuilder().startObject().field("field1", "c").endObject()).get(); indicesAdmin().prepareRefresh().get(); - MultiSearchResponse response = client().prepareMultiSearch() - .add( - prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "query", - BytesReference.bytes(jsonBuilder().startObject().field("field1", "b").endObject()), - XContentType.JSON + assertResponse( + client().prepareMultiSearch() + .add( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "query", + BytesReference.bytes(jsonBuilder().startObject().field("field1", "b").endObject()), + XContentType.JSON + ) ) ) - ) - .add( - prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "query", - BytesReference.bytes(yamlBuilder().startObject().field("field1", "c").endObject()), - XContentType.YAML + .add( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "query", + BytesReference.bytes(yamlBuilder().startObject().field("field1", "c").endObject()), + XContentType.YAML + ) ) ) - ) - .add( - prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "query", - BytesReference.bytes(jsonBuilder().startObject().field("field1", "b c").endObject()), - XContentType.JSON + .add( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "query", + BytesReference.bytes(jsonBuilder().startObject().field("field1", "b c").endObject()), + XContentType.JSON + ) ) ) - ) - .add( - prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "query", - BytesReference.bytes(jsonBuilder().startObject().field("field1", "d").endObject()), - XContentType.JSON + .add( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "query", + BytesReference.bytes(jsonBuilder().startObject().field("field1", "d").endObject()), + XContentType.JSON + ) ) ) - ) - .add(prepareSearch("test").setQuery(new PercolateQueryBuilder("query", "test", "5", null, null, null))) - .add( - prepareSearch("test") // non existing doc, so error element - .setQuery(new PercolateQueryBuilder("query", "test", "6", null, null, null)) - ) - .get(); - - MultiSearchResponse.Item item = response.getResponses()[0]; - assertHitCount(item.getResponse(), 2L); - assertSearchHits(item.getResponse(), "1", "4"); - assertThat(item.getFailureMessage(), nullValue()); - - item = 
response.getResponses()[1]; - assertHitCount(item.getResponse(), 2L); - assertSearchHits(item.getResponse(), "2", "4"); - assertThat(item.getFailureMessage(), nullValue()); - - item = response.getResponses()[2]; - assertHitCount(item.getResponse(), 4L); - assertSearchHits(item.getResponse(), "1", "2", "3", "4"); - assertThat(item.getFailureMessage(), nullValue()); - - item = response.getResponses()[3]; - assertHitCount(item.getResponse(), 1L); - assertSearchHits(item.getResponse(), "4"); - assertThat(item.getFailureMessage(), nullValue()); - - item = response.getResponses()[4]; - assertHitCount(item.getResponse(), 2L); - assertSearchHits(item.getResponse(), "2", "4"); - assertThat(item.getFailureMessage(), nullValue()); - - item = response.getResponses()[5]; - assertThat(item.getResponse(), nullValue()); - assertThat(item.getFailureMessage(), notNullValue()); - assertThat(item.getFailureMessage(), containsString("[test/6] couldn't be found")); + .add(prepareSearch("test").setQuery(new PercolateQueryBuilder("query", "test", "5", null, null, null))) + .add( + prepareSearch("test") // non existing doc, so error element + .setQuery(new PercolateQueryBuilder("query", "test", "6", null, null, null)) + ), + response -> { + Item item = response.getResponses()[0]; + assertHitCount(item.getResponse(), 2L); + assertSearchHits(item.getResponse(), "1", "4"); + assertThat(item.getFailureMessage(), nullValue()); + + item = response.getResponses()[1]; + assertHitCount(item.getResponse(), 2L); + assertSearchHits(item.getResponse(), "2", "4"); + assertThat(item.getFailureMessage(), nullValue()); + + item = response.getResponses()[2]; + assertHitCount(item.getResponse(), 4L); + assertSearchHits(item.getResponse(), "1", "2", "3", "4"); + assertThat(item.getFailureMessage(), nullValue()); + + item = response.getResponses()[3]; + assertHitCount(item.getResponse(), 1L); + assertSearchHits(item.getResponse(), "4"); + assertThat(item.getFailureMessage(), nullValue()); + + item = response.getResponses()[4]; + assertHitCount(item.getResponse(), 2L); + assertSearchHits(item.getResponse(), "2", "4"); + assertThat(item.getFailureMessage(), nullValue()); + + item = response.getResponses()[5]; + assertThat(item.getResponse(), nullValue()); + assertThat(item.getFailureMessage(), notNullValue()); + assertThat(item.getFailureMessage(), containsString("[test/6] couldn't be found")); + } + ); } public void testDisallowExpensiveQueries() throws IOException { @@ -1145,18 +1177,18 @@ public void testDisallowExpensiveQueries() throws IOException { indicesAdmin().prepareCreate("test").setMapping("id", "type=keyword", "field1", "type=keyword", "query", "type=percolator") ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("id", "1").field("query", matchQuery("field1", "value")).endObject()) .get(); refresh(); // Execute with search.allow_expensive_queries = null => default value = false => success BytesReference source = BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").endObject()); - SearchResponse response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + 
assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + }); // Set search.allow_expensive_queries to "false" => assert failure updateClusterSettings(Settings.builder().put("search.allow_expensive_queries", false)); @@ -1173,10 +1205,11 @@ public void testDisallowExpensiveQueries() throws IOException { // Set search.allow_expensive_queries setting to "true" ==> success updateClusterSettings(Settings.builder().put("search.allow_expensive_queries", true)); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + }); } finally { updateClusterSettings(Settings.builder().putNull("search.allow_expensive_queries")); } @@ -1186,49 +1219,50 @@ public void testWrappedWithConstantScore() throws Exception { assertAcked(indicesAdmin().prepareCreate("test").setMapping("d", "type=date", "q", "type=percolator")); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("q", boolQuery().must(rangeQuery("d").gt("now"))).endObject()) - .execute() - .actionGet(); + .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource(jsonBuilder().startObject().field("q", boolQuery().must(rangeQuery("d").lt("now"))).endObject()) - .execute() - .actionGet(); + .get(); indicesAdmin().prepareRefresh().get(); - SearchResponse response = prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "q", - BytesReference.bytes(jsonBuilder().startObject().field("d", "2020-02-01T15:00:00.000+11:00").endObject()), - XContentType.JSON - ) - ).get(); - assertEquals(1, response.getHits().getTotalHits().value); - - response = prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "q", - BytesReference.bytes(jsonBuilder().startObject().field("d", "2020-02-01T15:00:00.000+11:00").endObject()), - XContentType.JSON - ) - ).addSort("_doc", SortOrder.ASC).get(); - assertEquals(1, response.getHits().getTotalHits().value); + assertHitCount( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "q", + BytesReference.bytes(jsonBuilder().startObject().field("d", "2020-02-01T15:00:00.000+11:00").endObject()), + XContentType.JSON + ) + ), + 1 + ); - response = prepareSearch("test").setQuery( - constantScoreQuery( + assertHitCount( + prepareSearch("test").setQuery( new PercolateQueryBuilder( "q", BytesReference.bytes(jsonBuilder().startObject().field("d", "2020-02-01T15:00:00.000+11:00").endObject()), XContentType.JSON ) - ) - ).get(); - assertEquals(1, response.getHits().getTotalHits().value); + ).addSort("_doc", SortOrder.ASC), + 1 + ); + assertHitCount( + prepareSearch("test").setQuery( + constantScoreQuery( + new PercolateQueryBuilder( + "q", + BytesReference.bytes(jsonBuilder().startObject().field("d", "2020-02-01T15:00:00.000+11:00").endObject()), + XContentType.JSON + ) + ) + ), + 1 + ); } 
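The pattern driving most hunks above: calling .get() on a search request hands back a SearchResponse whose reference count the caller must release, and a throwing assertion used to leak it. The assertResponse and assertHitCount overloads instead take the unexecuted request together with the assertion block and release the response on every path. A minimal sketch of the shape of such a helper, using simplified stand-ins rather than the real ElasticsearchAssertions source:

    import java.util.function.Consumer;
    import java.util.function.Supplier;

    final class AssertResponseSketch {
        // Hypothetical stand-in for Elasticsearch's ref-counted responses.
        interface RefCounted {
            void decRef();
        }

        // Execute the request, apply the assertions, always release the response.
        static <R extends RefCounted> void assertResponse(Supplier<R> request, Consumer<R> assertions) {
            R response = request.get();
            try {
                assertions.accept(response);
            } finally {
                response.decRef(); // released even when an assertion throws
            }
        }
    }

Under that shape, the request-taking assertHitCount(request, n) overload used above reduces to assertResponse(request, r -> assertEquals(n, r.getHits().getTotalHits().value)).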
public void testWithWildcardFieldNames() throws Exception { @@ -1248,8 +1282,7 @@ public void testWithWildcardFieldNames() throws Exception { ) ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("q_simple", simpleQueryStringQuery("yada").fields(Map.of("text*", 1f))) @@ -1259,44 +1292,51 @@ public void testWithWildcardFieldNames() throws Exception { .endObject() ) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .execute() - .actionGet(); - - SearchResponse response = prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "q_simple", - BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), - XContentType.JSON - ) - ).get(); - assertEquals(1, response.getHits().getTotalHits().value); - - response = prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "q_string", - BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), - XContentType.JSON - ) - ).get(); - assertEquals(1, response.getHits().getTotalHits().value); - - response = prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "q_match", - BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), - XContentType.JSON - ) - ).get(); - assertEquals(1, response.getHits().getTotalHits().value); - - response = prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "q_combo", - BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), - XContentType.JSON - ) - ).get(); - assertEquals(1, response.getHits().getTotalHits().value); + .get(); + + assertHitCount( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "q_simple", + BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), + XContentType.JSON + ) + ), + 1 + ); + + assertHitCount( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "q_string", + BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), + XContentType.JSON + ) + ), + 1 + ); + + assertHitCount( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "q_match", + BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), + XContentType.JSON + ) + ), + 1 + ); + + assertHitCount( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "q_combo", + BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), + XContentType.JSON + ) + ), + 1 + ); } public void testKnnQueryNotSupportedInPercolator() throws IOException { @@ -1320,8 +1360,7 @@ public void testKnnQueryNotSupportedInPercolator() throws IOException { ensureGreen(); QueryBuilder knnVectorQueryBuilder = new KnnVectorQueryBuilder("my_vector", new float[] { 1, 1, 1, 1, 1 }, 10, null); - IndexRequestBuilder indexRequestBuilder = client().prepareIndex("index1") - .setId("knn_query1") + IndexRequestBuilder indexRequestBuilder = prepareIndex("index1").setId("knn_query1") .setSource(jsonBuilder().startObject().field("my_query", knnVectorQueryBuilder).endObject()); DocumentParsingException exception = expectThrows(DocumentParsingException.class, () -> indexRequestBuilder.get()); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index 65b2e257de0b1..05a935229246d 100644 --- 
a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -84,6 +84,7 @@ import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.lucene.queries.BlendedTermQuery; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.XContentFactory; @@ -215,93 +216,95 @@ public void testDuel() throws Exception { } Collections.sort(intValues); - SearchExecutionContext context = createSearchContext(indexService).getSearchExecutionContext(); - MappedFieldType intFieldType = mapperService.fieldType("int_field"); - - List> queryFunctions = new ArrayList<>(); - queryFunctions.add(MatchNoDocsQuery::new); - queryFunctions.add(MatchAllDocsQuery::new); - queryFunctions.add(() -> new TermQuery(new Term("unknown_field", "value"))); - String field1 = randomFrom(stringFields); - queryFunctions.add(() -> new TermQuery(new Term(field1, randomFrom(stringContent.get(field1))))); - String field2 = randomFrom(stringFields); - queryFunctions.add(() -> new TermQuery(new Term(field2, randomFrom(stringContent.get(field2))))); - queryFunctions.add(() -> intFieldType.termQuery(randomFrom(intValues), context)); - queryFunctions.add(() -> intFieldType.termsQuery(Arrays.asList(randomFrom(intValues), randomFrom(intValues)), context)); - queryFunctions.add( - () -> intFieldType.rangeQuery( - intValues.get(4), - intValues.get(intValues.size() - 4), - true, - true, - ShapeRelation.WITHIN, - null, - null, - context - ) - ); - queryFunctions.add( - () -> new TermInSetQuery( - field1, - new BytesRef(randomFrom(stringContent.get(field1))), - new BytesRef(randomFrom(stringContent.get(field1))) - ) - ); - queryFunctions.add( - () -> new TermInSetQuery( - field2, - new BytesRef(randomFrom(stringContent.get(field1))), - new BytesRef(randomFrom(stringContent.get(field1))) - ) - ); - // many iterations with boolean queries, which are the most complex queries to deal with when nested - int numRandomBoolQueries = 1000; - for (int i = 0; i < numRandomBoolQueries; i++) { - queryFunctions.add(() -> createRandomBooleanQuery(1, stringFields, stringContent, intFieldType, intValues, context)); - } - queryFunctions.add(() -> { - int numClauses = randomIntBetween(1, 1 << randomIntBetween(2, 4)); - List clauses = new ArrayList<>(); - for (int i = 0; i < numClauses; i++) { - String field = randomFrom(stringFields); - clauses.add(new TermQuery(new Term(field, randomFrom(stringContent.get(field))))); - } - return new DisjunctionMaxQuery(clauses, 0.01f); - }); - queryFunctions.add(() -> { - Float minScore = randomBoolean() ? 
null : (float) randomIntBetween(1, 1000); - Query innerQuery; - if (randomBoolean()) { - innerQuery = new TermQuery(new Term(field1, randomFrom(stringContent.get(field1)))); - } else { - innerQuery = new PhraseQuery(field1, randomFrom(stringContent.get(field1)), randomFrom(stringContent.get(field1))); + try (SearchContext searchContext = createSearchContext(indexService)) { + SearchExecutionContext context = searchContext.getSearchExecutionContext(); + MappedFieldType intFieldType = mapperService.fieldType("int_field"); + + List> queryFunctions = new ArrayList<>(); + queryFunctions.add(MatchNoDocsQuery::new); + queryFunctions.add(MatchAllDocsQuery::new); + queryFunctions.add(() -> new TermQuery(new Term("unknown_field", "value"))); + String field1 = randomFrom(stringFields); + queryFunctions.add(() -> new TermQuery(new Term(field1, randomFrom(stringContent.get(field1))))); + String field2 = randomFrom(stringFields); + queryFunctions.add(() -> new TermQuery(new Term(field2, randomFrom(stringContent.get(field2))))); + queryFunctions.add(() -> intFieldType.termQuery(randomFrom(intValues), context)); + queryFunctions.add(() -> intFieldType.termsQuery(Arrays.asList(randomFrom(intValues), randomFrom(intValues)), context)); + queryFunctions.add( + () -> intFieldType.rangeQuery( + intValues.get(4), + intValues.get(intValues.size() - 4), + true, + true, + ShapeRelation.WITHIN, + null, + null, + context + ) + ); + queryFunctions.add( + () -> new TermInSetQuery( + field1, + new BytesRef(randomFrom(stringContent.get(field1))), + new BytesRef(randomFrom(stringContent.get(field1))) + ) + ); + queryFunctions.add( + () -> new TermInSetQuery( + field2, + new BytesRef(randomFrom(stringContent.get(field1))), + new BytesRef(randomFrom(stringContent.get(field1))) + ) + ); + // many iterations with boolean queries, which are the most complex queries to deal with when nested + int numRandomBoolQueries = 1000; + for (int i = 0; i < numRandomBoolQueries; i++) { + queryFunctions.add(() -> createRandomBooleanQuery(1, stringFields, stringContent, intFieldType, intValues, context)); } - return new FunctionScoreQuery(innerQuery, minScore, 1f); - }); - - List documents = new ArrayList<>(); - for (Supplier queryFunction : queryFunctions) { - Query query = queryFunction.get(); - addQuery(query, documents); - } + queryFunctions.add(() -> { + int numClauses = randomIntBetween(1, 1 << randomIntBetween(2, 4)); + List clauses = new ArrayList<>(); + for (int i = 0; i < numClauses; i++) { + String field = randomFrom(stringFields); + clauses.add(new TermQuery(new Term(field, randomFrom(stringContent.get(field))))); + } + return new DisjunctionMaxQuery(clauses, 0.01f); + }); + queryFunctions.add(() -> { + Float minScore = randomBoolean() ? null : (float) randomIntBetween(1, 1000); + Query innerQuery; + if (randomBoolean()) { + innerQuery = new TermQuery(new Term(field1, randomFrom(stringContent.get(field1)))); + } else { + innerQuery = new PhraseQuery(field1, randomFrom(stringContent.get(field1)), randomFrom(stringContent.get(field1))); + } + return new FunctionScoreQuery(innerQuery, minScore, 1f); + }); - indexWriter.addDocuments(documents); - indexWriter.close(); - directoryReader = DirectoryReader.open(directory); - IndexSearcher shardSearcher = newSearcher(directoryReader); - // Disable query cache, because ControlQuery cannot be cached... 
- shardSearcher.setQueryCache(null); + List documents = new ArrayList<>(); + for (Supplier queryFunction : queryFunctions) { + Query query = queryFunction.get(); + addQuery(query, documents); + } - LuceneDocument document = new LuceneDocument(); - for (Map.Entry> entry : stringContent.entrySet()) { - String value = entry.getValue().stream().collect(Collectors.joining(" ")); - document.add(new TextField(entry.getKey(), value, Field.Store.NO)); - } - for (Integer intValue : intValues) { - NumberFieldMapper.NumberType.INTEGER.addFields(document, "int_field", intValue, true, true, false); + indexWriter.addDocuments(documents); + indexWriter.close(); + directoryReader = DirectoryReader.open(directory); + IndexSearcher shardSearcher = newSearcher(directoryReader); + // Disable query cache, because ControlQuery cannot be cached... + shardSearcher.setQueryCache(null); + + LuceneDocument document = new LuceneDocument(); + for (Map.Entry> entry : stringContent.entrySet()) { + String value = entry.getValue().stream().collect(Collectors.joining(" ")); + document.add(new TextField(entry.getKey(), value, Field.Store.NO)); + } + for (Integer intValue : intValues) { + NumberFieldMapper.NumberType.INTEGER.addFields(document, "int_field", intValue, true, true, false); + } + MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer()); + duelRun(queryStore, memoryIndex, shardSearcher); } - MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer()); - duelRun(queryStore, memoryIndex, shardSearcher); } private BooleanQuery createRandomBooleanQuery( @@ -376,53 +379,55 @@ public void testDuel2() throws Exception { ranges.add(new int[] { 0, 10 }); ranges.add(new int[] { 15, 50 }); - SearchExecutionContext context = createSearchContext(indexService).getSearchExecutionContext(); - List documents = new ArrayList<>(); - { - addQuery(new TermQuery(new Term("string_field", randomFrom(stringValues))), documents); - } - { - addQuery(new PhraseQuery(0, "string_field", stringValues.toArray(new String[0])), documents); - } - { - int[] range = randomFrom(ranges); - Query rangeQuery = intFieldType.rangeQuery(range[0], range[1], true, true, null, null, null, context); - addQuery(rangeQuery, documents); - } - { - int numBooleanQueries = randomIntBetween(1, 5); - for (int i = 0; i < numBooleanQueries; i++) { - Query randomBQ = randomBQ(1, stringValues, ranges, intFieldType, context); - addQuery(randomBQ, documents); + try (SearchContext searchContext = createSearchContext(indexService)) { + SearchExecutionContext context = searchContext.getSearchExecutionContext(); + List documents = new ArrayList<>(); + { + addQuery(new TermQuery(new Term("string_field", randomFrom(stringValues))), documents); + } + { + addQuery(new PhraseQuery(0, "string_field", stringValues.toArray(new String[0])), documents); + } + { + int[] range = randomFrom(ranges); + Query rangeQuery = intFieldType.rangeQuery(range[0], range[1], true, true, null, null, null, context); + addQuery(rangeQuery, documents); + } + { + int numBooleanQueries = randomIntBetween(1, 5); + for (int i = 0; i < numBooleanQueries; i++) { + Query randomBQ = randomBQ(1, stringValues, ranges, intFieldType, context); + addQuery(randomBQ, documents); + } + } + { + addQuery(new MatchNoDocsQuery(), documents); + } + { + addQuery(new MatchAllDocsQuery(), documents); } - } - { - addQuery(new MatchNoDocsQuery(), documents); - } - { - addQuery(new MatchAllDocsQuery(), documents); - } - - indexWriter.addDocuments(documents); - 
indexWriter.close(); - directoryReader = DirectoryReader.open(directory); - IndexSearcher shardSearcher = newSearcher(directoryReader); - // Disable query cache, because ControlQuery cannot be cached... - shardSearcher.setQueryCache(null); - LuceneDocument document = new LuceneDocument(); - for (String value : stringValues) { - document.add(new TextField("string_field", value, Field.Store.NO)); - logger.info("Test with document: {}" + document); - MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer()); - duelRun(queryStore, memoryIndex, shardSearcher); - } + indexWriter.addDocuments(documents); + indexWriter.close(); + directoryReader = DirectoryReader.open(directory); + IndexSearcher shardSearcher = newSearcher(directoryReader); + // Disable query cache, because ControlQuery cannot be cached... + shardSearcher.setQueryCache(null); + + LuceneDocument document = new LuceneDocument(); + for (String value : stringValues) { + document.add(new TextField("string_field", value, Field.Store.NO)); + logger.info("Test with document: {}", document); + MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer()); + duelRun(queryStore, memoryIndex, shardSearcher); + } - for (int[] range : ranges) { - NumberFieldMapper.NumberType.INTEGER.addFields(document, "int_field", between(range[0], range[1]), true, true, false); - logger.info("Test with document: {}" + document); - MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer()); - duelRun(queryStore, memoryIndex, shardSearcher); + for (int[] range : ranges) { + NumberFieldMapper.NumberType.INTEGER.addFields(document, "int_field", between(range[0], range[1]), true, true, false); + logger.info("Test with document: {}", document); + MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer()); + duelRun(queryStore, memoryIndex, shardSearcher); + } } } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java index b47364e3b1a08..46b9e365fd0ea 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java @@ -81,6 +81,7 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.DummyQueryParserPlugin; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.xcontent.XContentBuilder; @@ -268,76 +269,79 @@ public void testExtractTerms() throws Exception { } public void testExtractRanges() throws Exception { - SearchExecutionContext context = createSearchContext(indexService).getSearchExecutionContext(); - addQueryFieldMappings(); - BooleanQuery.Builder bq = new BooleanQuery.Builder(); - Query rangeQuery1 = mapperService.fieldType("number_field1").rangeQuery(10, 20, true, true, null, null, null, context); - bq.add(rangeQuery1, Occur.MUST); - Query rangeQuery2 = mapperService.fieldType("number_field1").rangeQuery(15, 20, true, true, null, null, null, context); - bq.add(rangeQuery2, Occur.MUST); - - DocumentMapper documentMapper = mapperService.documentMapper(); - PercolatorFieldMapper fieldMapper = (PercolatorFieldMapper) documentMapper.mappers().getMapper(fieldName); -
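The CandidateQueryTests and PercolatorFieldMapperTests hunks in this stretch share one fix: createSearchContext(indexService) returns a releasable SearchContext, and the old code let it escape from the expression unclosed. Wrapping the context in try-with-resources scopes the release to the test body. An illustrative reduction with a hypothetical AutoCloseable stand-in, not the real org.elasticsearch.search.internal.SearchContext:

    final class ScopedContextSketch {
        // Stand-in for a releasable search context (assumed shape).
        interface SearchContextLike extends AutoCloseable {
            Object getSearchExecutionContext();

            @Override
            void close(); // no checked exception, matching a Releasable
        }

        static void example(SearchContextLike freshContext) {
            // Before: createSearchContext(...).getSearchExecutionContext() let the
            // owning context escape unreleased. After: the owner is scoped, so it
            // is closed even when an assertion inside the block throws.
            try (SearchContextLike searchContext = freshContext) {
                Object context = searchContext.getSearchExecutionContext();
                // ... build term/range queries against context ...
            }
        }
    }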
DocumentParserContext documentParserContext = new TestDocumentParserContext(); - fieldMapper.processQuery(bq.build(), documentParserContext); - LuceneDocument document = documentParserContext.doc(); - - PercolatorFieldMapper.PercolatorFieldType percolatorFieldType = (PercolatorFieldMapper.PercolatorFieldType) fieldMapper.fieldType(); - assertThat(document.getField(percolatorFieldType.extractionResultField.name()).stringValue(), equalTo(EXTRACTION_PARTIAL)); - List fields = new ArrayList<>(document.getFields(percolatorFieldType.rangeField.name())); - fields.sort(Comparator.comparing(IndexableField::binaryValue)); - assertThat( - fields, - transformedItemsMatch( - b -> b.binaryValue().bytes, - contains( - allOf( - transformedMatch(b -> IntPoint.decodeDimension(b, 12), equalTo(10)), - transformedMatch(b -> IntPoint.decodeDimension(b, 28), equalTo(20)) - ), - allOf( - transformedMatch(b -> IntPoint.decodeDimension(b, 12), equalTo(15)), - transformedMatch(b -> IntPoint.decodeDimension(b, 28), equalTo(20)) + try (SearchContext searchContext = createSearchContext(indexService)) { + SearchExecutionContext context = searchContext.getSearchExecutionContext(); + addQueryFieldMappings(); + BooleanQuery.Builder bq = new BooleanQuery.Builder(); + Query rangeQuery1 = mapperService.fieldType("number_field1").rangeQuery(10, 20, true, true, null, null, null, context); + bq.add(rangeQuery1, Occur.MUST); + Query rangeQuery2 = mapperService.fieldType("number_field1").rangeQuery(15, 20, true, true, null, null, null, context); + bq.add(rangeQuery2, Occur.MUST); + + DocumentMapper documentMapper = mapperService.documentMapper(); + PercolatorFieldMapper fieldMapper = (PercolatorFieldMapper) documentMapper.mappers().getMapper(fieldName); + DocumentParserContext documentParserContext = new TestDocumentParserContext(); + fieldMapper.processQuery(bq.build(), documentParserContext); + LuceneDocument document = documentParserContext.doc(); + + PercolatorFieldMapper.PercolatorFieldType percolatorFieldType = (PercolatorFieldMapper.PercolatorFieldType) fieldMapper + .fieldType(); + assertThat(document.getField(percolatorFieldType.extractionResultField.name()).stringValue(), equalTo(EXTRACTION_PARTIAL)); + List fields = new ArrayList<>(document.getFields(percolatorFieldType.rangeField.name())); + fields.sort(Comparator.comparing(IndexableField::binaryValue)); + assertThat( + fields, + transformedItemsMatch( + b -> b.binaryValue().bytes, + contains( + allOf( + transformedMatch(b -> IntPoint.decodeDimension(b, 12), equalTo(10)), + transformedMatch(b -> IntPoint.decodeDimension(b, 28), equalTo(20)) + ), + allOf( + transformedMatch(b -> IntPoint.decodeDimension(b, 12), equalTo(15)), + transformedMatch(b -> IntPoint.decodeDimension(b, 28), equalTo(20)) + ) ) ) - ) - ); - - fields = new ArrayList<>(document.getFields(percolatorFieldType.minimumShouldMatchField.name())); - assertThat(fields, transformedItemsMatch(IndexableField::numericValue, contains(1L))); - - // Range queries on different fields: - bq = new BooleanQuery.Builder(); - bq.add(rangeQuery1, Occur.MUST); - rangeQuery2 = mapperService.fieldType("number_field2").rangeQuery(15, 20, true, true, null, null, null, context); - bq.add(rangeQuery2, Occur.MUST); - - documentParserContext = new TestDocumentParserContext(); - fieldMapper.processQuery(bq.build(), documentParserContext); - document = documentParserContext.doc(); + ); - assertThat(document.getField(percolatorFieldType.extractionResultField.name()).stringValue(), equalTo(EXTRACTION_PARTIAL)); - fields = new 
ArrayList<>(document.getFields(percolatorFieldType.rangeField.name())); - fields.sort(Comparator.comparing(IndexableField::binaryValue)); - assertThat( - fields, - transformedItemsMatch( - b -> b.binaryValue().bytes, - contains( - allOf( - transformedMatch(b -> IntPoint.decodeDimension(b, 12), equalTo(10)), - transformedMatch(b -> IntPoint.decodeDimension(b, 28), equalTo(20)) - ), - allOf( - transformedMatch(b -> LongPoint.decodeDimension(b, 8), equalTo(15L)), - transformedMatch(b -> LongPoint.decodeDimension(b, 24), equalTo(20L)) + fields = new ArrayList<>(document.getFields(percolatorFieldType.minimumShouldMatchField.name())); + assertThat(fields, transformedItemsMatch(IndexableField::numericValue, contains(1L))); + + // Range queries on different fields: + bq = new BooleanQuery.Builder(); + bq.add(rangeQuery1, Occur.MUST); + rangeQuery2 = mapperService.fieldType("number_field2").rangeQuery(15, 20, true, true, null, null, null, context); + bq.add(rangeQuery2, Occur.MUST); + + documentParserContext = new TestDocumentParserContext(); + fieldMapper.processQuery(bq.build(), documentParserContext); + document = documentParserContext.doc(); + + assertThat(document.getField(percolatorFieldType.extractionResultField.name()).stringValue(), equalTo(EXTRACTION_PARTIAL)); + fields = new ArrayList<>(document.getFields(percolatorFieldType.rangeField.name())); + fields.sort(Comparator.comparing(IndexableField::binaryValue)); + assertThat( + fields, + transformedItemsMatch( + b -> b.binaryValue().bytes, + contains( + allOf( + transformedMatch(b -> IntPoint.decodeDimension(b, 12), equalTo(10)), + transformedMatch(b -> IntPoint.decodeDimension(b, 28), equalTo(20)) + ), + allOf( + transformedMatch(b -> LongPoint.decodeDimension(b, 8), equalTo(15L)), + transformedMatch(b -> LongPoint.decodeDimension(b, 24), equalTo(20L)) + ) ) ) - ) - ); + ); - fields = new ArrayList<>(document.getFields(percolatorFieldType.minimumShouldMatchField.name())); - assertThat(fields, transformedItemsMatch(IndexableField::numericValue, contains(2L))); + fields = new ArrayList<>(document.getFields(percolatorFieldType.minimumShouldMatchField.name())); + assertThat(fields, transformedItemsMatch(IndexableField::numericValue, contains(2L))); + } } public void testExtractTermsAndRanges_failed() throws Exception { @@ -616,7 +620,7 @@ public void testStoringQueries() throws Exception { public void testQueryWithRewrite() throws Exception { addQueryFieldMappings(); - client().prepareIndex("remote").setId("1").setSource("field", "value").get(); + prepareIndex("remote").setId("1").setSource("field", "value").get(); QueryBuilder queryBuilder = termsLookupQuery("field", new TermsLookup("remote", "1", "field")); ParsedDocument doc = mapperService.documentMapper() .parse( diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java index 05c2c27de40fc..5f3ff5264497a 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java @@ -73,8 +73,7 @@ protected Map, Object>> pluginScripts() { public void testPercolateScriptQuery() throws IOException { indicesAdmin().prepareCreate("index").setMapping("query", "type=percolator").get(); - client().prepareIndex("index") - .setId("1") + prepareIndex("index").setId("1") .setSource( jsonBuilder().startObject() .field( @@ -84,8 
+83,7 @@ public void testPercolateScriptQuery() throws IOException { .endObject() ) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .execute() - .actionGet(); + .get(); assertSearchHitsWithoutFailures( client().prepareSearch("index") .setQuery( @@ -126,8 +124,7 @@ public void testPercolateQueryWithNestedDocuments_doNotLeakBitsetCacheEntries() .setSettings(Settings.builder().put(BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING.getKey(), false)) .setMapping(mapping) ); - client().prepareIndex("test") - .setId("q1") + prepareIndex("test").setId("q1") .setSource( jsonBuilder().startObject() .field( @@ -215,8 +212,7 @@ public void testPercolateQueryWithNestedDocuments_doLeakFieldDataCacheEntries() mapping.endObject(); createIndex("test", indicesAdmin().prepareCreate("test").setMapping(mapping)); Script script = new Script(ScriptType.INLINE, MockScriptPlugin.NAME, "use_fielddata_please", Collections.emptyMap()); - client().prepareIndex("test") - .setId("q1") + prepareIndex("test").setId("q1") .setSource( jsonBuilder().startObject() .field("query", QueryBuilders.nestedQuery("employees", QueryBuilders.scriptQuery(script), ScoreMode.Avg)) @@ -258,8 +254,7 @@ public void testPercolateQueryWithNestedDocuments_doLeakFieldDataCacheEntries() public void testMapUnmappedFieldAsText() throws IOException { Settings.Builder settings = Settings.builder().put("index.percolator.map_unmapped_fields_as_text", true); createIndex("test", settings.build(), "query", "query", "type=percolator"); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "value")).endObject()) .get(); indicesAdmin().prepareRefresh().get(); @@ -290,12 +285,10 @@ public void testRangeQueriesWithNow() throws Exception { "type=percolator" ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("query", rangeQuery("field2").from("now-1h").to("now+1h")).endObject()) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field( @@ -307,8 +300,7 @@ public void testRangeQueriesWithNow() throws Exception { .get(); Script script = new Script(ScriptType.INLINE, MockScriptPlugin.NAME, "1==1", Collections.emptyMap()); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .field("query", boolQuery().filter(scriptQuery(script)).filter(rangeQuery("field2").from("now-1h").to("now+1h"))) diff --git a/modules/rank-eval/src/internalClusterTest/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java b/modules/rank-eval/src/internalClusterTest/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java index 7e879d9959f6d..699cb307e3310 100644 --- a/modules/rank-eval/src/internalClusterTest/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java +++ b/modules/rank-eval/src/internalClusterTest/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java @@ -49,18 +49,15 @@ public void setup() { createIndex(TEST_INDEX); ensureGreen(); - client().prepareIndex(TEST_INDEX) - .setId("1") - .setSource("id", 1, "text", "berlin", "title", "Berlin, Germany", "population", 3670622) - .get(); - client().prepareIndex(TEST_INDEX).setId("2").setSource("id", 2, "text", "amsterdam", "population", 851573).get(); - client().prepareIndex(TEST_INDEX).setId("3").setSource("id", 3, "text", "amsterdam", "population", 851573).get(); - 
-        client().prepareIndex(TEST_INDEX).setId("4").setSource("id", 4, "text", "amsterdam", "population", 851573).get();
-        client().prepareIndex(TEST_INDEX).setId("5").setSource("id", 5, "text", "amsterdam", "population", 851573).get();
-        client().prepareIndex(TEST_INDEX).setId("6").setSource("id", 6, "text", "amsterdam", "population", 851573).get();
+        prepareIndex(TEST_INDEX).setId("1").setSource("id", 1, "text", "berlin", "title", "Berlin, Germany", "population", 3670622).get();
+        prepareIndex(TEST_INDEX).setId("2").setSource("id", 2, "text", "amsterdam", "population", 851573).get();
+        prepareIndex(TEST_INDEX).setId("3").setSource("id", 3, "text", "amsterdam", "population", 851573).get();
+        prepareIndex(TEST_INDEX).setId("4").setSource("id", 4, "text", "amsterdam", "population", 851573).get();
+        prepareIndex(TEST_INDEX).setId("5").setSource("id", 5, "text", "amsterdam", "population", 851573).get();
+        prepareIndex(TEST_INDEX).setId("6").setSource("id", 6, "text", "amsterdam", "population", 851573).get();
         // add another index for testing closed indices etc...
-        client().prepareIndex("test2").setId("7").setSource("id", 7, "text", "amsterdam", "population", 851573).get();
+        prepareIndex("test2").setId("7").setSource("id", 7, "text", "amsterdam", "population", 851573).get();
         refresh();

         // set up an alias that can also be used in tests
diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java
index 50284008eef48..996fbde85e474 100644
--- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java
+++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java
@@ -272,7 +272,7 @@ private ReindexRequestBuilder reindexAndPartiallyBlock() throws Exception {
             false,
             true,
             IntStream.range(0, numDocs)
-                .mapToObj(i -> client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("n", Integer.toString(i)))
+                .mapToObj(i -> prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("n", Integer.toString(i)))
                 .collect(Collectors.toList())
         );
diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java
index d7f71fcc510ab..7dad062ab3bca 100644
--- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java
+++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java
@@ -15,7 +15,6 @@
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchRequestBuilder;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.settings.Settings;
@@ -53,6 +52,7 @@
 import static org.elasticsearch.common.lucene.uid.Versions.MATCH_DELETED;
 import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
@@ -181,7 +181,7 @@ public void testDeleteByQuery() throws Exception {
                 source.put(RETURN_NOOP_FIELD, true);
                 noopDocs++;
             }
-            indexRequests.add(client().prepareIndex(sourceIndex).setId(Integer.toString(i)).setSource(source));
+            indexRequests.add(prepareIndex(sourceIndex).setId(Integer.toString(i)).setSource(source));
         }
         indexRandom(true, indexRequests);

@@ -201,16 +201,18 @@ public void testDeleteByQuery() throws Exception {
         // Ensure that the write thread blocking task is currently executing
         barrier.await();

-        final SearchResponse searchResponse = prepareSearch(sourceIndex).setSize(numDocs) // Get all indexed docs
-            .addSort(SORTING_FIELD, SortOrder.DESC)
-            .execute()
-            .actionGet();
-
-        // Modify a subset of the target documents concurrently
-        final List<SearchHit> originalDocs = Arrays.asList(searchResponse.getHits().getHits());
         int conflictingOps = randomIntBetween(maxDocs, numDocs);
-        final List<SearchHit> docsModifiedConcurrently = randomSubsetOf(conflictingOps, originalDocs);
-
+        final int finalConflictingOps = conflictingOps;
+        final List<SearchHit> docsModifiedConcurrently = new ArrayList<>();
+        assertResponse(
+            prepareSearch(sourceIndex).setSize(numDocs) // Get all indexed docs
+                .addSort(SORTING_FIELD, SortOrder.DESC),
+            response -> {
+                // Modify a subset of the target documents concurrently
+                final List<SearchHit> originalDocs = Arrays.asList(response.getHits().getHits());
+                docsModifiedConcurrently.addAll(randomSubsetOf(finalConflictingOps, originalDocs));
+            }
+        );
         BulkRequest conflictingUpdatesBulkRequest = new BulkRequest();
         for (SearchHit searchHit : docsModifiedConcurrently) {
             if (scriptEnabled && searchHit.getSourceAsMap().containsKey(RETURN_NOOP_FIELD)) {
diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/AbstractFeatureMigrationIntegTest.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/AbstractFeatureMigrationIntegTest.java
index 2eed369a64d6a..3bbc8e4b969ee 100644
--- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/AbstractFeatureMigrationIntegTest.java
+++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/AbstractFeatureMigrationIntegTest.java
@@ -192,7 +192,7 @@ public void createSystemIndexForDescriptor(SystemIndexDescriptor descriptor) thr
         List<IndexRequestBuilder> docs = new ArrayList<>(INDEX_DOC_COUNT);
         for (int i = 0; i < INDEX_DOC_COUNT; i++) {
-            docs.add(ESIntegTestCase.client().prepareIndex(indexName).setId(Integer.toString(i)).setSource("some_field", "words words"));
+            docs.add(ESIntegTestCase.prepareIndex(indexName).setId(Integer.toString(i)).setSource("some_field", "words words"));
         }
         indexRandom(true, docs);
         IndicesStatsResponse indexStats = ESIntegTestCase.indicesAdmin().prepareStats(indexName).setDocs(true).get();
diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java
index 14647820e71f6..0c1a0e41206c7 100644
--- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java
+++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java
@@ -396,27 +396,29 @@ private void migrateWithTemplatesV2(String prefix, SystemIndexDescriptor... desc
         );
         client().execute(PutComponentTemplateAction.INSTANCE, new PutComponentTemplateAction.Request("a-ct").componentTemplate(ct)).get();
-        ComposableIndexTemplate cit = new ComposableIndexTemplate(
-            Collections.singletonList(prefix + "*"),
-            new Template(
-                null,
-                new CompressedXContent(
-                    "{\n"
-                        + " \"dynamic\": false,\n"
-                        + " \"properties\": {\n"
-                        + " \"field2\": {\n"
-                        + " \"type\": \"keyword\"\n"
-                        + " }\n"
-                        + " }\n"
-                        + " }"
-                ),
-                null
-            ),
-            Collections.singletonList("a-ct"),
-            4L,
-            5L,
-            Collections.singletonMap("baz", "thud")
-        );
+        ComposableIndexTemplate cit = ComposableIndexTemplate.builder()
+            .indexPatterns(Collections.singletonList(prefix + "*"))
+            .template(
+                new Template(
+                    null,
+                    new CompressedXContent(
+                        "{\n"
+                            + " \"dynamic\": false,\n"
+                            + " \"properties\": {\n"
+                            + " \"field2\": {\n"
+                            + " \"type\": \"keyword\"\n"
+                            + " }\n"
+                            + " }\n"
+                            + " }"
+                    ),
+                    null
+                )
+            )
+            .componentTemplates(Collections.singletonList("a-ct"))
+            .priority(4L)
+            .version(5L)
+            .metadata(Collections.singletonMap("baz", "thud"))
+            .build();
         client().execute(PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("a-it").indexTemplate(cit))
             .get();
diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractBaseReindexRestHandler.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractBaseReindexRestHandler.java
index 952dd0585e7ba..8e7fab68ac697 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractBaseReindexRestHandler.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractBaseReindexRestHandler.java
@@ -11,7 +11,7 @@
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.support.ActiveShardCount;
-import org.elasticsearch.action.support.ListenableActionFuture;
+import org.elasticsearch.action.support.SubscribableListener;
 import org.elasticsearch.client.internal.node.NodeClient;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.index.reindex.AbstractBulkByScrollRequest;
@@ -64,9 +64,9 @@ protected RestChannelConsumer doPrepareRequest(RestRequest request, NodeClient c
         if (validationException != null) {
             throw validationException;
         }
-        final var responseFuture = new ListenableActionFuture<BulkByScrollResponse>();
-        final var task = client.executeLocally(action, internal, responseFuture);
-        responseFuture.addListener(new LoggingTaskListener<>(task));
+        final var responseListener = new SubscribableListener<BulkByScrollResponse>();
+        final var task = client.executeLocally(action, internal, responseListener);
+        responseListener.addListener(new LoggingTaskListener<>(task));
         return sendTask(client.getLocalNodeId(), task);
     }
diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java
index 35ad5fe9532cd..b211f7d92f51f 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java
@@ -97,7 +97,7 @@ private void testCancel(
             false,
             true,
             IntStream.range(0, numDocs)
-                .mapToObj(i -> client().prepareIndex().setIndex(INDEX).setId(String.valueOf(i)).setSource("n", i))
+                .mapToObj(i -> prepareIndex(INDEX).setId(String.valueOf(i)).setSource("n", i))
                 .collect(Collectors.toList())
         );
diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java
index c7c441e3eaff9..0ad1867e75058 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java
@@ -14,11 +14,11 @@
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.bulk.BackoffPolicy;
-import org.elasticsearch.action.search.SearchAction;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.SearchResponse;
-import org.elasticsearch.action.search.SearchScrollAction;
 import org.elasticsearch.action.search.SearchScrollRequest;
+import org.elasticsearch.action.search.TransportSearchAction;
+import org.elasticsearch.action.search.TransportSearchScrollAction;
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.client.internal.ParentTaskAssigningClient;
 import org.elasticsearch.client.internal.support.AbstractClient;
@@ -102,35 +102,39 @@ private void dotestBasicsWithRetry(int retries, int minFailures, int maxFailures
         hitSource.start();
         for (int retry = 0; retry < randomIntBetween(minFailures, maxFailures); ++retry) {
-            client.fail(SearchAction.INSTANCE, new EsRejectedExecutionException());
+            client.fail(TransportSearchAction.TYPE, new EsRejectedExecutionException());
             if (retry >= retries) {
                 return;
             }
             client.awaitOperation();
             ++expectedSearchRetries;
         }
-        client.validateRequest(SearchAction.INSTANCE, (SearchRequest r) -> assertTrue(r.allowPartialSearchResults() == Boolean.FALSE));
+        client.validateRequest(TransportSearchAction.TYPE, (SearchRequest r) -> assertTrue(r.allowPartialSearchResults() == Boolean.FALSE));
         SearchResponse searchResponse = createSearchResponse();
-        client.respond(SearchAction.INSTANCE, searchResponse);
-
-        for (int i = 0; i < randomIntBetween(1, 10); ++i) {
-            ScrollableHitSource.AsyncResponse asyncResponse = responses.poll(10, TimeUnit.SECONDS);
-            assertNotNull(asyncResponse);
-            assertEquals(responses.size(), 0);
-            assertSameHits(asyncResponse.response().getHits(), searchResponse.getHits().getHits());
-            asyncResponse.done(TimeValue.ZERO);
-
-            for (int retry = 0; retry < randomIntBetween(minFailures, maxFailures); ++retry) {
-                client.fail(SearchScrollAction.INSTANCE, new EsRejectedExecutionException());
-                client.awaitOperation();
-                ++expectedSearchRetries;
+        try {
+            client.respond(TransportSearchAction.TYPE, searchResponse);
+
+            for (int i = 0; i < randomIntBetween(1, 10); ++i) {
+                ScrollableHitSource.AsyncResponse asyncResponse = responses.poll(10, TimeUnit.SECONDS);
+                assertNotNull(asyncResponse);
+                assertEquals(responses.size(), 0);
+                assertSameHits(asyncResponse.response().getHits(), searchResponse.getHits().getHits());
+                asyncResponse.done(TimeValue.ZERO);
+
+                for (int retry = 0; retry < randomIntBetween(minFailures, maxFailures); ++retry) {
+                    client.fail(TransportSearchScrollAction.TYPE, new EsRejectedExecutionException());
+                    client.awaitOperation();
+                    ++expectedSearchRetries;
+                }
+
+                searchResponse = createSearchResponse();
+                client.respond(TransportSearchScrollAction.TYPE, searchResponse);
             }
-            searchResponse = createSearchResponse();
-            client.respond(SearchScrollAction.INSTANCE, searchResponse);
+            assertEquals(actualSearchRetries.get(), expectedSearchRetries);
+        } finally {
+            searchResponse.decRef();
         }
-
-        assertEquals(actualSearchRetries.get(), expectedSearchRetries);
     }

     public void testScrollKeepAlive() {
@@ -150,7 +154,10 @@ public void testScrollKeepAlive() {
         );

         hitSource.startNextScroll(timeValueSeconds(100));
-        client.validateRequest(SearchScrollAction.INSTANCE, (SearchScrollRequest r) -> assertEquals(r.scroll().keepAlive().seconds(), 110));
+        client.validateRequest(
+            TransportSearchScrollAction.TYPE,
+            (SearchScrollRequest r) -> assertEquals(r.scroll().keepAlive().seconds(), 110)
+        );
     }

     private SearchResponse createSearchResponse() {
diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryBasicTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryBasicTests.java
index 2f2248e304989..fac18c4f6f544 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryBasicTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryBasicTests.java
@@ -52,13 +52,13 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
     public void testBasics() throws Exception {
         indexRandom(
             true,
-            client().prepareIndex("test").setId("1").setSource("foo", "a"),
-            client().prepareIndex("test").setId("2").setSource("foo", "a"),
-            client().prepareIndex("test").setId("3").setSource("foo", "b"),
-            client().prepareIndex("test").setId("4").setSource("foo", "c"),
-            client().prepareIndex("test").setId("5").setSource("foo", "d"),
-            client().prepareIndex("test").setId("6").setSource("foo", "e"),
-            client().prepareIndex("test").setId("7").setSource("foo", "f")
+            prepareIndex("test").setId("1").setSource("foo", "a"),
+            prepareIndex("test").setId("2").setSource("foo", "a"),
+            prepareIndex("test").setId("3").setSource("foo", "b"),
+            prepareIndex("test").setId("4").setSource("foo", "c"),
+            prepareIndex("test").setId("5").setSource("foo", "d"),
+            prepareIndex("test").setId("6").setSource("foo", "e"),
+            prepareIndex("test").setId("7").setSource("foo", "f")
         );

         assertHitCount(prepareSearch("test").setSize(0), 7);
@@ -87,7 +87,7 @@ public void testDeleteByQueryWithOneIndex() throws Exception {
         List<IndexRequestBuilder> builders = new ArrayList<>();
         for (int i = 0; i < docs; i++) {
-            builders.add(client().prepareIndex("test").setId(String.valueOf(i)).setSource("fields1", 1));
+            builders.add(prepareIndex("test").setId(String.valueOf(i)).setSource("fields1", 1));
         }
         indexRandom(true, true, true, builders);
@@ -112,7 +112,7 @@ public void testDeleteByQueryWithMultipleIndices() throws Exception {
             for (int j = 0; j < docs; j++) {
                 boolean candidate = (j < candidates[i]);
-                builders.add(client().prepareIndex("test-" + i).setId(String.valueOf(j)).setSource("candidate", candidate));
+                builders.add(prepareIndex("test-" + i).setId(String.valueOf(j)).setSource("candidate", candidate));
             }
         }
         indexRandom(true, true, true, builders);
@@ -129,7 +129,7 @@ public void testDeleteByQueryWithMultipleIndices() throws Exception {
     }

     public void testDeleteByQueryWithMissingIndex() throws Exception {
-        indexRandom(true, client().prepareIndex("test").setId("1").setSource("foo", "a"));
+        indexRandom(true, prepareIndex("test").setId("1").setSource("foo", "a"));
         assertHitCount(prepareSearch().setSize(0), 1);

         try {
@@ -149,7 +149,7 @@ public void testDeleteByQueryWithRouting() throws Exception {
         List<IndexRequestBuilder> builders = new ArrayList<>();
         for (int i = 0; i < docs; i++) {
-            builders.add(client().prepareIndex("test").setId(String.valueOf(i)).setRouting(String.valueOf(i)).setSource("field1", 1));
+            builders.add(prepareIndex("test").setId(String.valueOf(i)).setRouting(String.valueOf(i)).setSource("field1", 1));
         }
         indexRandom(true, true, true, builders);
@@ -177,10 +177,7 @@ public void testDeleteByMatchQuery() throws Exception {
         List<IndexRequestBuilder> builders = new ArrayList<>();
         for (int i = 0; i < docs; i++) {
             builders.add(
-                client().prepareIndex("test")
-                    .setId(Integer.toString(i))
-                    .setRouting(randomAlphaOfLengthBetween(1, 5))
-                    .setSource("foo", "bar")
+                prepareIndex("test").setId(Integer.toString(i)).setRouting(randomAlphaOfLengthBetween(1, 5)).setSource("foo", "bar")
             );
         }
         indexRandom(true, true, true, builders);
@@ -196,7 +193,7 @@ public void testDeleteByMatchQuery() throws Exception {
     }

     public void testDeleteByQueryWithDateMath() throws Exception {
-        indexRandom(true, client().prepareIndex("test").setId("1").setSource("d", "2013-01-01"));
+        indexRandom(true, prepareIndex("test").setId("1").setSource("d", "2013-01-01"));

         DeleteByQueryRequestBuilder delete = deleteByQuery().source("test").filter(rangeQuery("d").to("now-1h"));
         assertThat(delete.refresh(true).get(), matcher().deleted(1L));
@@ -210,7 +207,7 @@ public void testDeleteByQueryOnReadOnlyIndex() throws Exception {
         final int docs = randomIntBetween(1, 50);
         List<IndexRequestBuilder> builders = new ArrayList<>();
         for (int i = 0; i < docs; i++) {
-            builders.add(client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", 1));
+            builders.add(prepareIndex("test").setId(Integer.toString(i)).setSource("field", 1));
         }
         indexRandom(true, true, true, builders);
@@ -233,7 +230,7 @@ public void testDeleteByQueryOnReadOnlyAllowDeleteIndex() throws Exception {
         final int docs = randomIntBetween(1, 50);
         List<IndexRequestBuilder> builders = new ArrayList<>();
         for (int i = 0; i < docs; i++) {
-            builders.add(client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", 1));
+            builders.add(prepareIndex("test").setId(Integer.toString(i)).setSource("field", 1));
         }
         indexRandom(true, true, true, builders);
@@ -289,13 +286,13 @@ public void testDeleteByQueryOnReadOnlyAllowDeleteIndex() throws Exception {
     public void testSlices() throws Exception {
         indexRandom(
             true,
-            client().prepareIndex("test").setId("1").setSource("foo", "a"),
-            client().prepareIndex("test").setId("2").setSource("foo", "a"),
-            client().prepareIndex("test").setId("3").setSource("foo", "b"),
-            client().prepareIndex("test").setId("4").setSource("foo", "c"),
-            client().prepareIndex("test").setId("5").setSource("foo", "d"),
-            client().prepareIndex("test").setId("6").setSource("foo", "e"),
-            client().prepareIndex("test").setId("7").setSource("foo", "f")
+            prepareIndex("test").setId("1").setSource("foo", "a"),
+            prepareIndex("test").setId("2").setSource("foo", "a"),
+            prepareIndex("test").setId("3").setSource("foo", "b"),
+            prepareIndex("test").setId("4").setSource("foo", "c"),
+            prepareIndex("test").setId("5").setSource("foo", "d"),
+            prepareIndex("test").setId("6").setSource("foo", "e"),
+            prepareIndex("test").setId("7").setSource("foo", "f")
         );

         assertHitCount(prepareSearch("test").setSize(0), 7);
@@ -326,7 +323,7 @@ public void testMultipleSources() throws Exception {
             docs.put(indexName, new ArrayList<>());
             int numDocs = between(5, 15);
             for (int i = 0; i < numDocs; i++) {
-                docs.get(indexName).add(client().prepareIndex(indexName).setId(Integer.toString(i)).setSource("foo", "a"));
+                docs.get(indexName).add(prepareIndex(indexName).setId(Integer.toString(i)).setSource("foo", "a"));
             }
         }
diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryConcurrentTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryConcurrentTests.java
index 81d00d98b1fec..323b829fe93ff 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryConcurrentTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryConcurrentTests.java
@@ -32,7 +32,7 @@ public void testConcurrentDeleteByQueriesOnDifferentDocs() throws Throwable {
         List<IndexRequestBuilder> builders = new ArrayList<>();
         for (int i = 0; i < docs; i++) {
             for (int t = 0; t < threads.length; t++) {
-                builders.add(client().prepareIndex("test").setSource("field", t));
+                builders.add(prepareIndex("test").setSource("field", t));
             }
         }
         indexRandom(true, true, true, builders);
@@ -73,7 +73,7 @@ public void testConcurrentDeleteByQueriesOnSameDocs() throws Throwable {
         List<IndexRequestBuilder> builders = new ArrayList<>();
         for (int i = 0; i < docs; i++) {
-            builders.add(client().prepareIndex("test").setId(String.valueOf(i)).setSource("foo", "bar"));
+            builders.add(prepareIndex("test").setId(String.valueOf(i)).setSource("foo", "bar"));
         }
         indexRandom(true, true, true, builders);
diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexBasicTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexBasicTests.java
index 45ca5a536f34f..21f6427dcb632 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexBasicTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexBasicTests.java
@@ -30,10 +30,10 @@ public class ReindexBasicTests extends ReindexTestCase {
     public void testFiltering() throws Exception {
         indexRandom(
             true,
-            client().prepareIndex("source").setId("1").setSource("foo", "a"),
-            client().prepareIndex("source").setId("2").setSource("foo", "a"),
-            client().prepareIndex("source").setId("3").setSource("foo", "b"),
-            client().prepareIndex("source").setId("4").setSource("foo", "c")
+            prepareIndex("source").setId("1").setSource("foo", "a"),
+            prepareIndex("source").setId("2").setSource("foo", "a"),
+            prepareIndex("source").setId("3").setSource("foo", "b"),
+            prepareIndex("source").setId("4").setSource("foo", "c")
         );

         assertHitCount(prepareSearch("source").setSize(0), 4);
@@ -63,7 +63,7 @@ public void testCopyMany() throws Exception {
         List<IndexRequestBuilder> docs = new ArrayList<>();
         int max = between(150, 500);
         for (int i = 0; i < max; i++) {
-            docs.add(client().prepareIndex("source").setId(Integer.toString(i)).setSource("foo", "a"));
+            docs.add(prepareIndex("source").setId(Integer.toString(i)).setSource("foo", "a"));
         }
         indexRandom(true, docs);
@@ -90,7 +90,7 @@ public void testCopyManyWithSlices() throws Exception {
         List<IndexRequestBuilder> docs = new ArrayList<>();
         int max = between(150, 500);
         for (int i = 0; i < max; i++) {
-            docs.add(client().prepareIndex("source").setId(Integer.toString(i)).setSource("foo", "a"));
+            docs.add(prepareIndex("source").setId(Integer.toString(i)).setSource("foo", "a"));
         }
         indexRandom(true, docs);
@@ -127,7 +127,7 @@ public void testMultipleSources() throws Exception {
             docs.put(indexName, new ArrayList<>());
             int numDocs = between(50, 200);
             for (int i = 0; i < numDocs; i++) {
-                docs.get(indexName).add(client().prepareIndex(indexName).setId("id_" + sourceIndex + "_" + i).setSource("foo", "a"));
+                docs.get(indexName).add(prepareIndex(indexName).setId("id_" + sourceIndex + "_" + i).setSource("foo", "a"));
             }
         }
@@ -161,10 +161,10 @@ public void testReindexFromComplexDateMathIndexName() throws Exception {
         String destIndexName = "";
         indexRandom(
             true,
-            client().prepareIndex(sourceIndexName).setId("1").setSource("foo", "a"),
-            client().prepareIndex(sourceIndexName).setId("2").setSource("foo", "a"),
-            client().prepareIndex(sourceIndexName).setId("3").setSource("foo", "b"),
-            client().prepareIndex(sourceIndexName).setId("4").setSource("foo", "c")
+            prepareIndex(sourceIndexName).setId("1").setSource("foo", "a"),
+            prepareIndex(sourceIndexName).setId("2").setSource("foo", "a"),
+            prepareIndex(sourceIndexName).setId("3").setSource("foo", "b"),
+            prepareIndex(sourceIndexName).setId("4").setSource("foo", "c")
         );

         assertHitCount(prepareSearch(sourceIndexName).setSize(0), 4);
diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFailureTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFailureTests.java
index 5e868598d165e..1da998831ecc2 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFailureTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFailureTests.java
@@ -37,7 +37,7 @@ public void testFailuresCauseAbortDefault() throws Exception {
          * Create the destination index such that the copy will cause a mapping
          * conflict on every request.
          */
-        indexRandom(true, client().prepareIndex("dest").setId("test").setSource("test", 10) /* It's a string in the source! */);
+        indexRandom(true, prepareIndex("dest").setId("test").setSource("test", 10) /* It's a string in the source! */);

         indexDocs(100);
@@ -59,7 +59,7 @@ public void testFailuresCauseAbortDefault() throws Exception {

     public void testAbortOnVersionConflict() throws Exception {
         // Just put something in the way of the copy.
-        indexRandom(true, client().prepareIndex("dest").setId("1").setSource("test", "test"));
+        indexRandom(true, prepareIndex("dest").setId("1").setSource("test", "test"));

         indexDocs(100);
@@ -123,10 +123,10 @@ public void testDateMathResolvesSameIndexName() throws Exception {
         String destIndexName = "";
         indexRandom(
             true,
-            client().prepareIndex(sourceIndexName).setId("1").setSource("foo", "a"),
-            client().prepareIndex(sourceIndexName).setId("2").setSource("foo", "a"),
-            client().prepareIndex(sourceIndexName).setId("3").setSource("foo", "b"),
-            client().prepareIndex(sourceIndexName).setId("4").setSource("foo", "c")
+            prepareIndex(sourceIndexName).setId("1").setSource("foo", "a"),
+            prepareIndex(sourceIndexName).setId("2").setSource("foo", "a"),
+            prepareIndex(sourceIndexName).setId("3").setSource("foo", "b"),
+            prepareIndex(sourceIndexName).setId("4").setSource("foo", "c")
         );

         assertHitCount(prepareSearch(sourceIndexName).setSize(0), 4);
@@ -140,7 +140,7 @@ public void testDateMathResolvesSameIndexName() throws Exception {
     private void indexDocs(int count) throws Exception {
         List<IndexRequestBuilder> docs = new ArrayList<>(count);
         for (int i = 0; i < count; i++) {
-            docs.add(client().prepareIndex("source").setId(Integer.toString(i)).setSource("test", "words words"));
+            docs.add(prepareIndex("source").setId(Integer.toString(i)).setSource("test", "words words"));
         }
         indexRandom(true, docs);
     }
diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFromRemoteWithAuthTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFromRemoteWithAuthTests.java
index 5509e44b52a3e..a68c390b8bd80 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFromRemoteWithAuthTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFromRemoteWithAuthTests.java
@@ -15,7 +15,7 @@
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
-import org.elasticsearch.action.search.SearchAction;
+import org.elasticsearch.action.search.TransportSearchAction;
 import org.elasticsearch.action.support.ActionFilter;
 import org.elasticsearch.action.support.ActionFilterChain;
 import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
@@ -81,7 +81,7 @@ protected Settings nodeSettings() {

     @Before
     public void setupSourceIndex() {
-        client().prepareIndex("source").setSource("test", "test").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
+        prepareIndex("source").setSource("test", "test").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
     }

     @Before
@@ -199,7 +199,7 @@ public void app
             ActionListener<Response> listener,
             ActionFilterChain<Request, Response> chain
         ) {
-            if (false == action.equals(SearchAction.NAME)) {
+            if (false == action.equals(TransportSearchAction.TYPE.name())) {
                 chain.proceed(task, action, request, listener);
                 return;
             }
diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexIdTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexIdTests.java
index 34db459539323..644787446547e 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexIdTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexIdTests.java
@@ -59,9 +59,12 @@ private ClusterState stateWithTemplate(Settings.Builder settings) {
         Template template = new Template(settings.build(), null, null);
         if (randomBoolean()) {
             metadata.put("c", new ComponentTemplate(template, null, null));
-            metadata.put("c", new ComposableIndexTemplate(List.of("dest_index"), null, List.of("c"), null, null, null));
+            metadata.put(
+                "c",
+                ComposableIndexTemplate.builder().indexPatterns(List.of("dest_index")).componentTemplates(List.of("c")).build()
+            );
         } else {
-            metadata.put("c", new ComposableIndexTemplate(List.of("dest_index"), template, null, null, null, null));
+            metadata.put("c", ComposableIndexTemplate.builder().indexPatterns(List.of("dest_index")).template(template).build());
         }
         return ClusterState.builder(ClusterState.EMPTY_STATE).metadata(metadata).build();
     }
diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexSingleNodeTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexSingleNodeTests.java
index 2df8caa4dd2ea..0804cccd8b8f2 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexSingleNodeTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexSingleNodeTests.java
@@ -30,7 +30,7 @@ protected Collection<Class<? extends Plugin>> getPlugins() {
     public void testDeprecatedSort() {
         int max = between(2, 20);
         for (int i = 0; i < max; i++) {
-            client().prepareIndex("source").setId(Integer.toString(i)).setSource("foo", i).get();
+            prepareIndex("source").setId(Integer.toString(i)).setSource("foo", i).get();
         }

         indicesAdmin().prepareRefresh("source").get();
diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexVersioningTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexVersioningTests.java
index 96f0ff50027af..8e42b29468b5c 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexVersioningTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexVersioningTests.java
@@ -105,7 +105,7 @@ private BulkByScrollResponse reindexCreate() {
     private void setupSourceAbsent() throws Exception {
         indexRandom(
             true,
-            client().prepareIndex("source").setId("test").setVersionType(EXTERNAL).setVersion(SOURCE_VERSION).setSource("foo", "source")
+            prepareIndex("source").setId("test").setVersionType(EXTERNAL).setVersion(SOURCE_VERSION).setSource("foo", "source")
         );

         assertEquals(SOURCE_VERSION, client().prepareGet("source", "test").get().getVersion());
@@ -113,10 +113,7 @@ private void setupSourceAbsent() throws Exception {

     private void setupDest(int version) throws Exception {
         setupSourceAbsent();
-        indexRandom(
-            true,
-            client().prepareIndex("dest").setId("test").setVersionType(EXTERNAL).setVersion(version).setSource("foo", "dest")
-        );
+        indexRandom(true, prepareIndex("dest").setId("test").setVersionType(EXTERNAL).setVersion(version).setSource("foo", "dest"));

         assertEquals(version, client().prepareGet("dest", "test").get().getVersion());
     }
diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/RethrottleTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/RethrottleTests.java
index 5bbff9da85b20..5f1af05571585 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/reindex/RethrottleTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/RethrottleTests.java
@@ -81,7 +81,7 @@ private void testCase(AbstractBulkByScrollRequestBuilder<?, ?> request, String a
         List<IndexRequestBuilder> docs = new ArrayList<>();
         for (int i = 0; i < numSlices * 10; i++) {
-            docs.add(client().prepareIndex("test").setId(Integer.toString(i)).setSource("foo", "bar"));
+            docs.add(prepareIndex("test").setId(Integer.toString(i)).setSource("foo", "bar"));
         }
         indexRandom(true, docs);
diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/RetryTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/RetryTests.java
index 1e338b28a5d4a..36da25685a7ba 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/reindex/RetryTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/RetryTests.java
@@ -175,13 +175,13 @@ private void testCase(
         final Settings indexSettings = indexSettings(1, 0).put("index.routing.allocation.include.color", "blue").build();

         // Create the source index on the node with small thread pools so we can block them.
-        indicesAdmin().prepareCreate("source").setSettings(indexSettings).execute().actionGet();
+        indicesAdmin().prepareCreate("source").setSettings(indexSettings).get();
         // Not all test cases use the dest index but those that do require that it be on the node with small thread pools
-        indicesAdmin().prepareCreate("dest").setSettings(indexSettings).execute().actionGet();
+        indicesAdmin().prepareCreate("dest").setSettings(indexSettings).get();
         // Build the test data. Don't use indexRandom because that won't work consistently with such small thread pools.
         BulkRequestBuilder bulk = client().prepareBulk();
         for (int i = 0; i < DOC_COUNT; i++) {
-            bulk.add(client().prepareIndex("source").setSource("foo", "bar " + i));
+            bulk.add(prepareIndex("source").setSource("foo", "bar " + i));
         }

         Retry retry = new Retry(BackoffPolicy.exponentialBackoff(), client().threadPool());
@@ -199,18 +199,21 @@ private void testCase(
         logger.info("Starting request");
         ActionFuture<BulkByScrollResponse> responseListener = builder.execute();

+        BulkByScrollResponse response = null;
         try {
             logger.info("Waiting for bulk rejections");
             assertBusy(() -> assertThat(taskStatus(action).getBulkRetries(), greaterThan(0L)));
             bulkBlock.await();

             logger.info("Waiting for the request to finish");
-            BulkByScrollResponse response = responseListener.get();
+            response = responseListener.get();
             assertThat(response, matcher);
             assertThat(response.getBulkRetries(), greaterThan(0L));
         } finally {
             // Fetch the response just in case we blew up half way through. This will make sure the failure is thrown up to the top level.
- BulkByScrollResponse response = responseListener.get(); + if (response == null) { + response = responseListener.get(); + } assertThat(response.getSearchFailures(), empty()); assertThat(response.getBulkFailures(), empty()); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryBasicTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryBasicTests.java index f37c9b5891416..6b1f3a21a1aad 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryBasicTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryBasicTests.java @@ -29,10 +29,10 @@ public class UpdateByQueryBasicTests extends ReindexTestCase { public void testBasics() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("foo", "a"), - client().prepareIndex("test").setId("2").setSource("foo", "a"), - client().prepareIndex("test").setId("3").setSource("foo", "b"), - client().prepareIndex("test").setId("4").setSource("foo", "c") + prepareIndex("test").setId("1").setSource("foo", "a"), + prepareIndex("test").setId("2").setSource("foo", "a"), + prepareIndex("test").setId("3").setSource("foo", "b"), + prepareIndex("test").setId("4").setSource("foo", "c") ); assertHitCount(prepareSearch("test").setSize(0), 4); assertEquals(1, client().prepareGet("test", "1").get().getVersion()); @@ -69,10 +69,10 @@ public void testBasics() throws Exception { public void testSlices() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("foo", "a"), - client().prepareIndex("test").setId("2").setSource("foo", "a"), - client().prepareIndex("test").setId("3").setSource("foo", "b"), - client().prepareIndex("test").setId("4").setSource("foo", "c") + prepareIndex("test").setId("1").setSource("foo", "a"), + prepareIndex("test").setId("2").setSource("foo", "a"), + prepareIndex("test").setId("3").setSource("foo", "b"), + prepareIndex("test").setId("4").setSource("foo", "c") ); assertHitCount(prepareSearch("test").setSize(0), 4); assertEquals(1, client().prepareGet("test", "1").get().getVersion()); @@ -117,7 +117,7 @@ public void testMultipleSources() throws Exception { docs.put(indexName, new ArrayList<>()); int numDocs = between(5, 15); for (int i = 0; i < numDocs; i++) { - docs.get(indexName).add(client().prepareIndex(indexName).setId(Integer.toString(i)).setSource("foo", "a")); + docs.get(indexName).add(prepareIndex(indexName).setId(Integer.toString(i)).setSource("foo", "a")); } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryWhileModifyingTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryWhileModifyingTests.java index 11a4476dffa83..5c2e82f6d4256 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryWhileModifyingTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryWhileModifyingTests.java @@ -32,7 +32,7 @@ public class UpdateByQueryWhileModifyingTests extends ReindexTestCase { public void testUpdateWhileReindexing() throws Exception { AtomicReference value = new AtomicReference<>(randomSimpleString(random())); - indexRandom(true, client().prepareIndex("test").setId("test").setSource("test", value.get())); + indexRandom(true, prepareIndex("test").setId("test").setSource("test", value.get())); AtomicReference failure = new AtomicReference<>(); AtomicBoolean keepUpdating = new AtomicBoolean(true); @@ -56,10 +56,7 @@ public void testUpdateWhileReindexing() 
throws Exception { GetResponse get = client().prepareGet("test", "test").get(); assertEquals(value.get(), get.getSource().get("test")); value.set(randomSimpleString(random())); - IndexRequestBuilder index = client().prepareIndex("test") - .setId("test") - .setSource("test", value.get()) - .setRefreshPolicy(IMMEDIATE); + IndexRequestBuilder index = prepareIndex("test").setId("test").setSource("test", value.get()).setRefreshPolicy(IMMEDIATE); /* * Update by query changes the document so concurrent * indexes might get version conflict exceptions so we just diff --git a/modules/repository-azure/build.gradle b/modules/repository-azure/build.gradle index 1753ba24d5c4a..c2568d9a4db2c 100644 --- a/modules/repository-azure/build.gradle +++ b/modules/repository-azure/build.gradle @@ -30,8 +30,8 @@ versions << [ 'stax2API': '4.2.1', 'woodstox': '6.4.0', - 'reactorNetty': '1.0.24', - 'reactorCore': '3.4.23', + 'reactorNetty': '1.0.39', + 'reactorCore': '3.4.34', 'reactiveStreams': '1.0.4', ] @@ -105,6 +105,7 @@ tasks.named("thirdPartyAudit").configure { 'io.micrometer.core.instrument.DistributionSummary', 'io.micrometer.core.instrument.DistributionSummary$Builder', 'io.micrometer.core.instrument.Meter', + 'io.micrometer.core.instrument.Meter$Type', 'io.micrometer.core.instrument.MeterRegistry', 'io.micrometer.core.instrument.Metrics', 'io.micrometer.core.instrument.Tag', @@ -117,6 +118,7 @@ tasks.named("thirdPartyAudit").configure { 'io.micrometer.core.instrument.search.Search', 'io.micrometer.core.instrument.Gauge', 'io.micrometer.core.instrument.Gauge$Builder', + 'io.micrometer.context.ContextAccessor', // from reactor-core kotlin extensions (to be deprecated from the library at some point on 3.3.x release) 'kotlin.collections.ArraysKt', diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java index 4cbf40849cbe9..f5c1912d15251 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -20,7 +20,6 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.MockSecureSettings; @@ -45,6 +44,7 @@ import java.util.function.Predicate; import java.util.regex.Pattern; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -235,11 +235,11 @@ public void testLargeBlobCountDeletion() throws Exception { for (int i = 0; i < numberOfBlobs; i++) { byte[] bytes = randomBytes(randomInt(100)); String blobName = randomAlphaOfLength(10); - container.writeBlob(OperationPurpose.SNAPSHOT, blobName, new BytesArray(bytes), false); + container.writeBlob(randomPurpose(), blobName, new BytesArray(bytes), false); } - container.delete(OperationPurpose.SNAPSHOT); - assertThat(container.listBlobs(OperationPurpose.SNAPSHOT), 
is(anEmptyMap())); + container.delete(randomPurpose()); + assertThat(container.listBlobs(randomPurpose()), is(anEmptyMap())); } } @@ -250,7 +250,7 @@ public void testDeleteBlobsIgnoringIfNotExists() throws Exception { for (int i = 0; i < 10; i++) { byte[] bytes = randomBytes(randomInt(100)); String blobName = randomAlphaOfLength(10); - container.writeBlob(OperationPurpose.SNAPSHOT, blobName, new BytesArray(bytes), false); + container.writeBlob(randomPurpose(), blobName, new BytesArray(bytes), false); blobsToDelete.add(blobName); } @@ -260,18 +260,15 @@ public void testDeleteBlobsIgnoringIfNotExists() throws Exception { } Randomness.shuffle(blobsToDelete); - container.deleteBlobsIgnoringIfNotExists(OperationPurpose.SNAPSHOT, blobsToDelete.iterator()); - assertThat(container.listBlobs(OperationPurpose.SNAPSHOT), is(anEmptyMap())); + container.deleteBlobsIgnoringIfNotExists(randomPurpose(), blobsToDelete.iterator()); + assertThat(container.listBlobs(randomPurpose()), is(anEmptyMap())); } } public void testNotFoundErrorMessageContainsFullKey() throws Exception { try (BlobStore store = newBlobStore()) { BlobContainer container = store.blobContainer(BlobPath.EMPTY.add("nested").add("dir")); - NoSuchFileException exception = expectThrows( - NoSuchFileException.class, - () -> container.readBlob(OperationPurpose.SNAPSHOT, "blob") - ); + NoSuchFileException exception = expectThrows(NoSuchFileException.class, () -> container.readBlob(randomPurpose(), "blob")); assertThat(exception.getMessage(), containsString("nested/dir/blob] not found")); } } @@ -281,10 +278,10 @@ public void testReadByteByByte() throws Exception { BlobContainer container = store.blobContainer(BlobPath.EMPTY.add(UUIDs.randomBase64UUID())); var data = randomBytes(randomIntBetween(128, 512)); String blobName = randomName(); - container.writeBlob(OperationPurpose.SNAPSHOT, blobName, new ByteArrayInputStream(data), data.length, true); + container.writeBlob(randomPurpose(), blobName, new ByteArrayInputStream(data), data.length, true); var originalDataInputStream = new ByteArrayInputStream(data); - try (var azureInputStream = container.readBlob(OperationPurpose.SNAPSHOT, blobName)) { + try (var azureInputStream = container.readBlob(randomPurpose(), blobName)) { for (int i = 0; i < data.length; i++) { assertThat(originalDataInputStream.read(), is(equalTo(azureInputStream.read()))); } @@ -292,7 +289,7 @@ public void testReadByteByByte() throws Exception { assertThat(azureInputStream.read(), is(equalTo(-1))); assertThat(originalDataInputStream.read(), is(equalTo(-1))); } - container.delete(OperationPurpose.SNAPSHOT); + container.delete(randomPurpose()); } } } diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java index 2cb4476f528b9..052b558a05a38 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java @@ -20,7 +20,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.blobstore.BlobContainer; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.settings.MockSecureSettings; import 
org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.Settings; @@ -36,6 +35,7 @@ import java.net.HttpURLConnection; import java.util.Collection; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -105,7 +105,7 @@ protected void createRepository(String repoName) { private void ensureSasTokenPermissions() { final BlobStoreRepository repository = getRepository(); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); repository.threadPool().generic().execute(ActionRunnable.wrap(future, l -> { final AzureBlobStore blobStore = (AzureBlobStore) repository.blobStore(); final AzureBlobServiceClient azureBlobServiceClient = blobStore.getService().client("default", LocationMode.PRIMARY_ONLY); @@ -136,17 +136,17 @@ public void testMultiBlockUpload() throws Exception { final BlobStoreRepository repo = getRepository(); // The configured threshold for this test suite is 1mb final int blobSize = ByteSizeUnit.MB.toIntBytes(2); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); repo.threadPool().generic().execute(ActionRunnable.run(future, () -> { final BlobContainer blobContainer = repo.blobStore().blobContainer(repo.basePath().add("large_write")); blobContainer.writeBlob( - OperationPurpose.SNAPSHOT, + randomPurpose(), UUIDs.base64UUID(), new ByteArrayInputStream(randomByteArrayOfLength(blobSize)), blobSize, false ); - blobContainer.delete(OperationPurpose.SNAPSHOT); + blobContainer.delete(randomPurpose()); })); future.get(); } diff --git a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java index f5c1d0b8ac00b..3cc56c949e852 100644 --- a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java +++ b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.blobstore.BlobContainer; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.lucene.store.ByteArrayIndexInput; @@ -43,6 +42,7 @@ import java.util.stream.Collectors; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase.randomBytes; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -61,11 +61,11 @@ public void testReadNonexistentBlobThrowsNoSuchFileException() { final BlobContainer blobContainer = createBlobContainer(between(1, 5)); final Exception exception = expectThrows(NoSuchFileException.class, () -> { if (randomBoolean()) { - blobContainer.readBlob(OperationPurpose.SNAPSHOT, "read_nonexistent_blob"); + blobContainer.readBlob(randomPurpose(), "read_nonexistent_blob"); } else { final long position = randomLongBetween(0, 
MAX_RANGE_VAL - 1L); final long length = randomLongBetween(1, MAX_RANGE_VAL - position); - blobContainer.readBlob(OperationPurpose.SNAPSHOT, "read_nonexistent_blob", position, length); + blobContainer.readBlob(randomPurpose(), "read_nonexistent_blob", position, length); } }); assertThat(exception.toString(), exception.getMessage().toLowerCase(Locale.ROOT), containsString("not found")); @@ -112,7 +112,7 @@ public void testReadBlobWithRetries() throws Exception { }); final BlobContainer blobContainer = createBlobContainer(maxRetries); - try (InputStream inputStream = blobContainer.readBlob(OperationPurpose.SNAPSHOT, "read_blob_max_retries")) { + try (InputStream inputStream = blobContainer.readBlob(randomPurpose(), "read_blob_max_retries")) { assertArrayEquals(bytes, BytesReference.toBytes(Streams.readFully(inputStream))); assertThat(countDownHead.isCountedDown(), is(true)); assertThat(countDownGet.isCountedDown(), is(true)); @@ -160,7 +160,7 @@ public void testReadRangeBlobWithRetries() throws Exception { final BlobContainer blobContainer = createBlobContainer(maxRetries); final int position = randomIntBetween(0, bytes.length - 1); final int length = randomIntBetween(1, bytes.length - position); - try (InputStream inputStream = blobContainer.readBlob(OperationPurpose.SNAPSHOT, "read_range_blob_max_retries", position, length)) { + try (InputStream inputStream = blobContainer.readBlob(randomPurpose(), "read_range_blob_max_retries", position, length)) { final byte[] bytesRead = BytesReference.toBytes(Streams.readFully(inputStream)); assertArrayEquals(Arrays.copyOfRange(bytes, position, Math.min(bytes.length, position + length)), bytesRead); assertThat(countDownGet.isCountedDown(), is(true)); @@ -203,7 +203,7 @@ public void testWriteBlobWithRetries() throws Exception { final BlobContainer blobContainer = createBlobContainer(maxRetries); try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", bytes), bytes.length)) { - blobContainer.writeBlob(OperationPurpose.SNAPSHOT, "write_blob_max_retries", stream, bytes.length, false); + blobContainer.writeBlob(randomPurpose(), "write_blob_max_retries", stream, bytes.length, false); } assertThat(countDown.isCountedDown(), is(true)); } @@ -273,7 +273,7 @@ public void testWriteLargeBlob() throws Exception { final BlobContainer blobContainer = createBlobContainer(maxRetries); try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", data), data.length)) { - blobContainer.writeBlob(OperationPurpose.SNAPSHOT, "write_large_blob", stream, data.length, false); + blobContainer.writeBlob(randomPurpose(), "write_large_blob", stream, data.length, false); } assertThat(countDownUploads.get(), equalTo(0)); @@ -341,7 +341,7 @@ public void testWriteLargeBlobStreaming() throws Exception { }); final BlobContainer blobContainer = createBlobContainer(maxRetries); - blobContainer.writeMetadataBlob(OperationPurpose.SNAPSHOT, "write_large_blob_streaming", false, randomBoolean(), out -> { + blobContainer.writeMetadataBlob(randomPurpose(), "write_large_blob_streaming", false, randomBoolean(), out -> { int outstanding = data.length; while (outstanding > 0) { if (randomBoolean()) { @@ -391,13 +391,7 @@ public void reset() {} }) { final IOException ioe = expectThrows( IOException.class, - () -> blobContainer.writeBlob( - OperationPurpose.SNAPSHOT, - "write_blob_max_retries", - stream, - randomIntBetween(1, 128), - randomBoolean() - ) + () -> blobContainer.writeBlob(randomPurpose(), "write_blob_max_retries", stream, 
randomIntBetween(1, 128), randomBoolean()) ); assertThat(ioe.getMessage(), is("Unable to write blob write_blob_max_retries")); // The mock http server uses 1 thread to process the requests, it's possible that the @@ -471,7 +465,7 @@ public void testRetryFromSecondaryLocationPolicies() throws Exception { } final BlobContainer blobContainer = createBlobContainer(maxRetries, secondaryHost, locationMode); - try (InputStream inputStream = blobContainer.readBlob(OperationPurpose.SNAPSHOT, "read_blob_from_secondary")) { + try (InputStream inputStream = blobContainer.readBlob(randomPurpose(), "read_blob_from_secondary")) { assertArrayEquals(bytes, BytesReference.toBytes(Streams.readFully(inputStream))); // It does round robin, first tries on the primary, then on the secondary diff --git a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSasTokenTests.java b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSasTokenTests.java index cfc4e17949771..ec74918f601cc 100644 --- a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSasTokenTests.java +++ b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSasTokenTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.blobstore.BlobContainer; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.MockSecureSettings; @@ -24,6 +23,7 @@ import static org.elasticsearch.repositories.azure.AzureStorageSettings.ACCOUNT_SETTING; import static org.elasticsearch.repositories.azure.AzureStorageSettings.SAS_TOKEN_SETTING; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; @@ -77,7 +77,7 @@ public void testSasTokenIsUsedAsProvidedInSettings() throws Exception { }); final BlobContainer blobContainer = createBlobContainer(maxRetries, null, LocationMode.PRIMARY_ONLY, clientName, secureSettings); - try (InputStream inputStream = blobContainer.readBlob(OperationPurpose.SNAPSHOT, "sas_test")) { + try (InputStream inputStream = blobContainer.readBlob(randomPurpose(), "sas_test")) { assertArrayEquals(bytes, BytesReference.toBytes(Streams.readFully(inputStream))); } } diff --git a/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index b0eafb3bc37ab..87449d7153057 100644 --- a/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.Streams; @@ -59,6 +58,7 @@ import java.util.regex.Matcher; import 
java.util.regex.Pattern; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.CREDENTIALS_FILE_SETTING; import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.ENDPOINT_SETTING; import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.TOKEN_URI_SETTING; @@ -132,7 +132,7 @@ public void testDeleteSingleItem() { f, () -> repository.blobStore() .blobContainer(repository.basePath()) - .deleteBlobsIgnoringIfNotExists(OperationPurpose.SNAPSHOT, Iterators.single("foo")) + .deleteBlobsIgnoringIfNotExists(randomPurpose(), Iterators.single("foo")) ) ) ); @@ -198,7 +198,7 @@ public void testWriteReadLarge() throws IOException { random().nextBytes(data); writeBlob(container, "foobar", new BytesArray(data), false); } - try (InputStream stream = container.readBlob(OperationPurpose.SNAPSHOT, "foobar")) { + try (InputStream stream = container.readBlob(randomPurpose(), "foobar")) { BytesRefBuilder target = new BytesRefBuilder(); while (target.length() < data.length) { byte[] buffer = new byte[scaledRandomIntBetween(1, data.length - target.length())]; @@ -209,7 +209,7 @@ public void testWriteReadLarge() throws IOException { assertEquals(data.length, target.length()); assertArrayEquals(data, Arrays.copyOfRange(target.bytes(), 0, target.length())); } - container.delete(OperationPurpose.SNAPSHOT); + container.delete(randomPurpose()); } } diff --git a/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java b/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java index ffb5fd71f0c09..b2df41c69eda7 100644 --- a/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java +++ b/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java @@ -71,7 +71,7 @@ protected SecureSettings credentials() { @Override protected void createRepository(final String repoName) { - AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository("test-repo") + AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository(repoName) .setType("gcs") .setSettings( Settings.builder() diff --git a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index 51d26a169ad0e..72df453a4e8f9 100644 --- a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -759,38 +759,4 @@ public int read() throws IOException { } } - private static final class PrivilegedWriteChannelStream extends OutputStream { - - private final OutputStream stream; - - PrivilegedWriteChannelStream(WritableByteChannel channel) { - stream = Channels.newOutputStream(channel); - } - - @Override - public void write(int b) throws IOException { - SocketAccess.doPrivilegedVoidIOException(() -> stream.write(b)); - } - - @Override - public void write(byte[] b) throws IOException { - SocketAccess.doPrivilegedVoidIOException(() -> stream.write(b)); - } - - @Override - public void write(byte[] b, int off, 
int len) throws IOException { - SocketAccess.doPrivilegedVoidIOException(() -> stream.write(b, off, len)); - } - - @Override - public void flush() throws IOException { - SocketAccess.doPrivilegedVoidIOException(stream::flush); - } - - @Override - public void close() throws IOException { - SocketAccess.doPrivilegedVoidIOException(stream::close); - } - } - } diff --git a/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java b/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java index d23d9385ab1a2..673499e4b2461 100644 --- a/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java +++ b/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Streams; @@ -64,6 +63,7 @@ import static fixture.gcs.GoogleCloudStorageHttpHandler.parseMultipartRequestBody; import static fixture.gcs.TestUtils.createServiceAccount; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase.randomBytes; import static org.elasticsearch.repositories.gcs.GoogleCloudStorageBlobStore.MAX_DELETES_PER_BATCH; import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.CREDENTIALS_FILE_SETTING; @@ -188,7 +188,7 @@ public void testReadLargeBlobWithRetries() throws Exception { exchange.close(); }); - try (InputStream inputStream = blobContainer.readBlob(OperationPurpose.SNAPSHOT, "large_blob_retries")) { + try (InputStream inputStream = blobContainer.readBlob(randomPurpose(), "large_blob_retries")) { assertArrayEquals(bytes, BytesReference.toBytes(Streams.readFully(inputStream))); } } @@ -231,7 +231,7 @@ public void testWriteBlobWithRetries() throws Exception { })); try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", bytes), bytes.length)) { - blobContainer.writeBlob(OperationPurpose.SNAPSHOT, "write_blob_max_retries", stream, bytes.length, false); + blobContainer.writeBlob(randomPurpose(), "write_blob_max_retries", stream, bytes.length, false); } assertThat(countDown.isCountedDown(), is(true)); } @@ -254,7 +254,7 @@ public void testWriteBlobWithReadTimeouts() { Exception exception = expectThrows(StorageException.class, () -> { try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", bytes), bytes.length)) { - blobContainer.writeBlob(OperationPurpose.SNAPSHOT, "write_blob_timeout", stream, bytes.length, false); + blobContainer.writeBlob(randomPurpose(), "write_blob_timeout", stream, bytes.length, false); } }); assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("read timed out")); @@ -392,10 +392,10 @@ public void testWriteLargeBlob() throws IOException { if (randomBoolean()) { try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", data), data.length)) { - blobContainer.writeBlob(OperationPurpose.SNAPSHOT, 
"write_large_blob", stream, data.length, false); + blobContainer.writeBlob(randomPurpose(), "write_large_blob", stream, data.length, false); } } else { - blobContainer.writeMetadataBlob(OperationPurpose.SNAPSHOT, "write_large_blob", false, randomBoolean(), out -> out.write(data)); + blobContainer.writeMetadataBlob(randomPurpose(), "write_large_blob", false, randomBoolean(), out -> out.write(data)); } assertThat(countInits.get(), equalTo(0)); @@ -452,7 +452,7 @@ public String next() { exchange.getResponseBody().write(response); })); - blobContainer.deleteBlobsIgnoringIfNotExists(OperationPurpose.SNAPSHOT, blobNamesIterator); + blobContainer.deleteBlobsIgnoringIfNotExists(randomPurpose(), blobNamesIterator); // Ensure that the remaining deletes are sent in the last batch if (pendingDeletes.get() > 0) { diff --git a/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java b/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java index e38347ad30292..5a950ad2a9ecc 100644 --- a/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java +++ b/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java @@ -19,7 +19,6 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.test.ESTestCase; @@ -27,6 +26,7 @@ import java.util.Arrays; import java.util.List; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.hamcrest.Matchers.instanceOf; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; @@ -93,7 +93,7 @@ public void testDeleteBlobsIgnoringIfNotExistsThrowsIOException() throws Excepti IOException e = expectThrows( IOException.class, - () -> container.deleteBlobsIgnoringIfNotExists(OperationPurpose.SNAPSHOT, blobs.iterator()) + () -> container.deleteBlobsIgnoringIfNotExists(randomPurpose(), blobs.iterator()) ); assertThat(e.getCause(), instanceOf(StorageException.class)); } diff --git a/modules/repository-s3/build.gradle b/modules/repository-s3/build.gradle index 87dda19368d5a..8b1f30a1bba61 100644 --- a/modules/repository-s3/build.gradle +++ b/modules/repository-s3/build.gradle @@ -1,11 +1,7 @@ import org.apache.tools.ant.filters.ReplaceTokens import org.elasticsearch.gradle.internal.info.BuildParams -import org.elasticsearch.gradle.internal.test.RestIntegTestTask -import org.elasticsearch.gradle.internal.test.rest.LegacyYamlRestTestPlugin import org.elasticsearch.gradle.internal.test.InternalClusterTestPlugin -import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE - /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License @@ -13,7 +9,7 @@ import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { @@ -46,6 +42,13 @@ dependencies { api 'javax.xml.bind:jaxb-api:2.2.2' testImplementation project(':test:fixtures:s3-fixture') + yamlRestTestImplementation project(":test:framework") + yamlRestTestImplementation project(':test:fixtures:s3-fixture') + yamlRestTestImplementation project(':test:fixtures:minio-fixture') + internalClusterTestImplementation project(':test:fixtures:minio-fixture') + + yamlRestTestRuntimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}" + internalClusterTestRuntimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}" } restResources { @@ -83,13 +86,6 @@ tasks.named('test').configure { boolean useFixture = false -def fixtureAddress = { fixture, name, port -> - assert useFixture: 'closure should not be used without a fixture' - int ephemeralPort = project(":test:fixtures:${fixture}").postProcessFixture.ext."test.fixtures.${name}.tcp.${port}" - assert ephemeralPort > 0 - 'http://127.0.0.1:' + ephemeralPort -} - // We test against two repositories, one which uses the usual two-part "permanent" credentials and // the other which uses three-part "temporary" or "session" credentials. @@ -123,23 +119,13 @@ if (!s3PermanentAccessKey && !s3PermanentSecretKey && !s3PermanentBucket && !s3P s3PermanentSecretKey = 's3_test_secret_key' s3PermanentBucket = 'bucket' s3PermanentBasePath = 'base_path' - - apply plugin: 'elasticsearch.test.fixtures' useFixture = true - -} else if (!s3PermanentAccessKey || !s3PermanentSecretKey || !s3PermanentBucket || !s3PermanentBasePath) { - throw new IllegalArgumentException("not all options specified to run against external S3 service as permanent credentials are present") } - if (!s3TemporaryAccessKey && !s3TemporarySecretKey && !s3TemporaryBucket && !s3TemporaryBasePath && !s3TemporarySessionToken) { s3TemporaryAccessKey = 'session_token_access_key' s3TemporarySecretKey = 'session_token_secret_key' s3TemporaryBucket = 'session_token_bucket' s3TemporaryBasePath = 'session_token_base_path' - s3TemporarySessionToken = 'session_token' - -} else if (!s3TemporaryAccessKey || !s3TemporarySecretKey || !s3TemporaryBucket || !s3TemporaryBasePath || !s3TemporarySessionToken) { - throw new IllegalArgumentException("not all options specified to run against external S3 service as temporary credentials are present") } if (!s3EC2Bucket && !s3EC2BasePath && !s3ECSBucket && !s3ECSBasePath) { @@ -147,18 +133,17 @@ if (!s3EC2Bucket && !s3EC2BasePath && !s3ECSBucket && !s3ECSBasePath) { s3EC2BasePath = 'ec2_base_path' s3ECSBucket = 'ecs_bucket' s3ECSBasePath = 'ecs_base_path' -} else if (!s3EC2Bucket || !s3EC2BasePath || !s3ECSBucket || !s3ECSBasePath) { - throw new IllegalArgumentException("not all options specified to run EC2/ECS tests are present") } if (!s3STSBucket && !s3STSBasePath) { s3STSBucket = 'sts_bucket' s3STSBasePath = 'sts_base_path' -} else if (!s3STSBucket || !s3STSBasePath) { - throw new IllegalArgumentException("not all options specified to run STS tests are present") } tasks.named("processYamlRestTestResources").configure { + from("src/test/resources") { + include "aws-web-identity-token-file" + } Map expansions = [ 'permanent_bucket' : s3PermanentBucket, 'permanent_base_path' : s3PermanentBasePath + "_integration_tests", @@ -182,162 +167,36 @@ tasks.named("internalClusterTest").configure { } tasks.named("yamlRestTest").configure { - systemProperty 'tests.rest.blacklist', ( - 
useFixture ? - ['repository_s3/50_repository_ecs_credentials/*', - 'repository_s3/60_repository_sts_credentials/*'] - : - [ - 'repository_s3/30_repository_temporary_credentials/*', - 'repository_s3/40_repository_ec2_credentials/*', - 'repository_s3/50_repository_ecs_credentials/*', - 'repository_s3/60_repository_sts_credentials/*' - ] - ).join(",") -} - -if (useFixture) { - testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture') - testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture-with-session-token') - testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture-with-ec2') + systemProperty("s3PermanentAccessKey", s3PermanentAccessKey) + systemProperty("s3PermanentSecretKey", s3PermanentSecretKey) + systemProperty("s3TemporaryAccessKey", s3TemporaryAccessKey) + systemProperty("s3TemporarySecretKey", s3TemporarySecretKey) + systemProperty("s3EC2AccessKey", s3PermanentAccessKey) - normalization { - runtimeClasspath { - // ignore generated address file for the purposes of build avoidance - ignore 's3Fixture.address' - } - } -} - -testClusters.matching { it.name == "yamlRestTest" }.configureEach { - keystore 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey - keystore 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey - - keystore 's3.client.integration_test_temporary.access_key', s3TemporaryAccessKey - keystore 's3.client.integration_test_temporary.secret_key', s3TemporarySecretKey - keystore 's3.client.integration_test_temporary.session_token', s3TemporarySessionToken - - if (useFixture) { - setting 's3.client.integration_test_permanent.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture', '80')}" }, IGNORE_VALUE - setting 's3.client.integration_test_temporary.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-session-token', '80')}" }, IGNORE_VALUE - setting 's3.client.integration_test_ec2.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-ec2', '80')}" }, IGNORE_VALUE - - // to redirect InstanceProfileCredentialsProvider to custom auth point - systemProperty "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-ec2', '80')}" }, IGNORE_VALUE - } else { - println "Using an external service to test the repository-s3 plugin" - } -} - -// MinIO -if (useFixture) { - testFixtures.useFixture(':test:fixtures:minio-fixture', 'minio-fixture') - - tasks.register("yamlRestTestMinio", RestIntegTestTask) { - description = "Runs REST tests using the Minio repository." 
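
The legacy wiring being removed here registered dedicated RestIntegTestTask tasks against Gradle-managed fixtures for MinIO, ECS and STS. Its replacement moves fixture ownership into the tests themselves; a rough sketch of the pattern, following the MinioTestContainer usage in S3RepositoryThirdPartyTests further down this diff (a sketch, not the exact implementation):

```java
import org.elasticsearch.core.Booleans;
import org.elasticsearch.test.fixtures.minio.MinioTestContainer;
import org.junit.ClassRule;

// Sketch of the testcontainers-based replacement (mirrors S3RepositoryThirdPartyTests below):
// the test suite starts and stops MinIO itself instead of relying on Gradle fixture tasks.
@ClassRule
public static MinioTestContainer minio = new MinioTestContainer(
    Booleans.parseBoolean(System.getProperty("tests.use.fixture", "true"))
);

// The repository endpoint is then read straight off the running container,
// e.g. settings.put("endpoint", minio.getAddress());
```
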
- SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); - SourceSet yamlRestTestSourceSet = sourceSets.getByName(LegacyYamlRestTestPlugin.SOURCE_SET_NAME) - setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs()) - setClasspath(yamlRestTestSourceSet.getRuntimeClasspath()) - - // Minio only supports a single access key, see https://github.com/minio/minio/pull/5968 - systemProperty 'tests.rest.blacklist', [ - 'repository_s3/30_repository_temporary_credentials/*', - 'repository_s3/40_repository_ec2_credentials/*', - 'repository_s3/50_repository_ecs_credentials/*', - 'repository_s3/60_repository_sts_credentials/*' - ].join(",") - } - tasks.named("check").configure { dependsOn("yamlRestTestMinio") } - - testClusters.matching { it.name == "yamlRestTestMinio" }.configureEach { - keystore 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey - keystore 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey - setting 's3.client.integration_test_permanent.endpoint', { "${-> fixtureAddress('minio-fixture', 'minio-fixture', '9000')}" }, IGNORE_VALUE - module tasks.named("explodedBundlePlugin") - } -} - -// ECS -if (useFixture) { - testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture-with-ecs') - tasks.register("yamlRestTestECS", RestIntegTestTask.class) { - description = "Runs tests using the ECS repository." - SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); - SourceSet yamlRestTestSourceSet = sourceSets.getByName(LegacyYamlRestTestPlugin.SOURCE_SET_NAME) - setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs()) - setClasspath(yamlRestTestSourceSet.getRuntimeClasspath()) - systemProperty 'tests.rest.blacklist', [ - 'repository_s3/10_basic/*', - 'repository_s3/20_repository_permanent_credentials/*', - 'repository_s3/30_repository_temporary_credentials/*', - 'repository_s3/40_repository_ec2_credentials/*', - 'repository_s3/60_repository_sts_credentials/*' - ].join(",") - } - tasks.named("check").configure { dependsOn("yamlRestTestECS") } - - testClusters.matching { it.name == "yamlRestTestECS" }.configureEach { - setting 's3.client.integration_test_ecs.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-ecs', '80')}" }, IGNORE_VALUE - module tasks.named('explodedBundlePlugin') - environment 'AWS_CONTAINER_CREDENTIALS_FULL_URI', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-ecs', '80')}/ecs_credentials_endpoint" }, IGNORE_VALUE - } -} - -// STS (Secure Token Service) -if (useFixture) { - testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture-with-sts') - tasks.register("yamlRestTestSTS", RestIntegTestTask.class) { - description = "Runs tests with the STS (Secure Token Service)" - SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); - SourceSet yamlRestTestSourceSet = sourceSets.getByName(LegacyYamlRestTestPlugin.SOURCE_SET_NAME) - setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs()) - setClasspath(yamlRestTestSourceSet.getRuntimeClasspath()) - systemProperty 'tests.rest.blacklist', [ - 'repository_s3/10_basic/*', - 'repository_s3/20_repository_permanent_credentials/*', - 'repository_s3/30_repository_temporary_credentials/*', - 'repository_s3/40_repository_ec2_credentials/*', - 'repository_s3/50_repository_ecs_credentials/*' - ].join(",") - } - tasks.named("check").configure { dependsOn("yamlRestTestSTS") } - - testClusters.matching { it.name == 
"yamlRestTestSTS" }.configureEach { - module tasks.named("explodedBundlePlugin") - - setting 's3.client.integration_test_sts.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-sts', '80')}" }, IGNORE_VALUE - systemProperty 'com.amazonaws.sdk.stsMetadataServiceEndpointOverride', - { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-sts', '80')}/assume-role-with-web-identity" }, IGNORE_VALUE - - File awsWebIdentityTokenExternalLocation = file('src/test/resources/aws-web-identity-token-file') - // The web identity token can be read only from the plugin config directory because of security restrictions - // Ideally we would create a symlink, but extraConfigFile doesn't support it - extraConfigFile 'repository-s3/aws-web-identity-token-file', awsWebIdentityTokenExternalLocation - environment 'AWS_WEB_IDENTITY_TOKEN_FILE', "$awsWebIdentityTokenExternalLocation" - - // The AWS STS SDK requires the role and session names to be set. We can verify that they are sent to S3S in the S3HttpFixtureWithSTS fixture - environment 'AWS_ROLE_ARN', 'arn:aws:iam::123456789012:role/FederatedWebIdentityRole' - environment 'AWS_ROLE_SESSION_NAME', 'sts-fixture-test' - } + // ideally we could resolve an env path in cluster config as resource similar to configuring a config file + // not sure how common this is, but it would be nice to support + File awsWebIdentityTokenExternalLocation = file('src/test/resources/aws-web-identity-token-file') + // The web identity token can be read only from the plugin config directory because of security restrictions + // Ideally we would create a symlink, but extraConfigFile doesn't support it + nonInputProperties.systemProperty("awsWebIdentityTokenExternalLocation", awsWebIdentityTokenExternalLocation.getAbsolutePath()) } // 3rd Party Tests -TaskProvider s3ThirdPartyTest = tasks.register("s3ThirdPartyTest", Test) { +tasks.register("s3ThirdPartyTest", Test) { SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); SourceSet internalTestSourceSet = sourceSets.getByName(InternalClusterTestPlugin.SOURCE_SET_NAME) setTestClassesDirs(internalTestSourceSet.getOutput().getClassesDirs()) setClasspath(internalTestSourceSet.getRuntimeClasspath()) include '**/S3RepositoryThirdPartyTests.class' + systemProperty("tests.use.fixture", Boolean.toString(useFixture)) + + // test container accesses ~/.testcontainers.properties read + systemProperty "tests.security.manager", "false" systemProperty 'test.s3.account', s3PermanentAccessKey systemProperty 'test.s3.key', s3PermanentSecretKey systemProperty 'test.s3.bucket', s3PermanentBucket nonInputProperties.systemProperty 'test.s3.base', s3PermanentBasePath + "_third_party_tests_" + BuildParams.testSeed - if (useFixture) { - nonInputProperties.systemProperty 'test.s3.endpoint', "${-> fixtureAddress('minio-fixture', 'minio-fixture', '9000') }" - } } -tasks.named("check").configure { dependsOn(s3ThirdPartyTest) } tasks.named("thirdPartyAudit").configure { ignoreMissingClasses( @@ -370,3 +229,8 @@ tasks.named("thirdPartyAudit").configure { 'javax.activation.DataHandler' ) } + +tasks.named("check").configure { + dependsOn(tasks.withType(Test)) +} + diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java new file mode 100644 index 0000000000000..2c759abc1e437 --- /dev/null +++ 
b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java @@ -0,0 +1,217 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.repositories.s3; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; + +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.blobstore.OperationPurpose; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.repositories.s3.S3BlobStore.Operation; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.TestTelemetryPlugin; +import org.elasticsearch.test.ESIntegTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.concurrent.LinkedBlockingQueue; + +import static org.elasticsearch.repositories.RepositoriesModule.METRIC_EXCEPTIONS_COUNT; +import static org.elasticsearch.repositories.RepositoriesModule.METRIC_EXCEPTIONS_HISTOGRAM; +import static org.elasticsearch.repositories.RepositoriesModule.METRIC_OPERATIONS_COUNT; +import static org.elasticsearch.repositories.RepositoriesModule.METRIC_REQUESTS_COUNT; +import static org.elasticsearch.repositories.RepositoriesModule.METRIC_THROTTLES_COUNT; +import static org.elasticsearch.repositories.RepositoriesModule.METRIC_THROTTLES_HISTOGRAM; +import static org.elasticsearch.repositories.RepositoriesModule.METRIC_UNSUCCESSFUL_OPERATIONS_COUNT; +import static org.elasticsearch.rest.RestStatus.INTERNAL_SERVER_ERROR; +import static org.elasticsearch.rest.RestStatus.NOT_FOUND; +import static org.elasticsearch.rest.RestStatus.TOO_MANY_REQUESTS; +import static org.hamcrest.Matchers.equalTo; + +@SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint") +// Need to set up a new cluster for each test because cluster settings use randomized authentication settings +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) +public class S3BlobStoreRepositoryMetricsTests extends S3BlobStoreRepositoryTests { + + private final Queue errorStatusQueue = new LinkedBlockingQueue<>(); + + // Always create erroneous handler + @Override + protected Map createHttpHandlers() { + return Collections.singletonMap( + "/bucket", + new S3StatsCollectorHttpHandler(new S3MetricErroneousHttpHandler(new S3BlobStoreHttpHandler("bucket"), errorStatusQueue)) + ); + } + + @Override + protected HttpHandler createErroneousHttpHandler(final HttpHandler delegate) { + return delegate; + } + + @Override + protected Settings nodeSettings(int nodeOrdinal, 
Settings otherSettings) { + final Settings settings = super.nodeSettings(nodeOrdinal, otherSettings); + return Settings.builder() + .put(settings) + .put(S3ClientSettings.MAX_RETRIES_SETTING.getConcreteSettingForNamespace("test").getKey(), 4) + .build(); + } + + public void testMetricsWithErrors() throws IOException { + final String repository = createRepository(randomRepositoryName()); + + final String dataNodeName = internalCluster().getNodeNameThat(DiscoveryNode::canContainData); + final var blobStoreRepository = (BlobStoreRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName) + .repository(repository); + final BlobStore blobStore = blobStoreRepository.blobStore(); + final TestTelemetryPlugin plugin = internalCluster().getInstance(PluginsService.class, dataNodeName) + .filterPlugins(TestTelemetryPlugin.class) + .findFirst() + .orElseThrow(); + + plugin.resetMeter(); + + final OperationPurpose purpose = randomFrom(OperationPurpose.values()); + final BlobContainer blobContainer = blobStore.blobContainer(BlobPath.EMPTY.add(randomIdentifier())); + final String blobName = randomIdentifier(); + + // Put a blob + final int nPuts = randomIntBetween(1, 3); + for (int i = 0; i < nPuts; i++) { + final long batch = i + 1; + addErrorStatus(INTERNAL_SERVER_ERROR, TOO_MANY_REQUESTS, TOO_MANY_REQUESTS); + blobContainer.writeBlob(purpose, blobName, new BytesArray("blob"), false); + assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_COUNT, Operation.PUT_OBJECT), equalTo(4L * batch)); + assertThat(getLongCounterValue(plugin, METRIC_OPERATIONS_COUNT, Operation.PUT_OBJECT), equalTo(batch)); + assertThat(getLongCounterValue(plugin, METRIC_UNSUCCESSFUL_OPERATIONS_COUNT, Operation.PUT_OBJECT), equalTo(0L)); + assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_COUNT, Operation.PUT_OBJECT), equalTo(batch)); + assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_COUNT, Operation.PUT_OBJECT), equalTo(2L * batch)); + assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.PUT_OBJECT), equalTo(batch)); + assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.PUT_OBJECT), equalTo(2L * batch)); + } + + // Get not found + final int nGets = randomIntBetween(1, 3); + for (int i = 0; i < nGets; i++) { + final long batch = i + 1; + addErrorStatus(TOO_MANY_REQUESTS, NOT_FOUND); + try { + blobContainer.readBlob(purpose, blobName).close(); + } catch (Exception e) { + // intentional failure + } + assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_COUNT, Operation.GET_OBJECT), equalTo(2L * batch)); + assertThat(getLongCounterValue(plugin, METRIC_OPERATIONS_COUNT, Operation.GET_OBJECT), equalTo(batch)); + assertThat(getLongCounterValue(plugin, METRIC_UNSUCCESSFUL_OPERATIONS_COUNT, Operation.GET_OBJECT), equalTo(batch)); + assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_COUNT, Operation.GET_OBJECT), equalTo(batch)); + assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_COUNT, Operation.GET_OBJECT), equalTo(batch)); + assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch)); + assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch)); + } + + // List retry exhausted + final int nLists = randomIntBetween(1, 3); + for (int i = 0; i < nLists; i++) { + final long batch = i + 1; + addErrorStatus(TOO_MANY_REQUESTS, TOO_MANY_REQUESTS, TOO_MANY_REQUESTS, TOO_MANY_REQUESTS, TOO_MANY_REQUESTS); + try { + 
blobContainer.listBlobs(purpose); + } catch (Exception e) { + // intentional failure + } + assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_COUNT, Operation.LIST_OBJECTS), equalTo(5L * batch)); + assertThat(getLongCounterValue(plugin, METRIC_OPERATIONS_COUNT, Operation.LIST_OBJECTS), equalTo(batch)); + assertThat(getLongCounterValue(plugin, METRIC_UNSUCCESSFUL_OPERATIONS_COUNT, Operation.LIST_OBJECTS), equalTo(batch)); + assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_COUNT, Operation.LIST_OBJECTS), equalTo(batch)); + assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_COUNT, Operation.LIST_OBJECTS), equalTo(5L * batch)); + assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.LIST_OBJECTS), equalTo(batch)); + assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.LIST_OBJECTS), equalTo(5L * batch)); + } + + // Delete to clean up + blobContainer.deleteBlobsIgnoringIfNotExists(purpose, Iterators.single(blobName)); + assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_COUNT, Operation.DELETE_OBJECTS), equalTo(1L)); + assertThat(getLongCounterValue(plugin, METRIC_OPERATIONS_COUNT, Operation.DELETE_OBJECTS), equalTo(1L)); + assertThat(getLongCounterValue(plugin, METRIC_UNSUCCESSFUL_OPERATIONS_COUNT, Operation.DELETE_OBJECTS), equalTo(0L)); + assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_COUNT, Operation.DELETE_OBJECTS), equalTo(0L)); + assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_COUNT, Operation.DELETE_OBJECTS), equalTo(0L)); + assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.DELETE_OBJECTS), equalTo(0L)); + assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.DELETE_OBJECTS), equalTo(0L)); + } + + private void addErrorStatus(RestStatus... 
statuses) { + errorStatusQueue.addAll(Arrays.asList(statuses)); + } + + private long getLongCounterValue(TestTelemetryPlugin plugin, String instrumentName, Operation operation) { + final List<Measurement> measurements = Measurement.combine(plugin.getLongCounterMeasurement(instrumentName)); + return measurements.stream() + .filter(m -> operation.getKey().equals(m.attributes().get("operation"))) + .mapToLong(Measurement::getLong) + .findFirst() + .orElse(0L); + } + + private long getLongHistogramValue(TestTelemetryPlugin plugin, String instrumentName, Operation operation) { + final List<Measurement> measurements = Measurement.combine(plugin.getLongHistogramMeasurement(instrumentName)); + return measurements.stream() + .filter(m -> operation.getKey().equals(m.attributes().get("operation"))) + .mapToLong(Measurement::getLong) + .findFirst() + .orElse(0L); + } + + @SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint") + private static class S3MetricErroneousHttpHandler implements DelegatingHttpHandler { + + private final HttpHandler delegate; + private final Queue<RestStatus> errorStatusQueue; + + S3MetricErroneousHttpHandler(HttpHandler delegate, Queue<RestStatus> errorStatusQueue) { + this.delegate = delegate; + this.errorStatusQueue = errorStatusQueue; + } + + @Override + public void handle(HttpExchange exchange) throws IOException { + final RestStatus status = errorStatusQueue.poll(); + if (status == null) { + delegate.handle(exchange); + } else if (status == INTERNAL_SERVER_ERROR) { + // Simulate a retryable exception + throw new IOException("ouch"); + } else { + try (exchange) { + drainInputStream(exchange.getRequestBody()); + exchange.sendResponseHeaders(status.getStatus(), -1); + } + } + } + + public HttpHandler getDelegate() { + return delegate; + } + } +} diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index aee61361ebd10..5a445a1524da5 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -43,6 +43,7 @@ import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.repositories.RepositoryStats; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; import org.elasticsearch.repositories.blobstore.ESMockAPIBasedRepositoryIntegTestCase; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.snapshots.SnapshotId; @@ -50,14 +51,10 @@ import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.snapshots.mockstore.BlobStoreWrapper; import org.elasticsearch.telemetry.Measurement; -import org.elasticsearch.telemetry.RecordingInstruments; -import org.elasticsearch.telemetry.RecordingMeterRegistry; import org.elasticsearch.telemetry.TestTelemetryPlugin; -import org.elasticsearch.telemetry.metric.LongCounter; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.test.junit.annotations.TestIssueLogging; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; 
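
The assertions in S3BlobStoreRepositoryMetricsTests above encode the SDK's retry arithmetic; a worked sketch of the PUT case, assuming (as the test configures) MAX_RETRIES = 4 so every injected error is retried:

```java
// Worked example for the PUT assertions (a sketch; the constants mirror the test above).
// Each PUT enqueues three retryable errors: INTERNAL_SERVER_ERROR, TOO_MANY_REQUESTS, TOO_MANY_REQUESTS.
int injectedErrors = 3;                      // all three are retried, since MAX_RETRIES is 4
long requestsPerPut = injectedErrors + 1;    // 3 failed attempts + 1 success, hence the 4L * batch assertion
long throttlesPerPut = 2;                    // the two 429 responses, hence the 2L * batch throttle assertions
long exceptionsPerPut = 1;                   // per the assertions, only the simulated IOException counts here
```

The GET case follows the same logic: one 429 followed by the terminal 404 yields the 2L * batch request count, with the operation counted as unsuccessful.
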
import org.elasticsearch.xcontent.XContentFactory; @@ -78,6 +75,7 @@ import java.util.stream.StreamSupport; import static org.elasticsearch.repositories.RepositoriesModule.METRIC_REQUESTS_COUNT; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.allOf; @@ -139,7 +137,7 @@ protected Settings repositorySettings(String repoName) { @Override protected Collection> nodePlugins() { - return List.of(TestS3RepositoryPlugin.class, TestS3BlobTelemetryPlugin.class); + return List.of(TestS3RepositoryPlugin.class, TestTelemetryPlugin.class); } @Override @@ -179,7 +177,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { } @Override - @TestLogging(reason = "Enable request logging to debug #88841", value = "com.amazonaws.request:DEBUG") + @TestIssueLogging(issueUrl = "https://github.com/elastic/elasticsearch/issues/88841", value = "com.amazonaws.request:DEBUG") public void testRequestStats() throws Exception { super.testRequestStats(); } @@ -225,6 +223,7 @@ public void testAbortRequestStats() throws Exception { assertEquals(assertionErrorMsg, mockCalls, sdkRequestCounts); } + @TestIssueLogging(issueUrl = "https://github.com/elastic/elasticsearch/issues/101608", value = "com.amazonaws.request:DEBUG") public void testMetrics() throws Exception { // Create the repository and perform some activities final String repository = createRepository(randomRepositoryName()); @@ -266,7 +265,7 @@ public void testMetrics() throws Exception { .getStatsCollectors().collectors; final var plugins = internalCluster().getInstance(PluginsService.class, nodeName) - .filterPlugins(TestS3BlobTelemetryPlugin.class) + .filterPlugins(TestTelemetryPlugin.class) .toList(); assertThat(plugins, hasSize(1)); final List metrics = Measurement.combine(plugins.get(0).getLongCounterMeasurement(METRIC_REQUESTS_COUNT)); @@ -276,13 +275,21 @@ public void testMetrics() throws Exception { equalTo(metrics.stream().map(m -> m.attributes().get("operation")).collect(Collectors.toSet()).size()) ); metrics.forEach(metric -> { + assertThat( + metric.attributes(), + allOf(hasEntry("repo_type", S3Repository.TYPE), hasKey("repo_name"), hasKey("operation"), hasKey("purpose")) + ); final S3BlobStore.Operation operation = S3BlobStore.Operation.parse((String) metric.attributes().get("operation")); final S3BlobStore.StatsKey statsKey = new S3BlobStore.StatsKey( operation, OperationPurpose.parse((String) metric.attributes().get("purpose")) ); - assertThat(statsCollectors, hasKey(statsKey)); - assertThat(metric.getLong(), equalTo(statsCollectors.get(statsKey).counter.sum())); + assertThat(nodeName + "/" + statsKey + " exists", statsCollectors, hasKey(statsKey)); + assertThat( + nodeName + "/" + statsKey + " has correct sum", + metric.getLong(), + equalTo(statsCollectors.get(statsKey).counter.sum()) + ); aggregatedMetrics.compute(operation.getKey(), (k, v) -> v == null ? 
metric.getLong() : v + metric.getLong()); }); @@ -312,7 +319,7 @@ public void testRequestStatsWithOperationPurposes() throws IOException { assertThat(initialStats.keySet(), equalTo(allOperations)); // Collect more stats with an operation purpose other than the default - final OperationPurpose purpose = randomValueOtherThan(OperationPurpose.SNAPSHOT, () -> randomFrom(OperationPurpose.values())); + final OperationPurpose purpose = randomValueOtherThan(OperationPurpose.SNAPSHOT, BlobStoreTestUtil::randomPurpose); final BlobPath blobPath = repository.basePath().add(randomAlphaOfLength(10)); final BlobContainer blobContainer = blobStore.blobContainer(blobPath); final BytesArray whatToWrite = new BytesArray(randomByteArrayOfLength(randomIntBetween(100, 1000))); @@ -389,7 +396,7 @@ public void testEnforcedCooldownPeriod() throws IOException { () -> repository.blobStore() .blobContainer(repository.basePath()) .writeBlobAtomic( - OperationPurpose.SNAPSHOT, + randomPurpose(), BlobStoreRepository.INDEX_FILE_PREFIX + modifiedRepositoryData.getGenId(), serialized, true @@ -460,7 +467,7 @@ void ensureMultiPartUploadSize(long blobSize) {} } @SuppressForbidden(reason = "this test uses a HttpHandler to emulate an S3 endpoint") - private class S3BlobStoreHttpHandler extends S3HttpHandler implements BlobStoreHttpHandler { + protected class S3BlobStoreHttpHandler extends S3HttpHandler implements BlobStoreHttpHandler { S3BlobStoreHttpHandler(final String bucket) { super(bucket); @@ -494,7 +501,7 @@ private void validateAuthHeader(HttpExchange exchange) { * slow down the test suite. */ @SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint") - private static class S3ErroneousHttpHandler extends ErroneousHttpHandler { + protected static class S3ErroneousHttpHandler extends ErroneousHttpHandler { S3ErroneousHttpHandler(final HttpHandler delegate, final int maxErrorsPerRequest) { super(delegate, maxErrorsPerRequest); @@ -511,7 +518,7 @@ protected String requestUniqueId(final HttpExchange exchange) { * HTTP handler that tracks the number of requests performed against S3. 
*/ @SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint") - private class S3StatsCollectorHttpHandler extends HttpStatsCollectorHandler { + protected class S3StatsCollectorHttpHandler extends HttpStatsCollectorHandler { S3StatsCollectorHttpHandler(final HttpHandler delegate) { super(delegate); @@ -556,46 +563,4 @@ private boolean isMultiPartUpload(String request) { || Regex.simpleMatch("PUT /*/*?*uploadId=*", request); } } - - public static class TestS3BlobTelemetryPlugin extends TestTelemetryPlugin { - protected final MeterRegistry meter = new RecordingMeterRegistry() { - private final LongCounter longCounter = new RecordingInstruments.RecordingLongCounter(METRIC_REQUESTS_COUNT, recorder) { - @Override - public void increment() { - throw new UnsupportedOperationException(); - } - - @Override - public void incrementBy(long inc) { - throw new UnsupportedOperationException(); - } - - @Override - public void incrementBy(long inc, Map attributes) { - assertThat( - attributes, - allOf(hasEntry("repo_type", S3Repository.TYPE), hasKey("repo_name"), hasKey("operation"), hasKey("purpose")) - ); - super.incrementBy(inc, attributes); - } - }; - - @Override - protected LongCounter buildLongCounter(String name, String description, String unit) { - return longCounter; - } - - @Override - public LongCounter registerLongCounter(String name, String description, String unit) { - assertThat(name, equalTo(METRIC_REQUESTS_COUNT)); - return super.registerLongCounter(name, description, unit); - } - - @Override - public LongCounter getLongCounter(String name) { - assertThat(name, equalTo(METRIC_REQUESTS_COUNT)); - return super.getLongCounter(name); - } - }; - } } diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java index b9cb2f62f8cfc..1e2ff831b8e49 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -11,11 +11,12 @@ import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; import com.amazonaws.services.s3.model.ListMultipartUploadsRequest; import com.amazonaws.services.s3.model.MultipartUpload; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.blobstore.OptionalBytesReference; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -23,6 +24,7 @@ import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.core.Booleans; import org.elasticsearch.core.TimeValue; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; @@ -31,8 +33,11 @@ import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.telemetry.metric.MeterRegistry; import 
org.elasticsearch.test.ClusterServiceUtils; +import org.elasticsearch.test.fixtures.minio.MinioTestContainer; +import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.junit.ClassRule; import java.io.IOException; import java.util.Collection; @@ -40,6 +45,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.equalTo; @@ -48,7 +54,13 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; +@ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) +@ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482 public class S3RepositoryThirdPartyTests extends AbstractThirdPartyRepositoryTestCase { + static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("tests.use.fixture", "true")); + + @ClassRule + public static MinioTestContainer minio = new MinioTestContainer(USE_FIXTURE); @Override protected Collection> getPlugins() { @@ -92,7 +104,7 @@ protected void createRepository(String repoName) { Settings.Builder settings = Settings.builder() .put("bucket", System.getProperty("test.s3.bucket")) .put("base_path", System.getProperty("test.s3.base", "testpath")); - final String endpoint = System.getProperty("test.s3.endpoint"); + final String endpoint = USE_FIXTURE ? minio.getAddress() : System.getProperty("test.s3.endpoint"); if (endpoint != null) { settings.put("endpoint", endpoint); } else { @@ -109,7 +121,7 @@ protected void createRepository(String repoName) { settings.put("storage_class", storageClass); } } - AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository("test-repo") + AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository(repoName) .setType("s3") .setSettings(settings) .get(); @@ -149,7 +161,7 @@ public long absoluteTimeInMillis() { class TestHarness { boolean tryCompareAndSet(BytesReference expected, BytesReference updated) { return PlainActionFuture.get( - future -> blobContainer.compareAndSetRegister(OperationPurpose.SNAPSHOT, "key", expected, updated, future), + future -> blobContainer.compareAndSetRegister(randomPurpose(), "key", expected, updated, future), 10, TimeUnit.SECONDS ); @@ -157,11 +169,7 @@ boolean tryCompareAndSet(BytesReference expected, BytesReference updated) { BytesReference readRegister() { return PlainActionFuture.get( - future -> blobContainer.getRegister( - OperationPurpose.SNAPSHOT, - "key", - future.map(OptionalBytesReference::bytesReference) - ), + future -> blobContainer.getRegister(randomPurpose(), "key", future.map(OptionalBytesReference::bytesReference)), 10, TimeUnit.SECONDS ); @@ -208,7 +216,7 @@ List listMultipartUploads() { assertThat(testHarness.listMultipartUploads(), hasSize(0)); assertEquals(bytes2, testHarness.readRegister()); } finally { - blobContainer.delete(OperationPurpose.SNAPSHOT); + blobContainer.delete(randomPurpose()); } } finally { ThreadPool.terminate(threadpool, 10, TimeUnit.SECONDS); diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index 
c0b64c5c672f6..87b3c17bfd91c 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -595,13 +595,17 @@ void run(BytesReference expected, BytesReference updated, ActionListenerandThen((l, currentValue) -> ActionListener.completeWith(l, () -> { if (currentValue.isPresent() && currentValue.bytesReference().equals(expected)) { + logger.trace("[{}] completing upload [{}]", blobKey, uploadId); completeMultipartUpload(uploadId, partETag); } else { // Best-effort attempt to clean up after ourselves. + logger.trace("[{}] aborting upload [{}]", blobKey, uploadId); safeAbortMultipartUpload(uploadId); } return currentValue; @@ -635,6 +641,7 @@ void run(BytesReference expected, BytesReference updated, ActionListener { // Best-effort attempt to clean up after ourselves. + logger.trace(() -> Strings.format("[%s] aborting upload [%s] on exception", blobKey, uploadId), e); safeAbortMultipartUpload(uploadId); l.onFailure(e); })); @@ -651,7 +658,10 @@ void run(BytesReference expected, BytesReference updated, ActionListener upload.getInitiated().after(expiryDate))) { + logger.trace("[{}] fresh preexisting uploads vs {}", blobKey, expiryDate); return true; } @@ -674,9 +685,23 @@ private boolean hasPreexistingUploads() { safeAbortMultipartUpload(upload.getUploadId()); } + logger.trace("[{}] stale preexisting uploads vs {}", blobKey, expiryDate); return false; } + private void logUploads(String description, List uploads) { + if (logger.isTraceEnabled()) { + logger.trace( + "[{}] {}: [{}]", + blobKey, + description, + uploads.stream() + .map(multipartUpload -> multipartUpload.getUploadId() + ": " + multipartUpload.getInitiated()) + .collect(Collectors.joining(",")) + ); + } + } + private List listMultipartUploads() { final var listRequest = new ListMultipartUploadsRequest(bucket); listRequest.setPrefix(blobKey); @@ -776,6 +801,7 @@ private void ensureOtherUploadsComplete( } private void cancelOtherUploads(String uploadId, List currentUploads, ActionListener listener) { + logger.trace("[{}] upload [{}] cancelling other uploads", blobKey, uploadId); final var executor = blobStore.getSnapshotExecutor(); try (var listeners = new RefCountingListener(listener)) { for (final var currentUpload : currentUploads) { @@ -826,6 +852,7 @@ public void compareAndExchangeRegister( ) { final var clientReference = blobStore.clientReference(); ActionListener.run(ActionListener.releaseAfter(listener.delegateResponse((delegate, e) -> { + logger.trace(() -> Strings.format("[%s]: compareAndExchangeRegister failed", key), e); if (e instanceof AmazonS3Exception amazonS3Exception && amazonS3Exception.getStatusCode() == 404) { // an uncaught 404 means that our multipart upload was aborted by a concurrent operation before we could complete it delegate.onResponse(OptionalBytesReference.MISSING); @@ -853,6 +880,7 @@ public void getRegister(OperationPurpose purpose, String key, ActionListener Strings.format("[%s]: getRegister failed", key), e); if (e.getStatusCode() == 404) { return OptionalBytesReference.EMPTY; } else { diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index 25a2c4d8e1613..37d076362f396 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ 
b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -17,6 +17,7 @@ import com.amazonaws.services.s3.model.MultiObjectDeleteException; import com.amazonaws.services.s3.model.StorageClass; import com.amazonaws.util.AWSRequestMetrics; +import com.amazonaws.util.TimingInfo; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -32,6 +33,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.core.TimeValue; import org.elasticsearch.telemetry.metric.LongCounter; +import org.elasticsearch.telemetry.metric.LongHistogram; import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.threadpool.ThreadPool; @@ -43,6 +45,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicReference; @@ -50,7 +53,13 @@ import java.util.stream.Collectors; import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.repositories.RepositoriesModule.METRIC_EXCEPTIONS_COUNT; +import static org.elasticsearch.repositories.RepositoriesModule.METRIC_EXCEPTIONS_HISTOGRAM; +import static org.elasticsearch.repositories.RepositoriesModule.METRIC_OPERATIONS_COUNT; import static org.elasticsearch.repositories.RepositoriesModule.METRIC_REQUESTS_COUNT; +import static org.elasticsearch.repositories.RepositoriesModule.METRIC_THROTTLES_COUNT; +import static org.elasticsearch.repositories.RepositoriesModule.METRIC_THROTTLES_HISTOGRAM; +import static org.elasticsearch.repositories.RepositoriesModule.METRIC_UNSUCCESSFUL_OPERATIONS_COUNT; class S3BlobStore implements BlobStore { @@ -82,6 +91,12 @@ class S3BlobStore implements BlobStore { private final Executor snapshotExecutor; private final MeterRegistry meterRegistry; private final LongCounter requestCounter; + private final LongCounter exceptionCounter; + private final LongCounter throttleCounter; + private final LongCounter operationCounter; + private final LongCounter unsuccessfulOperationCounter; + private final LongHistogram exceptionHistogram; + private final LongHistogram throttleHistogram; private final StatsCollectors statsCollectors = new StatsCollectors(); @@ -113,6 +128,12 @@ class S3BlobStore implements BlobStore { this.snapshotExecutor = threadPool.executor(ThreadPool.Names.SNAPSHOT); this.meterRegistry = meterRegistry; this.requestCounter = this.meterRegistry.getLongCounter(METRIC_REQUESTS_COUNT); + this.exceptionCounter = this.meterRegistry.getLongCounter(METRIC_EXCEPTIONS_COUNT); + this.throttleCounter = this.meterRegistry.getLongCounter(METRIC_THROTTLES_COUNT); + this.operationCounter = this.meterRegistry.getLongCounter(METRIC_OPERATIONS_COUNT); + this.unsuccessfulOperationCounter = this.meterRegistry.getLongCounter(METRIC_UNSUCCESSFUL_OPERATIONS_COUNT); + this.exceptionHistogram = this.meterRegistry.getLongHistogram(METRIC_EXCEPTIONS_HISTOGRAM); + this.throttleHistogram = this.meterRegistry.getLongHistogram(METRIC_THROTTLES_HISTOGRAM); s3RequestRetryStats = new S3RequestRetryStats(getMaxRetries()); threadPool.scheduleWithFixedDelay(() -> { var priorRetryStats = s3RequestRetryStats; @@ -168,10 +189,40 @@ private IgnoreNoResponseMetricsCollector(Operation operation, OperationPurpose p @Override public final void collectMetrics(Request request, Response response) { + assert assertConsistencyBetweenHttpRequestAndOperation(request, operation); + final 
AWSRequestMetrics awsRequestMetrics = request.getAWSRequestMetrics(); + final TimingInfo timingInfo = awsRequestMetrics.getTimingInfo(); + final long requestCount = getCountForMetric(timingInfo, AWSRequestMetrics.Field.RequestCount); + final long exceptionCount = getCountForMetric(timingInfo, AWSRequestMetrics.Field.Exception); + final long throttleCount = getCountForMetric(timingInfo, AWSRequestMetrics.Field.ThrottleException); + + // For stats reported by the API, do not collect stats for a null response, for BWC. + // See https://github.com/elastic/elasticsearch/pull/71406 + // TODO Is this BWC really necessary? if (response != null) { - assert assertConsistencyBetweenHttpRequestAndOperation(request, operation); - counter.add(getRequestCount(request)); - requestCounter.incrementBy(getRequestCount(request), attributes); + counter.add(requestCount); + } + + // We collect all metrics regardless of whether the response is null: + // there are many situations other than network errors in which a null response can be returned, + // and in addition we are interested in the stats when there is a network outage. + final int numberOfAwsErrors = Optional.ofNullable(awsRequestMetrics.getProperty(AWSRequestMetrics.Field.AWSErrorCode)) + .map(List::size) + .orElse(0); + + operationCounter.incrementBy(1, attributes); + if (numberOfAwsErrors == requestCount) { + unsuccessfulOperationCounter.incrementBy(1, attributes); + } + + requestCounter.incrementBy(requestCount, attributes); + if (exceptionCount > 0) { + exceptionCounter.incrementBy(exceptionCount, attributes); + exceptionHistogram.record(exceptionCount, attributes); + } + if (throttleCount > 0) { + throttleCounter.incrementBy(throttleCount, attributes); + throttleHistogram.record(throttleCount, attributes); } } @@ -197,13 +248,18 @@ private boolean assertConsistencyBetweenHttpRequestAndOperation(Request reque } } - private static long getRequestCount(Request request) { - Number requestCount = request.getAWSRequestMetrics().getTimingInfo().getCounter(AWSRequestMetrics.Field.RequestCount.name()); - if (requestCount == null) { - logger.warn("Expected request count to be tracked for request [{}] but found not count.", request); + private static long getCountForMetric(TimingInfo info, AWSRequestMetrics.Field field) { + var count = info.getCounter(field.name()); + if (count == null) { + if (field == AWSRequestMetrics.Field.RequestCount) { + final String message = "Expected request count to be tracked but found no count."; + assert false : message; + logger.warn(message); + } return 0L; + } else { + return count.longValue(); } - return requestCount.longValue(); } @Override diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java index 7d1b495a0f008..ab322786fcd43 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java @@ -96,6 +96,13 @@ final class S3ClientSettings { key -> Setting.intSetting(key, 80, 0, 1 << 16, Property.NodeScope) ); + /** The proxy scheme for connecting to S3 through a proxy. */ + static final Setting.AffixSetting<Protocol> PROXY_SCHEME_SETTING = Setting.affixKeySetting( + PREFIX, + "proxy.scheme", + key -> new Setting<>(key, "http", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope) + ); + /** The username of a proxy to connect to s3 through. 
*/ static final Setting.AffixSetting PROXY_USERNAME_SETTING = Setting.affixKeySetting( PREFIX, @@ -174,6 +181,9 @@ final class S3ClientSettings { /** The port number the proxy host should be connected on. */ final int proxyPort; + /** The proxy scheme to use for connecting to s3 through a proxy. */ + final Protocol proxyScheme; + // these should be "secure" yet the api for the s3 client only takes String, so storing them // as SecureString here won't really help with anything /** An optional username for the proxy host, for basic authentication. */ @@ -209,6 +219,7 @@ private S3ClientSettings( Protocol protocol, String proxyHost, int proxyPort, + Protocol proxyScheme, String proxyUsername, String proxyPassword, int readTimeoutMillis, @@ -224,6 +235,7 @@ private S3ClientSettings( this.protocol = protocol; this.proxyHost = proxyHost; this.proxyPort = proxyPort; + this.proxyScheme = proxyScheme; this.proxyUsername = proxyUsername; this.proxyPassword = proxyPassword; this.readTimeoutMillis = readTimeoutMillis; @@ -252,6 +264,7 @@ S3ClientSettings refine(Settings repositorySettings) { final Protocol newProtocol = getRepoSettingOrDefault(PROTOCOL_SETTING, normalizedSettings, protocol); final String newProxyHost = getRepoSettingOrDefault(PROXY_HOST_SETTING, normalizedSettings, proxyHost); final int newProxyPort = getRepoSettingOrDefault(PROXY_PORT_SETTING, normalizedSettings, proxyPort); + final Protocol newProxyScheme = getRepoSettingOrDefault(PROXY_SCHEME_SETTING, normalizedSettings, proxyScheme); final int newReadTimeoutMillis = Math.toIntExact( getRepoSettingOrDefault(READ_TIMEOUT_SETTING, normalizedSettings, TimeValue.timeValueMillis(readTimeoutMillis)).millis() ); @@ -275,6 +288,7 @@ S3ClientSettings refine(Settings repositorySettings) { && protocol == newProtocol && Objects.equals(proxyHost, newProxyHost) && proxyPort == newProxyPort + && proxyScheme == newProxyScheme && newReadTimeoutMillis == readTimeoutMillis && maxRetries == newMaxRetries && newThrottleRetries == throttleRetries @@ -291,6 +305,7 @@ S3ClientSettings refine(Settings repositorySettings) { newProtocol, newProxyHost, newProxyPort, + newProxyScheme, proxyUsername, proxyPassword, newReadTimeoutMillis, @@ -398,6 +413,7 @@ static S3ClientSettings getClientSettings(final Settings settings, final String getConfigValue(settings, clientName, PROTOCOL_SETTING), getConfigValue(settings, clientName, PROXY_HOST_SETTING), getConfigValue(settings, clientName, PROXY_PORT_SETTING), + getConfigValue(settings, clientName, PROXY_SCHEME_SETTING), proxyUsername.toString(), proxyPassword.toString(), Math.toIntExact(getConfigValue(settings, clientName, READ_TIMEOUT_SETTING).millis()), @@ -428,6 +444,7 @@ public boolean equals(final Object o) { && Objects.equals(endpoint, that.endpoint) && protocol == that.protocol && Objects.equals(proxyHost, that.proxyHost) + && proxyScheme == that.proxyScheme && Objects.equals(proxyUsername, that.proxyUsername) && Objects.equals(proxyPassword, that.proxyPassword) && Objects.equals(disableChunkedEncoding, that.disableChunkedEncoding) @@ -443,6 +460,7 @@ public int hashCode() { protocol, proxyHost, proxyPort, + proxyScheme, proxyUsername, proxyPassword, readTimeoutMillis, diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index 97c065e771ffd..f85a66c5eb367 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java 
+++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -8,6 +8,7 @@ package org.elasticsearch.repositories.s3; +import com.amazonaws.regions.RegionUtils; import com.amazonaws.util.json.Jackson; import org.apache.lucene.util.SetOnce; @@ -49,6 +50,8 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, Relo // ClientConfiguration clinit has some classloader problems // TODO: fix that Class.forName("com.amazonaws.ClientConfiguration"); + // Pre-load region metadata to avoid looking them up dynamically without privileges enabled + RegionUtils.initialize(); } catch (final ClassNotFoundException e) { throw new RuntimeException(e); } @@ -116,6 +119,7 @@ public List> getSettings() { S3ClientSettings.PROTOCOL_SETTING, S3ClientSettings.PROXY_HOST_SETTING, S3ClientSettings.PROXY_PORT_SETTING, + S3ClientSettings.PROXY_SCHEME_SETTING, S3ClientSettings.PROXY_USERNAME_SETTING, S3ClientSettings.PROXY_PASSWORD_SETTING, S3ClientSettings.READ_TIMEOUT_SETTING, diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index 25bba12db6952..195a18891ebd0 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -135,7 +135,7 @@ public AmazonS3Reference client(RepositoryMetadata repositoryMetadata) { return existing; } final AmazonS3Reference clientReference = new AmazonS3Reference(buildClient(clientSettings)); - clientReference.incRef(); + clientReference.mustIncRef(); clientsCache = Maps.copyMapWithAddedEntry(clientsCache, clientSettings, clientReference); return clientReference; } @@ -221,6 +221,7 @@ static ClientConfiguration buildConfiguration(S3ClientSettings clientSettings) { // TODO: remove this leniency, these settings should exist together and be validated clientConfiguration.setProxyHost(clientSettings.proxyHost); clientConfiguration.setProxyPort(clientSettings.proxyPort); + clientConfiguration.setProxyProtocol(clientSettings.proxyScheme); clientConfiguration.setProxyUsername(clientSettings.proxyUsername); clientConfiguration.setProxyPassword(clientSettings.proxyPassword); } @@ -370,7 +371,7 @@ static class CustomWebIdentityTokenCredentialsProvider implements AWSCredentials // https://github.com/aws/amazon-eks-pod-identity-webhook/pull/41 stsRegion = systemEnvironment.getEnv(SDKGlobalConfiguration.AWS_REGION_ENV_VAR); if (stsRegion != null) { - stsClientBuilder.withRegion(stsRegion); + SocketAccess.doPrivilegedVoid(() -> stsClientBuilder.withRegion(stsRegion)); } else { LOGGER.warn("Unable to use regional STS endpoints because the AWS_REGION environment variable is not set"); } diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java index 3875181f98ece..a8a6d71928795 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -55,6 +55,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static 
org.elasticsearch.repositories.s3.S3ClientSettings.DISABLE_CHUNKED_ENCODING; import static org.elasticsearch.repositories.s3.S3ClientSettings.ENDPOINT_SETTING; import static org.elasticsearch.repositories.s3.S3ClientSettings.MAX_RETRIES_SETTING; @@ -216,7 +217,7 @@ public void testWriteBlobWithRetries() throws Exception { } }); try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", bytes), bytes.length)) { - blobContainer.writeBlob(OperationPurpose.SNAPSHOT, "write_blob_max_retries", stream, bytes.length, false); + blobContainer.writeBlob(randomPurpose(), "write_blob_max_retries", stream, bytes.length, false); } assertThat(countDown.isCountedDown(), is(true)); } @@ -239,7 +240,7 @@ public void testWriteBlobWithReadTimeouts() { Exception exception = expectThrows(IOException.class, () -> { try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", bytes), bytes.length)) { - blobContainer.writeBlob(OperationPurpose.SNAPSHOT, "write_blob_timeout", stream, bytes.length, false); + blobContainer.writeBlob(randomPurpose(), "write_blob_timeout", stream, bytes.length, false); } }); assertThat( @@ -345,7 +346,7 @@ public void testWriteLargeBlob() throws Exception { } }); - blobContainer.writeBlob(OperationPurpose.SNAPSHOT, "write_large_blob", new ZeroInputStream(blobSize), blobSize, false); + blobContainer.writeBlob(randomPurpose(), "write_large_blob", new ZeroInputStream(blobSize), blobSize, false); assertThat(countDownInitiate.isCountedDown(), is(true)); assertThat(countDownUploads.get(), equalTo(0)); @@ -443,7 +444,7 @@ public void testWriteLargeBlobStreaming() throws Exception { } }); - blobContainer.writeMetadataBlob(OperationPurpose.SNAPSHOT, "write_large_blob_streaming", false, randomBoolean(), out -> { + blobContainer.writeMetadataBlob(randomPurpose(), "write_large_blob_streaming", false, randomBoolean(), out -> { final byte[] buffer = new byte[16 * 1024]; long outstanding = blobSize; while (outstanding > 0) { @@ -518,7 +519,7 @@ public void handle(HttpExchange exchange) throws IOException { httpServer.createContext(downloadStorageEndpoint(blobContainer, "read_blob_max_retries"), new FlakyReadHandler()); - try (InputStream inputStream = blobContainer.readBlob(OperationPurpose.SNAPSHOT, "read_blob_max_retries")) { + try (InputStream inputStream = blobContainer.readBlob(randomPurpose(), "read_blob_max_retries")) { final int readLimit; final InputStream wrappedStream; if (randomBoolean()) { diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java index 9ae2589759d3f..fbbcfa475da44 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStoreException; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.core.Tuple; import org.elasticsearch.test.ESTestCase; @@ -40,6 +39,7 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; 
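
Looping back to the proxy support added above: the new s3.client.*.proxy.scheme setting defaults to "http", parses into an AWS Protocol, and is applied in S3Service.buildConfiguration via setProxyProtocol. A minimal sketch of how the setting resolves; the host and port values here are illustrative only:

```java
import com.amazonaws.Protocol;
import org.elasticsearch.common.settings.Settings;

// Illustrative only: resolve client settings that route S3 traffic through an HTTPS proxy.
Settings settings = Settings.builder()
    .put("s3.client.default.proxy.host", "proxy.example.com")  // hypothetical proxy endpoint
    .put("s3.client.default.proxy.port", 8443)
    .put("s3.client.default.proxy.scheme", "https")            // parsed as Protocol.valueOf("HTTPS")
    .build();
S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, "default");
assert clientSettings.proxyScheme == Protocol.HTTPS;           // later passed to setProxyProtocol(...)
```
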
import static org.mockito.ArgumentMatchers.any; @@ -59,7 +59,7 @@ public void testExecuteSingleUploadBlobSizeTooLarge() { final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> blobContainer.executeSingleUpload(OperationPurpose.SNAPSHOT, blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize) + () -> blobContainer.executeSingleUpload(randomPurpose(), blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize) ); assertEquals("Upload request size [" + blobSize + "] can't be larger than 5gb", e.getMessage()); } @@ -74,7 +74,7 @@ public void testExecuteSingleUploadBlobSizeLargerThanBufferSize() { final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> blobContainer.executeSingleUpload( - OperationPurpose.SNAPSHOT, + randomPurpose(), blobStore, blobName, new ByteArrayInputStream(new byte[0]), @@ -121,7 +121,7 @@ public void testExecuteSingleUpload() throws IOException { when(client.putObject(argumentCaptor.capture())).thenReturn(new PutObjectResult()); final ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[blobSize]); - blobContainer.executeSingleUpload(OperationPurpose.SNAPSHOT, blobStore, blobName, inputStream, blobSize); + blobContainer.executeSingleUpload(randomPurpose(), blobStore, blobName, inputStream, blobSize); final PutObjectRequest request = argumentCaptor.getValue(); assertEquals(bucketName, request.getBucketName()); @@ -142,13 +142,7 @@ public void testExecuteMultipartUploadBlobSizeTooLarge() { final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> blobContainer.executeMultipartUpload( - OperationPurpose.SNAPSHOT, - blobStore, - randomAlphaOfLengthBetween(1, 10), - null, - blobSize - ) + () -> blobContainer.executeMultipartUpload(randomPurpose(), blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize) ); assertEquals("Multipart upload request size [" + blobSize + "] can't be larger than 5tb", e.getMessage()); } @@ -160,13 +154,7 @@ public void testExecuteMultipartUploadBlobSizeTooSmall() { final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> blobContainer.executeMultipartUpload( - OperationPurpose.SNAPSHOT, - blobStore, - randomAlphaOfLengthBetween(1, 10), - null, - blobSize - ) + () -> blobContainer.executeMultipartUpload(randomPurpose(), blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize) ); assertEquals("Multipart upload request size [" + blobSize + "] can't be smaller than 5mb", e.getMessage()); } @@ -230,7 +218,7 @@ public void testExecuteMultipartUpload() throws IOException { final ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[0]); final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); - blobContainer.executeMultipartUpload(OperationPurpose.SNAPSHOT, blobStore, blobName, inputStream, blobSize); + blobContainer.executeMultipartUpload(randomPurpose(), blobStore, blobName, inputStream, blobSize); final InitiateMultipartUploadRequest initRequest = initArgCaptor.getValue(); assertEquals(bucketName, initRequest.getBucketName()); @@ -336,13 +324,7 @@ public void testExecuteMultipartUploadAborted() { final IOException e = expectThrows(IOException.class, () -> { final S3BlobContainer blobContainer = new S3BlobContainer(BlobPath.EMPTY, blobStore); - blobContainer.executeMultipartUpload( - OperationPurpose.SNAPSHOT, - blobStore, - blobName, - new ByteArrayInputStream(new byte[0]), - blobSize - ); + blobContainer.executeMultipartUpload(randomPurpose(), blobStore, 
blobName, new ByteArrayInputStream(new byte[0]), blobSize); }); assertEquals("Unable to upload object [" + blobName + "] using multipart upload", e.getMessage()); diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java index 8bff849ca26c2..c48e0dc337d30 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java @@ -37,6 +37,7 @@ public void testThereIsADefaultClientByDefault() { assertThat(defaultSettings.protocol, is(Protocol.HTTPS)); assertThat(defaultSettings.proxyHost, is(emptyString())); assertThat(defaultSettings.proxyPort, is(80)); + assertThat(defaultSettings.proxyScheme, is(Protocol.HTTP)); assertThat(defaultSettings.proxyUsername, is(emptyString())); assertThat(defaultSettings.proxyPassword, is(emptyString())); assertThat(defaultSettings.readTimeoutMillis, is(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT)); diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RetryingInputStreamTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RetryingInputStreamTests.java index d8366236a8184..f43fb8cfa4ed3 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RetryingInputStreamTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RetryingInputStreamTests.java @@ -14,7 +14,6 @@ import com.amazonaws.services.s3.model.S3ObjectInputStream; import org.apache.http.client.methods.HttpGet; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.io.Streams; import org.elasticsearch.core.Nullable; import org.elasticsearch.test.ESTestCase; @@ -23,6 +22,7 @@ import java.io.IOException; import java.util.Arrays; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -94,11 +94,11 @@ private S3RetryingInputStream createInputStream(final byte[] data, @Nullable fin if (position != null && length != null) { s3Object.getObjectMetadata().setContentLength(length); s3Object.setObjectContent(new S3ObjectInputStream(new ByteArrayInputStream(data, position, length), new HttpGet())); - return new S3RetryingInputStream(OperationPurpose.SNAPSHOT, blobStore, "_blob", position, Math.addExact(position, length - 1)); + return new S3RetryingInputStream(randomPurpose(), blobStore, "_blob", position, Math.addExact(position, length - 1)); } else { s3Object.getObjectMetadata().setContentLength(data.length); s3Object.setObjectContent(new S3ObjectInputStream(new ByteArrayInputStream(data), new HttpGet())); - return new S3RetryingInputStream(OperationPurpose.SNAPSHOT, blobStore, "_blob"); + return new S3RetryingInputStream(randomPurpose(), blobStore, "_blob"); } } } diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/AbstractRepositoryS3ClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/AbstractRepositoryS3ClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..ecf6709a2fcef --- /dev/null +++ 
b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/AbstractRepositoryS3ClientYamlTestSuiteIT.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.repositories.s3; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; + +public abstract class AbstractRepositoryS3ClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + @ParametersFactory + public static Iterable<Object[]> parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + + public AbstractRepositoryS3ClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } +} diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java index 1cbdf357d821b..2f2f42974f131 100644 --- a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java +++ b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java @@ -8,20 +8,65 @@ package org.elasticsearch.repositories.s3; +import fixture.s3.S3HttpFixture; +import fixture.s3.S3HttpFixtureWithEC2; +import fixture.s3.S3HttpFixtureWithSessionToken; + import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; -import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) +@ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482 +public class RepositoryS3ClientYamlTestSuiteIT extends AbstractRepositoryS3ClientYamlTestSuiteIT { + + public static final S3HttpFixture s3Fixture = new S3HttpFixture(); + public static final S3HttpFixtureWithSessionToken s3HttpFixtureWithSessionToken = new S3HttpFixtureWithSessionToken(); + public static final S3HttpFixtureWithEC2 s3Ec2 = new S3HttpFixtureWithEC2(); -public class RepositoryS3ClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + private static final String s3TemporarySessionToken = "session_token"; + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("repository-s3") + .keystore("s3.client.integration_test_permanent.access_key", System.getProperty("s3PermanentAccessKey")) +
.keystore("s3.client.integration_test_permanent.secret_key", System.getProperty("s3PermanentSecretKey")) + .keystore("s3.client.integration_test_temporary.access_key", System.getProperty("s3TemporaryAccessKey")) + .keystore("s3.client.integration_test_temporary.secret_key", System.getProperty("s3TemporarySecretKey")) + .keystore("s3.client.integration_test_temporary.session_token", s3TemporarySessionToken) + .setting("s3.client.integration_test_permanent.endpoint", s3Fixture::getAddress) + .setting("s3.client.integration_test_temporary.endpoint", s3HttpFixtureWithSessionToken::getAddress) + .setting("s3.client.integration_test_ec2.endpoint", s3Ec2::getAddress) + .systemProperty("com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", s3Ec2::getAddress) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(s3Ec2).around(s3HttpFixtureWithSessionToken).around(cluster); + + @ParametersFactory + public static Iterable<Object[]> parameters() throws Exception { + return createParameters( + new String[] { + "repository_s3/10_basic", + "repository_s3/20_repository_permanent_credentials", + "repository_s3/30_repository_temporary_credentials", + "repository_s3/40_repository_ec2_credentials" } + ); + } public RepositoryS3ClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { super(testCandidate); } - @ParametersFactory - public static Iterable<Object[]> parameters() throws Exception { - return ESClientYamlSuiteTestCase.createParameters(); + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); } } diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..e9bc9d0537cbb --- /dev/null +++ b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsClientYamlTestSuiteIT.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.repositories.s3; + +import fixture.s3.S3HttpFixtureWithECS; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +public class RepositoryS3EcsClientYamlTestSuiteIT extends AbstractRepositoryS3ClientYamlTestSuiteIT { + private static final S3HttpFixtureWithECS s3Ecs = new S3HttpFixtureWithECS(); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("repository-s3") + .setting("s3.client.integration_test_ecs.endpoint", s3Ecs::getAddress) + .environment("AWS_CONTAINER_CREDENTIALS_FULL_URI", () -> (s3Ecs.getAddress() + "/ecs_credentials_endpoint")) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(s3Ecs).around(cluster); + + @ParametersFactory + public static Iterable<Object[]> parameters() throws Exception { + return createParameters(new String[] { "repository_s3/50_repository_ecs_credentials" }); + } + + public RepositoryS3EcsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..41f9983ef26e6 --- /dev/null +++ b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioClientYamlTestSuiteIT.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.repositories.s3; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.fixtures.minio.MinioTestContainer; +import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) +public class RepositoryS3MinioClientYamlTestSuiteIT extends AbstractRepositoryS3ClientYamlTestSuiteIT { + + public static MinioTestContainer minio = new MinioTestContainer(); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("repository-s3") + .keystore("s3.client.integration_test_permanent.access_key", System.getProperty("s3PermanentAccessKey")) + .keystore("s3.client.integration_test_permanent.secret_key", System.getProperty("s3PermanentSecretKey")) + .setting("s3.client.integration_test_permanent.endpoint", () -> minio.getAddress()) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(minio).around(cluster); + + @ParametersFactory + public static Iterable<Object[]> parameters() throws Exception { + return createParameters(new String[] { "repository_s3/10_basic", "repository_s3/20_repository_permanent_credentials" }); + } + + public RepositoryS3MinioClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RegionalStsClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RegionalStsClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..b0a7f84c03c85 --- /dev/null +++ b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RegionalStsClientYamlTestSuiteIT.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.repositories.s3; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.junit.ClassRule; + +public class RepositoryS3RegionalStsClientYamlTestSuiteIT extends AbstractRepositoryS3ClientYamlTestSuiteIT { + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("repository-s3") + .configFile("repository-s3/aws-web-identity-token-file", Resource.fromClasspath("aws-web-identity-token-file")) + .environment("AWS_WEB_IDENTITY_TOKEN_FILE", System.getProperty("awsWebIdentityTokenExternalLocation")) + // The AWS STS SDK requires the role and session names to be set. We can verify that they are sent to STS in the + // S3HttpFixtureWithSTS fixture + .environment("AWS_ROLE_ARN", "arn:aws:iam::123456789012:role/FederatedWebIdentityRole") + .environment("AWS_ROLE_SESSION_NAME", "sts-fixture-test") + .environment("AWS_STS_REGIONAL_ENDPOINTS", "regional") + .environment("AWS_REGION", "ap-southeast-2") + .build(); + + @ParametersFactory + public static Iterable<Object[]> parameters() throws Exception { + return createParameters(new String[] { "repository_s3/10_basic" }); + } + + public RepositoryS3RegionalStsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..eb105e02353b6 --- /dev/null +++ b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsClientYamlTestSuiteIT.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.repositories.s3; + +import fixture.s3.S3HttpFixture; +import fixture.s3.S3HttpFixtureWithSTS; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +public class RepositoryS3StsClientYamlTestSuiteIT extends AbstractRepositoryS3ClientYamlTestSuiteIT { + + public static final S3HttpFixture s3Fixture = new S3HttpFixture(); + private static final S3HttpFixtureWithSTS s3Sts = new S3HttpFixtureWithSTS(); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("repository-s3") + .setting("s3.client.integration_test_sts.endpoint", s3Sts::getAddress) + .systemProperty("com.amazonaws.sdk.stsMetadataServiceEndpointOverride", () -> s3Sts.getAddress() + "/assume-role-with-web-identity") + .configFile("repository-s3/aws-web-identity-token-file", Resource.fromClasspath("aws-web-identity-token-file")) + .environment("AWS_WEB_IDENTITY_TOKEN_FILE", System.getProperty("awsWebIdentityTokenExternalLocation")) + // The AWS STS SDK requires the role and session names to be set. We can verify that they are sent to STS in the + // S3HttpFixtureWithSTS fixture + .environment("AWS_ROLE_ARN", "arn:aws:iam::123456789012:role/FederatedWebIdentityRole") + .environment("AWS_ROLE_SESSION_NAME", "sts-fixture-test") + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(s3Sts).around(cluster); + + @ParametersFactory + public static Iterable<Object[]> parameters() throws Exception { + return createParameters(new String[] { "repository_s3/60_repository_sts_credentials" }); + } + + public RepositoryS3StsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java b/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java index 9f807cc9f98f1..a47b9d8b622b5 100644 --- a/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java +++ b/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java @@ -107,8 +107,7 @@ public void testUrlRepository() throws Exception { .prepareRestoreSnapshot("url-repo", "test-snap") .setWaitForCompletion(true) .setIndices("test-idx") - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits().value, equalTo(100L)); diff --git a/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/AbstractURLBlobStoreTests.java b/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/AbstractURLBlobStoreTests.java index 92cb0c1cf75a2..132760c8b410b 100644 --- a/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/AbstractURLBlobStoreTests.java +++ 
b/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/AbstractURLBlobStoreTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.blobstore.BlobContainer; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Streams; @@ -21,6 +20,7 @@ import java.io.InputStream; import java.nio.file.NoSuchFileException; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.hamcrest.core.IsEqual.equalTo; public abstract class AbstractURLBlobStoreTests extends ESTestCase { @@ -34,7 +34,7 @@ public void testURLBlobStoreCanReadBlob() throws IOException { BytesArray data = getOriginalData(); String blobName = getBlobName(); BlobContainer container = getBlobContainer(); - try (InputStream stream = container.readBlob(OperationPurpose.SNAPSHOT, blobName)) { + try (InputStream stream = container.readBlob(randomPurpose(), blobName)) { BytesReference bytesRead = Streams.readFully(stream); assertThat(data, equalTo(bytesRead)); } @@ -46,7 +46,7 @@ public void testURLBlobStoreCanReadBlobRange() throws IOException { BlobContainer container = getBlobContainer(); int position = randomIntBetween(0, data.length() - 1); int length = randomIntBetween(1, data.length() - position); - try (InputStream stream = container.readBlob(OperationPurpose.SNAPSHOT, blobName, position, length)) { + try (InputStream stream = container.readBlob(randomPurpose(), blobName, position, length)) { BytesReference bytesRead = Streams.readFully(stream); assertThat(data.slice(position, length), equalTo(bytesRead)); } @@ -55,7 +55,7 @@ public void testURLBlobStoreCanReadBlobRange() throws IOException { public void testNoBlobFound() throws IOException { BlobContainer container = getBlobContainer(); String incorrectBlobName = UUIDs.base64UUID(); - try (InputStream ignored = container.readBlob(OperationPurpose.SNAPSHOT, incorrectBlobName)) { + try (InputStream ignored = container.readBlob(randomPurpose(), incorrectBlobName)) { ignored.read(); fail("Should have thrown NoSuchFileException exception"); } catch (NoSuchFileException e) { diff --git a/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/FileURLBlobStoreTests.java b/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/FileURLBlobStoreTests.java index 7bc793415c63e..6254ec1be6332 100644 --- a/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/FileURLBlobStoreTests.java +++ b/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/FileURLBlobStoreTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.blobstore.url.http.URLHttpClient; import org.elasticsearch.common.blobstore.url.http.URLHttpClientSettings; import org.elasticsearch.common.bytes.BytesArray; @@ -21,6 +20,7 @@ import java.nio.file.Files; import java.nio.file.Path; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.mockito.Mockito.mock; public class FileURLBlobStoreTests extends AbstractURLBlobStoreTests { @@ -60,6 +60,6 @@ String getBlobName() { @Override public void testURLBlobStoreCanReadBlobRange() throws IOException { - 
expectThrows(UnsupportedOperationException.class, () -> getBlobContainer().readBlob(OperationPurpose.SNAPSHOT, "test", 0, 12)); + expectThrows(UnsupportedOperationException.class, () -> getBlobContainer().readBlob(randomPurpose(), "test", 0, 12)); } } diff --git a/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/HttpURLBlobStoreTests.java b/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/HttpURLBlobStoreTests.java index f8d55ecab6ab8..b5be5cdbbe3d9 100644 --- a/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/HttpURLBlobStoreTests.java +++ b/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/HttpURLBlobStoreTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.blobstore.url.http.URLHttpClient; import org.elasticsearch.common.blobstore.url.http.URLHttpClientSettings; import org.elasticsearch.common.bytes.BytesArray; @@ -36,6 +35,8 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; + @SuppressForbidden(reason = "use http server") public class HttpURLBlobStoreTests extends AbstractURLBlobStoreTests { private static final Pattern RANGE_PATTERN = Pattern.compile("bytes=(\\d+)-(\\d+)$"); @@ -127,14 +128,8 @@ String getBlobName() { public void testRangeReadOutsideOfLegalRange() { BlobContainer container = getBlobContainer(); - expectThrows( - IllegalArgumentException.class, - () -> container.readBlob(OperationPurpose.SNAPSHOT, blobName, -1, content.length).read() - ); - expectThrows( - IOException.class, - () -> container.readBlob(OperationPurpose.SNAPSHOT, blobName, content.length + 1, content.length).read() - ); + expectThrows(IllegalArgumentException.class, () -> container.readBlob(randomPurpose(), blobName, -1, content.length).read()); + expectThrows(IOException.class, () -> container.readBlob(randomPurpose(), blobName, content.length + 1, content.length).read()); } private String getEndpointForServer() { diff --git a/plugins/analysis-icu/src/internalClusterTest/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java b/plugins/analysis-icu/src/internalClusterTest/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java index ae6a0cc71789f..955bcaf8f0352 100644 --- a/plugins/analysis-icu/src/internalClusterTest/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java +++ b/plugins/analysis-icu/src/internalClusterTest/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java @@ -69,8 +69,8 @@ public void testBasicUsage() throws Exception { // both values should collate to same value indexRandom( true, - client().prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) + prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), + prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) ); // searching for either of the terms should return both results since they collate to the same value @@ -111,10 +111,9 @@ public void testMultipleValues() throws 
Exception { // everything should be indexed fine, no exceptions indexRandom( true, - client().prepareIndex(index) - .setId("1") + prepareIndex(index).setId("1") .setSource("{\"id\":\"1\", \"collate\":[\"" + equivalent[0] + "\", \"" + equivalent[1] + "\"]}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[2] + "\"}", XContentType.JSON) + prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[2] + "\"}", XContentType.JSON) ); // using sort mode = max, values B and C will be used for the sort @@ -176,8 +175,8 @@ public void testNormalization() throws Exception { indexRandom( true, - client().prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) + prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), + prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) ); // searching for either of the terms should return both results since they collate to the same value @@ -222,8 +221,8 @@ public void testSecondaryStrength() throws Exception { indexRandom( true, - client().prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) + prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), + prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) ); SearchRequest request = new SearchRequest().indices(index) @@ -268,8 +267,8 @@ public void testIgnorePunctuation() throws Exception { indexRandom( true, - client().prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) + prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), + prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) ); SearchRequest request = new SearchRequest().indices(index) @@ -314,9 +313,9 @@ public void testIgnoreWhitespace() throws Exception { indexRandom( true, - client().prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"foo bar\"}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"foobar\"}", XContentType.JSON), - client().prepareIndex(index).setId("3").setSource("{\"id\":\"3\",\"collate\":\"foo-bar\"}", XContentType.JSON) + prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"foo bar\"}", XContentType.JSON), + prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"foobar\"}", XContentType.JSON), + prepareIndex(index).setId("3").setSource("{\"id\":\"3\",\"collate\":\"foo-bar\"}", XContentType.JSON) ); SearchRequest request = new SearchRequest().indices(index) @@ -354,8 +353,8 @@ public void testNumerics() throws Exception { assertAcked(indicesAdmin().prepareCreate(index).setMapping(builder)); - indexRandom(true, 
client().prepareIndex(index).setId("1").setSource(""" - {"collate":"foobar-10"}""", XContentType.JSON), client().prepareIndex(index).setId("2").setSource(""" + indexRandom(true, prepareIndex(index).setId("1").setSource(""" + {"collate":"foobar-10"}""", XContentType.JSON), prepareIndex(index).setId("2").setSource(""" {"collate":"foobar-9"}""", XContentType.JSON)); SearchRequest request = new SearchRequest().indices(index) @@ -392,10 +391,10 @@ public void testIgnoreAccentsButNotCase() throws Exception { assertAcked(indicesAdmin().prepareCreate(index).setMapping(builder)); - indexRandom(true, client().prepareIndex(index).setId("1").setSource(""" - {"id":"1","collate":"résumé"}""", XContentType.JSON), client().prepareIndex(index).setId("2").setSource(""" - {"id":"2","collate":"Resume"}""", XContentType.JSON), client().prepareIndex(index).setId("3").setSource(""" - {"id":"3","collate":"resume"}""", XContentType.JSON), client().prepareIndex(index).setId("4").setSource(""" + indexRandom(true, prepareIndex(index).setId("1").setSource(""" + {"id":"1","collate":"résumé"}""", XContentType.JSON), prepareIndex(index).setId("2").setSource(""" + {"id":"2","collate":"Resume"}""", XContentType.JSON), prepareIndex(index).setId("3").setSource(""" + {"id":"3","collate":"resume"}""", XContentType.JSON), prepareIndex(index).setId("4").setSource(""" {"id":"4","collate":"Résumé"}""", XContentType.JSON)); SearchRequest request = new SearchRequest().indices(index) @@ -431,8 +430,8 @@ public void testUpperCaseFirst() throws Exception { indexRandom( true, - client().prepareIndex(index).setId("1").setSource("{\"collate\":\"resume\"}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"collate\":\"Resume\"}", XContentType.JSON) + prepareIndex(index).setId("1").setSource("{\"collate\":\"resume\"}", XContentType.JSON), + prepareIndex(index).setId("2").setSource("{\"collate\":\"Resume\"}", XContentType.JSON) ); SearchRequest request = new SearchRequest().indices(index) @@ -480,8 +479,8 @@ public void testCustomRules() throws Exception { indexRandom( true, - client().prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) + prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), + prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) ); SearchRequest request = new SearchRequest().indices(index) diff --git a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java index 5d0ec97499505..dbcf64bef33e9 100644 --- a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java +++ b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java @@ -86,7 +86,7 @@ protected void registerAzureNode(final String nodeName) { } protected void assertNumberOfNodes(int expected) { - NodesInfoResponse nodeInfos = clusterAdmin().prepareNodesInfo().clear().execute().actionGet(); + NodesInfoResponse nodeInfos = 
clusterAdmin().prepareNodesInfo().clear().get(); assertNotNull(nodeInfos); assertNotNull(nodeInfos.getNodes()); assertEquals(expected, nodeInfos.getNodes().size()); diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 5107bb9051bd1..b57d6bce26633 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -72,7 +72,10 @@ tasks.register("writeTestJavaPolicy") { "permission org.bouncycastle.crypto.CryptoServicesPermission \"exportSecretKey\";", "permission org.bouncycastle.crypto.CryptoServicesPermission \"exportPrivateKey\";", "permission java.io.FilePermission \"\${javax.net.ssl.trustStore}\", \"read\";", - " permission java.util.PropertyPermission \"com.amazonaws.sdk.ec2MetadataServiceEndpointOverride\", \"write\";", + "permission java.util.PropertyPermission \"com.amazonaws.sdk.ec2MetadataServiceEndpointOverride\", \"write\";", + "permission java.security.SecurityPermission \"getProperty.jdk.tls.disabledAlgorithms\";", + "permission java.security.SecurityPermission \"getProperty.jdk.certpath.disabledAlgorithms\";", + "permission java.security.SecurityPermission \"getProperty.keystore.type.compat\";", "};" ].join("\n") ) diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java index ff32759508038..94aa05288a55c 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java @@ -62,6 +62,7 @@ static ClientConfiguration buildConfiguration(Ec2ClientSettings clientSettings) // TODO: remove this leniency, these settings should exist together and be validated clientConfiguration.setProxyHost(clientSettings.proxyHost); clientConfiguration.setProxyPort(clientSettings.proxyPort); + clientConfiguration.setProxyProtocol(clientSettings.proxyScheme); clientConfiguration.setProxyUsername(clientSettings.proxyUsername); clientConfiguration.setProxyPassword(clientSettings.proxyPassword); } diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java index 043114b26c81b..3a1cd1f1d33e6 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java @@ -48,6 +48,14 @@ final class Ec2ClientSettings { /** The port of a proxy to connect to ec2 through. */ static final Setting<Integer> PROXY_PORT_SETTING = Setting.intSetting("discovery.ec2.proxy.port", 80, 0, 1 << 16, Property.NodeScope); + /** The scheme to use for the proxy connection to ec2. Defaults to "http". */ + static final Setting<Protocol> PROXY_SCHEME_SETTING = new Setting<>( + "discovery.ec2.proxy.scheme", + "http", + s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), + Property.NodeScope + ); + /** An override for the ec2 endpoint to connect to. */ static final Setting<String> ENDPOINT_SETTING = new Setting<>( "discovery.ec2.endpoint", @@ -56,7 +64,7 @@ final class Ec2ClientSettings { Property.NodeScope ); - /** The protocol to use to connect to to ec2. */ + /** The protocol to use to connect to ec2. 
*/ static final Setting<Protocol> PROTOCOL_SETTING = new Setting<>( "discovery.ec2.protocol", "https", @@ -99,6 +107,9 @@ final class Ec2ClientSettings { /** The port number the proxy host should be connected on. */ final int proxyPort; + /** The scheme to use for the proxy connection to ec2 */ + final Protocol proxyScheme; + // these should be "secure" yet the api for the ec2 client only takes String, so // storing them // as SecureString here won't really help with anything @@ -117,6 +128,7 @@ protected Ec2ClientSettings( Protocol protocol, String proxyHost, int proxyPort, + Protocol proxyScheme, String proxyUsername, String proxyPassword, int readTimeoutMillis @@ -126,6 +138,7 @@ protected Ec2ClientSettings( this.protocol = protocol; this.proxyHost = proxyHost; this.proxyPort = proxyPort; + this.proxyScheme = proxyScheme; this.proxyUsername = proxyUsername; this.proxyPassword = proxyPassword; this.readTimeoutMillis = readTimeoutMillis; @@ -196,6 +209,7 @@ static Ec2ClientSettings getClientSettings(Settings settings) { PROTOCOL_SETTING.get(settings), PROXY_HOST_SETTING.get(settings), PROXY_PORT_SETTING.get(settings), + PROXY_SCHEME_SETTING.get(settings), proxyUsername.toString(), proxyPassword.toString(), (int) READ_TIMEOUT_SETTING.get(settings).millis() diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java index 08cf7ea559bf7..69447e800d4ac 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java @@ -104,6 +104,7 @@ public List<Setting<?>> getSettings() { Ec2ClientSettings.PROTOCOL_SETTING, Ec2ClientSettings.PROXY_HOST_SETTING, Ec2ClientSettings.PROXY_PORT_SETTING, + Ec2ClientSettings.PROXY_SCHEME_SETTING, Ec2ClientSettings.PROXY_USERNAME_SETTING, Ec2ClientSettings.PROXY_PASSWORD_SETTING, Ec2ClientSettings.READ_TIMEOUT_SETTING, diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java index bb73b951ca4f7..aa4a5bd6e54ea 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java @@ -119,7 +119,16 @@ public void testRejectionOfLoneSessionToken() { } public void testAWSDefaultConfiguration() { - launchAWSConfigurationTest(Settings.EMPTY, Protocol.HTTPS, null, -1, null, null, ClientConfiguration.DEFAULT_SOCKET_TIMEOUT); + launchAWSConfigurationTest( + Settings.EMPTY, + Protocol.HTTPS, + null, + -1, + Protocol.HTTP, + null, + null, + ClientConfiguration.DEFAULT_SOCKET_TIMEOUT + ); } public void testAWSConfigurationWithAwsSettings() { @@ -130,10 +139,20 @@ public void testAWSConfigurationWithAwsSettings() { .put("discovery.ec2.protocol", "http") .put("discovery.ec2.proxy.host", "aws_proxy_host") .put("discovery.ec2.proxy.port", 8080) + .put("discovery.ec2.proxy.scheme", "http") .put("discovery.ec2.read_timeout", "10s") .setSecureSettings(secureSettings) .build(); - launchAWSConfigurationTest(settings, Protocol.HTTP, "aws_proxy_host", 8080, "aws_proxy_username", "aws_proxy_password", 10000); + launchAWSConfigurationTest( + settings, + Protocol.HTTP, + "aws_proxy_host", + 8080, + Protocol.HTTP, + "aws_proxy_username", + 
"aws_proxy_password", + 10000 + ); } protected void launchAWSConfigurationTest( @@ -141,6 +160,7 @@ protected void launchAWSConfigurationTest( Protocol expectedProtocol, String expectedProxyHost, int expectedProxyPort, + Protocol expectedProxyScheme, String expectedProxyUsername, String expectedProxyPassword, int expectedReadTimeout @@ -151,6 +171,7 @@ protected void launchAWSConfigurationTest( assertThat(configuration.getProtocol(), is(expectedProtocol)); assertThat(configuration.getProxyHost(), is(expectedProxyHost)); assertThat(configuration.getProxyPort(), is(expectedProxyPort)); + assertThat(configuration.getProxyProtocol(), is(expectedProxyScheme)); assertThat(configuration.getProxyUsername(), is(expectedProxyUsername)); assertThat(configuration.getProxyPassword(), is(expectedProxyPassword)); assertThat(configuration.getSocketTimeout(), is(expectedReadTimeout)); diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java index 93ff42fb50218..b9bea564e2720 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.discovery.ec2; import com.amazonaws.ClientConfiguration; +import com.amazonaws.Protocol; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; @@ -160,6 +161,7 @@ public void testClientSettingsReInit() throws IOException { final Settings settings1 = Settings.builder() .put(Ec2ClientSettings.PROXY_HOST_SETTING.getKey(), "proxy_host_1") .put(Ec2ClientSettings.PROXY_PORT_SETTING.getKey(), 881) + .put(Ec2ClientSettings.PROXY_SCHEME_SETTING.getKey(), "http") .put(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), "ec2_endpoint_1") .setSecureSettings(mockSecure1) .build(); @@ -175,6 +177,7 @@ public void testClientSettingsReInit() throws IOException { final Settings settings2 = Settings.builder() .put(Ec2ClientSettings.PROXY_HOST_SETTING.getKey(), "proxy_host_2") .put(Ec2ClientSettings.PROXY_PORT_SETTING.getKey(), 882) + .put(Ec2ClientSettings.PROXY_SCHEME_SETTING.getKey(), "http") .put(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), "ec2_endpoint_2") .setSecureSettings(mockSecure2) .build(); @@ -194,6 +197,7 @@ public void testClientSettingsReInit() throws IOException { assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_1")); assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_1")); assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPort(), is(881)); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyProtocol(), is(Protocol.HTTP)); assertThat(((AmazonEC2Mock) clientReference.client()).endpoint, is("ec2_endpoint_1")); } // reload secure settings2 @@ -211,6 +215,7 @@ public void testClientSettingsReInit() throws IOException { assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_1")); assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_1")); assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPort(), is(881)); + assertThat(((AmazonEC2Mock) 
clientReference.client()).configuration.getProxyProtocol(), is(Protocol.HTTP)); assertThat(((AmazonEC2Mock) clientReference.client()).endpoint, is("ec2_endpoint_1")); } } @@ -228,6 +233,7 @@ public void testClientSettingsReInit() throws IOException { assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_2")); assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_2")); assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPort(), is(882)); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyProtocol(), is(Protocol.HTTP)); assertThat(((AmazonEC2Mock) clientReference.client()).endpoint, is("ec2_endpoint_2")); } } diff --git a/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java b/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java index 026dabd64eb0b..ee16153a98de1 100644 --- a/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java +++ b/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java @@ -100,7 +100,7 @@ private void assertSizeMappingEnabled(String index, boolean enabled) throws IOEx public void testBasic() throws Exception { assertAcked(prepareCreate("test").setMapping("_size", "enabled=true")); final String source = "{\"f\":\"" + randomAlphaOfLengthBetween(1, 100) + "\"}"; - indexRandom(true, client().prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); + indexRandom(true, prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); GetResponse getResponse = client().prepareGet("test", "1").setStoredFields("_size").get(); assertNotNull(getResponse.getField("_size")); assertEquals(source.length(), (int) getResponse.getField("_size").getValue()); @@ -109,7 +109,7 @@ public void testBasic() throws Exception { public void testGetWithFields() throws Exception { assertAcked(prepareCreate("test").setMapping("_size", "enabled=true")); final String source = "{\"f\":\"" + randomAlphaOfLengthBetween(1, 100) + "\"}"; - indexRandom(true, client().prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); + indexRandom(true, prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); assertResponse( prepareSearch("test").addFetchField("_size"), response -> assertEquals( @@ -134,7 +134,7 @@ public void testGetWithFields() throws Exception { public void testWildCardWithFieldsWhenDisabled() throws Exception { assertAcked(prepareCreate("test").setMapping("_size", "enabled=false")); final String source = "{\"f\":\"" + randomAlphaOfLengthBetween(1, 100) + "\"}"; - indexRandom(true, client().prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); + indexRandom(true, prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); assertResponse( prepareSearch("test").addFetchField("_size"), response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) @@ -154,7 +154,7 @@ public void testWildCardWithFieldsWhenDisabled() throws Exception { public void testWildCardWithFieldsWhenNotProvided() throws Exception { assertAcked(prepareCreate("test")); final String source = "{\"f\":\"" + randomAlphaOfLengthBetween(1, 100) + "\"}"; - indexRandom(true, client().prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); + indexRandom(true, 
prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); assertResponse( prepareSearch("test").addFetchField("_size"), response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java index 6d7aca0ca1d56..592192f29c262 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java @@ -20,7 +20,6 @@ import org.apache.hadoop.util.Progressable; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.core.Streams; import org.elasticsearch.core.SuppressForbidden; @@ -44,6 +43,7 @@ import javax.security.auth.Subject; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase.randomBytes; import static org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase.readBlobFully; import static org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase.writeBlob; @@ -131,7 +131,7 @@ public void testReadOnly() throws Exception { byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16))); writeBlob(container, "foo", new BytesArray(data), randomBoolean()); assertArrayEquals(readBlobFully(container, "foo", data.length), data); - assertTrue(container.blobExists(OperationPurpose.SNAPSHOT, "foo")); + assertTrue(container.blobExists(randomPurpose(), "foo")); } public void testReadRange() throws Exception { @@ -162,7 +162,7 @@ public void testReadRange() throws Exception { int pos = randomIntBetween(0, data.length / 2); int len = randomIntBetween(pos, data.length) - pos; assertArrayEquals(readBlobPartially(container, "foo", pos, len), Arrays.copyOfRange(data, pos, pos + len)); - assertTrue(container.blobExists(OperationPurpose.SNAPSHOT, "foo")); + assertTrue(container.blobExists(randomPurpose(), "foo")); } public void testReplicationFactor() throws Exception { @@ -209,24 +209,24 @@ public void testListBlobsByPrefix() throws Exception { byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16))); writeBlob(container, "foo", new BytesArray(data), randomBoolean()); assertArrayEquals(readBlobFully(container, "foo", data.length), data); - assertTrue(container.blobExists(OperationPurpose.SNAPSHOT, "foo")); + assertTrue(container.blobExists(randomPurpose(), "foo")); writeBlob(container, "bar", new BytesArray(data), randomBoolean()); assertArrayEquals(readBlobFully(container, "bar", data.length), data); - assertTrue(container.blobExists(OperationPurpose.SNAPSHOT, "bar")); + assertTrue(container.blobExists(randomPurpose(), "bar")); - assertEquals(2, container.listBlobsByPrefix(OperationPurpose.SNAPSHOT, null).size()); - assertEquals(1, container.listBlobsByPrefix(OperationPurpose.SNAPSHOT, "fo").size()); - assertEquals(0, container.listBlobsByPrefix(OperationPurpose.SNAPSHOT, "noSuchFile").size()); + assertEquals(2, container.listBlobsByPrefix(randomPurpose(), null).size()); + assertEquals(1, 
container.listBlobsByPrefix(randomPurpose(), "fo").size()); + assertEquals(0, container.listBlobsByPrefix(randomPurpose(), "noSuchFile").size()); - container.delete(OperationPurpose.SNAPSHOT); - assertEquals(0, container.listBlobsByPrefix(OperationPurpose.SNAPSHOT, null).size()); - assertEquals(0, container.listBlobsByPrefix(OperationPurpose.SNAPSHOT, "fo").size()); - assertEquals(0, container.listBlobsByPrefix(OperationPurpose.SNAPSHOT, "noSuchFile").size()); + container.delete(randomPurpose()); + assertEquals(0, container.listBlobsByPrefix(randomPurpose(), null).size()); + assertEquals(0, container.listBlobsByPrefix(randomPurpose(), "fo").size()); + assertEquals(0, container.listBlobsByPrefix(randomPurpose(), "noSuchFile").size()); } public static byte[] readBlobPartially(BlobContainer container, String name, int pos, int length) throws IOException { byte[] data = new byte[length]; - try (InputStream inputStream = container.readBlob(OperationPurpose.SNAPSHOT, name, pos, length)) { + try (InputStream inputStream = container.readBlob(randomPurpose(), name, pos, length)) { assertThat(Streams.readFully(inputStream, data), CoreMatchers.equalTo(length)); assertThat(inputStream.read(), CoreMatchers.equalTo(-1)); } diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java index b76d2e27be66a..16e8d2610f3fb 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java @@ -61,9 +61,9 @@ public void testSimpleWorkflow() { logger.info("--> indexing some data"); for (int i = 0; i < 100; i++) { - client().prepareIndex("test-idx-1").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); - client().prepareIndex("test-idx-2").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); - client().prepareIndex("test-idx-3").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); + prepareIndex("test-idx-1").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); + prepareIndex("test-idx-2").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); + prepareIndex("test-idx-3").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); } client().admin().indices().prepareRefresh().get(); assertThat(count(client, "test-idx-1"), equalTo(100L)); @@ -111,8 +111,7 @@ public void testSimpleWorkflow() { .cluster() .prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); @@ -129,8 +128,7 @@ public void testSimpleWorkflow() { .prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) .setIndices("test-idx-*", "-test-idx-2") - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); assertThat(count(client, "test-idx-1"), equalTo(100L)); diff --git a/qa/ccs-common-rest/build.gradle b/qa/ccs-common-rest/build.gradle index eb4c40044f14b..8ad306144bd98 100644 --- a/qa/ccs-common-rest/build.gradle +++ b/qa/ccs-common-rest/build.gradle @@ -41,6 +41,7 @@ tasks.named("yamlRestTest") { 'search.aggregation/50_filter/Standard queries get cached', 'search.aggregation/50_filter/Terms lookup gets cached', // terms lookup by "index" doesn't seem to work correctly 
'search.aggregation/70_adjacency_matrix/Terms lookup', // terms lookup by "index" doesn't seem to work correctly + 'search/350_point_in_time/point-in-time with index filter' ].join(',') } diff --git a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java index 5ad525b472b12..7c1514d2d1a6a 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java @@ -389,10 +389,7 @@ private boolean shouldReplaceIndexWithRemote(String apiName) { if (apiName.equals("search") || apiName.equals("msearch") || apiName.equals("async_search.submit")) { final String testCandidateTestPath = testCandidate.getTestPath(); - if (testCandidateTestPath.equals("search/350_point_in_time/basic") - || testCandidateTestPath.equals("search/350_point_in_time/point-in-time with slicing") - || testCandidateTestPath.equals("search/350_point_in_time/msearch") - || testCandidateTestPath.equals("search/350_point_in_time/wildcard") + if (testCandidateTestPath.startsWith("search/350_point_in_time") || testCandidateTestPath.equals("async_search/20-with-poin-in-time/Async search with point in time")) { return false; } diff --git a/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java index 3279777c793ba..b17b81b6ac188 100644 --- a/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java +++ b/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java @@ -12,26 +12,20 @@ import org.apache.http.entity.ContentType; import org.apache.http.nio.entity.NStringEntity; import org.apache.lucene.search.TotalHits; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchScrollRequest; -import org.elasticsearch.action.search.SearchShardsAction; import org.elasticsearch.action.search.SearchShardsRequest; import org.elasticsearch.action.search.SearchShardsResponse; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchShardsAction; import org.elasticsearch.client.Request; -import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.client.RestClient; -import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -45,13 +39,12 @@ import 
org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.ObjectPath; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; -import org.junit.AfterClass; -import org.junit.Before; import java.io.IOException; import java.util.Collections; @@ -62,28 +55,13 @@ import java.util.concurrent.TimeUnit; import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.equalTo; @SuppressWarnings("removal") public class CrossClusterSearchUnavailableClusterIT extends ESRestTestCase { - private static RestHighLevelClient restHighLevelClient; - private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); - @Before - public void initHighLevelClient() throws IOException { - super.initClient(); - if (restHighLevelClient == null) { - restHighLevelClient = new HighLevelClient(client()); - } - } - - @AfterClass - public static void cleanupClient() throws IOException { - restHighLevelClient.close(); - restHighLevelClient = null; - } - @Override public void tearDown() throws Exception { super.tearDown(); @@ -103,7 +81,7 @@ private static MockTransportService startTransport( MockTransportService newService = MockTransportService.createNewService(s, version, transportVersion, threadPool, null); try { newService.registerRequestHandler( - SearchShardsAction.NAME, + TransportSearchShardsAction.TYPE.name(), EsExecutors.DIRECT_EXECUTOR_SERVICE, SearchShardsRequest::new, (request, channel, task) -> { @@ -111,7 +89,7 @@ private static MockTransportService startTransport( } ); newService.registerRequestHandler( - SearchAction.NAME, + TransportSearchAction.TYPE.name(), EsExecutors.DIRECT_EXECUTOR_SERVICE, SearchRequest::new, (request, channel, task) -> { @@ -176,57 +154,74 @@ public void testSearchSkipUnavailable() throws IOException { updateRemoteClusterSettings(Collections.singletonMap("seeds", remoteNode.getAddress().toString())); for (int i = 0; i < 10; i++) { - restHighLevelClient.index(new IndexRequest("index").id(String.valueOf(i)).source("field", "value"), RequestOptions.DEFAULT); + Request request = new Request("POST", "/index/_doc"); + request.setJsonEntity("{ \"field\" : \"value\" }"); + Response response = client().performRequest(request); + assertEquals(201, response.getStatusLine().getStatusCode()); } Response refreshResponse = client().performRequest(new Request("POST", "/index/_refresh")); assertEquals(200, refreshResponse.getStatusLine().getStatusCode()); { - SearchResponse response = restHighLevelClient.search(new SearchRequest("index"), RequestOptions.DEFAULT); - assertSame(SearchResponse.Clusters.EMPTY, response.getClusters()); - assertEquals(10, response.getHits().getTotalHits().value); - assertEquals(10, response.getHits().getHits().length); + Response response = client().performRequest(new Request("GET", "/index/_search")); + assertEquals(200, response.getStatusLine().getStatusCode()); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertNull(objectPath.evaluate("_clusters")); + assertThat(objectPath.evaluate("hits.total.value"), equalTo(10)); + assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(10)); } { - SearchResponse response = 
restHighLevelClient.search(new SearchRequest("index", "remote1:index"), RequestOptions.DEFAULT); - assertEquals(2, response.getClusters().getTotal()); - assertEquals(2, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED)); - assertEquals(10, response.getHits().getTotalHits().value); - assertEquals(10, response.getHits().getHits().length); + Response response = client().performRequest(new Request("GET", "/index,remote1:index/_search")); + assertEquals(200, response.getStatusLine().getStatusCode()); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertNotNull(objectPath.evaluate("_clusters")); + assertThat(objectPath.evaluate("_clusters.total"), equalTo(2)); + assertThat(objectPath.evaluate("_clusters.successful"), equalTo(2)); + assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.running"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0)); + assertThat(objectPath.evaluate("hits.total.value"), equalTo(10)); + assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(10)); } { - SearchResponse response = restHighLevelClient.search(new SearchRequest("remote1:index"), RequestOptions.DEFAULT); - assertEquals(1, response.getClusters().getTotal()); - assertEquals(1, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED)); - assertEquals(0, response.getHits().getTotalHits().value); + Response response = client().performRequest(new Request("GET", "/remote1:index/_search")); + assertEquals(200, response.getStatusLine().getStatusCode()); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertNotNull(objectPath.evaluate("_clusters")); + assertThat(objectPath.evaluate("_clusters.total"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.successful"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.running"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0)); + assertThat(objectPath.evaluate("hits.total.value"), equalTo(0)); + assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(0)); } { - SearchResponse response = restHighLevelClient.search( - new SearchRequest("index", "remote1:index").scroll("1m"), - RequestOptions.DEFAULT - ); - assertEquals(2, response.getClusters().getTotal()); - assertEquals(2, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertEquals(0, 
response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED)); - assertEquals(10, response.getHits().getTotalHits().value); - assertEquals(10, response.getHits().getHits().length); - String scrollId = response.getScrollId(); - SearchResponse scrollResponse = restHighLevelClient.scroll(new SearchScrollRequest(scrollId), RequestOptions.DEFAULT); - assertSame(SearchResponse.Clusters.EMPTY, scrollResponse.getClusters()); - assertEquals(10, scrollResponse.getHits().getTotalHits().value); - assertEquals(0, scrollResponse.getHits().getHits().length); + Response response = client().performRequest(new Request("GET", "/index,remote1:index/_search?scroll=1m")); + assertEquals(200, response.getStatusLine().getStatusCode()); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertNotNull(objectPath.evaluate("_clusters")); + assertThat(objectPath.evaluate("_clusters.total"), equalTo(2)); + assertThat(objectPath.evaluate("_clusters.successful"), equalTo(2)); + assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.running"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0)); + assertThat(objectPath.evaluate("hits.total.value"), equalTo(10)); + assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(10)); + String scrollId = objectPath.evaluate("_scroll_id"); + assertNotNull(scrollId); + Request scrollRequest = new Request("POST", "/_search/scroll"); + scrollRequest.setJsonEntity("{ \"scroll_id\" : \"" + scrollId + "\" }"); + Response scrollResponse = client().performRequest(scrollRequest); + assertEquals(200, scrollResponse.getStatusLine().getStatusCode()); + ObjectPath scrollObjectPath = ObjectPath.createFromResponse(scrollResponse); + assertNull(scrollObjectPath.evaluate("_clusters")); + assertThat(scrollObjectPath.evaluate("hits.total.value"), equalTo(10)); + assertThat(scrollObjectPath.evaluateArraySize("hits.hits"), equalTo(0)); } remoteTransport.close(); @@ -234,45 +229,57 @@ public void testSearchSkipUnavailable() throws IOException { updateRemoteClusterSettings(Collections.singletonMap("skip_unavailable", true)); { - SearchResponse response = restHighLevelClient.search(new SearchRequest("index", "remote1:index"), RequestOptions.DEFAULT); - assertEquals(2, response.getClusters().getTotal()); - assertEquals(1, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertEquals(1, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED)); - assertEquals(10, response.getHits().getTotalHits().value); - assertEquals(10, response.getHits().getHits().length); + Response response = client().performRequest(new Request("GET", "/index,remote1:index/_search")); + assertEquals(200, response.getStatusLine().getStatusCode()); + ObjectPath objectPath = 
ObjectPath.createFromResponse(response); + assertNotNull(objectPath.evaluate("_clusters")); + assertThat(objectPath.evaluate("_clusters.total"), equalTo(2)); + assertThat(objectPath.evaluate("_clusters.successful"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.running"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0)); + assertThat(objectPath.evaluate("hits.total.value"), equalTo(10)); + assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(10)); } { - SearchResponse response = restHighLevelClient.search(new SearchRequest("remote1:index"), RequestOptions.DEFAULT); - assertEquals(1, response.getClusters().getTotal()); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertEquals(1, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED)); - assertEquals(0, response.getHits().getTotalHits().value); + Response response = client().performRequest(new Request("GET", "/remote1:index/_search")); + assertEquals(200, response.getStatusLine().getStatusCode()); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertNotNull(objectPath.evaluate("_clusters")); + assertThat(objectPath.evaluate("_clusters.total"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.successful"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.running"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0)); + assertThat(objectPath.evaluate("hits.total.value"), equalTo(0)); + assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(0)); } { - SearchResponse response = restHighLevelClient.search( - new SearchRequest("index", "remote1:index").scroll("1m"), - RequestOptions.DEFAULT - ); - assertEquals(2, response.getClusters().getTotal()); - assertEquals(1, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertEquals(1, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED)); - assertEquals(10, response.getHits().getTotalHits().value); - assertEquals(10, response.getHits().getHits().length); - String scrollId = response.getScrollId(); - SearchResponse scrollResponse = restHighLevelClient.scroll(new SearchScrollRequest(scrollId), RequestOptions.DEFAULT); - assertSame(SearchResponse.Clusters.EMPTY, scrollResponse.getClusters()); - assertEquals(10, scrollResponse.getHits().getTotalHits().value); - assertEquals(0, scrollResponse.getHits().getHits().length); + Response response = client().performRequest(new Request("GET", "/index,remote1:index/_search?scroll=1m")); + assertEquals(200, 
response.getStatusLine().getStatusCode()); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertNotNull(objectPath.evaluate("_clusters")); + assertThat(objectPath.evaluate("_clusters.total"), equalTo(2)); + assertThat(objectPath.evaluate("_clusters.successful"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.running"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0)); + assertThat(objectPath.evaluate("hits.total.value"), equalTo(10)); + assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(10)); + String scrollId = objectPath.evaluate("_scroll_id"); + assertNotNull(scrollId); + Request scrollRequest = new Request("POST", "/_search/scroll"); + scrollRequest.setJsonEntity("{ \"scroll_id\" : \"" + scrollId + "\" }"); + Response scrollResponse = client().performRequest(scrollRequest); + assertEquals(200, scrollResponse.getStatusLine().getStatusCode()); + ObjectPath scrollObjectPath = ObjectPath.createFromResponse(scrollResponse); + assertNull(scrollObjectPath.evaluate("_clusters")); + assertThat(scrollObjectPath.evaluate("hits.total.value"), equalTo(10)); + assertThat(scrollObjectPath.evaluateArraySize("hits.hits"), equalTo(0)); } updateRemoteClusterSettings(Collections.singletonMap("skip_unavailable", false)); @@ -344,28 +351,25 @@ public void testSkipUnavailableDependsOnSeeds() throws IOException { private static void assertSearchConnectFailure() { { - ElasticsearchException exception = expectThrows( - ElasticsearchException.class, - () -> restHighLevelClient.search(new SearchRequest("index", "remote1:index"), RequestOptions.DEFAULT) + ResponseException exception = expectThrows( + ResponseException.class, + () -> client().performRequest(new Request("POST", "/index,remote1:index/_search")) ); - ElasticsearchException rootCause = (ElasticsearchException) exception.getRootCause(); - assertThat(rootCause.getMessage(), containsString("connect_exception")); + assertThat(exception.getMessage(), containsString("connect_exception")); } { - ElasticsearchException exception = expectThrows( - ElasticsearchException.class, - () -> restHighLevelClient.search(new SearchRequest("remote1:index"), RequestOptions.DEFAULT) + ResponseException exception = expectThrows( + ResponseException.class, + () -> client().performRequest(new Request("POST", "/remote1:index/_search")) ); - ElasticsearchException rootCause = (ElasticsearchException) exception.getRootCause(); - assertThat(rootCause.getMessage(), containsString("connect_exception")); + assertThat(exception.getMessage(), containsString("connect_exception")); } { - ElasticsearchException exception = expectThrows( - ElasticsearchException.class, - () -> restHighLevelClient.search(new SearchRequest("remote1:index").scroll("1m"), RequestOptions.DEFAULT) + ResponseException exception = expectThrows( + ResponseException.class, + () -> client().performRequest(new Request("POST", "/remote1:index/_search?scroll=1m")) ); - ElasticsearchException rootCause = (ElasticsearchException) exception.getRootCause(); - assertThat(rootCause.getMessage(), containsString("connect_exception")); + assertThat(exception.getMessage(), containsString("connect_exception")); } } @@ -399,12 +403,6 @@ private static HttpEntity buildUpdateSettingsRequestBody(Map set return new NStringEntity(requestBody, ContentType.APPLICATION_JSON); } - private static class HighLevelClient extends 
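The CrossClusterSearchUnavailableClusterIT rewrite above follows a single pattern: calls that used to go through the now-removed RestHighLevelClient are issued as plain Request/Response round trips, and assertions walk the parsed JSON with ObjectPath. evaluate(path) follows a dot-separated path and yields null for an absent key, which is why the missing _clusters section of a purely local search is checked with assertNull. The pattern in isolation (a sketch, with made-up index name and counts):

    // Fragment as it would appear inside any ESRestTestCase test; values illustrative.
    Response response = client().performRequest(new Request("GET", "/index/_search"));
    assertEquals(200, response.getStatusLine().getStatusCode());
    ObjectPath objectPath = ObjectPath.createFromResponse(response);
    assertNull(objectPath.evaluate("_clusters"));                      // absent section resolves to null
    assertThat(objectPath.evaluate("hits.total.value"), equalTo(10));  // dotted path into the response body
    assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(10));
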
RestHighLevelClient { - private HighLevelClient(RestClient restClient) { - super(restClient, (client) -> {}, Collections.emptyList()); - } - } - @Override protected Settings restClientSettings() { String token = basicAuthHeaderValue("admin", new SecureString("admin-password".toCharArray())); diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 6af9bc9b11723..e5bc4a729f8b1 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; import org.elasticsearch.test.NotEqualMessageBuilder; @@ -37,6 +38,7 @@ import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; +import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.transport.Compression; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -68,7 +70,6 @@ import static java.util.Collections.singletonMap; import static java.util.stream.Collectors.toList; import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_INDEX_VERSION; -import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_VERSION; import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; import static org.elasticsearch.test.MapMatcher.assertMap; @@ -889,7 +890,7 @@ public void testRecovery() throws Exception { if (isRunningAgainstOldCluster()) { count = between(200, 300); Settings.Builder settings = Settings.builder(); - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); } final String mappings = randomBoolean() ? 
"\"_source\": { \"enabled\": false}" : null; @@ -941,7 +942,7 @@ public void testSnapshotRestore() throws IOException { // Create the index count = between(200, 300); Settings.Builder settings = Settings.builder(); - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); } createIndex(index, settings.build()); @@ -1435,7 +1436,7 @@ public void testPeerRecoveryRetentionLeases() throws Exception { public void testOperationBasedRecovery() throws Exception { if (isRunningAgainstOldCluster()) { Settings.Builder settings = indexSettings(1, 1); - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); } final String mappings = randomBoolean() ? "\"_source\": { \"enabled\": false}" : null; @@ -1498,7 +1499,7 @@ public void testResize() throws Exception { final Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 3) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1); - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false); } final String mappings = randomBoolean() ? "\"_source\": { \"enabled\": false}" : null; @@ -1619,7 +1620,7 @@ public void testSystemIndexMetadataIsUpgraded() throws Exception { // If we are on 7.x create an alias that includes both a system index and a non-system index so we can be sure it gets // upgraded properly. If we're already on 8.x, skip this part of the test. 
- if (minimumNodeVersion().before(SYSTEM_INDEX_ENFORCEMENT_VERSION)) { + if (clusterHasFeature(RestTestLegacyFeatures.SYSTEM_INDICES_REST_ACCESS_ENFORCED) == false) { // Create an alias to make sure it gets upgraded properly Request putAliasRequest = new Request("POST", "/_aliases"); putAliasRequest.setJsonEntity(""" diff --git a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java index 5255cbf401c9a..9c5415f1d5ea9 100644 --- a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java +++ b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java @@ -17,31 +17,18 @@ import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.tests.util.TimeUnits; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; -import org.elasticsearch.action.bulk.BulkProcessor2; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.aggregations.pipeline.DerivativePipelineAggregationBuilder; import org.elasticsearch.client.Request; -import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; -import org.elasticsearch.client.RestClient; -import org.elasticsearch.client.RestHighLevelClient; -import org.elasticsearch.client.asyncsearch.AsyncSearchResponse; +import org.elasticsearch.client.ResponseListener; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedFunction; -import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; @@ -55,9 +42,7 @@ import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; -import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.BucketOrder; -import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; @@ -77,22 +62,22 @@ import org.elasticsearch.search.suggest.SuggestBuilder; import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder; import org.elasticsearch.search.suggest.phrase.DirectCandidateGeneratorBuilder; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestion; import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder; -import org.elasticsearch.search.suggest.term.TermSuggestion; import 
org.elasticsearch.search.suggest.term.TermSuggestionBuilder; import org.elasticsearch.test.NotEqualMessageBuilder; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.ObjectPath; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; -import org.junit.AfterClass; +import org.hamcrest.Matchers; import org.junit.Before; import java.io.IOException; +import java.io.UncheckedIOException; import java.net.URLEncoder; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; @@ -100,7 +85,6 @@ import java.time.format.DateTimeFormatter; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Locale; @@ -110,16 +94,14 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; -import static java.util.stream.Collectors.toList; +import static org.hamcrest.CoreMatchers.anyOf; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.Matchers.empty; +import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.hamcrest.Matchers.not; /** * This test class executes twice, first against the remote cluster, and then against another cluster that has the remote cluster @@ -137,13 +119,13 @@ public class CCSDuelIT extends ESRestTestCase { private static final String REMOTE_INDEX_NAME = "my_remote_cluster:" + INDEX_NAME; private static final String[] TAGS = new String[] { "java", "xml", "sql", "html", "php", "ruby", "python", "perl" }; - private static RestHighLevelClient restHighLevelClient; + private static boolean init = false; @Before public void init() throws Exception { super.initClient(); - if (restHighLevelClient == null) { - restHighLevelClient = new HighLevelClient(client()); + if (init == false) { + init = true; String destinationCluster = System.getProperty("tests.rest.suite"); // we index docs with private randomness otherwise the two clusters end up with exactly the same documents // given that this test class is run twice with same seed. 
@@ -155,18 +137,6 @@ public void init() throws Exception { } } - private static class HighLevelClient extends RestHighLevelClient { - private HighLevelClient(RestClient restClient) { - super(restClient, (client) -> {}, Collections.emptyList()); - } - } - - @AfterClass - public static void cleanupClient() throws IOException { - IOUtils.close(restHighLevelClient); - restHighLevelClient = null; - } - @Override protected boolean preserveIndicesUponCompletion() { return true; @@ -177,14 +147,13 @@ protected boolean preserveDataStreamsUponCompletion() { return true; } - private static void indexDocuments(String idPrefix) throws IOException, InterruptedException { + private void indexDocuments(String idPrefix) throws IOException, InterruptedException { // this index with a single document is used to test partial failures - IndexRequest indexRequest = new IndexRequest(INDEX_NAME + "_err"); - indexRequest.id("id"); - indexRequest.source("id", "id", "creationDate", "err"); - indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); - IndexResponse indexResponse = restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT); - assertEquals(201, indexResponse.status().getStatus()); + Request request = new Request("POST", "/" + INDEX_NAME + "_err/_doc"); + request.addParameter("refresh", "wait_for"); + request.setJsonEntity("{ \"id\" : \"id\", \"creationDate\" : \"err\" }"); + Response response = client().performRequest(request); + assertEquals(201, response.getStatusLine().getStatusCode()); ElasticsearchAssertions.assertAcked(createIndex(INDEX_NAME + "_empty")); @@ -209,82 +178,98 @@ private static void indexDocuments(String idPrefix) throws IOException, Interrup }"""; ElasticsearchAssertions.assertAcked(createIndex(INDEX_NAME, settings, mapping)); - BulkProcessor2 bulkProcessor = BulkProcessor2.builder( - (r, l) -> restHighLevelClient.bulkAsync(r, RequestOptions.DEFAULT, l), - new BulkProcessor2.Listener() { - @Override - public void beforeBulk(long executionId, BulkRequest request) {} - - @Override - public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { - assertFalse(response.hasFailures()); - } - - @Override - public void afterBulk(long executionId, BulkRequest request, Exception failure) { - throw new AssertionError("Failed to execute bulk", failure); - } - }, - new DeterministicTaskQueue(random()).getThreadPool() - ).build(); + CountDownLatch latch = new CountDownLatch(2); int numQuestions = randomIntBetween(50, 100); - for (int i = 0; i < numQuestions; i++) { - bulkProcessor.add(buildIndexRequest(idPrefix + i, "question", null)); + { + StringBuilder builder = new StringBuilder(); + for (int i = 0; i < numQuestions; i++) { + buildIndexRequest(builder, idPrefix + i, "question", null); + } + executeBulkAsync(builder.toString(), latch); } - int numAnswers = randomIntBetween(100, 150); - for (int i = 0; i < numAnswers; i++) { - bulkProcessor.add(buildIndexRequest(idPrefix + (i + 1000), "answer", idPrefix + randomIntBetween(0, numQuestions - 1))); + { + StringBuilder builder = new StringBuilder(); + int numAnswers = randomIntBetween(100, 150); + for (int i = 0; i < numAnswers; i++) { + buildIndexRequest(builder, idPrefix + (i + 1000), "answer", idPrefix + randomIntBetween(0, numQuestions - 1)); + } + executeBulkAsync(builder.toString(), latch); } - assertTrue(bulkProcessor.awaitClose(30, TimeUnit.SECONDS)); + + assertTrue(latch.await(30, TimeUnit.SECONDS)); RefreshResponse refreshResponse = refresh(INDEX_NAME); 
ElasticsearchAssertions.assertNoFailures(refreshResponse); } - private static IndexRequest buildIndexRequest(String id, String type, String questionId) { - IndexRequest indexRequest = new IndexRequest(INDEX_NAME); - indexRequest.id(id); + private void executeBulkAsync(String body, CountDownLatch latch) { + Request bulk = new Request("POST", "/_bulk"); + bulk.setJsonEntity(body); + client().performRequestAsync(bulk, new ResponseListener() { + @Override + public void onSuccess(Response response) { + try { + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertThat(objectPath.evaluate("errors"), Matchers.equalTo(false)); + } catch (IOException ioException) { + throw new UncheckedIOException(ioException); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception exception) { + try { + fail(exception.getMessage()); + } finally { + latch.countDown(); + } + } + }); + } + + private static void buildIndexRequest(StringBuilder buffer, String id, String type, String questionId) { + // { "index" : { "_index" : "test", "_id" : "1" } }/n + buffer.append("{ \"index\" : { \"_index\" : \"").append(INDEX_NAME).append("\", \"_id\" : \"").append(id).append("\""); if (questionId != null) { - indexRequest.routing(questionId); + buffer.append(", \"routing\" : \"").append(questionId).append("\""); } - indexRequest.create(true); + buffer.append(" } }\n"); int numTags = randomIntBetween(1, 3); Set tags = new HashSet<>(); if (questionId == null) { for (int i = 0; i < numTags; i++) { - tags.add(randomFrom(TAGS)); + tags.add("\"" + randomFrom(TAGS) + "\""); } } String[] tagsArray = tags.toArray(new String[0]); String date = LocalDate.of(2019, 1, randomIntBetween(1, 31)).format(DateTimeFormatter.ofPattern("yyyy/MM/dd", Locale.ROOT)); - Map joinField = new HashMap<>(); - joinField.put("name", type); + + buffer.append("{ "); + buffer.append("\"id\" : \"").append(id).append("\","); + buffer.append("\"type\" : \"").append(type).append("\","); + buffer.append("\"votes\" : ").append(randomIntBetween(0, 30)).append(","); if (questionId != null) { - joinField.put("parent", questionId); - } - indexRequest.source( - XContentType.JSON, - "id", - id, - "type", - type, - "votes", - randomIntBetween(0, 30), - "questionId", - questionId, - "tags", - tagsArray, - "user", - "user" + randomIntBetween(1, 10), - "suggest", - Collections.singletonMap("input", tagsArray), - "creationDate", - date, - "join", - joinField - ); - return indexRequest; + buffer.append("\"questionId\" : \"").append(questionId).append("\","); + } else { + buffer.append("\"questionId\" : ").append(questionId).append(","); + } + buffer.append("\"tags\" : [").append(String.join(",", Arrays.asList(tagsArray))).append("],"); + buffer.append("\"user\" : \"").append("user").append(randomIntBetween(1, 10)).append("\","); + buffer.append("\"suggest\" : ") + .append("{") + .append("\"input\" : [") + .append(String.join(",", Arrays.asList(tagsArray))) + .append("]},"); + buffer.append("\"creationDate\" : \"").append(date).append("\","); + buffer.append("\"join\" : {"); + buffer.append("\"name\" : \"").append(type).append("\""); + if (questionId != null) { + buffer.append(", \"parent\" : \"").append(questionId).append("\""); + } + buffer.append("}}\n"); } public void testMatchAll() throws Exception { @@ -376,9 +361,9 @@ public void testHighlighting() throws Exception { SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.highlighter(new HighlightBuilder().field("tags")); 
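For reference, the _bulk body assembled by buildIndexRequest above is newline-delimited JSON: one action line followed by one source line per document, with routing and the join parent set only for answers. A sketch of one question/answer pair as it would go over the wire (index name, ids and field values are invented for illustration):

    // Hypothetical wire format; matches the shape produced by buildIndexRequest().
    String bulkBody = """
        { "index" : { "_index" : "some_index", "_id" : "prefix0" } }
        { "id" : "prefix0", "type" : "question", "votes" : 3, "questionId" : null, "tags" : ["java"], "user" : "user2", "suggest" : {"input" : ["java"]}, "creationDate" : "2019/01/05", "join" : {"name" : "question"} }
        { "index" : { "_index" : "some_index", "_id" : "prefix1000", "routing" : "prefix0" } }
        { "id" : "prefix1000", "type" : "answer", "votes" : 7, "questionId" : "prefix0", "tags" : [], "user" : "user5", "suggest" : {"input" : []}, "creationDate" : "2019/01/12", "join" : {"name" : "answer", "parent" : "prefix0"} }
        """;
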
sourceBuilder.query(QueryBuilders.matchQuery("tags", "xml")); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - assertFalse(response.getHits().getHits()[0].getHighlightFields().isEmpty()); + assertFalse(response.evaluateMapKeys("hits.hits.0.highlight").isEmpty()); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); @@ -398,9 +383,9 @@ public void testFetchSource() throws Exception { sourceBuilder.fetchSource(new String[] { "tags" }, Strings.EMPTY_ARRAY); sourceBuilder.query(QueryBuilders.matchQuery("tags", "ruby")); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - assertEquals(1, response.getHits().getHits()[0].getSourceAsMap().size()); + assertThat(response.evaluateMapKeys("hits.hits.0._source").size(), equalTo(1)); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); @@ -419,10 +404,10 @@ public void testDocValueFields() throws Exception { SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.docValueField("user.keyword"); sourceBuilder.query(QueryBuilders.matchQuery("tags", "xml")); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - assertEquals(1, response.getHits().getHits()[0].getFields().size()); - assertNotNull(response.getHits().getHits()[0].getFields().get("user.keyword")); + assertThat(response.evaluateMapKeys("hits.hits.0.fields").size(), equalTo(1)); + assertTrue(response.evaluateMapKeys("hits.hits.0.fields").contains("user.keyword")); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); @@ -440,10 +425,10 @@ public void testScriptFields() throws Exception { assumeMultiClusterSetup(); SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.scriptField("parent", new Script(ScriptType.INLINE, "painless", "doc['join#question']", Collections.emptyMap())); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - assertEquals(1, response.getHits().getHits()[0].getFields().size()); - assertNotNull(response.getHits().getHits()[0].getFields().get("parent")); + assertThat(response.evaluateMapKeys("hits.hits.0.fields").size(), equalTo(1)); + assertTrue(response.evaluateMapKeys("hits.hits.0.fields").contains("parent")); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); @@ -462,9 +447,9 @@ public void testExplain() throws Exception { SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.explain(true); sourceBuilder.query(QueryBuilders.matchQuery("tags", "sql")); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - assertNotNull(response.getHits().getHits()[0].getExplanation()); + assertNotNull(response.evaluate("hits.hits.0._explanation")); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); @@ -486,7 +471,6 @@ public void testRescore() throws Exception { rescorerBuilder.setScoreMode(QueryRescoreMode.Multiply); rescorerBuilder.setRescoreQueryWeight(5); sourceBuilder.addRescorer(rescorerBuilder); - { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); searchRequest.source(sourceBuilder); @@ -541,13 +525,18 @@ public void testProfile() throws Exception { SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.profile(true); sourceBuilder.query(QueryBuilders.matchQuery("tags", "html")); 
- Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - assertFalse(response.getProfileResults().isEmpty()); - assertThat( - response.getProfileResults().values().stream().filter(sr -> sr.getFetchPhase() != null).collect(toList()), - not(empty()) - ); + assertFalse(response.evaluateMapKeys("profile").isEmpty()); + int size = response.evaluateArraySize("profile.shards"); + boolean fail = true; + for (int i = 0; i < size; i++) { + if (response.evaluate("profile.shards." + i + ".fetch") != null) { + fail = false; + break; + } + } + assertFalse("profile might be incomplete", fail); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); @@ -570,10 +559,11 @@ public void testSortByField() throws Exception { sourceBuilder.sort("type.keyword", SortOrder.ASC); sourceBuilder.sort("creationDate", SortOrder.DESC); sourceBuilder.sort("user.keyword", SortOrder.ASC); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response, 30); - if (response.getHits().getTotalHits().value > 30) { - assertEquals(3, response.getHits().getHits()[0].getSortValues().length); + int total = response.evaluate("hits.total.value"); + if (total > 30) { + assertThat(response.evaluateArraySize("hits.hits.0.sort"), equalTo(3)); } }; { @@ -597,16 +587,16 @@ public void testSortByFieldOneClusterHasNoResults() throws Exception { sourceBuilder.sort("type.keyword", SortOrder.ASC); sourceBuilder.sort("creationDate", SortOrder.DESC); sourceBuilder.sort("user.keyword", SortOrder.ASC); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - SearchHit[] hits = response.getHits().getHits(); - for (SearchHit hit : hits) { - assertEquals(3, hit.getSortValues().length); - assertEquals(INDEX_NAME, hit.getIndex()); + int size = response.evaluateArraySize("hits.hits"); + for (int i = 0; i < size; i++) { + String hit = "hits.hits." + i; + assertThat(response.evaluateArraySize(hit + ".sort"), equalTo(3)); if (onlyRemote) { - assertEquals("my_remote_cluster", hit.getClusterAlias()); + assertThat(response.evaluate(hit + "._index"), equalTo(REMOTE_INDEX_NAME)); } else { - assertNull(hit.getClusterAlias()); + assertThat(response.evaluate(hit + "._index"), equalTo(INDEX_NAME)); } } }; @@ -621,14 +611,15 @@ public void testFieldCollapsingOneClusterHasNoResults() throws Exception { boolean onlyRemote = randomBoolean(); sourceBuilder.query(new TermQueryBuilder("_index", onlyRemote ? REMOTE_INDEX_NAME : INDEX_NAME)); sourceBuilder.collapse(new CollapseBuilder("user.keyword")); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - for (SearchHit hit : response.getHits().getHits()) { - assertEquals(INDEX_NAME, hit.getIndex()); + int size = response.evaluateArraySize("hits.hits"); + for (int i = 0; i < size; i++) { + String hit = "hits.hits." 
+ i; if (onlyRemote) { - assertEquals("my_remote_cluster", hit.getClusterAlias()); + assertThat(response.evaluate(hit + "._index"), equalTo(REMOTE_INDEX_NAME)); } else { - assertNull(hit.getClusterAlias()); + assertThat(response.evaluate(hit + "._index"), equalTo(INDEX_NAME)); } } }; @@ -661,9 +652,9 @@ public void testFieldCollapsingSortByField() throws Exception { sourceBuilder.sort("creationDate", SortOrder.DESC); sourceBuilder.sort(new ScoreSortBuilder()); sourceBuilder.collapse(new CollapseBuilder("user.keyword")); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - assertEquals(2, response.getHits().getHits()[0].getSortValues().length); + assertThat(response.evaluateArraySize("hits.hits.0.sort"), equalTo(2)); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); @@ -804,7 +795,7 @@ public void testPipelineAggs() throws Exception { searchRequest.source(sourceBuilder); duelRequest(searchRequest, response -> { assertAggs(response); - assertNotNull(response.getAggregations().get("most_voted")); + assertTrue(response.evaluateMapKeys("aggregations").contains("bucket_metric_value#most_voted")); }); duelRequest(searchRequest, CCSDuelIT::assertAggs); } @@ -813,7 +804,7 @@ public void testPipelineAggs() throws Exception { searchRequest.source(sourceBuilder); duelRequest(searchRequest, response -> { assertAggs(response); - assertNotNull(response.getAggregations().get("most_voted")); + assertTrue(response.evaluateMapKeys("aggregations").contains("bucket_metric_value#most_voted")); }); duelRequest(searchRequest, CCSDuelIT::assertAggs); } @@ -847,12 +838,12 @@ public void testTopHits() throws Exception { public void testTermsLookup() throws Exception { assumeMultiClusterSetup(); - IndexRequest indexRequest = new IndexRequest("lookup_index"); - indexRequest.id("id"); - indexRequest.source("tags", new String[] { "java", "sql", "html", "jax-ws" }); - indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); - IndexResponse indexResponse = restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT); - assertEquals(201, indexResponse.status().getStatus()); + Request request = new Request("POST", "/lookup_index/_doc/id"); + request.addParameter("refresh", "wait_for"); + request.setJsonEntity("{ \"tags\" : [ \"java\", \"sql\", \"html\", \"jax-ws\" ] }"); + Response response = client().performRequest(request); + assertEquals(201, response.getStatusLine().getStatusCode()); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); TermsQueryBuilder termsQueryBuilder = new TermsQueryBuilder("tags", new TermsLookup("lookup_index", "id", "tags")); sourceBuilder.query(termsQueryBuilder); @@ -879,11 +870,11 @@ public void testShardFailures() throws Exception { boolean compareAsyncAndSyncResponses = false; duelRequest(searchRequest, response -> { assertMultiClusterSearchResponse(response); - assertThat(response.getHits().getTotalHits().value, greaterThan(0L)); - assertNull(response.getAggregations()); - assertNull(response.getSuggest()); - assertThat(response.getHits().getHits().length, greaterThan(0)); - assertThat(response.getFailedShards(), greaterThanOrEqualTo(2)); + assertThat(response.evaluate("hits.total.value"), greaterThan(0)); + assertNull(response.evaluate("aggregations")); + assertNull(response.evaluate("suggest")); + assertThat(response.evaluateArraySize("hits.hits"), greaterThan(0)); + assertThat(response.evaluate("_shards.failed"), greaterThanOrEqualTo(2)); }, compareAsyncAndSyncResponses); } @@ 
-894,24 +885,21 @@ public void testTermSuggester() throws Exception { suggestBuilder.setGlobalText("jva hml"); suggestBuilder.addSuggestion("tags", new TermSuggestionBuilder("tags").suggestMode(TermSuggestionBuilder.SuggestMode.POPULAR)); sourceBuilder.suggest(suggestBuilder); - Consumer responseChecker = response -> { - assertEquals(1, response.getSuggest().size()); - TermSuggestion tags = response.getSuggest().getSuggestion("tags"); - assertThat(tags.getEntries().size(), greaterThan(0)); + CheckedConsumer responseChecker = response -> { + assertThat(response.evaluateMapKeys("suggest").size(), equalTo(1)); + assertThat(response.evaluateArraySize("suggest.term#tags"), greaterThan(0)); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); searchRequest.source(sourceBuilder); - responseChecker.andThen(CCSDuelIT::assertMultiClusterSearchResponse); // suggest-only queries are not supported by _async_search, so only test against sync search API - duelSearchSync(searchRequest, responseChecker); + duelSearchSync(searchRequest, responseChecker.andThen(CCSDuelIT::assertMultiClusterSearchResponse)); } { SearchRequest searchRequest = initRemoteOnlySearchRequest(); searchRequest.source(sourceBuilder); - responseChecker.andThen(CCSDuelIT::assertSingleRemoteClusterSearchResponse); // suggest-only queries are not supported by _async_search, so only test against sync search API - duelSearchSync(searchRequest, responseChecker); + duelSearchSync(searchRequest, responseChecker.andThen(CCSDuelIT::assertSingleRemoteClusterSearchResponse)); } } @@ -926,24 +914,21 @@ public void testPhraseSuggester() throws Exception { .highlight("", "") ); sourceBuilder.suggest(suggestBuilder); - Consumer responseChecker = response -> { - assertEquals(1, response.getSuggest().size()); - PhraseSuggestion tags = response.getSuggest().getSuggestion("tags"); - assertThat(tags.getEntries().size(), greaterThan(0)); + CheckedConsumer responseChecker = response -> { + assertEquals(1, response.evaluateMapKeys("suggest").size()); + assertThat(response.evaluateArraySize("suggest.phrase#tags"), greaterThan(0)); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); searchRequest.source(sourceBuilder); // suggest-only queries are not supported by _async_search, so only test against sync search API - responseChecker.andThen(CCSDuelIT::assertMultiClusterSearchResponse); - duelSearchSync(searchRequest, responseChecker); + duelSearchSync(searchRequest, responseChecker.andThen(CCSDuelIT::assertMultiClusterSearchResponse)); } { SearchRequest searchRequest = initRemoteOnlySearchRequest(); searchRequest.source(sourceBuilder); - responseChecker.andThen(CCSDuelIT::assertSingleRemoteClusterSearchResponse); // suggest-only queries are not supported by _async_search, so only test against sync search API - duelSearchSync(searchRequest, responseChecker); + duelSearchSync(searchRequest, responseChecker.andThen(CCSDuelIT::assertSingleRemoteClusterSearchResponse)); } } @@ -955,25 +940,23 @@ public void testCompletionSuggester() throws Exception { suggestBuilder.addSuggestion("java", new CompletionSuggestionBuilder("suggest").size(20).text("jav")); suggestBuilder.addSuggestion("ruby", new CompletionSuggestionBuilder("suggest").size(30).text("rub")); sourceBuilder.suggest(suggestBuilder); - Consumer responseChecker = response -> { - assertEquals(Strings.toString(response, true, true), 3, response.getSuggest().size()); - assertThat(response.getSuggest().getSuggestion("python").getEntries().size(), greaterThan(0)); - 
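The #-prefixed paths in the suggester and aggregation assertions above come from the typed_keys request parameter, which submitSyncSearch (further down) sets on every duel request: with it, named suggesters and aggregations are keyed as type#name in the response. Illustrative assertions, assuming a term suggester named tags and the most_voted pipeline aggregation from earlier in this file:

    // With ?typed_keys=true the response keys carry their type, e.g.
    //   "suggest"      : { "term#tags" : [ ... ] }
    //   "aggregations" : { "bucket_metric_value#most_voted" : { "value" : 42.0 } }
    assertThat(response.evaluateMapKeys("suggest").size(), equalTo(1));
    assertThat(response.evaluateArraySize("suggest.term#tags"), greaterThan(0));
    assertTrue(response.evaluateMapKeys("aggregations").contains("bucket_metric_value#most_voted"));
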
assertThat(response.getSuggest().getSuggestion("java").getEntries().size(), greaterThan(0)); - assertThat(response.getSuggest().getSuggestion("ruby").getEntries().size(), greaterThan(0)); + CheckedConsumer responseChecker = response -> { + assertThat(response.evaluateMapKeys("suggest").size(), equalTo(3)); + assertThat(response.evaluateArraySize("suggest.completion#python"), greaterThan(0)); + assertThat(response.evaluateArraySize("suggest.completion#java"), greaterThan(0)); + assertThat(response.evaluateArraySize("suggest.completion#ruby"), greaterThan(0)); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); searchRequest.source(sourceBuilder); - responseChecker.andThen(CCSDuelIT::assertMultiClusterSearchResponse); // suggest-only queries are not supported by _async_search, so only test against sync search API - duelSearchSync(searchRequest, responseChecker); + duelSearchSync(searchRequest, responseChecker.andThen(CCSDuelIT::assertMultiClusterSearchResponse)); } { SearchRequest searchRequest = initRemoteOnlySearchRequest(); searchRequest.source(sourceBuilder); - responseChecker.andThen(CCSDuelIT::assertSingleRemoteClusterSearchResponse); // suggest-only queries are not supported by _async_search, so only test against sync search API - duelSearchSync(searchRequest, responseChecker); + duelSearchSync(searchRequest, responseChecker.andThen(CCSDuelIT::assertSingleRemoteClusterSearchResponse)); } } @@ -992,7 +975,7 @@ private static SearchRequest initLocalAndRemoteSearchRequest() { } private static SearchRequest initRemoteOnlySearchRequest() { - List indices = Arrays.asList("my_remote_cluster:" + INDEX_NAME); + List indices = List.of("my_remote_cluster:" + INDEX_NAME); final SearchRequest request = new SearchRequest(indices.toArray(new String[0])); if (randomBoolean()) { request.setPreFilterShardSize(between(1, 20)); @@ -1000,12 +983,15 @@ private static SearchRequest initRemoteOnlySearchRequest() { return request; } - private void duelRequest(SearchRequest searchRequest, Consumer responseChecker) throws Exception { + private void duelRequest(SearchRequest searchRequest, CheckedConsumer responseChecker) throws Exception { duelRequest(searchRequest, responseChecker, true); } - private void duelRequest(SearchRequest searchRequest, Consumer responseChecker, boolean compareAsyncToSyncResponses) - throws Exception { + private void duelRequest( + SearchRequest searchRequest, + CheckedConsumer responseChecker, + boolean compareAsyncToSyncResponses + ) throws Exception { Map syncResponseMap = duelSearchSync(searchRequest, responseChecker); Map asyncResponseMap = duelSearchAsync(searchRequest, responseChecker); if (compareAsyncToSyncResponses) { @@ -1016,26 +1002,17 @@ private void duelRequest(SearchRequest searchRequest, Consumer r /** * @return responseMap from one of the Synchronous Search Requests */ - private static Map duelSearchSync(SearchRequest searchRequest, Consumer responseChecker) + private static Map duelSearchSync(SearchRequest searchRequest, CheckedConsumer responseChecker) throws Exception { CountDownLatch latch = new CountDownLatch(2); AtomicReference exception1 = new AtomicReference<>(); - AtomicReference minimizeRoundtripsResponse = new AtomicReference<>(); + AtomicReference minimizeRoundtripsResponse = new AtomicReference<>(); searchRequest.setCcsMinimizeRoundtrips(true); - restHighLevelClient.searchAsync( - searchRequest, - RequestOptions.DEFAULT, - new LatchedActionListener<>(ActionListener.wrap(minimizeRoundtripsResponse::set, exception1::set), latch) - ); - + 
submitSyncSearch(searchRequest, minimizeRoundtripsResponse, exception1, latch); AtomicReference exception2 = new AtomicReference<>(); - AtomicReference fanOutResponse = new AtomicReference<>(); + AtomicReference fanOutResponse = new AtomicReference<>(); searchRequest.setCcsMinimizeRoundtrips(false); - restHighLevelClient.searchAsync( - searchRequest, - RequestOptions.DEFAULT, - new LatchedActionListener<>(ActionListener.wrap(fanOutResponse::set, exception2::set), latch) - ); + submitSyncSearch(searchRequest, fanOutResponse, exception2, latch); latch.await(); @@ -1049,8 +1026,7 @@ private static Map duelSearchSync(SearchRequest searchRequest, C if (exception2.get() != null) { throw new AssertionError("one of the two requests returned an exception", exception2.get()); } - SearchResponse minimizeRoundtripsSearchResponse = minimizeRoundtripsResponse.get(); - + ObjectPath minimizeRoundtripsSearchResponse = ObjectPath.createFromResponse(minimizeRoundtripsResponse.get()); responseChecker.accept(minimizeRoundtripsSearchResponse); // if only the remote cluster was searched, then only one reduce phase is expected @@ -1058,133 +1034,160 @@ private static Map duelSearchSync(SearchRequest searchRequest, C if (searchRequest.indices().length > 1) { expectedReducePhasesMinRoundTrip = searchRequest.indices().length + 1; } - - assertEquals(expectedReducePhasesMinRoundTrip, minimizeRoundtripsSearchResponse.getNumReducePhases()); - SearchResponse fanOutSearchResponse = fanOutResponse.get(); + if (expectedReducePhasesMinRoundTrip == 1) { + assertThat( + minimizeRoundtripsSearchResponse.evaluate("num_reduce_phases"), + anyOf(equalTo(expectedReducePhasesMinRoundTrip), nullValue()) + ); + } else { + assertThat(minimizeRoundtripsSearchResponse.evaluate("num_reduce_phases"), equalTo(expectedReducePhasesMinRoundTrip)); + } + ObjectPath fanOutSearchResponse = ObjectPath.createFromResponse(fanOutResponse.get()); responseChecker.accept(fanOutSearchResponse); - assertEquals(1, fanOutSearchResponse.getNumReducePhases()); + assertThat(fanOutSearchResponse.evaluate("num_reduce_phases"), anyOf(equalTo(1), nullValue())); // default value is 1? 
// compare Clusters objects - SearchResponse.Clusters clustersMRT = minimizeRoundtripsSearchResponse.getClusters(); - SearchResponse.Clusters clustersMRTFalse = fanOutSearchResponse.getClusters(); - - assertEquals(clustersMRT.getTotal(), clustersMRTFalse.getTotal()); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL) + assertThat( + minimizeRoundtripsSearchResponse.evaluate("_clusters.total"), + equalTo(fanOutSearchResponse.evaluate("_clusters.total")) ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED) + assertThat( + minimizeRoundtripsSearchResponse.evaluate("_clusters.successful"), + equalTo(fanOutSearchResponse.evaluate("_clusters.successful")) ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING) + assertThat( + minimizeRoundtripsSearchResponse.evaluate("_clusters.skipped"), + equalTo(fanOutSearchResponse.evaluate("_clusters.skipped")) ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL) + assertThat( + minimizeRoundtripsSearchResponse.evaluate("_clusters.running"), + equalTo(fanOutSearchResponse.evaluate("_clusters.running")) ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.FAILED) + assertThat( + minimizeRoundtripsSearchResponse.evaluate("_clusters.partial"), + equalTo(fanOutSearchResponse.evaluate("_clusters.partial")) + ); + assertThat( + minimizeRoundtripsSearchResponse.evaluate("_clusters.failed"), + equalTo(fanOutSearchResponse.evaluate("_clusters.failed")) ); Map<String, Object> minimizeRoundtripsResponseMap = responseToMap(minimizeRoundtripsSearchResponse); - if (clustersMRT.hasClusterObjects() && clustersMRTFalse.hasClusterObjects()) { + if (minimizeRoundtripsSearchResponse.evaluate("_clusters") != null && fanOutSearchResponse.evaluate("_clusters") != null) { Map<String, Object> fanOutResponseMap = responseToMap(fanOutSearchResponse); compareResponseMaps(minimizeRoundtripsResponseMap, fanOutResponseMap, "Comparing sync_search minimizeRoundTrip vs. fanOut"); - assertThat(minimizeRoundtripsSearchResponse.getSkippedShards(), lessThanOrEqualTo(fanOutSearchResponse.getSkippedShards())); + assertThat( + minimizeRoundtripsSearchResponse.evaluate("_shards.skipped"), + lessThanOrEqualTo((Integer) fanOutSearchResponse.evaluate("_shards.skipped")) + ); } return minimizeRoundtripsResponseMap; } } + private static void submitSyncSearch( + SearchRequest searchRequest, + AtomicReference<Response> responseRef, + AtomicReference<Exception> exceptionRef, + CountDownLatch latch + ) throws IOException { + String indices = Strings.collectionToDelimitedString(List.of(searchRequest.indices()), ","); + final Request request = new Request("POST", URLEncoder.encode(indices, StandardCharsets.UTF_8) + "/_search"); + request.addParameter("ccs_minimize_roundtrips", Boolean.toString(searchRequest.isCcsMinimizeRoundtrips())); + request.addParameter(RestSearchAction.TYPED_KEYS_PARAM, "true"); + request.setEntity(createEntity(searchRequest.source(), XContentType.JSON, ToXContent.EMPTY_PARAMS)); + client().performRequestAsync(request, new ResponseListener() { + @Override + public void onSuccess(Response response) { + try { + responseRef.set(response); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception exception) { + try { + exceptionRef.set(exception); + } finally { + latch.countDown(); + } + } + }); + } + /** * @return responseMap from one of the async searches */ - private static Map<String, Object> duelSearchAsync(SearchRequest searchRequest, Consumer<SearchResponse> responseChecker) - throws Exception { + private static Map<String, Object> duelSearchAsync( + SearchRequest searchRequest, + CheckedConsumer<ObjectPath, IOException> responseChecker + ) throws Exception { searchRequest.setCcsMinimizeRoundtrips(true); - AsyncSearchResponse minimizeRoundtripsResponse = submitAsyncSearch( - searchRequest, - TimeValue.timeValueSeconds(1), - restHighLevelClient.getParserConfig() - ); + ObjectPath minimizeRoundtripsResponse = submitAsyncSearch(searchRequest, TimeValue.timeValueSeconds(1)); try { - final String responseId = minimizeRoundtripsResponse.getId(); + final String responseId = minimizeRoundtripsResponse.evaluate("id"); assertBusy(() -> { - AsyncSearchResponse resp = getAsyncSearch(responseId, restHighLevelClient.getParserConfig()); - assertThat(resp.isRunning(), equalTo(false)); + ObjectPath resp = getAsyncSearch(responseId); + assertThat(resp.evaluate("is_running"), equalTo(false)); }); - minimizeRoundtripsResponse = getAsyncSearch(responseId, restHighLevelClient.getParserConfig()); + minimizeRoundtripsResponse = getAsyncSearch(responseId); } finally { - deleteAsyncSearch(minimizeRoundtripsResponse.getId()); + deleteAsyncSearch(minimizeRoundtripsResponse.evaluate("id")); } searchRequest.setCcsMinimizeRoundtrips(false); - AsyncSearchResponse fanOutResponse = submitAsyncSearch( - searchRequest, - TimeValue.timeValueSeconds(1), - restHighLevelClient.getParserConfig() - ); + ObjectPath fanOutResponse = submitAsyncSearch(searchRequest, TimeValue.timeValueSeconds(1)); try { - final String responseId = fanOutResponse.getId(); + final String responseId = fanOutResponse.evaluate("id"); assertBusy(() -> { - AsyncSearchResponse resp = getAsyncSearch(responseId, restHighLevelClient.getParserConfig()); - assertThat(resp.isRunning(), equalTo(false)); + ObjectPath resp = getAsyncSearch(responseId); + assertThat(resp.evaluate("is_running"), equalTo(false)); }); - fanOutResponse = getAsyncSearch(responseId, restHighLevelClient.getParserConfig()); + fanOutResponse = getAsyncSearch(responseId); }
finally { - deleteAsyncSearch(fanOutResponse.getId()); + deleteAsyncSearch(fanOutResponse.evaluate("id")); } - SearchResponse minimizeRoundtripsSearchResponse = minimizeRoundtripsResponse.getSearchResponse(); - SearchResponse fanOutSearchResponse = fanOutResponse.getSearchResponse(); - responseChecker.accept(minimizeRoundtripsSearchResponse); + // extract the response + minimizeRoundtripsResponse = new ObjectPath(minimizeRoundtripsResponse.evaluate("response")); + fanOutResponse = new ObjectPath(fanOutResponse.evaluate("response")); + + responseChecker.accept(minimizeRoundtripsResponse); // if only the remote cluster was searched, then only one reduce phase is expected int expectedReducePhasesMinRoundTrip = 1; if (searchRequest.indices().length > 1) { expectedReducePhasesMinRoundTrip = searchRequest.indices().length + 1; } - assertEquals(expectedReducePhasesMinRoundTrip, minimizeRoundtripsSearchResponse.getNumReducePhases()); - - responseChecker.accept(fanOutSearchResponse); - assertEquals(1, fanOutSearchResponse.getNumReducePhases()); - - // compare Clusters objects - SearchResponse.Clusters clustersMRT = minimizeRoundtripsSearchResponse.getClusters(); - SearchResponse.Clusters clustersMRTFalse = fanOutSearchResponse.getClusters(); + if (expectedReducePhasesMinRoundTrip == 1) { + assertThat( + minimizeRoundtripsResponse.evaluate("num_reduce_phases"), + anyOf(equalTo(expectedReducePhasesMinRoundTrip), nullValue()) + ); + } else { + assertThat(minimizeRoundtripsResponse.evaluate("num_reduce_phases"), equalTo(expectedReducePhasesMinRoundTrip)); + } - assertEquals(clustersMRT.getTotal(), clustersMRTFalse.getTotal()); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.FAILED) - ); + responseChecker.accept(fanOutResponse); + assertThat(fanOutResponse.evaluate("num_reduce_phases"), anyOf(equalTo(1), nullValue())); // num_reduce_phases is omitted from the JSON response when it equals the default of 1
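Both async duels follow the same submit/poll/extract/delete cycle against `_async_search`. A condensed sketch of that cycle, reusing the `submitAsyncSearch`, `getAsyncSearch`, and `deleteAsyncSearch` helpers introduced elsewhere in this diff (the one-second TimeValue is assumed here to be the wait-for-completion timeout, so the submit call may return while the search is still running):

```java
// Submit, then poll until is_running flips to false before reading the result.
ObjectPath submitted = submitAsyncSearch(searchRequest, TimeValue.timeValueSeconds(1));
String id = submitted.evaluate("id");
try {
    assertBusy(() -> assertThat(getAsyncSearch(id).evaluate("is_running"), equalTo(false)));
    // The search body is nested under "response" in the async-search envelope.
    ObjectPath searchResponse = new ObjectPath(getAsyncSearch(id).evaluate("response"));
    assertThat(searchResponse.evaluate("hits.total.value"), notNullValue());
} finally {
    // Stored async results are cleaned up explicitly, mirroring the try/finally above.
    deleteAsyncSearch(id);
}
```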
- Map<String, Object> minimizeRoundtripsResponseMap = responseToMap(minimizeRoundtripsSearchResponse); - if (clustersMRT.hasClusterObjects() && clustersMRTFalse.hasClusterObjects()) { - Map<String, Object> fanOutResponseMap = responseToMap(fanOutSearchResponse); + assertThat(minimizeRoundtripsResponse.evaluate("_clusters.total"), equalTo(fanOutResponse.evaluate("_clusters.total"))); + assertThat(minimizeRoundtripsResponse.evaluate("_clusters.successful"), equalTo(fanOutResponse.evaluate("_clusters.successful"))); + assertThat(minimizeRoundtripsResponse.evaluate("_clusters.skipped"), equalTo(fanOutResponse.evaluate("_clusters.skipped"))); + assertThat(minimizeRoundtripsResponse.evaluate("_clusters.running"), equalTo(fanOutResponse.evaluate("_clusters.running"))); + assertThat(minimizeRoundtripsResponse.evaluate("_clusters.partial"), equalTo(fanOutResponse.evaluate("_clusters.partial"))); + assertThat(minimizeRoundtripsResponse.evaluate("_clusters.failed"), equalTo(fanOutResponse.evaluate("_clusters.failed"))); + Map<String, Object> minimizeRoundtripsResponseMap = responseToMap(minimizeRoundtripsResponse); + if (minimizeRoundtripsResponse.evaluate("_clusters") != null && fanOutResponse.evaluate("_clusters") != null) { + Map<String, Object> fanOutResponseMap = responseToMap(fanOutResponse); compareResponseMaps(minimizeRoundtripsResponseMap, fanOutResponseMap, "Comparing async_search minimizeRoundTrip vs. fanOut"); - assertThat(minimizeRoundtripsSearchResponse.getSkippedShards(), lessThanOrEqualTo(fanOutSearchResponse.getSkippedShards())); + assertThat( + minimizeRoundtripsResponse.evaluate("_shards.skipped"), + lessThanOrEqualTo((Integer) fanOutResponse.evaluate("_shards.skipped")) + ); } return minimizeRoundtripsResponseMap; } @@ -1199,11 +1202,7 @@ private static void compareResponseMaps(Map responseMap1, Map from) { - assertThat(response.getHits().getHits().length, greaterThan(0)); + int totalHits = response.evaluate("hits.total.value"); + assertThat(totalHits, greaterThan(0)); + assertThat(response.evaluate("_shards.failed"), Matchers.equalTo(0)); + assertNull(response.evaluate("aggregations")); + assertNull(response.evaluate("suggest")); + if (totalHits > from) { + assertThat(response.evaluateArraySize("hits.hits"), greaterThan(0)); } else { - assertThat(response.getHits().getHits().length, equalTo(0)); + assertThat(response.evaluateArraySize("hits.hits"), equalTo(0)); } } - private static void assertAggs(SearchResponse response) { - if (response.getClusters().getTotal() == 1) { + private static void assertAggs(ObjectPath response) throws IOException { + int totalClusters = response.evaluate("_clusters.total"); + if (totalClusters == 1) { assertSingleRemoteClusterSearchResponse(response); } else { assertMultiClusterSearchResponse(response); } - assertThat(response.getHits().getTotalHits().value, greaterThan(0L)); - assertEquals(0, response.getHits().getHits().length); - assertNull(response.getSuggest()); - assertNotNull(response.getAggregations()); - List aggregations = response.getAggregations().asList(); - for (Aggregation aggregation : aggregations) { - if (aggregation instanceof MultiBucketsAggregation multiBucketsAggregation) { + assertThat(response.evaluate("hits.total.value"), greaterThan(0)); + assertThat(response.evaluateArraySize("hits.hits"), equalTo(0)); + assertNull(response.evaluate("suggest")); + assertNotNull(response.evaluate("aggregations")); + Set<String> aggregations = response.evaluateMapKeys("aggregations"); + for (String aggregation : aggregations) { + if (aggregation.startsWith("date_histogram") || aggregation.startsWith("sterms")) {
assertThat( - "agg " + multiBucketsAggregation.getName() + " has 0 buckets", - multiBucketsAggregation.getBuckets().size(), + aggregation + " has 0 buckets", + response.evaluateArraySize("aggregations." + aggregation + ".buckets"), greaterThan(0) ); } @@ -1324,8 +1326,8 @@ private static void assertAggs(SearchResponse response) { } @SuppressWarnings("unchecked") - private static Map responseToMap(SearchResponse response) throws IOException { - BytesReference bytesReference = XContentHelper.toXContent(response, XContentType.JSON, false); + private static Map responseToMap(ObjectPath response) throws IOException { + BytesReference bytesReference = BytesReference.bytes(response.toXContentBuilder(XContentType.JSON.xContent())); Map responseMap = XContentHelper.convertToMap(bytesReference, false, XContentType.JSON).v2(); assertNotNull(responseMap.put("took", -1)); responseMap.remove("num_reduce_phases"); diff --git a/qa/packaging/README.md b/qa/packaging/README.md index 20b4f6efa3a98..f1c556e73e962 100644 --- a/qa/packaging/README.md +++ b/qa/packaging/README.md @@ -1,4 +1,4 @@ -# packaging tests +# Packaging tests This project contains tests that verify the distributions we build work correctly on the operating systems we support. They're intended to cover the @@ -6,18 +6,11 @@ steps a user would take when installing and configuring an Elasticsearch distribution. They're not intended to have significant coverage of the behavior of Elasticsearch's features. -There are two types of tests in this project. The old tests live in -`src/test/` and are written in [Bats](https://github.com/sstephenson/bats), -which is a flavor of bash scripts that run as unit tests. These tests are -deprecated because Bats is unmaintained and cannot run on Windows. - -The new tests live in `src/main/` and are written in Java. Like the old tests, -this project's tests are run inside the VM, not on your host. All new packaging -tests should be added to this set of tests if possible. - ## Running these tests -See the section in [TESTING.asciidoc](../../TESTING.asciidoc#testing-packaging) +These tests should only be run on ephemeral machines. They will likely +have undesired side effects on a developer's computer. 
+For more information, see the section in [TESTING.asciidoc](../../TESTING.asciidoc#testing-packaging) ## Adding a new test class diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/ArchiveTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/ArchiveTests.java index e426754cd61ee..82c5909c5dfdd 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/ArchiveTests.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/ArchiveTests.java @@ -211,14 +211,14 @@ public void test50AutoConfigurationFailsWhenCertificatesNotGenerated() throws Ex FileUtils.assertPathsDoNotExist(installation.data); Path tempDir = createTempDir("bc-backup"); Files.move( - installation.lib.resolve("tools").resolve("security-cli").resolve("bcprov-jdk15on-1.64.jar"), - tempDir.resolve("bcprov-jdk15on-1.64.jar") + installation.lib.resolve("tools").resolve("security-cli").resolve("bcprov-jdk18on-1.76.jar"), + tempDir.resolve("bcprov-jdk18on-1.76.jar") ); Shell.Result result = runElasticsearchStartCommand(null, false, false); assertElasticsearchFailure(result, "java.lang.NoClassDefFoundError: org/bouncycastle/", null); Files.move( - tempDir.resolve("bcprov-jdk15on-1.64.jar"), - installation.lib.resolve("tools").resolve("security-cli").resolve("bcprov-jdk15on-1.64.jar") + tempDir.resolve("bcprov-jdk18on-1.76.jar"), + installation.lib.resolve("tools").resolve("security-cli").resolve("bcprov-jdk18on-1.76.jar") ); Platforms.onWindows(() -> sh.chown(installation.config)); FileUtils.rm(tempDir); diff --git a/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index 7af6ad49fb001..43d5ea842f9ef 100644 --- a/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -305,7 +305,7 @@ public void testRecovery() throws Exception { // before timing out .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); } createIndex(index, settings.build()); @@ -340,7 +340,7 @@ public void testRetentionLeasesEstablishedWhenPromotingPrimary() throws Exceptio .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), between(1, 2)) // triggers nontrivial promotion .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); } createIndex(index, settings.build()); @@ -363,7 +363,7 @@ public void testRetentionLeasesEstablishedWhenRelocatingPrimary() throws Excepti .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), between(0, 1)) .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { 
settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); } createIndex(index, settings.build()); @@ -445,9 +445,12 @@ public void testRecoveryClosedIndex() throws Exception { * time the index was closed. */ public void testCloseIndexDuringRollingUpgrade() throws Exception { - final Version minimumNodeVersion = minimumNodeVersion(); - final String indexName = String.join("_", "index", CLUSTER_TYPE.toString(), Integer.toString(minimumNodeVersion.id)) - .toLowerCase(Locale.ROOT); + int id = switch (CLUSTER_TYPE) { + case OLD -> 1; + case MIXED -> 2; + case UPGRADED -> 3; + }; + final String indexName = String.join("_", "index", CLUSTER_TYPE.toString(), Integer.toString(id)).toLowerCase(Locale.ROOT); if (indexExists(indexName) == false) { createIndex( @@ -461,7 +464,7 @@ public void testCloseIndexDuringRollingUpgrade() throws Exception { closeIndex(indexName); } - if (minimumNodeVersion.onOrAfter(Version.V_7_2_0)) { + if (minimumIndexVersion().onOrAfter(IndexVersions.V_7_2_0)) { // index is created on a version that supports the replication of closed indices, // so we expect the index to be closed and replicated ensureGreen(indexName); @@ -501,7 +504,7 @@ public void testClosedIndexNoopRecovery() throws Exception { if (indexVersionCreated(indexName).onOrAfter(IndexVersions.V_7_2_0)) { // index was created on a version that supports the replication of closed indices, so we expect it to be closed and replicated - assertTrue(minimumNodeVersion().onOrAfter(Version.V_7_2_0)); + assertTrue(minimumIndexVersion().onOrAfter(IndexVersions.V_7_2_0)); ensureGreen(indexName); assertClosedIndex(indexName, true); if (CLUSTER_TYPE != ClusterType.OLD) { @@ -648,7 +651,7 @@ public void testOperationBasedRecovery() throws Exception { final Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2); - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); } final String mappings = randomBoolean() ? 
"\"_source\": { \"enabled\": false}" : null; @@ -700,7 +703,6 @@ public void testTurnOffTranslogRetentionAfterUpgraded() throws Exception { public void testAutoExpandIndicesDuringRollingUpgrade() throws Exception { final String indexName = "test-auto-expand-filtering"; - final Version minimumNodeVersion = minimumNodeVersion(); Response response = client().performRequest(new Request("GET", "_nodes")); ObjectPath objectPath = ObjectPath.createFromResponse(response); @@ -721,11 +723,7 @@ public void testAutoExpandIndicesDuringRollingUpgrade() throws Exception { final int numberOfReplicas = Integer.parseInt( getIndexSettingsAsMap(indexName).get(IndexMetadata.SETTING_NUMBER_OF_REPLICAS).toString() ); - if (minimumNodeVersion.onOrAfter(Version.V_7_6_0)) { - assertEquals(nodes.size() - 2, numberOfReplicas); - } else { - assertEquals(nodes.size() - 1, numberOfReplicas); - } + assertThat(nodes, hasSize(numberOfReplicas + 2)); } public void testSoftDeletesDisabledWarning() throws Exception { @@ -733,7 +731,7 @@ public void testSoftDeletesDisabledWarning() throws Exception { if (CLUSTER_TYPE == ClusterType.OLD) { boolean softDeletesEnabled = true; Settings.Builder settings = Settings.builder(); - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { softDeletesEnabled = randomBoolean(); settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), softDeletesEnabled); } diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index 440483039256c..d3af5d25b70ff 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -9,7 +9,6 @@ import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask -apply plugin: 'elasticsearch.internal-testclusters' apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.internal-test-artifact-base' apply plugin: 'elasticsearch.bwc-test' diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SystemIndicesUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SystemIndicesUpgradeIT.java index 44ee7f0b56d1c..fbd6ee8aa3759 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SystemIndicesUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SystemIndicesUpgradeIT.java @@ -14,11 +14,11 @@ import org.elasticsearch.client.ResponseException; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.XContentTestUtils.JsonMapView; +import org.elasticsearch.test.rest.RestTestLegacyFeatures; import java.util.Map; import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_INDEX_VERSION; -import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_VERSION; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -89,7 +89,7 @@ public void testSystemIndicesUpgrades() throws Exception { // If we are on 7.x create an alias that includes both a system index and a non-system index so we can be sure it gets // upgraded properly. If we're already on 8.x, skip this part of the test. 
- if (minimumNodeVersion().before(SYSTEM_INDEX_ENFORCEMENT_VERSION)) { + if (clusterHasFeature(RestTestLegacyFeatures.SYSTEM_INDICES_REST_ACCESS_ENFORCED) == false) { // Create an alias to make sure it gets upgraded properly Request putAliasRequest = new Request("POST", "/_aliases"); putAliasRequest.setJsonEntity(""" diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HttpStatsIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HttpStatsIT.java index c582191c085f4..ac1bde443f703 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HttpStatsIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HttpStatsIT.java @@ -52,7 +52,6 @@ public void testNodeHttpStats() throws IOException { assertHttpStats(new XContentTestUtils.JsonMapView((Map<String, Object>) nodesMap.get(nodeId))); } - @SuppressWarnings("unchecked") public void testClusterInfoHttpStats() throws IOException { internalCluster().ensureAtLeastNumDataNodes(3); performHttpRequests(); diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestClusterInfoActionCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestClusterInfoActionCancellationIT.java index 43d7630199bb2..896da65fa83dd 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestClusterInfoActionCancellationIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestClusterInfoActionCancellationIT.java @@ -114,7 +114,7 @@ public TimeValue masterNodeTimeout() { } }; - PlainActionFuture<AcknowledgedResponse> future = PlainActionFuture.newFuture(); + PlainActionFuture<AcknowledgedResponse> future = new PlainActionFuture<>(); internalCluster().getAnyMasterNodeInstance(ClusterService.class) .submitUnbatchedStateUpdateTask("get_mappings_cancellation_test", new AckedClusterStateUpdateTask(ackedRequest, future) { @Override diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchRestCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchRestCancellationIT.java index a860b0855e158..73dd1525f8a08 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchRestCancellationIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchRestCancellationIT.java @@ -15,10 +15,10 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; -import org.elasticsearch.action.search.MultiSearchAction; import org.elasticsearch.action.search.MultiSearchRequest; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.TransportMultiSearchAction; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.Cancellable; @@ -73,7 +73,7 @@ public void testAutomaticCancellationDuringQueryPhase() throws Exception { scriptQuery(new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap())) ); searchRequest.setJsonEntity(Strings.toString(searchSource)); - verifyCancellationDuringQueryPhase(SearchAction.NAME, searchRequest); + verifyCancellationDuringQueryPhase(TransportSearchAction.TYPE.name(), searchRequest); } public void
testAutomaticCancellationMultiSearchDuringQueryPhase() throws Exception { @@ -89,7 +89,7 @@ public void testAutomaticCancellationMultiSearchDuringQueryPhase() throws Except Request restRequest = new Request("POST", "/_msearch"); byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent()); restRequest.setEntity(new NByteArrayEntity(requestBody, createContentType(contentType))); - verifyCancellationDuringQueryPhase(MultiSearchAction.NAME, restRequest); + verifyCancellationDuringQueryPhase(TransportMultiSearchAction.TYPE.name(), restRequest); } void verifyCancellationDuringQueryPhase(String searchAction, Request searchRequest) throws Exception { @@ -98,7 +98,7 @@ void verifyCancellationDuringQueryPhase(String searchAction, Request searchReque List<ScriptedBlockPlugin> plugins = initBlockFactory(); indexTestData(); - PlainActionFuture<Response> future = PlainActionFuture.newFuture(); + PlainActionFuture<Response> future = new PlainActionFuture<>(); Cancellable cancellable = getRestClient().performRequestAsync(searchRequest, wrapAsRestResponseListener(future)); awaitForBlock(plugins); @@ -116,7 +116,7 @@ public void testAutomaticCancellationDuringFetchPhase() throws Exception { new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap()) ); searchRequest.setJsonEntity(Strings.toString(searchSource)); - verifyCancellationDuringFetchPhase(SearchAction.NAME, searchRequest); + verifyCancellationDuringFetchPhase(TransportSearchAction.TYPE.name(), searchRequest); } public void testAutomaticCancellationMultiSearchDuringFetchPhase() throws Exception { @@ -132,7 +132,7 @@ public void testAutomaticCancellationMultiSearchDuringFetchPhase() throws Except Request restRequest = new Request("POST", "/_msearch"); byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent()); restRequest.setEntity(new NByteArrayEntity(requestBody, createContentType(contentType))); - verifyCancellationDuringFetchPhase(MultiSearchAction.NAME, restRequest); + verifyCancellationDuringFetchPhase(TransportMultiSearchAction.TYPE.name(), restRequest); } void verifyCancellationDuringFetchPhase(String searchAction, Request searchRequest) throws Exception { @@ -141,7 +141,7 @@ void verifyCancellationDuringFetchPhase(String searchAction, Request searchReque List<ScriptedBlockPlugin> plugins = initBlockFactory(); indexTestData(); - PlainActionFuture<Response> future = PlainActionFuture.newFuture(); + PlainActionFuture<Response> future = new PlainActionFuture<>(); Cancellable cancellable = getRestClient().performRequestAsync(searchRequest, wrapAsRestResponseListener(future)); awaitForBlock(plugins); @@ -186,7 +186,7 @@ private static void indexTestData() { // Make sure we have a few segments BulkRequestBuilder bulkRequestBuilder = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); for (int j = 0; j < 20; j++) { - bulkRequestBuilder.add(client().prepareIndex("test").setId(Integer.toString(i * 5 + j)).setSource("field", "value")); + bulkRequestBuilder.add(prepareIndex("test").setId(Integer.toString(i * 5 + j)).setSource("field", "value")); } assertNoFailures(bulkRequestBuilder.get()); } diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml new file mode 100644 index 0000000000000..6d6ee1f6bed41 --- /dev/null +++
b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml @@ -0,0 +1,214 @@ +setup: + - skip: + version: ' - 8.11.99' + reason: 'ingest simulate added in 8.12' + +--- +"Test ingest simulate with reroute": + + - skip: + features: headers + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-pipeline-1" + body: > + { + "processors": [ + { + "set": { + "field": "my-pipeline-1-ran", + "value": true + } + }, + { + "reroute": { + "destination": "index-2-a" + } + } + ] + } + - match: { acknowledged: true } + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-final-pipeline-1" + body: > + { + "processors": [ + { + "set": { + "field": "my-final-pipeline-1-ran", + "value": true + } + } + ] + } + - match: { acknowledged: true } + + - do: + indices.put_template: + name: my-template-1 + body: + index_patterns: index-1-* + settings: + default_pipeline: "my-pipeline-1" + final_pipeline: "my-final-pipeline-1" + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-pipeline-2" + body: > + { + "processors": [ + { + "set": { + "field": "my-pipeline-2-ran", + "value": true + } + } + ] + } + - match: { acknowledged: true } + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-final-pipeline-2" + body: > + { + "processors": [ + { + "set": { + "field": "my-final-pipeline-2-ran", + "value": true + } + }, + { + "uppercase": { + "field": "foo" + } + } + ] + } + - match: { acknowledged: true } + + - do: + indices.put_template: + name: my-template-2 + body: + index_patterns: index-2-* + settings: + default_pipeline: "my-pipeline-2" + final_pipeline: "my-final-pipeline-2" + + - do: + headers: + Content-Type: application/json + simulate.ingest: + body: > + { + "docs": [ + { + "_index": "index-1-a", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "index-1-a", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ], + "pipeline_substitutions": { + "my-pipeline": { + "processors": [ + ] + } + } + } + - length: { docs: 2 } + - match: { docs.0.doc._index: "index-2-a" } + - match: { docs.0.doc._source.foo: "BAR" } + - match: { docs.0.doc._source.my-pipeline-1-ran: true } + - match: { docs.0.doc._source.my-final-pipeline-1-ran: null } + - match: { docs.0.doc._source.my-pipeline-2-ran: true } + - match: { docs.0.doc._source.my-final-pipeline-2-ran: true } + - match: { docs.0.doc.executed_pipelines: ["my-pipeline-1", "my-pipeline-2", "my-final-pipeline-2"] } + - match: { docs.1.doc._index: "index-2-a" } + - match: { docs.1.doc._source.foo: "RAB" } + - match: { docs.1.doc._source.my-pipeline-1-ran: true } + - match: { docs.1.doc._source.my-final-pipeline-1-ran: null } + - match: { docs.1.doc._source.my-pipeline-2-ran: true } + - match: { docs.1.doc._source.my-final-pipeline-2-ran: true } + - match: { docs.1.doc.executed_pipelines: ["my-pipeline-1", "my-pipeline-2", "my-final-pipeline-2"] } + +--- +"Test ingest simulate with errors": + + - skip: + features: headers + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-pipeline" + body: > + { + "processors": [ + { + "uppercase": { + "field": "field1" + } + } + ] + } + - match: { acknowledged: true } + + - do: + indices.create: + index: index + body: + settings: + default_pipeline: "my-pipeline" + + - do: + headers: + Content-Type: application/json + simulate.ingest: + body: > + { + "docs": [ + { + "_index": "index", + "_source": { +
"field1": true + } + }, + { + "_index": "index", + "_source": { + "field1": "bar" + } + } + ] + } + - length: { docs: 2 } + - match: { docs.0.doc._index: "index" } + - match: { docs.0.doc.error.type: "illegal_argument_exception" } + - match: { docs.0.doc.executed_pipelines: null } + - match: { docs.1.doc._index: "index" } + - match: { docs.1.doc._source.field1: "BAR" } + - match: { docs.1.doc.executed_pipelines: ["my-pipeline"] } diff --git a/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml b/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml index f8b1de5155527..92905243fdb12 100644 --- a/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml +++ b/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml @@ -186,3 +186,18 @@ setup: - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.max' - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.average' - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.std_dev' + +--- +"Test unassigned_shards, total_allocations, undesired_allocations and undesired_allocations_fraction": + + - skip: + version: " - 8.11.99" + reason: "undesired_shard_allocation_count added in in 8.12.0" + + - do: + _internal.get_desired_balance: { } + + - gte: { 'stats.unassigned_shards' : 0 } + - gte: { 'stats.total_allocations' : 0 } + - gte: { 'stats.undesired_allocations' : 0 } + - gte: { 'stats.undesired_allocations_ratio' : 0.0 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.delete.json new file mode 100644 index 0000000000000..6cfc0ffcaf02b --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.delete.json @@ -0,0 +1,32 @@ +{ + "connector.delete": { + "documentation": { + "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "description": "Deletes a connector." + }, + "stability": "experimental", + "visibility": "feature_flag", + "feature_flag": "es.connector_api_feature_flag_enabled", + "headers": { + "accept": [ + "application/json" + ] + }, + "url": { + "paths": [ + { + "path": "/_connector/{connector_id}", + "methods": [ + "DELETE" + ], + "parts": { + "connector_id": { + "type": "string", + "description": "The unique identifier of the connector to be deleted." + } + } + } + ] + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.get.json new file mode 100644 index 0000000000000..d866920324852 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.get.json @@ -0,0 +1,32 @@ +{ + "connector.get": { + "documentation": { + "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "description": "Returns the details about a connector." 
+ }, + "stability": "experimental", + "visibility": "feature_flag", + "feature_flag": "es.connector_api_feature_flag_enabled", + "headers": { + "accept": [ + "application/json" + ] + }, + "url": { + "paths": [ + { + "path": "/_connector/{connector_id}", + "methods": [ + "GET" + ], + "parts": { + "connector_id": { + "type": "string", + "description": "The unique identifier of the connector to be returned." + } + } + } + ] + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json new file mode 100644 index 0000000000000..a1e5ddcc5d686 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json @@ -0,0 +1,38 @@ +{ + "connector.list": { + "documentation": { + "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "description": "Lists all connectors." + }, + "stability": "experimental", + "visibility": "feature_flag", + "feature_flag": "es.connector_api_feature_flag_enabled", + "headers": { + "accept": [ + "application/json" + ] + }, + "url": { + "paths": [ + { + "path": "/_connector", + "methods": [ + "GET" + ] + } + ] + }, + "params": { + "from": { + "type": "int", + "default": 0, + "description": "Starting offset (default: 0)" + }, + "size": { + "type": "int", + "default": 100, + "description": "specifies a max number of results to get (default: 100)" + } + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.put.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.put.json new file mode 100644 index 0000000000000..8511b870a2d12 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.put.json @@ -0,0 +1,39 @@ +{ + "connector.put": { + "documentation": { + "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "description": "Creates or updates a connector." + }, + "stability": "experimental", + "visibility": "feature_flag", + "feature_flag": "es.connector_api_feature_flag_enabled", + "headers": { + "accept": [ + "application/json" + ], + "content_type": [ + "application/json" + ] + }, + "url": { + "paths": [ + { + "path": "/_connector/{connector_id}", + "methods": [ + "PUT" + ], + "parts": { + "connector_id": { + "type": "string", + "description": "The unique identifier of the connector to be created or updated." + } + } + } + ] + }, + "body": { + "description": "The connector configuration.", + "required": true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.post.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.post.json new file mode 100644 index 0000000000000..563d0022d90d3 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.post.json @@ -0,0 +1,33 @@ +{ + "connector_sync_job.post": { + "documentation": { + "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "description": "Creates a connector sync job." 
+ }, + "stability": "experimental", + "visibility": "feature_flag", + "feature_flag": "es.connector_api_feature_flag_enabled", + "headers": { + "accept": [ + "application/json" + ], + "content_type": [ + "application/json" + ] + }, + "url": { + "paths": [ + { + "path": "/_connector/_sync_job", + "methods": [ + "POST" + ] + } + ] + }, + "body": { + "description": "The connector sync job data.", + "required": true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.inference.json b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.inference.json index 127c6a5e86640..9426d6738c374 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.inference.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.inference.json @@ -7,7 +7,8 @@ "stability":"experimental", "visibility":"public", "headers":{ - "accept": [ "application/json"] + "accept": [ "application/json"], + "content_type": ["application/json"] }, "url":{ "paths":[ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.put_model.json b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.put_model.json index 3a171640367de..26ba9ddb00608 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.put_model.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.put_model.json @@ -7,7 +7,8 @@ "stability":"experimental", "visibility":"public", "headers":{ - "accept": [ "application/json"] + "accept": [ "application/json"], + "content_type": ["application/json"] }, "url":{ "paths":[ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/open_point_in_time.json b/rest-api-spec/src/main/resources/rest-api-spec/api/open_point_in_time.json index a25c3fee32571..bce8dfd794dca 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/open_point_in_time.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/open_point_in_time.json @@ -7,7 +7,8 @@ "stability":"stable", "visibility":"public", "headers":{ - "accept": [ "application/json"] + "accept": [ "application/json"], + "content_type": ["application/json"] }, "url":{ "paths":[ @@ -55,6 +56,9 @@ "description": "Specific the time to live for the point in time", "required": true } + }, + "body":{ + "description":"An index_filter specified with the Query DSL" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/simulate.ingest.json b/rest-api-spec/src/main/resources/rest-api-spec/api/simulate.ingest.json new file mode 100644 index 0000000000000..91e7153d466da --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/simulate.ingest.json @@ -0,0 +1,48 @@ +{ + "simulate.ingest":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-ingest-api.html", + "description":"Simulates running ingest with example documents." 
+ }, + "stability":"experimental", + "visibility":"public", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_ingest/_simulate", + "methods":[ + "GET", + "POST" + ] + }, + { + "path":"/_ingest/{index}/_simulate", + "methods":[ + "GET", + "POST" + ], + "parts":{ + "index":{ + "type":"string", + "description":"Default index for docs which don't provide one" + } + } + } + ] + }, + "params":{ + "pipeline":{ + "type":"string", + "description":"The pipeline id to preprocess incoming documents with if no pipeline is given for a particular document" + } + }, + "body":{ + "description":"The simulate definition", + "required":true + } + } +} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml index 8e1d3431069cf..a4204034bfd80 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml @@ -221,3 +221,18 @@ setup: - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.max' - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.average' - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.std_dev' + +--- +"Test unassigned_shards, total_allocations, undesired_allocations and undesired_allocations_fraction": + + - skip: + version: " - 8.11.99" + reason: "undesired_shard_allocation_count added in in 8.12.0" + + - do: + _internal.get_desired_balance: { } + + - gte: { 'stats.unassigned_shards' : 0 } + - gte: { 'stats.total_allocations' : 0 } + - gte: { 'stats.undesired_allocations' : 0 } + - gte: { 'stats.undesired_allocations_ratio' : 0.0 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/20_docs.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/20_docs.yml index c7477c5b538ab..6a347df112b47 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/20_docs.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/20_docs.yml @@ -2,43 +2,64 @@ "Basic mlt query with docs": - do: indices.create: - index: test_1 + index: mlt_test_index - do: index: - index: test_1 + index: mlt_test_index id: "1" body: { foo: bar } - do: index: - index: test_1 + index: mlt_test_index id: "2" body: { foo: baz } - do: index: - index: test_1 + index: mlt_test_index id: "3" body: { foo: foo } - do: indices.refresh: {} + - do: + get: + index: mlt_test_index + id: "1" + + - match: { _source.foo: "bar" } + + - do: + get: + index: mlt_test_index + id: "2" + + - match: { _source.foo: "baz" } + + - do: + get: + index: mlt_test_index + id: "3" + + - match: { _source.foo: "foo" } + - do: search: rest_total_hits_as_int: true - index: test_1 + index: mlt_test_index body: query: more_like_this: like: - - _index: test_1 + _index: mlt_test_index doc: foo: bar - - _index: test_1 + _index: mlt_test_index _id: "2" - _id: "3" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/25_docs_one_shard.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/25_docs_one_shard.yml new file mode 100644 index 0000000000000..a340de50bba0c --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/25_docs_one_shard.yml @@ -0,0 +1,75 @@ +--- 
+"Basic mlt query with docs - explicitly on same shard": + - do: + indices.create: + index: mlt_one_shard_test_index + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 1 + + - do: + index: + index: mlt_one_shard_test_index + id: "1" + body: { foo: bar } + + - do: + index: + index: mlt_one_shard_test_index + id: "2" + body: { foo: baz } + + - do: + index: + index: mlt_one_shard_test_index + id: "3" + body: { foo: foo } + + - do: + indices.refresh: {} + + - do: + get: + index: mlt_one_shard_test_index + id: "1" + + - match: { _source.foo: "bar" } + + - do: + get: + index: mlt_one_shard_test_index + id: "2" + + - match: { _source.foo: "baz" } + + - do: + get: + index: mlt_one_shard_test_index + id: "3" + + - match: { _source.foo: "foo" } + + - do: + search: + rest_total_hits_as_int: true + index: mlt_one_shard_test_index + body: + query: + more_like_this: + like: + - + _index: mlt_one_shard_test_index + doc: + foo: bar + - + _index: mlt_one_shard_test_index + _id: "2" + - + _id: "3" + include: true + min_doc_freq: 0 + min_term_freq: 0 + + - match: { hits.total: 3 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml index 62d752b1efe88..545953d2645da 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml @@ -4,6 +4,9 @@ setup: reason: 'Dynamic mapping of floats to dense_vector was added in 8.11' --- "Fields indexed as strings won't be transformed into dense_vector": + - skip: + version: ' - 8.11.0' + reason: 'Bug fix was added in 8.11.1' - do: index: index: strings-are-not-floats @@ -567,3 +570,104 @@ setup: - match: { test-copyto-index.mappings.properties.my_float2.type: float } - match: { test-copyto-index.mappings.properties.my_copyto_field.type: float } +--- +"Fields mapped as dense_vector without dims or docs have correct cluster stats values": + - skip: + version: ' - 8.11.1' + reason: 'Bug fix was added in 8.11.2' + + - do: + indices.create: + index: test-mapped-index + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + my_dense_vector_field: + type: dense_vector + + - do: + cluster.health: + wait_for_events: languid + + - do: + cluster.stats: { } + + - match: { indices.mappings.field_types.0.name: dense_vector } + - match: { indices.mappings.field_types.0.count: 1 } + - match: { indices.mappings.field_types.0.indexed_vector_count: 1 } + - match: { indices.mappings.field_types.0.indexed_vector_dim_min: -1 } + - match: { indices.mappings.field_types.0.indexed_vector_dim_max: -1 } + +--- +"Fields mapped as dense_vector have correct cluster stats min max values": + - skip: + version: ' - 8.11.1' + reason: 'Bug fix was added in 8.11.2' + + - do: + index: + index: foo-mapped-index + id: "1" + refresh: true + body: + my_dense_vector_field: [ + 
233.8598,-17.6827,283.4271,-329.1247,-402.9721,404.7866,-358.7031,-267.4074,441.8363,320.2389,-128.0179,339.544,196.2018,-60.2688,336.0228,-440.1943,318.6882,-158.2596,277.0925,-487.4971,-338.9865,-275.716,136.8547,-253.6206,-40.2807,-357.0971,188.0344,-203.0674,449.9618,-223.2508,468.1441,302.4002,-65.0044,342.4431,205.6774,-118.636,-29.9706,183.9825,223.956,314.0691,137.0129,-8.0452,-15.131,-269.8643,-12.691,228.9777,-147.8384,-347.1117,-283.1905,459.2004,296.1321,-483.1799,414.3423,383.0187,-408.5525,-286.8169,482.5853,9.5232,-459.4968,-333.2521,109.0969,129.5107,43.4369,455.8283,-4.0423,-318.5019,339.1641,416.3581,-309.0429,84.2325,-355.8753,264.7671,43.8922,-298.6039,412.4413,19.4198,-251.279,-191.157,-478.2058,251.5709,-178.9633,479.293,188.399,380.9755,268.6575,120.3467,-322.0305,-255.4894,-377.515,56.9153,-133.9486,156.2546,-428.9581,-54.994,28.2146,158.7121,-426.7307,491.0086,-150.7205,-233.1005,244.5174,45.911,-406.1181,233.1636,175.9334,414.2805,421.7396,-322.8029,-252.2412,35.7622,318.5223,-141.5121,-375.4407,380.3081,222.1228,443.7844,367.377,-202.9594,-493.6231,-184.2242,-253.9838,463.1952,-416.3887,252.0867,-63.5317,411.0727,98.6261,330.7369,363.5685,-498.1848,-413.7246,-2.5996,-238.3547,-355.6041,-303.698,43.6266,383.1105,-72.3066,274.7491,321.9322,220.9543,-30.5578,400.0891,-181.7069,-386.4403,497.2206,-408.9611,138.485,-133.5666,-340.2569,-223.6313,270.884,-215.9399,74.3931,-244.1364,353.4219,-156.9905,488.3148,96.352,401.8525,-468.8344,129.9715,-27.1953,-168.631,187.7049,-336.5255,331.0652,204.3538,36.0182,366.8502,-468.6579,478.1409,-332.6136,-281.8499,63.7165,-458.8161,14.8894,-145.6397,267.1499,85.2025,326.3764,-419.6361,-133.9626,102.0618,443.3099,-207.9032,132.7032,234.001,-26.0754,105.6478,174.1252,-403.3511,-164.9714,-262.9344,-58.9668,357.6414,355.7508,-331.8443,153.5733,417.5712,260.7394,-150.1053,-435.6525,-364.1558,328.6183,-270.0863,107.1746,345.7998,480.8749,206.3896,-498.237,495.0835,481.9384,418.5571,-246.5213,-363.7304,311.7076,-53.1664,-297.3839,122.3105,-13.9226,-145.9754,-189.1748,460.9375,194.5417,-28.1346,-261.2177,-88.8396,-254.6407,-465.3148,-169.5377,24.3113,-116.2323,-420.3526,317.2107,-231.6227,-270.8239,387.8598,412.4251,428.1373,308.2044,275.2082,402.3663,-209.9843,-492.7269,225.1948,326.469,207.3557,-131.7677,371.9408,-139.3098,324.205,-126.6204,-335.0853,-248.2587,-344.907,307.2109,-441.3296,-318.027,414.6535,172.0537,-280.4991,331.0475,-158.0178,-285.1951,12.3632,149.9347,282.8302,-91.5624,-180.6097,496.0881,368.2567,357.6875,-194.2106,48.9213,-479.2956,-165.139,238.7811,302.7007,297.2805,208.7099,-5.5755,-85.7911,-358.1111,344.6131,415.7199,-219.1525,490.5003,-46.0096,498.2818,-91.8067,384.0104,396.1107,408.2827,-5.3919,-333.7992,-168.985,273.72,359.7125,227.7621,158.3406,-366.9722,3.7709,27.2728,71.9754,269.5792,-365.281,117.9152,-184.3682,356.9013,-142.6579,-496.7598,122.0194,89.1247,4.1914,-81.9905,465.0841,115.4727,169.6116,-199.9951,-223.3149,-447.3022,11.831,320.2368,105.1316,344.2462,8.6333,62.2285,-70.3944,-284.6694,-482.4229,-448.1569,-237.7858,222.3921,-172.1386,-312.5756,-390.0565,398.951,119.9784,-419.6537,121.3186,481.3011,-181.6662,-56.0219,424.1359,7.1461,138.8567,-307.0606,334.066,254.0897,473.7227,45.5936,133.7268,49.5334,-283.3406,179.4466,105.6191,-30.4162,271.5774,6.1156,110.4732,286.4325,13.3431,494.0139,-371.7624,283.3652,272.0558,-302.343,122.7245,-463.9261,299.9807,282.4502,-262.4911,183.4289,222.7474,-229.5973,141.6188,262.5468,278.1155,-331.0891,-393.6027,-230.1461,201.6657,-93.3604,-395.8877,-125.2013,-222.973,368
.3759,234.6628,-28.6809,-151.0703,432.0315,253.1214,430.7065,-143.6963,499.84,85.1683,280.4354,196.6013,139.0476,120.8148,-398.8155,-335.5504,229.0516,403.8604,-383.9868,-79.975,-152.77,220.4036,135.0355,238.2176,-242.3085,-177.0743,381.8202,411.167,378.0153,456.5976,364.013,24.2316,-395.4659,-210.2581,138.7539,479.7398,-291.7797,-123.0491,188.9817,42.8931,-354.4479,358.853,-43.6168,-190.6656,-103.3037,47.8915,-358.5402,374.9758,493.9951,-427.2376,-119.1142,-453.2975,-326.2696,-212.8273,-142.2931,-179.795,355.77,-156.2903,331.2006,451.9252,185.2944,-96.1941,173.0447,345.2744,43.0151,381.7845,-143.4125,84.654,-208.7053,-293.141,333.6349,-80.472,-376.9817,214.6298,-43.0931,-254.7834,-421.6961,-368.844,467.5544,-418.61,-66.6824,-350.2671,348.8241,252.3495,41.8677,-128.869,90.0391,-136.7405,-136.7822,489.8074,-396.8204,63.8355,323.9557,-83.6674,451.263,152.8955,-291.7497,410.0787,-299.7468,51.34,-298.6066,-58.853,325.911,-281.9541,-15.3457,299.1325,-347.4959,388.407,343.1096,28.1816,24.3013,-111.3312,190.5583,279.9848,-479.8894,123.2182,233.8425,-466.2128,-134.7122,217.8674,432.9523,-186.799,-477.2512,-223.5514,64.274,141.5251,-161.2187,150.2791,-228.1087,81.172,451.0879,-230.3818,-304.9398,402.1081,199.1266,275.3423,-123.9548,-21.1815,-384.544,446.9626,208.9692,-337.4827,-58.1011,344.2642,230.2868,44.9176,245.9885,-284.1875,-351.6104,108.1289,459.649,191.4334,53.591,136.7139,10.5912,-15.8411,62.8305,448.5256,194.7705,-356.3214,84.4996,-133.2502,-358.6308,262.7949,219.8741,-355.3985,468.2922,243.7227,-408.3166,188.6111,-221.7264,-286.8234,-340.3046,-224.5375,332.2615,73.2788,-24.7857,-485.2204,-136.7196,-162.9693,92.6017,-99.611,-186.5203,495.5483,240.8051,409.6493,-58.1321,-154.1239,-335.9719,-82.4408,-471.3057,-43.373,301.0884,-96.6359,-236.6906,435.7313,-227.7263,-406.8904,-392.3187,169.0043,-371.0852,-271.3652,-57.4466,-196.8455,52.741,361.7395,-117.8599,190.5339,276.6457,-321.9851,425.881,-473.2662,-74.2968,221.3612,-465.4429,181.723,-78.4508,21.6152,148.8107,-166.1687,-281.6391,-462.3636,-420.5255,-161.4143,98.8383,-374.5345,-366.2851,187.1506,-405.1865,239.4847,-246.8352,33.1748,-344.1211,477.9759,-294.1354,-359.5015,-44.8454,151.7072,-22.7324,-260.3293,99.1414,-20.5536,173.3766,-422.6692,458.3853,-199.7898,-236.3929,365.2599,-66.4191,388.3472,283.0336,-268.9463,269.5704,360.9679,-322.102,-407.0705,-93.0994,338.9108,-189.1359,-216.9102,-249.0153,122.6058,-254.8318,-112.2771,-279.0506,-168.4431,392.888,394.7607,468.0544,340.1852,-293.1288,-8.2912,-419.2608,323.3382,-93.8793,-242.0672,427.7716,-441.6906,128.3229,424.4679,-71.8586,134.5411,-74.5205,18.4141,17.7277,126.9123,-137.6119,33.3783,222.9912,-279.3582,89.1226,-90.031,12.7221,98.7767,-80.2372,-485.9212,-481.6575,-325.9729,318.8005,-433.786,-296.6337,421.6515,-27.2786,-445.2456,451.8876,-482.1014,-143.1098,186.1258,-90.2432,-297.7479,-351.0026,-423.7518,-219.6096,-269.2043,33.5767,-325.4335,392.4866,-418.243,112.5852,-248.1306,451.2154,-419.2995,154.5752,483.6323,-315.962,-196.872,406.1769,-356.9868,67.5251,-255.6475,103.5181,-450.4418,386.9518,456.4057,99.4591,-166.636,275.5374,200.4925,99.7623,292.6794,-422.3998,419.4837,-466.548,-462.8519,-381.4489,472.8356,-129.9563,441.4941,-376.1232,-114.1945,233.5531,313.6963,394.9503,-278.7558,350.7515,47.9427,220.7074,-178.9789,-346.0485,-128.5665,8.9461,159.9838,-57.3637,351.9478,-65.9411,-258.1788,498.9494,-472.613,-428.5678,17.3981,-435.3682,-421.155,-54.9177,-490.2348,178.3777,-31.9618,-242.1805,362.3736,380.8179,446.4272,-23.9142,61.3588,-489.5704,363.6446,-186.1519,-351.8684,-322.2791,-
226.0431,404.6996,203.9824,306.0958,234.0145,-180.4996,452.0633,257.171,-83.6197,-393.152,396.6934,32.156,-428.7645,183.7886,494.767,68.3905,278.9785,-40.4759,261.7298,236.5778,4.5577,-130.9582,433.2837,-298.1139,-107.9822,-196.8446,-121.1765,-292.5509,-246.4546,-258.6038,280.1334,-52.6511,483.2928,-185.7577,-75.3705,351.3411,179.1282,-479.3838,166.2733,-197.9043,282.6848,-50.4744,-492.7178,183.6435,-127.2379,483.646,433.0805,-228.5488,139.8314,-145.1337,-403.1749,306.2704,122.7149,479.6928,85.3866,108.095,-224.152,494.6848,-368.4504,-180.7579,61.7136,51.2045,-383.0103,-376.4816,-292.8217,-201.118,332.1516,425.2758,138.1284,-229.4302,432.9081,2.9898,-437.7631,-448.2151,129.9126,-170.2405,499.0396,-48.2137,363.8046,-423.2511,-28.0804,-267.826,-356.6288,-99.9371,-409.8465,170.4902,-269.2584,-277.4098,300.8819,-142.5889,339.0952,16.2275,-310.8646,201.0733,-495.5905,341.9279,-149.1184,-494.4928,-81.7343,209.9762,273.4892,380.3163,359.2424,-242.5,-42.1268,-303.9792,11.6018,361.5483,416.4178,10.3282,195.9796,148.8096,-60.9724,-205.5221,-145.4574,-341.5913,426.8996,-19.5843,60.6265,-133.4191,-139.8737,281.7465,461.2854,-270.8902,61.0182,-58.6791,-254.0193,-234.1206,-208.7334,39.7498,-14.337,-68.2319,-342.2756,403.6834,401.6122,-166.1637,47.3592,-325.7,274.5459,343.4873,328.3783,-370.1657,-122.8967,-231.3182,122.6609,119.2685,-223.5437,-210.8076,116.5022,340.2814,256.1852,-217.3487,-150.9598,331.1343,-453.8182,-448.0842,-95.2475,-340.9942,-416.7835,-96.7226,-328.7212,-373.4337,472.2214,-484.522,-465.1583,330.0712,73.2052,-55.1266,-352.8984,341.0742,-230.4845,321.0752,236.2116,35.1902,75.3489,-469.4042,110.2036,35.1156,454.7224,103.0685,-221.7499,-23.6898,-259.2362,-110.509,-261.0039,219.2391,-139.9404,155.7723,377.9713,434.0318,-365.1397,459.1471,-318.5774,323.4256,194.325,-311.9529,-153.9019,-346.5811,76.4069,443.2121,-199.407,495.6636,-138.5213,-145.3432,-151.7758,-365.3547,263.6507,-491.1686,-183.5585,-12.6044,318.5346,-443.8639,-179.0338,477.9093,-355.5118,-423.0035,-229.1166,-96.7782,-479.2384,192.9085,223.3407,-302.9472,297.3847,477.584,-297.5958,168.6023,-80.6912,-89.8717,87.1476,-129.7807,346.5576,-253.9729,-399.6858,-389.5785,35.1648,-180.451,-49.6084,83.9582,-185.2329,97.283,195.5249,-91.6969,199.202,-449.792,333.4825,-113.7558,443.434,394.3587,-94.9074,71.2092,-251.1774,-85.047,-46.4004,20.2595,341.1073,-91.2527,86.3775,303.1247,-336.9011,343.9894,-384.1261,154.4411,-465.2493,-63.3249,488.0231,348.6725,458.2093,322.401,220.2532,283.3734,-386.4252,-256.5262,-87.2205,96.8199,47.6908,-399.6307,214.7716,-19.9177,-458.513,-194.3218,-320.5342,-275.857,-301.6955,-84.9038,358.3475,-88.9271,499.7721,-161.7403,355.4894,313.6211,-176.1703,61.8427,107.603,-176.063,-426.5408,292.3612,58.3331,-115.8853,471.4131,-76.4815,-309.6263,361.4518,192.4763,-145.7968,256.3888,133.335,-474.0901,-366.9793,-495.223,457.2366,170.056,285.0152,89.8213,225.2251,354.1822,-298.374,-332.9164,-55.2409,306.9283,25.9392,218.0624,7.5085,-151.8768,-155.4932,6.0001,201.4506,-259.9874,485.1078,-362.8516,-230.1434,-398.2512,243.0012,32.302,-197.91,144.1195,-89.4196,-44.0399,-371.7866,227.6007,492.7526,499.3824,162.2475,279.0325,177.0781,341.0137,199.6009,108.1678,312.2319,-211.5001,-92.675,357.0513,-337.924,-348.984,-350.3677,173.3473,-193.7346,-318.5609,-2.0928,46.6287,-346.8513,36.634,-277.4949,-149.325,481.1378,370.3864,-139.6689,-332.2805,48.0292,109.8363,494.6994,373.6992,495.7442,400.4998,-26.2276,-308.7669,188.9497,257.9182,-116.6944,269.8932,197.005,123.1139,-356.2058,485.1982,-4.0119,397.8434,-204.67,-494.5133,-414.1299
,142.1512,-36.5446,390.0718,6.9876,263.1216,457.5598,89.6086,-266.3804,17.3457,88.8182,236.6271,81.175,-170.2249,-5.7664,422.7852,180.3349,-135.2642,149.2285,-70.6607,-46.169,-389.3313,230.6125,388.4853,-438.3426,111.8034,300.0416,37.5604,-437.3868,-114.1336,312.7777,-99.1161,-312.9015,-147.3787,-434.0536,19.5034,141.706,-281.4504,-208.9608,281.4619,-361.0596,-464.2757,77.8205,232.5575,165.4104,424.8738,124.5555,342.038,86.7543,278.0216,311.2686,337.834,-90.0545,-210.1143,-488.4095,-80.7535,92.3731,-122.622,-288.0571,1.7285,-5.2998,100.0717,-395.0571,-477.5587,-160.5642,-119.4214,-232.233,415.7276,-204.3216,-436.7766,-103.4644,-427.0939,-31.0927,-440.2919,120.5971,-223.3623,-199.0988,304.8697,432.5731,-231.5791,-397.696,306.4134,330.1018,32.4345,-175.719,464.6091,-291.5686,300.1631,-167.4592,238.9574,104.5893,-187.2215,-294.0111,-361.9094,480.6847,-304.2133,-448.7144,67.7235,-255.9669,254.7379,464.5465,6.8909,-368.7554,337.5993,39.1928,-376.0625,433.4224,-109.1488,341.7731,377.843,446.839,-192.283,251.1592,437.6812,-478.3409 + ] + - do: + cluster.health: + wait_for_events: languid + + - do: + indices.get_mapping: + index: foo-mapped-index + + # sanity + - match: { foo-mapped-index.mappings.properties.my_dense_vector_field.type: dense_vector } + - match: { foo-mapped-index.mappings.properties.my_dense_vector_field.index: true } + - match: { foo-mapped-index.mappings.properties.my_dense_vector_field.similarity: cosine } + - match: { foo-mapped-index.mappings.properties.my_dense_vector_field.dims: 1276 } + + - do: + cluster.stats: { } + + - match: { indices.mappings.field_types.0.name: dense_vector } + - match: { indices.mappings.field_types.0.count: 1 } + - match: { indices.mappings.field_types.0.indexed_vector_count: 1 } + - match: { indices.mappings.field_types.0.indexed_vector_dim_min: 1276 } + - match: { indices.mappings.field_types.0.indexed_vector_dim_max: 1276 } + + - do: + index: + index: bar-mapped-index + id: "1" + refresh: true + body: + my_dense_vector_field: [ + 325.9729,318.8005,-433.786,-296.6337,421.6515,-27.2786,-445.2456,451.8876,-482.1014,-143.1098,186.1258,-90.2432,-297.7479,-351.0026,-423.7518,-219.6096,-269.2043,33.5767,-325.4335,392.4866,-418.243,112.5852,-248.1306,451.2154,-419.2995,154.5752,483.6323,-315.962,-196.872,406.1769,-356.9868,67.5251,-255.6475,103.5181,-450.4418,386.9518,456.4057,99.4591,-166.636,275.5374,200.4925,99.7623,292.6794,-422.3998,419.4837,-466.548,-462.8519,-381.4489,472.8356,-129.9563,441.4941,-376.1232,-114.1945,233.5531,313.6963,394.9503,-278.7558,350.7515,47.9427,220.7074,-178.9789,-346.0485,-128.5665,8.9461,159.9838,-57.3637,351.9478,-65.9411,-258.1788,498.9494,-472.613,-428.5678,17.3981,-435.3682,-421.155,-54.9177,-490.2348,178.3777,-31.9618,-242.1805,362.3736,380.8179,446.4272,-23.9142,61.3588,-489.5704,363.6446,-186.1519,-351.8684,-322.2791,-226.0431,404.6996,203.9824,306.0958,234.0145,-180.4996,452.0633,257.171,-83.6197,-393.152,396.6934,32.156,-428.7645,183.7886,494.767,68.3905,278.9785,-40.4759,261.7298,236.5778,4.5577,-130.9582,433.2837,-298.1139,-107.9822,-196.8446,-121.1765,-292.5509,-246.4546,-258.6038,280.1334,-52.6511,483.2928,-185.7577,-75.3705,351.3411,179.1282,-479.3838,166.2733,-197.9043,282.6848,-50.4744,-492.7178,183.6435,-127.2379,483.646,433.0805,-228.5488,139.8314,-145.1337,-403.1749,306.2704,122.7149,479.6928,85.3866,108.095,-224.152,494.6848,-368.4504,-180.7579,61.7136,51.2045,-383.0103,-376.4816,-292.8217,-399.6307,214.7716,-19.9177,-458.513,-194.3218,-320.5342,-275.857,-301.6955,-66.756 + ] + - do: + cluster.health: 
+ wait_for_events: languid + + - do: + indices.get_mapping: + index: bar-mapped-index + + # sanity + - match: { bar-mapped-index.mappings.properties.my_dense_vector_field.type: dense_vector } + - match: { bar-mapped-index.mappings.properties.my_dense_vector_field.index: true } + - match: { bar-mapped-index.mappings.properties.my_dense_vector_field.similarity: cosine } + - match: { bar-mapped-index.mappings.properties.my_dense_vector_field.dims: 164 } + + - do: + cluster.stats: { } + + - match: { indices.mappings.field_types.0.name: dense_vector } + - match: { indices.mappings.field_types.0.count: 2 } + - match: { indices.mappings.field_types.0.indexed_vector_count: 2 } + - match: { indices.mappings.field_types.0.indexed_vector_dim_min: 164 } + - match: { indices.mappings.field_types.0.indexed_vector_dim_max: 1276 } + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_point_in_time.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_point_in_time.yml index bc3479b705180..7e78450931df5 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_point_in_time.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_point_in_time.yml @@ -6,19 +6,19 @@ setup: index: index: test id: "1" - body: { id: 1, foo: bar, age: 18 } + body: { id: 1, foo: bar, age: 18, birth: "2022-01-01" } - do: index: index: test id: "42" - body: { id: 42, foo: bar, age: 18 } + body: { id: 42, foo: bar, age: 18, birth: "2022-02-01" } - do: index: index: test id: "172" - body: { id: 172, foo: bar, age: 24 } + body: { id: 172, foo: bar, age: 24, birth: "2022-03-01" } - do: indices.create: @@ -28,7 +28,7 @@ setup: index: index: test2 id: "45" - body: { id: 45, foo: bar, age: 19 } + body: { id: 45, foo: bar, age: 19, birth: "2023-01-01" } - do: indices.refresh: @@ -235,3 +235,32 @@ setup: close_point_in_time: body: id: "$point_in_time_id" + +--- +"point-in-time with index filter": + - skip: + version: " - 8.11.99" + reason: "support for index filter was added in 8.12" + - do: + open_point_in_time: + index: test* + keep_alive: 5m + body: { index_filter: { range: { birth: { gte: "2023-01-01" }}}} + - set: {id: point_in_time_id} + + - do: + search: + body: + size: 1 + pit: + id: "$point_in_time_id" + + - match: {hits.total.value: 1 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._index: test2 } + - match: {hits.hits.0._id: "45" } + + - do: + close_point_in_time: + body: + id: "$point_in_time_id" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml new file mode 100644 index 0000000000000..38aaaa9847efb --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml @@ -0,0 +1,344 @@ +setup: + - skip: + version: ' - 8.11.99' + reason: 'ingest simulate added in 8.12' + +--- +"Test no pipelines": + + - skip: + features: headers + + - do: + headers: + Content-Type: application/json + simulate.ingest: + body: > + { + "docs": [ + { + "_index": "index-1", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "index-2", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ] + } + - length: { docs: 2 } + - match: { docs.0.doc._index: "index-1" } + - match: { docs.0.doc._source.foo: "bar" } + - match: { docs.0.doc.executed_pipelines: [] } + - match: { docs.1.doc._index: "index-2" } + - match: { docs.1.doc._source.foo: "rab" } + 
- match: { docs.1.doc.executed_pipelines: [] } + +--- +"Test existing index with pipelines": + + - skip: + features: headers + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-pipeline" + body: > + { + "processors": [ + ] + } + - match: { acknowledged: true } + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-final-pipeline" + body: > + { + "processors": [ + ] + } + - match: { acknowledged: true } + + - do: + indices.create: + index: index + body: + settings: + default_pipeline: "my-pipeline" + final_pipeline: "my-final-pipeline" + + - do: + headers: + Content-Type: application/json + simulate.ingest: + body: > + { + "docs": [ + { + "_index": "index", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "index", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ], + "pipeline_substitutions": { + "my-pipeline": { + "processors": [ + ] + } + } + } + - length: { docs: 2 } + - match: { docs.0.doc._source.foo: "bar" } + - match: { docs.0.doc.executed_pipelines: ["my-pipeline", "my-final-pipeline"] } + - match: { docs.1.doc._source.foo: "rab" } + - match: { docs.1.doc.executed_pipelines: ["my-pipeline", "my-final-pipeline"] } + +--- +"Test index templates with pipelines": + + - skip: + features: headers + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/102339" + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-pipeline" + body: > + { + "processors": [ + ] + } + - match: { acknowledged: true } + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-final-pipeline" + body: > + { + "processors": [ + ] + } + - match: { acknowledged: true } + + - do: + indices.put_index_template: + name: my-template + body: + index_patterns: index-* + template: + settings: + default_pipeline: "my-pipeline" + final_pipeline: "my-final-pipeline" + + - do: + headers: + Content-Type: application/json + simulate.ingest: + body: > + { + "docs": [ + { + "_index": "index-1", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "index-1", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ], + "pipeline_substitutions": { + "my-pipeline": { + "processors": [ + ] + } + } + } + - length: { docs: 2 } + - match: { docs.0.doc._index: "index-1" } + - match: { docs.0.doc._source.foo: "bar" } + - match: { docs.0.doc.executed_pipelines: ["my-pipeline", "my-final-pipeline"] } + - match: { docs.1.doc._index: "index-1" } + - match: { docs.1.doc._source.foo: "rab" } + - match: { docs.1.doc.executed_pipelines: ["my-pipeline", "my-final-pipeline"] } + +--- +"Test bad pipeline substitution": + + - skip: + features: headers + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/102339" + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-pipeline" + body: > + { + "processors": [ + ] + } + - match: { acknowledged: true } + + - do: + indices.put_index_template: + name: my-template + body: + index_patterns: index-* + template: + settings: + default_pipeline: "my-pipeline" + + - do: + catch: "request" + headers: + Content-Type: application/json + simulate.ingest: + body: > + { + "docs": [ + { + "_index": "index-1", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "index-1", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ], + "pipeline_substitutions": { + "my-pipeline": { + "processors": [ + { + "non-existent-processor": { + } + } + ] + } + } 
+ } + - match: { status: 500 } + +--- +"Test index in path": + + - skip: + features: headers + + - do: + headers: + Content-Type: application/json + simulate.ingest: + index: "test-index" + body: > + { + "docs": [ + { + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_id": "id", + "_source": { + "foo": "rab" + } + } + ] + } + - length: { docs: 2 } + - match: { docs.0.doc._index: "test-index" } + - match: { docs.0.doc._source.foo: "bar" } + - match: { docs.0.doc.executed_pipelines: [] } + - match: { docs.1.doc._index: "test-index" } + - match: { docs.1.doc._source.foo: "rab" } + - match: { docs.1.doc.executed_pipelines: [] } + +--- +"Test pipeline in query param": + + - skip: + features: headers + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-pipeline" + body: > + { + "processors": [ + ] + } + - match: { acknowledged: true } + + - do: + headers: + Content-Type: application/json + simulate.ingest: + pipeline: "my-pipeline" + body: > + { + "docs": [ + { + "_index": "index-1", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "index-2", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ] + } + - length: { docs: 2 } + - match: { docs.0.doc._index: "index-1" } + - match: { docs.0.doc._source.foo: "bar" } + - match: { docs.0.doc.executed_pipelines: ["my-pipeline"] } + - match: { docs.1.doc._index: "index-2" } + - match: { docs.1.doc._source.foo: "rab" } + - match: { docs.1.doc.executed_pipelines: ["my-pipeline"] } diff --git a/server/build.gradle b/server/build.gradle index 0e154d2287b56..01879e232634b 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -141,9 +141,11 @@ sourceSets.main.compiledBy(generateModulesList, generatePluginsList) if (BuildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' + systemProperty 'es.failure_store_feature_flag_enabled', 'true' } tasks.named("internalClusterTest").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' + systemProperty 'es.failure_store_feature_flag_enabled', 'true' } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java index 5df0e374ee9db..e01241da4db91 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java @@ -45,8 +45,8 @@ import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.explain.ExplainAction; import org.elasticsearch.action.explain.ExplainRequest; +import org.elasticsearch.action.explain.TransportExplainAction; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.get.GetAction; @@ -233,7 +233,7 @@ public void testUpdate() { interceptTransportActions(updateShardActions); String indexOrAlias = randomIndexOrAlias(); - client().prepareIndex(indexOrAlias).setId("id").setSource("field", "value").get(); + prepareIndex(indexOrAlias).setId("id").setSource("field", "value").get(); UpdateRequest updateRequest = new UpdateRequest(indexOrAlias, "id").doc(Requests.INDEX_CONTENT_TYPE, "field1", "value1"); UpdateResponse updateResponse = 
internalCluster().coordOnlyNodeClient().update(updateRequest).actionGet(); assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); @@ -263,7 +263,7 @@ public void testUpdateDelete() { interceptTransportActions(updateShardActions); String indexOrAlias = randomIndexOrAlias(); - client().prepareIndex(indexOrAlias).setId("id").setSource("field", "value").get(); + prepareIndex(indexOrAlias).setId("id").setSource("field", "value").get(); UpdateRequest updateRequest = new UpdateRequest(indexOrAlias, "id").script( new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "ctx.op='delete'", Collections.emptyMap()) ); @@ -317,7 +317,7 @@ public void testGet() { } public void testExplain() { - String explainShardAction = ExplainAction.NAME + "[s]"; + String explainShardAction = TransportExplainAction.TYPE.name() + "[s]"; interceptTransportActions(explainShardAction); ExplainRequest explainRequest = new ExplainRequest(randomIndexOrAlias(), "id").query(QueryBuilders.matchAllQuery()); @@ -554,7 +554,7 @@ public void testSearchQueryThenFetch() throws Exception { String[] randomIndicesOrAliases = randomIndicesOrAliases(); for (int i = 0; i < randomIndicesOrAliases.length; i++) { - client().prepareIndex(randomIndicesOrAliases[i]).setId("id-" + i).setSource("field", "value").get(); + prepareIndex(randomIndicesOrAliases[i]).setId("id-" + i).setSource("field", "value").get(); } refresh(); @@ -584,7 +584,7 @@ public void testSearchDfsQueryThenFetch() throws Exception { String[] randomIndicesOrAliases = randomIndicesOrAliases(); for (int i = 0; i < randomIndicesOrAliases.length; i++) { - client().prepareIndex(randomIndicesOrAliases[i]).setId("id-" + i).setSource("field", "value").get(); + prepareIndex(randomIndicesOrAliases[i]).setId("id-" + i).setSource("field", "value").get(); } refresh(); @@ -609,10 +609,6 @@ private static void assertSameIndices(IndicesRequest originalRequest, String... assertSameIndices(originalRequest, false, actions); } - private static void assertSameIndicesOptionalRequests(IndicesRequest originalRequest, String... actions) { - assertSameIndices(originalRequest, true, actions); - } - private static void assertSameIndices(IndicesRequest originalRequest, boolean optional, String... 
actions) { for (String action : actions) { List requests = consumeTransportRequests(action); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/RejectionActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/RejectionActionIT.java index c7082f7979ed9..a30d654900c20 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/RejectionActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/RejectionActionIT.java @@ -43,7 +43,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { public void testSimulatedSearchRejectionLoad() throws Throwable { for (int i = 0; i < 10; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "1").get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "1").get(); } int numberOfAsyncOps = randomIntBetween(200, 700); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java index 6a3a7ccfe221a..05e3b81c3683f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java @@ -100,9 +100,9 @@ public void onFailure(Exception e) { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("field1", "value1"), - client().prepareIndex("test").setId("2").setSource("field1", "value2"), - client().prepareIndex("test").setId("3").setSource("field1", "value3") + prepareIndex("test").setId("1").setSource("field1", "value1"), + prepareIndex("test").setId("2").setSource("field1", "value2"), + prepareIndex("test").setId("3").setSource("field1", "value3") ); ensureSearchable(); while (latch.getCount() > 0) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java index bbd1ea67b7ef8..07c6ba4945eaa 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java @@ -225,7 +225,6 @@ public void testCancelTaskMultipleTimes() throws Exception { assertFalse(cancelFuture.isDone()); allowEntireRequest(rootRequest); assertThat(cancelFuture.actionGet().getTaskFailures(), empty()); - assertThat(cancelFuture.actionGet().getTaskFailures(), empty()); waitForRootTask(mainTaskFuture, false); CancelTasksResponse cancelError = clusterAdmin().prepareCancelTasks() .setTargetTaskId(taskId) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index d17ae1c7fce0d..502c60b4a3402 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -26,8 +26,8 @@ import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.index.IndexAction; -import org.elasticsearch.action.search.SearchAction; import 
org.elasticsearch.action.search.SearchTransportService; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportReplicationActionTests; @@ -304,7 +304,7 @@ public void testTransportBulkTasks() { ensureGreen("test"); // Make sure all shards are allocated to catch replication tasks // ensures the mapping is available on all nodes so we won't retry the request (in case replicas don't have the right mapping). indicesAdmin().preparePutMapping("test").setSource("foo", "type=keyword").get(); - client().prepareBulk().add(client().prepareIndex("test").setId("test_id").setSource("{\"foo\": \"bar\"}", XContentType.JSON)).get(); + client().prepareBulk().add(prepareIndex("test").setId("test_id").setSource("{\"foo\": \"bar\"}", XContentType.JSON)).get(); // the bulk operation should produce one main task List topTask = findEvents(BulkAction.NAME, Tuple::v1); @@ -349,12 +349,11 @@ public void testTransportBulkTasks() { } public void testSearchTaskDescriptions() { - registerTaskManagerListeners(SearchAction.NAME); // main task - registerTaskManagerListeners(SearchAction.NAME + "[*]"); // shard task + registerTaskManagerListeners(TransportSearchAction.TYPE.name()); // main task + registerTaskManagerListeners(TransportSearchAction.TYPE.name() + "[*]"); // shard task createIndex("test"); ensureGreen("test"); // Make sure all shards are allocated to catch replication tasks - client().prepareIndex("test") - .setId("test_id") + prepareIndex("test").setId("test_id") .setSource("{\"foo\": \"bar\"}", XContentType.JSON) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); @@ -366,24 +365,23 @@ public void testSearchTaskDescriptions() { assertNoFailures(client().filterWithHeader(headers).prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())); // the search operation should produce one main task - List mainTask = findEvents(SearchAction.NAME, Tuple::v1); + List mainTask = findEvents(TransportSearchAction.TYPE.name(), Tuple::v1); assertEquals(1, mainTask.size()); assertThat(mainTask.get(0).description(), startsWith("indices[test], search_type[")); assertThat(mainTask.get(0).description(), containsString("\"query\":{\"match_all\"")); assertTaskHeaders(mainTask.get(0)); // check that if we have any shard-level requests they all have non-zero length description - List shardTasks = findEvents(SearchAction.NAME + "[*]", Tuple::v1); + List shardTasks = findEvents(TransportSearchAction.TYPE.name() + "[*]", Tuple::v1); for (TaskInfo taskInfo : shardTasks) { assertThat(taskInfo.parentTaskId(), notNullValue()); assertEquals(mainTask.get(0).taskId(), taskInfo.parentTaskId()); assertTaskHeaders(taskInfo); switch (taskInfo.action()) { - case SearchTransportService.QUERY_ACTION_NAME, SearchTransportService.QUERY_CAN_MATCH_NAME, - SearchTransportService.DFS_ACTION_NAME -> assertTrue( - taskInfo.description(), - Regex.simpleMatch("shardId[[test][*]]", taskInfo.description()) - ); + case SearchTransportService.QUERY_ACTION_NAME, SearchTransportService.DFS_ACTION_NAME -> assertTrue( + taskInfo.description(), + Regex.simpleMatch("shardId[[test][*]]", taskInfo.description()) + ); case SearchTransportService.QUERY_ID_ACTION_NAME -> assertTrue( taskInfo.description(), Regex.simpleMatch("id[*], indices[test]", taskInfo.description()) @@ -449,7 +447,7 @@ public void onTaskRegistered(Task task) { } // Need to run the task in a 
separate thread because node client's .execute() is blocked by our task listener index = new Thread(() -> { - DocWriteResponse indexResponse = client().prepareIndex("test").setSource("test", "test").get(); + DocWriteResponse indexResponse = prepareIndex("test").setSource("test", "test").get(); assertArrayEquals(ReplicationResponse.NO_FAILURES, indexResponse.getShardInfo().getFailures()); }); index.start(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java index ff43db96a0057..3aee1fdf505fe 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java @@ -62,7 +62,7 @@ public void testVerifyRepositoryWithBlocks() { // This test checks that the Get Repository operation is never blocked, even if the cluster is read only. try { setClusterReadOnly(true); - VerifyRepositoryResponse response = clusterAdmin().prepareVerifyRepository("test-repo-blocks").execute().actionGet(); + VerifyRepositoryResponse response = clusterAdmin().prepareVerifyRepository("test-repo-blocks").get(); assertThat(response.getNodes().size(), equalTo(cluster().numDataAndMasterNodes())); } finally { setClusterReadOnly(false); @@ -100,7 +100,7 @@ public void testGetRepositoryWithBlocks() { // This test checks that the Get Repository operation is never blocked, even if the cluster is read only. try { setClusterReadOnly(true); - GetRepositoriesResponse response = clusterAdmin().prepareGetRepositories("test-repo-blocks").execute().actionGet(); + GetRepositoriesResponse response = clusterAdmin().prepareGetRepositories("test-repo-blocks").get(); assertThat(response.repositories(), hasSize(1)); } finally { setClusterReadOnly(false); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java index d3dbccbb6d6e5..b6b0b2e54e691 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java @@ -46,11 +46,11 @@ protected void setUpRepository() throws Exception { int docs = between(10, 100); for (int i = 0; i < docs; i++) { - client().prepareIndex(INDEX_NAME).setSource("test", "init").execute().actionGet(); + prepareIndex(INDEX_NAME).setSource("test", "init").get(); } docs = between(10, 100); for (int i = 0; i < docs; i++) { - client().prepareIndex(OTHER_INDEX_NAME).setSource("test", "init").execute().actionGet(); + prepareIndex(OTHER_INDEX_NAME).setSource("test", "init").get(); } logger.info("--> register a repository"); @@ -69,8 +69,7 @@ protected void setUpRepository() throws Exception { CreateSnapshotResponse snapshotResponse = clusterAdmin().prepareCreateSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME) .setIncludeGlobalState(true) .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(snapshotResponse.status(), equalTo(RestStatus.OK)); ensureSearchable(); } @@ -90,8 +89,7 @@ public void testCreateSnapshotWithBlocks() { logger.info("--> creating a snapshot is allowed when the cluster is not read only"); 
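
The recurring cleanup in the snapshot and repository test hunks collapses the verbose `.execute().actionGet()` chain into the equivalent `.get()` shortcut on the request builder. A minimal sketch of the equivalence, reusing the test's own `clusterAdmin()`, `REPOSITORY_NAME`, and `SNAPSHOT_NAME` helpers (variable names here are illustrative only):

```java
// In the ES test framework, ActionRequestBuilder#get() is implemented as
// execute().actionGet(), so these two chains block on the same response.
CreateSnapshotResponse verbose = clusterAdmin().prepareCreateSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME)
    .setWaitForCompletion(true)
    .execute()
    .actionGet();

CreateSnapshotResponse concise = clusterAdmin().prepareCreateSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME)
    .setWaitForCompletion(true)
    .get();
```

Both forms synchronously wait for the snapshot to complete; the diff simply standardizes on the shorter one.
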
CreateSnapshotResponse response = clusterAdmin().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(response.status(), equalTo(RestStatus.OK)); } @@ -153,8 +151,7 @@ public void testRestoreSnapshotWithBlocks() { logger.info("--> creating a snapshot is allowed when the cluster is not read only"); RestoreSnapshotResponse response = clusterAdmin().prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME) .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(response.status(), equalTo(RestStatus.OK)); assertTrue(indexExists(INDEX_NAME)); assertTrue(indexExists(OTHER_INDEX_NAME)); @@ -164,7 +161,7 @@ public void testGetSnapshotWithBlocks() { // This test checks that the Get Snapshot operation is never blocked, even if the cluster is read only. try { setClusterReadOnly(true); - GetSnapshotsResponse response = clusterAdmin().prepareGetSnapshots(REPOSITORY_NAME).execute().actionGet(); + GetSnapshotsResponse response = clusterAdmin().prepareGetSnapshots(REPOSITORY_NAME).get(); assertThat(response.getSnapshots(), hasSize(1)); assertThat(response.getSnapshots().get(0).snapshotId().getName(), equalTo(SNAPSHOT_NAME)); } finally { @@ -176,10 +173,7 @@ public void testSnapshotStatusWithBlocks() { // This test checks that the Snapshot Status operation is never blocked, even if the cluster is read only. try { setClusterReadOnly(true); - SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(REPOSITORY_NAME) - .setSnapshots(SNAPSHOT_NAME) - .execute() - .actionGet(); + SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(REPOSITORY_NAME).setSnapshots(SNAPSHOT_NAME).get(); assertThat(response.getSnapshots(), hasSize(1)); assertThat(response.getSnapshots().get(0).getState().completed(), equalTo(true)); } finally { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java index 3ace029d57521..4d37f75894d56 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java @@ -37,8 +37,7 @@ public void testClearIndicesCacheWithBlocks() { .setFieldDataCache(true) .setQueryCache(true) .setFieldDataCache(true) - .execute() - .actionGet(); + .get(); assertNoFailures(clearIndicesCacheResponse); assertThat(clearIndicesCacheResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards)); } finally { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java index e5edeccbad55d..c0d62ba54621a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java @@ -116,7 +116,7 @@ public void testWriteToAliasPrimaryAutoCreatedFirst() throws Exception { client().execute(AutoCreateAction.INSTANCE, request).get(); } - DocWriteResponse response = client().prepareIndex(INDEX_NAME).setSource("{\"foo\":\"bar\"}", XContentType.JSON).get(); + DocWriteResponse response = 
prepareIndex(INDEX_NAME).setSource("{\"foo\":\"bar\"}", XContentType.JSON).get(); assertThat(response.getResult(), equalTo(DocWriteResponse.Result.CREATED)); } @@ -135,7 +135,7 @@ public void testWriteToAliasSecondaryAutoCreatedFirst() throws Exception { client().execute(AutoCreateAction.INSTANCE, request).get(); } - DocWriteResponse response = client().prepareIndex(INDEX_NAME).setSource("{\"foo\":\"bar\"}", XContentType.JSON).get(); + DocWriteResponse response = prepareIndex(INDEX_NAME).setSource("{\"foo\":\"bar\"}", XContentType.JSON).get(); assertThat(response.getResult(), equalTo(DocWriteResponse.Result.CREATED)); } @@ -205,18 +205,20 @@ public void testAutoCreateSystemAliasViaV1TemplateAllowsTemplates() throws Excep } private String autoCreateSystemAliasViaComposableTemplate(String indexName) throws Exception { - ComposableIndexTemplate cit = new ComposableIndexTemplate( - Collections.singletonList(indexName + "*"), - new Template( - null, - null, - Map.of(indexName + "-composable-alias", AliasMetadata.builder(indexName + "-composable-alias").build()) - ), - Collections.emptyList(), - 4L, - 5L, - Collections.emptyMap() - ); + ComposableIndexTemplate cit = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList(indexName + "*")) + .template( + new Template( + null, + null, + Map.of(indexName + "-composable-alias", AliasMetadata.builder(indexName + "-composable-alias").build()) + ) + ) + .componentTemplates(Collections.emptyList()) + .priority(4L) + .version(5L) + .metadata(Collections.emptyMap()) + .build(); assertAcked( client().execute( PutComposableIndexTemplateAction.INSTANCE, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java index 93d12c686297f..d006192579ead 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java @@ -39,7 +39,7 @@ public void testCreateCloneIndex() { ).get(); final int docs = randomIntBetween(0, 128); for (int i = 0; i < docs; i++) { - client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } internalCluster().ensureAtLeastNumDataNodes(2); // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node @@ -89,7 +89,7 @@ public void testCreateCloneIndex() { } for (int i = docs; i < 2 * docs; i++) { - client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } flushAndRefresh(); assertHitCount(prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")), 2 * docs); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index d19c61f97efd9..b4d0286b74077 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ 
-213,7 +213,7 @@ public void testCreateAndDeleteIndexConcurrently() throws InterruptedException { final CountDownLatch latch = new CountDownLatch(1); int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test").setSource("index_version", indexVersion.get()).get(); + prepareIndex("test").setSource("index_version", indexVersion.get()).get(); } synchronized (indexVersionLock) { // not necessarily needed here but for completeness we lock here too indexVersion.incrementAndGet(); @@ -226,7 +226,7 @@ public void onResponse(AcknowledgedResponse deleteIndexResponse) { public void run() { try { // recreate that index - client().prepareIndex("test").setSource("index_version", indexVersion.get()).get(); + prepareIndex("test").setSource("index_version", indexVersion.get()).get(); synchronized (indexVersionLock) { // we sync here since we have to ensure that all indexing operations below for a given ID are done before // we increment the index version otherwise a doc that is in-flight could make it into an index that it @@ -252,10 +252,7 @@ public void onFailure(Exception e) { for (int i = 0; i < numDocs; i++) { try { synchronized (indexVersionLock) { - client().prepareIndex("test") - .setSource("index_version", indexVersion.get()) - .setTimeout(TimeValue.timeValueSeconds(10)) - .get(); + prepareIndex("test").setSource("index_version", indexVersion.get()).setTimeout(TimeValue.timeValueSeconds(10)).get(); } } catch (IndexNotFoundException inf) { // fine @@ -338,7 +335,7 @@ public void testInvalidPartitionSize() { .put("index.number_of_shards", shards) .put("index.number_of_routing_shards", shards) .put("index.routing_partition_size", partitionSize) - ).execute().actionGet(); + ).get(); } catch (IllegalStateException | IllegalArgumentException e) { return false; } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java index a0dffa8b7caa8..1c075442d99e6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java @@ -194,18 +194,20 @@ public void testCreateSystemAliasViaV1TemplateAllowsTemplates() throws Exception } private void createIndexWithComposableTemplates(String indexName, String primaryIndexName) throws Exception { - ComposableIndexTemplate cit = new ComposableIndexTemplate( - Collections.singletonList(indexName + "*"), - new Template( - null, - null, - Map.of(indexName + "-composable-alias", AliasMetadata.builder(indexName + "-composable-alias").build()) - ), - Collections.emptyList(), - 4L, - 5L, - Collections.emptyMap() - ); + ComposableIndexTemplate cit = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList(indexName + "*")) + .template( + new Template( + null, + null, + Map.of(indexName + "-composable-alias", AliasMetadata.builder(indexName + "-composable-alias").build()) + ) + ) + .componentTemplates(Collections.emptyList()) + .priority(4L) + .version(5L) + .metadata(Collections.emptyMap()) + .build(); assertAcked( client().execute( PutComposableIndexTemplateAction.INSTANCE, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java index b0ec5de81984a..8f6026da835b6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -81,10 +81,7 @@ public void testCreateShrinkIndexToN() { internalCluster().ensureAtLeastNumDataNodes(2); prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", shardSplits[0])).get(); for (int i = 0; i < 20; i++) { - client().prepareIndex("source") - .setId(Integer.toString(i)) - .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON) - .get(); + prepareIndex("source").setId(Integer.toString(i)).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } Map dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes(); assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); @@ -109,8 +106,7 @@ public void testCreateShrinkIndexToN() { assertHitCount(prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), 20); for (int i = 0; i < 20; i++) { // now update - client().prepareIndex("first_shrink") - .setId(Integer.toString(i)) + prepareIndex("first_shrink").setId(Integer.toString(i)) .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON) .get(); } @@ -142,8 +138,7 @@ public void testCreateShrinkIndexToN() { assertHitCount(prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), 20); for (int i = 0; i < 20; i++) { // now update - client().prepareIndex("second_shrink") - .setId(Integer.toString(i)) + prepareIndex("second_shrink").setId(Integer.toString(i)) .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON) .get(); } @@ -238,7 +233,7 @@ public void testCreateShrinkIndex() { ).get(); final int docs = randomIntBetween(0, 128); for (int i = 0; i < docs; i++) { - client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } Map dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes(); assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); @@ -318,7 +313,7 @@ public void testCreateShrinkIndex() { } for (int i = docs; i < 2 * docs; i++) { - client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } flushAndRefresh(); assertHitCount(prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")), 2 * docs); @@ -344,7 +339,7 @@ public void testCreateShrinkIndexFails() throws Exception { Settings.builder().put(indexSettings()).put("number_of_shards", randomIntBetween(2, 7)).put("number_of_replicas", 0) ).get(); for (int i = 0; i < 20; i++) { - client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } Map dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes(); assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); @@ -424,10 +419,7 
@@ public void testCreateShrinkWithIndexSort() throws Exception { .put("number_of_replicas", 0) ).setMapping("id", "type=keyword,doc_values=true").get(); for (int i = 0; i < 20; i++) { - client().prepareIndex("source") - .setId(Integer.toString(i)) - .setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON) - .get(); + prepareIndex("source").setId(Integer.toString(i)).setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON).get(); } Map dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes(); assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); @@ -465,14 +457,14 @@ public void testCreateShrinkWithIndexSort() throws Exception { assertNoResizeSourceIndexSettings("target"); flushAndRefresh(); - GetSettingsResponse settingsResponse = indicesAdmin().prepareGetSettings("target").execute().actionGet(); + GetSettingsResponse settingsResponse = indicesAdmin().prepareGetSettings("target").get(); assertEquals(settingsResponse.getSetting("target", "index.sort.field"), "id"); assertEquals(settingsResponse.getSetting("target", "index.sort.order"), "desc"); assertSortedSegments("target", expectedIndexSort); // ... and that the index sort is also applied to updates for (int i = 20; i < 40; i++) { - client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } flushAndRefresh(); assertSortedSegments("target", expectedIndexSort); @@ -483,7 +475,7 @@ public void testShrinkCommitsMergeOnIdle() throws Exception { Settings.builder().put(indexSettings()).put("index.number_of_replicas", 0).put("number_of_shards", 5) ).get(); for (int i = 0; i < 30; i++) { - client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } indicesAdmin().prepareFlush("source").get(); Map dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes(); @@ -610,8 +602,7 @@ static void assertNoResizeSourceIndexSettings(final String index) { .clear() .setMetadata(true) .setRoutingTable(true) - .execute() - .actionGet(); + .get(); IndexRoutingTable indexRoutingTable = clusterStateResponse.getState().routingTable().index(index); assertThat("Index " + index + " should have all primaries started", indexRoutingTable.allPrimaryShardsActive(), equalTo(true)); IndexMetadata indexMetadata = clusterStateResponse.getState().metadata().index(index); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index e5ff2a6ce1cc5..56bbe135de66b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -128,8 +128,7 @@ private void splitToN(int sourceShards, int firstSplitShards, int secondSplitSha BiFunction indexFunc = (index, id) -> { try { - return client().prepareIndex(index) - .setId(Integer.toString(id)) + return prepareIndex(index).setId(Integer.toString(id)) .setSource( jsonBuilder().startObject() .field("foo", "bar") @@ -344,7 +343,7 @@ public void testCreateSplitIndex() throws Exception { ).get(); final int docs = 
randomIntBetween(0, 128); for (int i = 0; i < docs; i++) { - client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } internalCluster().ensureAtLeastNumDataNodes(2); // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node @@ -413,7 +412,7 @@ public void testCreateSplitIndex() throws Exception { } for (int i = docs; i < 2 * docs; i++) { - client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } flushAndRefresh(); assertHitCount(prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")), 2 * docs); @@ -446,10 +445,7 @@ public void testCreateSplitWithIndexSort() throws Exception { .put("number_of_replicas", 0) ).setMapping("id", "type=keyword,doc_values=true").get(); for (int i = 0; i < 20; i++) { - client().prepareIndex("source") - .setId(Integer.toString(i)) - .setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON) - .get(); + prepareIndex("source").setId(Integer.toString(i)).setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON).get(); } // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due @@ -480,14 +476,14 @@ public void testCreateSplitWithIndexSort() throws Exception { ); ensureGreen(); flushAndRefresh(); - GetSettingsResponse settingsResponse = indicesAdmin().prepareGetSettings("target").execute().actionGet(); + GetSettingsResponse settingsResponse = indicesAdmin().prepareGetSettings("target").get(); assertEquals(settingsResponse.getSetting("target", "index.sort.field"), "id"); assertEquals(settingsResponse.getSetting("target", "index.sort.order"), "desc"); assertSortedSegments("target", expectedIndexSort); // ... 
and that the index sort is also applied to updates for (int i = 20; i < 40; i++) { - client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } flushAndRefresh(); assertSortedSegments("target", expectedIndexSort); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java index dc5cc49092f7a..5df1ceea6bfce 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java @@ -44,15 +44,12 @@ public void testDeleteIndexWithBlocks() { public void testDeleteIndexOnIndexReadOnlyAllowDeleteSetting() { createIndex("test"); ensureGreen("test"); - client().prepareIndex().setIndex("test").setId("1").setSource("foo", "bar").get(); + prepareIndex("test").setId("1").setSource("foo", "bar").get(); refresh(); try { updateIndexSettings(Settings.builder().put(IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE, true), "test"); assertSearchHits(prepareSearch(), "1"); - assertBlocked( - client().prepareIndex().setIndex("test").setId("2").setSource("foo", "bar"), - IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK - ); + assertBlocked(prepareIndex("test").setId("2").setSource("foo", "bar"), IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK); assertSearchHits(prepareSearch(), "1"); assertAcked(indicesAdmin().prepareDelete("test")); } finally { @@ -70,7 +67,7 @@ public void testClusterBlockMessageHasIndexName() { updateIndexSettings(Settings.builder().put(IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE, true), "test"); ClusterBlockException e = expectThrows( ClusterBlockException.class, - () -> client().prepareIndex().setIndex("test").setId("1").setSource("foo", "bar").get() + () -> prepareIndex("test").setId("1").setSource("foo", "bar").get() ); assertEquals( "index [test] blocked by: [TOO_MANY_REQUESTS/12/disk usage exceeded flood-stage watermark, " @@ -85,15 +82,12 @@ public void testClusterBlockMessageHasIndexName() { public void testDeleteIndexOnClusterReadOnlyAllowDeleteSetting() { createIndex("test"); ensureGreen("test"); - client().prepareIndex().setIndex("test").setId("1").setSource("foo", "bar").get(); + prepareIndex("test").setId("1").setSource("foo", "bar").get(); refresh(); try { updateClusterSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), true)); assertSearchHits(prepareSearch(), "1"); - assertBlocked( - client().prepareIndex().setIndex("test").setId("2").setSource("foo", "bar"), - Metadata.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK - ); + assertBlocked(prepareIndex("test").setId("2").setSource("foo", "bar"), Metadata.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK); assertBlocked( indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.number_of_replicas", 2)), Metadata.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java index 94c08bd7e8162..d6c337dec53b8 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java @@ -115,7 +115,7 @@ public void testSimple() throws Exception { .field("english_text", English.intToEnglish(value)) .field("value", value) .endObject(); - client().prepareIndex(index).setId("id-" + i).setSource(doc).get(); + prepareIndex(index).setId("id-" + i).setSource(doc).get(); } final boolean forceNorms = randomBoolean(); if (forceNorms) { @@ -123,11 +123,11 @@ public void testSimple() throws Exception { .startObject() .field("english_text", "A long sentence to make sure that norms is non-zero") .endObject(); - client().prepareIndex(index).setId("id").setSource(doc).get(); + prepareIndex(index).setId("id").setSource(doc).get(); } // Force merge to ensure that there are more than one numeric value to justify doc value. client().admin().indices().prepareForceMerge(index).setMaxNumSegments(1).get(); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); client().execute( AnalyzeIndexDiskUsageAction.INSTANCE, new AnalyzeIndexDiskUsageRequest(new String[] { index }, AnalyzeIndexDiskUsageRequest.DEFAULT_INDICES_OPTIONS, true), @@ -167,7 +167,7 @@ public void testFailOnFlush() throws Exception { .field("english_text", English.intToEnglish(value)) .field("value", value) .endObject(); - client().prepareIndex(indexName).setId("id-" + i).setSource(doc).get(); + prepareIndex(indexName).setId("id-" + i).setSource(doc).get(); } Index index = clusterService().state().metadata().index(indexName).getIndex(); List failedShards = randomSubsetOf( @@ -203,7 +203,7 @@ public void testManyShards() throws Exception { .field("english_text", English.intToEnglish(value)) .field("value", value) .endObject(); - client().prepareIndex(indexName).setId("id-" + i).setSource(doc).get(); + prepareIndex(indexName).setId("id-" + i).setSource(doc).get(); } } @@ -237,7 +237,7 @@ public void testFailingTargetShards() throws Exception { .field("english_text", English.intToEnglish(value)) .field("value", value) .endObject(); - client().prepareIndex(indexName).setId("id-" + i).setSource(doc).get(); + prepareIndex(indexName).setId("id-" + i).setSource(doc).get(); } final Index index = resolveIndex(indexName); final List failingShards = randomSubsetOf( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java index 2dc2ef0e90009..69d4f7aaef329 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java @@ -31,7 +31,7 @@ public void testFlushWithBlocks() { int docs = between(10, 100); for (int i = 0; i < docs; i++) { - client().prepareIndex("test").setId("" + i).setSource("test", "init").execute().actionGet(); + prepareIndex("test").setId("" + i).setSource("test", "init").get(); } // Request is not blocked @@ -44,7 +44,7 @@ public void testFlushWithBlocks() { )) { try { enableIndexBlock("test", blockSetting); - FlushResponse response = indicesAdmin().prepareFlush("test").execute().actionGet(); + FlushResponse response = indicesAdmin().prepareFlush("test").get(); assertNoFailures(response); 
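
One further modernization appears in the `IndexDiskUsageAnalyzerIT` hunk above: the static factory `PlainActionFuture.newFuture()` is replaced by a plain constructor call. A short sketch of the async pattern these tests use, assuming `AnalyzeIndexDiskUsageResponse` as the response type and an `index` name in scope as in that test (otherwise the request construction mirrors the hunk):

```java
// PlainActionFuture is both an ActionListener and a Future: pass it as the
// listener to execute(), then block on actionGet() for the response.
PlainActionFuture<AnalyzeIndexDiskUsageResponse> future = new PlainActionFuture<>();
client().execute(
    AnalyzeIndexDiskUsageAction.INSTANCE,
    new AnalyzeIndexDiskUsageRequest(new String[] { index }, AnalyzeIndexDiskUsageRequest.DEFAULT_INDICES_OPTIONS, true),
    future
);
AnalyzeIndexDiskUsageResponse response = future.actionGet();
```
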
assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); } finally { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java index 80d1b95442f44..a3474afc96c51 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java @@ -43,14 +43,14 @@ public void testForceMergeWithBlocks() { int docs = between(10, 100); for (int i = 0; i < docs; i++) { - client().prepareIndex("test").setId("" + i).setSource("test", "init").execute().actionGet(); + prepareIndex("test").setId("" + i).setSource("test", "init").get(); } // Request is not blocked for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY_ALLOW_DELETE)) { try { enableIndexBlock("test", blockSetting); - ForceMergeResponse response = indicesAdmin().prepareForceMerge("test").execute().actionGet(); + ForceMergeResponse response = indicesAdmin().prepareForceMerge("test").get(); assertNoFailures(response); assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); } finally { @@ -70,7 +70,7 @@ public void testForceMergeWithBlocks() { // Merging all indices is blocked when the cluster is read-only try { - ForceMergeResponse response = indicesAdmin().prepareForceMerge().execute().actionGet(); + ForceMergeResponse response = indicesAdmin().prepareForceMerge().get(); assertNoFailures(response); assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java index da750103d2943..41abfc1219199 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java @@ -39,7 +39,7 @@ public void testRefreshWithBlocks() { )) { try { enableIndexBlock("test", blockSetting); - RefreshResponse response = indicesAdmin().prepareRefresh("test").execute().actionGet(); + RefreshResponse response = indicesAdmin().prepareRefresh("test").get(); assertNoFailures(response); assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); } finally { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index d7e4e42b73554..7ae7fc5c4a180 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -794,7 +794,9 @@ public void testRolloverConcurrently() throws Exception { null, null ); - putTemplateRequest.indexTemplate(new ComposableIndexTemplate(List.of("test-*"), template, null, 100L, null, null)); + putTemplateRequest.indexTemplate( + ComposableIndexTemplate.builder().indexPatterns(List.of("test-*")).template(template).priority(100L).build() + ); assertAcked(client().execute(PutComposableIndexTemplateAction.INSTANCE, 
putTemplateRequest).actionGet()); final CyclicBarrier barrier = new CyclicBarrier(numOfThreads); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksIT.java index 3a7df923a3e0c..0705e1216af43 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksIT.java @@ -29,7 +29,7 @@ public void testIndicesSegmentsWithBlocks() { int docs = between(10, 100); for (int i = 0; i < docs; i++) { - client().prepareIndex("test-blocks").setId("" + i).setSource("test", "init").execute().actionGet(); + prepareIndex("test-blocks").setId("" + i).setSource("test", "init").get(); } indicesAdmin().prepareFlush("test-blocks").get(); @@ -42,7 +42,7 @@ public void testIndicesSegmentsWithBlocks() { )) { try { enableIndexBlock("test-blocks", blockSetting); - IndicesSegmentResponse response = indicesAdmin().prepareSegments("test-blocks").execute().actionGet(); + IndicesSegmentResponse response = indicesAdmin().prepareSegments("test-blocks").get(); assertNoFailures(response); } finally { disableIndexBlock("test-blocks", blockSetting); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java index ef86bfa0bd485..310f9394f60c1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java @@ -31,7 +31,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.ExecutionException; import java.util.function.Predicate; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -197,14 +196,14 @@ public void testCorruptedShards() throws Exception { enableAllocation(index); } - private void indexRandomData(String index) throws ExecutionException, InterruptedException { + private void indexRandomData(String index) throws InterruptedException { int numDocs = scaledRandomIntBetween(10, 20); IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex(index).setSource("field", "value"); + builders[i] = prepareIndex(index).setSource("field", "value"); } indexRandom(true, builders); - indicesAdmin().prepareFlush().setForce(true).execute().actionGet(); + indicesAdmin().prepareFlush().setForce(true).get(); } private static final class IndexNodePredicate implements Predicate { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksIT.java index 93214372ef201..95b98722a8423 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksIT.java @@ -36,7 +36,7 @@ public void testIndicesStatsWithBlocks() { )) { try { enableIndexBlock("ro", 
blockSetting); - IndicesStatsResponse indicesStatsResponse = indicesAdmin().prepareStats("ro").execute().actionGet(); + IndicesStatsResponse indicesStatsResponse = indicesAdmin().prepareStats("ro").get(); assertNotNull(indicesStatsResponse.getIndex("ro")); } finally { disableIndexBlock("ro", blockSetting); @@ -46,7 +46,7 @@ public void testIndicesStatsWithBlocks() { // Request is blocked try { enableIndexBlock("ro", IndexMetadata.SETTING_BLOCKS_METADATA); - indicesAdmin().prepareStats("ro").execute().actionGet(); + indicesAdmin().prepareStats("ro").get(); fail("Exists should fail when " + IndexMetadata.SETTING_BLOCKS_METADATA + " is true"); } catch (ClusterBlockException e) { // Ok, a ClusterBlockException is expected diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java index 224db253675d2..8bc9bac2543d3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java @@ -159,8 +159,7 @@ public void testDeleteIndexWhileIndexing() throws Exception { while (stopped.get() == false && docID.get() < 5000) { String id = Integer.toString(docID.incrementAndGet()); try { - DocWriteResponse response = client().prepareIndex(index) - .setId(id) + DocWriteResponse response = prepareIndex(index).setId(id) .setSource(Map.of("f" + randomIntBetween(1, 10), randomNonNegativeLong()), XContentType.JSON) .get(); assertThat(response.getResult(), is(oneOf(CREATED, UPDATED))); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessor2RetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessor2RetryIT.java index f1d4f6958f7f0..6a2ab41fae5d6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessor2RetryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessor2RetryIT.java @@ -152,9 +152,7 @@ public void afterBulk(long executionId, BulkRequest request, Exception failure) private static void indexDocs(BulkProcessor2 processor, int numDocs) { for (int i = 1; i <= numDocs; i++) { processor.add( - client().prepareIndex() - .setIndex(INDEX_NAME) - .setId(Integer.toString(i)) + prepareIndex(INDEX_NAME).setId(Integer.toString(i)) .setSource("field", randomRealisticUnicodeOfLengthBetween(1, 30)) .request() ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsIT.java index 7beaebcfb87b8..85b720a03478e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsIT.java @@ -28,12 +28,12 @@ public void testBulkProcessorAutoCreateRestrictions() { internalCluster().startNode(settings); createIndex("willwork"); - clusterAdmin().prepareHealth("willwork").setWaitForGreenStatus().execute().actionGet(); + clusterAdmin().prepareHealth("willwork").setWaitForGreenStatus().get(); BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); - bulkRequestBuilder.add(client().prepareIndex("willwork").setId("1").setSource("{\"foo\":1}", XContentType.JSON)); - 
bulkRequestBuilder.add(client().prepareIndex("wontwork").setId("2").setSource("{\"foo\":2}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("willwork").setId("3").setSource("{\"foo\":3}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("willwork").setId("1").setSource("{\"foo\":1}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("wontwork").setId("2").setSource("{\"foo\":2}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("willwork").setId("3").setSource("{\"foo\":3}", XContentType.JSON)); BulkResponse br = bulkRequestBuilder.get(); BulkItemResponse[] responses = br.getItems(); assertEquals(3, responses.length); @@ -52,7 +52,7 @@ public void testBulkProcessorAutoCreateRestrictions() { public void testIndexWithDisabledAutoCreateIndex() { updateClusterSettings(Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), randomFrom("-*", "+.*"))); final BulkItemResponse itemResponse = client().prepareBulk() - .add(client().prepareIndex("test-index").setSource("foo", "bar")) + .add(prepareIndex("test-index").setSource("foo", "bar")) .get() .getItems()[0]; assertThat(itemResponse.getFailure().getCause(), instanceOf(IndexNotFoundException.class)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java index 93f066d35bbc4..8bd3a6cf02868 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java @@ -157,9 +157,7 @@ private void assertRetriedCorrectly(CorrelatingBackoffPolicy internalPolicy, Obj private static void indexDocs(BulkProcessor processor, int numDocs) { for (int i = 1; i <= numDocs; i++) { processor.add( - client().prepareIndex() - .setIndex(INDEX_NAME) - .setId(Integer.toString(i)) + prepareIndex(INDEX_NAME).setId(Integer.toString(i)) .setSource("field", randomRealisticUnicodeOfLengthBetween(1, 30)) .request() ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java index 9433f93d91f58..00bd6ee7ee891 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java @@ -102,13 +102,12 @@ public void testBulkUpdateSimple() throws Exception { ensureGreen(); BulkResponse bulkResponse = client().prepareBulk() - .add(client().prepareIndex().setIndex(indexOrAlias()).setId("1").setSource("field", 1)) - .add(client().prepareIndex().setIndex(indexOrAlias()).setId("2").setSource("field", 2).setCreate(true)) - .add(client().prepareIndex().setIndex(indexOrAlias()).setId("3").setSource("field", 3)) - .add(client().prepareIndex().setIndex(indexOrAlias()).setId("4").setSource("field", 4)) - .add(client().prepareIndex().setIndex(indexOrAlias()).setId("5").setSource("field", 5)) - .execute() - .actionGet(); + .add(prepareIndex(indexOrAlias()).setId("1").setSource("field", 1)) + .add(prepareIndex(indexOrAlias()).setId("2").setSource("field", 2).setCreate(true)) + .add(prepareIndex(indexOrAlias()).setId("3").setSource("field", 3)) + .add(prepareIndex(indexOrAlias()).setId("4").setSource("field", 4)) + .add(prepareIndex(indexOrAlias()).setId("5").setSource("field", 5)) + .get(); 
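The RolloverIT hunk above follows a different pattern: a positional ComposableIndexTemplate constructor padded with nulls is replaced by its builder. A sketch of the two forms, with template and the index patterns taken from that hunk:

    // Before: six positional arguments, three of them null
    new ComposableIndexTemplate(List.of("test-*"), template, null, 100L, null, null);

    // After: the builder names each argument and omits the unused ones
    ComposableIndexTemplate.builder()
        .indexPatterns(List.of("test-*"))
        .template(template)
        .priority(100L)
        .build();

The builder form keeps call sites stable if the constructor's signature changes, which is presumably why the tests migrate to it.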
assertThat(bulkResponse.hasFailures(), equalTo(false)); assertThat(bulkResponse.getItems().length, equalTo(5)); @@ -141,17 +140,17 @@ public void testBulkUpdateSimple() throws Exception { assertThat(bulkResponse.getItems()[2].getResponse().getId(), equalTo("3")); assertThat(bulkResponse.getItems()[2].getResponse().getVersion(), equalTo(2L)); - GetResponse getResponse = client().prepareGet().setIndex("test").setId("1").execute().actionGet(); + GetResponse getResponse = client().prepareGet().setIndex("test").setId("1").get(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getVersion(), equalTo(2L)); assertThat(((Number) getResponse.getSource().get("field")).longValue(), equalTo(2L)); - getResponse = client().prepareGet().setIndex("test").setId("2").execute().actionGet(); + getResponse = client().prepareGet().setIndex("test").setId("2").get(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getVersion(), equalTo(2L)); assertThat(((Number) getResponse.getSource().get("field")).longValue(), equalTo(3L)); - getResponse = client().prepareGet().setIndex("test").setId("3").execute().actionGet(); + getResponse = client().prepareGet().setIndex("test").setId("3").get(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getVersion(), equalTo(2L)); assertThat(getResponse.getSource().get("field1").toString(), equalTo("test")); @@ -180,15 +179,15 @@ public void testBulkUpdateSimple() throws Exception { assertThat(bulkResponse.getItems()[2].getResponse().getIndex(), equalTo("test")); assertThat(bulkResponse.getItems()[2].getResponse().getVersion(), equalTo(3L)); - getResponse = client().prepareGet().setIndex("test").setId("6").execute().actionGet(); + getResponse = client().prepareGet().setIndex("test").setId("6").get(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getVersion(), equalTo(1L)); assertThat(((Number) getResponse.getSource().get("field")).longValue(), equalTo(0L)); - getResponse = client().prepareGet().setIndex("test").setId("7").execute().actionGet(); + getResponse = client().prepareGet().setIndex("test").setId("7").get(); assertThat(getResponse.isExists(), equalTo(false)); - getResponse = client().prepareGet().setIndex("test").setId("2").execute().actionGet(); + getResponse = client().prepareGet().setIndex("test").setId("2").get(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getVersion(), equalTo(3L)); assertThat(((Number) getResponse.getSource().get("field")).longValue(), equalTo(4L)); @@ -221,12 +220,12 @@ public void testBulkUpdateWithScriptedUpsertAndDynamicMappingUpdate() throws Exc assertThat(bulkResponse.getItems()[1].getResponse().getId(), equalTo("2")); assertThat(bulkResponse.getItems()[1].getResponse().getVersion(), equalTo(1L)); - GetResponse getResponse = client().prepareGet().setIndex("test").setId("1").execute().actionGet(); + GetResponse getResponse = client().prepareGet().setIndex("test").setId("1").get(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getVersion(), equalTo(1L)); assertThat(((Number) getResponse.getSource().get("field")).longValue(), equalTo(2L)); - getResponse = client().prepareGet().setIndex("test").setId("2").execute().actionGet(); + getResponse = client().prepareGet().setIndex("test").setId("2").get(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getVersion(), equalTo(1L)); assertThat(((Number) getResponse.getSource().get("field")).longValue(), equalTo(2L)); @@ 
-236,9 +235,9 @@ public void testBulkWithCAS() throws Exception { createIndex("test", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build()); ensureGreen(); BulkResponse bulkResponse = client().prepareBulk() - .add(client().prepareIndex("test").setId("1").setCreate(true).setSource("field", "1")) - .add(client().prepareIndex("test").setId("2").setCreate(true).setSource("field", "1")) - .add(client().prepareIndex("test").setId("1").setSource("field", "2")) + .add(prepareIndex("test").setId("1").setCreate(true).setSource("field", "1")) + .add(prepareIndex("test").setId("2").setCreate(true).setSource("field", "1")) + .add(prepareIndex("test").setId("1").setSource("field", "2")) .get(); assertEquals(DocWriteResponse.Result.CREATED, bulkResponse.getItems()[0].getResponse().getResult()); @@ -259,9 +258,9 @@ public void testBulkWithCAS() throws Exception { assertThat(bulkResponse.getItems()[2].getResponse().getSeqNo(), equalTo(4L)); bulkResponse = client().prepareBulk() - .add(client().prepareIndex("test").setId("e1").setSource("field", "1").setVersion(10).setVersionType(VersionType.EXTERNAL)) - .add(client().prepareIndex("test").setId("e2").setSource("field", "1").setVersion(10).setVersionType(VersionType.EXTERNAL)) - .add(client().prepareIndex("test").setId("e1").setSource("field", "2").setVersion(12).setVersionType(VersionType.EXTERNAL)) + .add(prepareIndex("test").setId("e1").setSource("field", "1").setVersion(10).setVersionType(VersionType.EXTERNAL)) + .add(prepareIndex("test").setId("e2").setSource("field", "1").setVersion(10).setVersionType(VersionType.EXTERNAL)) + .add(prepareIndex("test").setId("e1").setSource("field", "2").setVersion(12).setVersionType(VersionType.EXTERNAL)) .get(); assertEquals(DocWriteResponse.Result.CREATED, bulkResponse.getItems()[0].getResponse().getResult()); @@ -285,11 +284,10 @@ public void testBulkUpdateMalformedScripts() throws Exception { ensureGreen(); BulkResponse bulkResponse = client().prepareBulk() - .add(client().prepareIndex().setIndex("test").setId("1").setSource("field", 1)) - .add(client().prepareIndex().setIndex("test").setId("2").setSource("field", 1)) - .add(client().prepareIndex().setIndex("test").setId("3").setSource("field", 1)) - .execute() - .actionGet(); + .add(prepareIndex("test").setId("1").setSource("field", 1)) + .add(prepareIndex("test").setId("2").setSource("field", 1)) + .add(prepareIndex("test").setId("3").setSource("field", 1)) + .get(); assertThat(bulkResponse.hasFailures(), equalTo(false)); assertThat(bulkResponse.getItems().length, equalTo(3)); @@ -330,8 +328,7 @@ public void testBulkUpdateMalformedScripts() throws Exception { ) ) ) - .execute() - .actionGet(); + .get(); assertThat(bulkResponse.hasFailures(), equalTo(true)); assertThat(bulkResponse.getItems().length, equalTo(3)); @@ -385,7 +382,7 @@ public void testBulkUpdateLargerVolume() throws Exception { assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().sourceAsMap().get("counter"), equalTo(1)); for (int j = 0; j < 5; j++) { - GetResponse getResponse = client().prepareGet("test", Integer.toString(i)).execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", Integer.toString(i)).get(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getVersion(), equalTo(1L)); assertThat(((Number) getResponse.getSource().get("counter")).longValue(), equalTo(1L)); @@ -410,7 +407,7 @@ public void testBulkUpdateLargerVolume() throws Exception { builder.add(updateBuilder); } - response = 
builder.execute().actionGet(); + response = builder.get(); assertThat(response.hasFailures(), equalTo(false)); assertThat(response.getItems().length, equalTo(numDocs)); for (int i = 0; i < numDocs; i++) { @@ -428,7 +425,7 @@ public void testBulkUpdateLargerVolume() throws Exception { for (int i = (numDocs / 2); i < maxDocs; i++) { builder.add(client().prepareUpdate().setIndex("test").setId(Integer.toString(i)).setScript(script)); } - response = builder.execute().actionGet(); + response = builder.get(); assertThat(response.hasFailures(), equalTo(true)); assertThat(response.getItems().length, equalTo(numDocs)); for (int i = 0; i < numDocs; i++) { @@ -453,7 +450,7 @@ public void testBulkUpdateLargerVolume() throws Exception { .setScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "ctx.op = \"none\"", Collections.emptyMap())) ); } - response = builder.execute().actionGet(); + response = builder.get(); assertThat(response.buildFailureMessage(), response.hasFailures(), equalTo(false)); assertThat(response.getItems().length, equalTo(numDocs)); for (int i = 0; i < numDocs; i++) { @@ -472,7 +469,7 @@ public void testBulkUpdateLargerVolume() throws Exception { .setScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "ctx.op = \"delete\"", Collections.emptyMap())) ); } - response = builder.execute().actionGet(); + response = builder.get(); assertThat("expected no failures but got: " + response.buildFailureMessage(), response.hasFailures(), equalTo(false)); assertThat(response.getItems().length, equalTo(numDocs)); for (int i = 0; i < numDocs; i++) { @@ -503,7 +500,7 @@ public void testBulkIndexingWhileInitializing() throws Exception { for (int i = 0; i < numDocs;) { final BulkRequestBuilder builder = client().prepareBulk(); for (int j = 0; j < bulk && i < numDocs; j++, i++) { - builder.add(client().prepareIndex("test").setId(Integer.toString(i)).setSource("val", i)); + builder.add(prepareIndex("test").setId(Integer.toString(i)).setSource("val", i)); } logger.info("bulk indexing {}-{}", i - bulk, i - 1); BulkResponse response = builder.get(); @@ -578,7 +575,7 @@ public void testThatInvalidIndexNamesShouldNotBreakCompleteBulkRequest() { } else { name = "test"; } - builder.add(client().prepareIndex().setIndex(name).setId("1").setSource("field", 1)); + builder.add(prepareIndex(name).setId("1").setSource("field", 1)); } BulkResponse bulkResponse = builder.get(); assertThat(bulkResponse.hasFailures(), is(expectFailure)); @@ -655,7 +652,7 @@ public void testThatMissingIndexDoesNotAbortFullBulkRequest() throws Exception { public void testFailedRequestsOnClosedIndex() throws Exception { createIndex("bulkindex1"); - client().prepareIndex("bulkindex1").setId("1").setSource("text", "test").get(); + prepareIndex("bulkindex1").setId("1").setSource("text", "test").get(); assertBusy(() -> assertAcked(indicesAdmin().prepareClose("bulkindex1"))); BulkRequest bulkRequest = new BulkRequest().setRefreshPolicy(RefreshPolicy.IMMEDIATE); @@ -678,7 +675,7 @@ public void testFailedRequestsOnClosedIndex() throws Exception { // issue 9821 public void testInvalidIndexNamesCorrectOpType() { BulkResponse bulkResponse = client().prepareBulk() - .add(client().prepareIndex().setIndex("INVALID.NAME").setId("1").setSource(Requests.INDEX_CONTENT_TYPE, "field", 1)) + .add(prepareIndex("INVALID.NAME").setId("1").setSource(Requests.INDEX_CONTENT_TYPE, "field", 1)) .add(client().prepareUpdate().setIndex("INVALID.NAME").setId("1").setDoc(Requests.INDEX_CONTENT_TYPE, "field", randomInt())) 
.add(client().prepareDelete().setIndex("INVALID.NAME").setId("1")) .get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/WriteAckDelayIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/WriteAckDelayIT.java index 61f624c19f567..29a5e491dd3fd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/WriteAckDelayIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/WriteAckDelayIT.java @@ -35,7 +35,7 @@ public void testIndexWithWriteDelayEnabled() throws Exception { logger.info("indexing [{}] docs", numOfDocs); List builders = new ArrayList<>(numOfDocs); for (int j = 0; j < numOfDocs; j++) { - builders.add(client().prepareIndex("test").setSource("field", "value_" + j)); + builders.add(prepareIndex("test").setSource("field", "value_" + j)); } indexRandom(true, builders); logger.info("verifying indexed content"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java index 36e544af90bc6..eff681f1f281b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java @@ -202,11 +202,11 @@ private static void assertOneFailedShard(SearchResponse.Cluster cluster, int tot private String openPointInTime(String[] indices, TimeValue keepAlive) { OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).keepAlive(keepAlive); - final OpenPointInTimeResponse response = client().execute(OpenPointInTimeAction.INSTANCE, request).actionGet(); + final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); return response.getPointInTimeId(); } private void closePointInTime(String readerId) { - client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(readerId)).actionGet(); + client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(readerId)).actionGet(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/LookupRuntimeFieldIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/LookupRuntimeFieldIT.java index 7871a14264944..f2e0511ffb7ab 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/LookupRuntimeFieldIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/LookupRuntimeFieldIT.java @@ -40,7 +40,7 @@ public void populateIndex() throws Exception { Map.of("author", "jack", "first_name", "Jack", "last_name", "Austin", "joined", "1999-11-03") ); for (Map author : authors) { - client().prepareIndex("authors").setSource(author).setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values())).get(); + prepareIndex("authors").setSource(author).setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values())).get(); } indicesAdmin().prepareRefresh("authors").get(); @@ -126,7 +126,7 @@ public void populateIndex() throws Exception { Map.of("title", "the fifth book", "genre", "science", "author_id", "mike", "publisher_id", "p2", "published_date", "2021-06-30") ); for (Map book : books) { - client().prepareIndex("books").setSource(book).setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values())).get(); + prepareIndex("books").setSource(book).setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values())).get(); } 
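The CCSPointInTimeIT and PointInTimeIT hunks in this stretch also rename how actions are addressed: the per-action INSTANCE singletons give way to ActionType constants hosted on the corresponding transport action classes. A sketch of the substitution; the TYPE constants and request construction are taken from these hunks, not from any other API surface:

    // Before: the standalone action's singleton
    client().execute(OpenPointInTimeAction.INSTANCE, request).actionGet();
    client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(readerId)).actionGet();

    // After: the ActionType constant on the transport action class
    client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet();
    client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(readerId)).actionGet();

Task action names move with them, as in the testCloseInvalidPointInTime hunk below: setActions(ClosePointInTimeAction.NAME) becomes setActions(TransportClosePointInTimeAction.TYPE.name()).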
indicesAdmin().prepareRefresh("books").get(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java index bb7658f5011e3..d3e312e173c29 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java @@ -50,6 +50,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -78,7 +79,7 @@ public void testBasic() { int numDocs = randomIntBetween(10, 50); for (int i = 0; i < numDocs; i++) { String id = Integer.toString(i); - client().prepareIndex("test").setId(id).setSource("value", i).get(); + prepareIndex("test").setId(id).setSource("value", i).get(); } refresh("test"); String pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)); @@ -121,7 +122,7 @@ public void testMultipleIndices() { for (int i = 0; i < numDocs; i++) { String id = Integer.toString(i); String index = "index-" + randomIntBetween(1, numIndices); - client().prepareIndex(index).setId(id).setSource("value", i).get(); + prepareIndex(index).setId(id).setSource("value", i).get(); } refresh(); String pitId = openPointInTime(new String[] { "*" }, TimeValue.timeValueMinutes(2)); @@ -135,7 +136,7 @@ public void testMultipleIndices() { for (int i = 0; i < moreDocs; i++) { String id = "more-" + i; String index = "index-" + randomIntBetween(1, numIndices); - client().prepareIndex(index).setId(id).setSource("value", i).get(); + prepareIndex(index).setId(id).setSource("value", i).get(); } refresh(); resp = prepareSearch().get(); @@ -152,13 +153,62 @@ public void testMultipleIndices() { } } + public void testIndexFilter() { + int numDocs = randomIntBetween(1, 9); + for (int i = 1; i <= 3; i++) { + String index = "index-" + i; + createIndex(index); + for (int j = 1; j <= numDocs; j++) { + String id = Integer.toString(j); + client().prepareIndex(index).setId(id).setSource("@timestamp", "2023-0" + i + "-0" + j).get(); + } + } + refresh(); + + { + + OpenPointInTimeRequest request = new OpenPointInTimeRequest("*").keepAlive(TimeValue.timeValueMinutes(2)); + final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); + try { + SearchContextId searchContextId = SearchContextId.decode(writableRegistry(), response.getPointInTimeId()); + String[] actualIndices = searchContextId.getActualIndices(); + assertEquals(3, actualIndices.length); + } finally { + closePointInTime(response.getPointInTimeId()); + } + } + { + OpenPointInTimeRequest request = new OpenPointInTimeRequest("*").keepAlive(TimeValue.timeValueMinutes(2)); + request.indexFilter(new RangeQueryBuilder("@timestamp").gte("2023-03-01")); + final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); + String pitId = response.getPointInTimeId(); + try { + SearchContextId searchContextId = SearchContextId.decode(writableRegistry(), pitId); + String[] actualIndices = 
searchContextId.getActualIndices(); + assertEquals(1, actualIndices.length); + assertEquals("index-3", actualIndices[0]); + assertResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)).setSize(50), resp -> { + assertNoFailures(resp); + assertHitCount(resp, numDocs); + assertNotNull(resp.pointInTimeId()); + assertThat(resp.pointInTimeId(), equalTo(pitId)); + for (SearchHit hit : resp.getHits()) { + assertEquals("index-3", hit.getIndex()); + } + }); + } finally { + closePointInTime(pitId); + } + } + } + public void testRelocation() throws Exception { internalCluster().ensureAtLeastNumDataNodes(4); createIndex("test", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, between(0, 1)).build()); ensureGreen("test"); int numDocs = randomIntBetween(10, 50); for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("value", i).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("value", i).get(); } refresh(); String pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)); @@ -179,7 +229,7 @@ public void testRelocation() throws Exception { if (randomBoolean()) { int moreDocs = randomIntBetween(10, 50); for (int i = 0; i < moreDocs; i++) { - client().prepareIndex("test").setId("more-" + i).setSource("value", i).get(); + prepareIndex("test").setId("more-" + i).setSource("value", i).get(); } refresh(); } @@ -210,7 +260,7 @@ public void testPointInTimeNotFound() throws Exception { int index1 = randomIntBetween(10, 50); for (int i = 0; i < index1; i++) { String id = Integer.toString(i); - client().prepareIndex("index").setId(id).setSource("value", i).get(); + prepareIndex("index").setId(id).setSource("value", i).get(); } refresh(); String pit = openPointInTime(new String[] { "index" }, TimeValue.timeValueSeconds(5)); @@ -241,13 +291,13 @@ public void testIndexNotFound() { int index1 = randomIntBetween(10, 50); for (int i = 0; i < index1; i++) { String id = Integer.toString(i); - client().prepareIndex("index-1").setId(id).setSource("value", i).get(); + prepareIndex("index-1").setId(id).setSource("value", i).get(); } int index2 = randomIntBetween(10, 50); for (int i = 0; i < index2; i++) { String id = Integer.toString(i); - client().prepareIndex("index-2").setId(id).setSource("value", i).get(); + prepareIndex("index-2").setId(id).setSource("value", i).get(); } refresh(); String pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueMinutes(2)); @@ -283,8 +333,8 @@ public void testIndexNotFound() { public void testAllowNoIndex() { var request = new OpenPointInTimeRequest("my_index").indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN) .keepAlive(TimeValue.timeValueMinutes(between(1, 10))); - String pit = client().execute(OpenPointInTimeAction.INSTANCE, request).actionGet().getPointInTimeId(); - var closeResp = client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(pit)).actionGet(); + String pit = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet().getPointInTimeId(); + var closeResp = client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pit)).actionGet(); assertThat(closeResp.status(), equalTo(RestStatus.OK)); } @@ -305,7 +355,7 @@ public void testCanMatch() throws Exception { } } } - client().prepareIndex("test").setId("1").setSource("created_date", "2020-01-01").get(); + prepareIndex("test").setId("1").setSource("created_date", "2020-01-01").get(); SearchResponse resp = prepareSearch().setQuery(new 
RangeQueryBuilder("created_date").gte("2020-01-02").lte("2020-01-03")) .setSearchType(SearchType.QUERY_THEN_FETCH) .setPreference(null) @@ -356,11 +406,11 @@ public void testPartialResults() throws Exception { int numDocs1 = randomIntBetween(10, 50); for (int i = 0; i < numDocs1; i++) { - client().prepareIndex(randomFrom("test-1")).setId(Integer.toString(i)).setSource("value", i).get(); + prepareIndex(randomFrom("test-1")).setId(Integer.toString(i)).setSource("value", i).get(); } int numDocs2 = randomIntBetween(10, 50); for (int i = 0; i < numDocs2; i++) { - client().prepareIndex(randomFrom("test-2")).setId(Integer.toString(i)).setSource("value", i).get(); + prepareIndex(randomFrom("test-2")).setId(Integer.toString(i)).setSource("value", i).get(); } refresh(); String pitId = openPointInTime(new String[] { "test-*" }, TimeValue.timeValueMinutes(2)); @@ -392,7 +442,7 @@ public void testPITTiebreak() throws Exception { createIndex(index, Settings.builder().put("index.number_of_shards", 1).build()); int numDocs = randomIntBetween(3, 20); for (int j = 0; j < numDocs; j++) { - client().prepareIndex(index).setSource("value", randomIntBetween(0, 2)).get(); + prepareIndex(index).setSource("value", randomIntBetween(0, 2)).get(); expectedNumDocs++; } } @@ -428,8 +478,11 @@ public void testPITTiebreak() throws Exception { } public void testCloseInvalidPointInTime() { - expectThrows(Exception.class, () -> client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest("")).actionGet()); - List tasks = clusterAdmin().prepareListTasks().setActions(ClosePointInTimeAction.NAME).get().getTasks(); + expectThrows( + Exception.class, + () -> client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest("")).actionGet() + ); + List tasks = clusterAdmin().prepareListTasks().setActions(TransportClosePointInTimeAction.TYPE.name()).get().getTasks(); assertThat(tasks, empty()); } @@ -470,7 +523,7 @@ public void testOpenPITConcurrentShardRequests() throws Exception { OpenPointInTimeRequest request = new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueMinutes(1)); request.maxConcurrentShardRequests(maxConcurrentRequests); PlainActionFuture future = new PlainActionFuture<>(); - client().execute(OpenPointInTimeAction.INSTANCE, request, future); + client().execute(TransportOpenPointInTimeAction.TYPE, request, future); assertTrue(sentLatch.await(1, TimeUnit.MINUTES)); readyLatch.countDown(); closePointInTime(future.actionGet().getPointInTimeId()); @@ -534,11 +587,11 @@ private void assertPagination(PointInTimeBuilder pit, int expectedNumDocs, int s private String openPointInTime(String[] indices, TimeValue keepAlive) { OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).keepAlive(keepAlive); - final OpenPointInTimeResponse response = client().execute(OpenPointInTimeAction.INSTANCE, request).actionGet(); + final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); return response.getPointInTimeId(); } private void closePointInTime(String readerId) { - client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(readerId)).actionGet(); + client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(readerId)).actionGet(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java index 
df36f1babd364..227a3b8612331 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java @@ -162,7 +162,7 @@ public void onFailure(Exception e) { throw new AssertionError(); } }; - client.executeLocally(SearchAction.INSTANCE, new SearchRequest(request) { + client.executeLocally(TransportSearchAction.TYPE, new SearchRequest(request) { @Override public SearchTask createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { SearchTask task = super.createTask(id, type, action, parentTaskId, headers); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchShardsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchShardsIT.java index 68153e5e88c44..8b1acf11a7a5d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchShardsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchShardsIT.java @@ -47,7 +47,7 @@ public void testBasic() { ); int numDocs = randomIntBetween(1, 10); for (int j = 0; j < numDocs; j++) { - client().prepareIndex(index).setSource("value", i).setId(Integer.toString(i)).get(); + prepareIndex(index).setSource("value", i).setId(Integer.toString(i)).get(); } indicesAdmin().prepareRefresh(index).get(); } @@ -70,7 +70,7 @@ public void testBasic() { randomBoolean(), randomBoolean() ? null : randomAlphaOfLength(10) ); - var resp = client().execute(SearchShardsAction.INSTANCE, request).actionGet(); + var resp = client().execute(TransportSearchShardsAction.TYPE, request).actionGet(); assertThat(resp.getGroups(), hasSize(indicesWithData + indicesWithoutData)); int skipped = 0; for (SearchShardsGroup g : resp.getGroups()) { @@ -97,7 +97,7 @@ public void testBasic() { randomBoolean(), randomBoolean() ? null : randomAlphaOfLength(10) ); - SearchShardsResponse resp = client().execute(SearchShardsAction.INSTANCE, request).actionGet(); + SearchShardsResponse resp = client().execute(TransportSearchShardsAction.TYPE, request).actionGet(); assertThat(resp.getGroups(), hasSize(indicesWithData + indicesWithoutData)); for (SearchShardsGroup g : resp.getGroups()) { assertFalse(g.skipped()); @@ -115,7 +115,7 @@ public void testRandom() { ); int numDocs = randomIntBetween(10, 1000); for (int j = 0; j < numDocs; j++) { - client().prepareIndex(index).setSource("value", i).setId(Integer.toString(i)).get(); + prepareIndex(index).setSource("value", i).setId(Integer.toString(i)).get(); } indicesAdmin().prepareRefresh(index).get(); } @@ -137,7 +137,7 @@ public void testRandom() { randomBoolean(), randomBoolean() ? 
null : randomAlphaOfLength(10) ); - var searchShardsResponse = client().execute(SearchShardsAction.INSTANCE, searchShardsRequest).actionGet(); + var searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest).actionGet(); assertThat(searchShardsResponse.getGroups(), hasSize(searchResponse.getTotalShards())); long skippedShards = searchShardsResponse.getGroups().stream().filter(SearchShardsGroup::skipped).count(); @@ -169,7 +169,7 @@ public void testNoCanMatchWithoutQuery() { totalShards += numShards; int numDocs = randomIntBetween(10, 100); for (int j = 0; j < numDocs; j++) { - client().prepareIndex(index).setSource("value", i).setId(Integer.toString(i)).get(); + prepareIndex(index).setSource("value", i).setId(Integer.toString(i)).get(); } indicesAdmin().prepareRefresh(index).get(); } @@ -182,7 +182,7 @@ public void testNoCanMatchWithoutQuery() { randomBoolean(), null ); - SearchShardsResponse resp = client().execute(SearchShardsAction.INSTANCE, request).actionGet(); + SearchShardsResponse resp = client().execute(TransportSearchShardsAction.TYPE, request).actionGet(); assertThat(resp.getGroups(), hasSize(totalShards)); for (SearchShardsGroup group : resp.getGroups()) { assertFalse(group.skipped()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java index d84d4270af24c..31ffe560be010 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java @@ -419,9 +419,9 @@ public void testSearchIdle() throws Exception { } } }); - client().prepareIndex("test").setId("1").setSource("created_date", "2020-01-01").get(); - client().prepareIndex("test").setId("2").setSource("created_date", "2020-01-02").get(); - client().prepareIndex("test").setId("3").setSource("created_date", "2020-01-03").get(); + prepareIndex("test").setId("1").setSource("created_date", "2020-01-01").get(); + prepareIndex("test").setId("2").setSource("created_date", "2020-01-02").get(); + prepareIndex("test").setId("3").setSource("created_date", "2020-01-03").get(); assertBusy(() -> { SearchResponse resp = prepareSearch("test").setQuery(new RangeQueryBuilder("created_date").gte("2020-01-02").lte("2020-01-03")) .setPreFilterShardSize(randomIntBetween(1, 3)) @@ -546,7 +546,7 @@ private void indexSomeDocs(String indexName, int numberOfShards, int numberOfDoc createIndex(indexName, Settings.builder().put("index.number_of_shards", numberOfShards).build()); for (int i = 0; i < numberOfDocs; i++) { - DocWriteResponse indexResponse = client().prepareIndex(indexName).setSource("number", randomInt()).get(); + DocWriteResponse indexResponse = prepareIndex(indexName).setSource("number", randomInt()).get(); assertEquals(RestStatus.CREATED, indexResponse.status()); } indicesAdmin().prepareRefresh(indexName).get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java index c31c9cae301eb..6737d02434c0f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java @@ -34,15 +34,13 @@ public void testReplicationWaitsForActiveShardCount() 
throws Exception { assertAcked(createIndexResponse); // indexing, by default, will work (waiting for one shard copy only) - client().prepareIndex("test").setId("1").setSource(source("1", "test"), XContentType.JSON).execute().actionGet(); + prepareIndex("test").setId("1").setSource(source("1", "test"), XContentType.JSON).get(); try { - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(source("1", "test"), XContentType.JSON) .setWaitForActiveShards(2) // wait for 2 active shard copies .setTimeout(timeValueMillis(100)) - .execute() - .actionGet(); + .get(); fail("can't index, does not enough active shard copies"); } catch (UnavailableShardsException e) { assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE)); @@ -59,29 +57,24 @@ public void testReplicationWaitsForActiveShardCount() throws Exception { .setWaitForEvents(Priority.LANGUID) .setWaitForActiveShards(2) .setWaitForYellowStatus() - .execute() - .actionGet(); + .get(); logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); // this should work, since we now have two - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(source("1", "test"), XContentType.JSON) .setWaitForActiveShards(2) .setTimeout(timeValueSeconds(1)) - .execute() - .actionGet(); + .get(); try { - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(source("1", "test"), XContentType.JSON) .setWaitForActiveShards(ActiveShardCount.ALL) .setTimeout(timeValueMillis(100)) - .execute() - .actionGet(); + .get(); fail("can't index, not enough active shard copies"); } catch (UnavailableShardsException e) { assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE)); @@ -101,20 +94,17 @@ public void testReplicationWaitsForActiveShardCount() throws Exception { .setWaitForEvents(Priority.LANGUID) .setWaitForActiveShards(3) .setWaitForGreenStatus() - .execute() - .actionGet(); + .get(); logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); // this should work, since we now have all shards started - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(source("1", "test"), XContentType.JSON) .setWaitForActiveShards(ActiveShardCount.ALL) .setTimeout(timeValueSeconds(1)) - .execute() - .actionGet(); + .get(); } private String source(String id, String nameValue) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java index 3d77e7164ce76..d98de846bd9da 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java @@ -70,7 +70,7 @@ public void testNoSuchDoc() throws Exception { .endObject(); assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setMapping(mapping)); - client().prepareIndex("test").setId("666").setSource("field", "foo bar").execute().actionGet(); + prepareIndex("test").setId("666").setSource("field", "foo bar").get(); refresh(); for (int i = 0; i < 20; i++) { ActionFuture termVector = client().termVectors(new 
TermVectorsRequest(indexOrAlias(), "" + i)); @@ -97,7 +97,7 @@ public void testExistingFieldWithNoTermVectorsNoNPE() throws Exception { assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setMapping(mapping)); // when indexing a field that simply has a question mark, the term vectors will be null - client().prepareIndex("test").setId("0").setSource("existingfield", "?").execute().actionGet(); + prepareIndex("test").setId("0").setSource("existingfield", "?").get(); refresh(); ActionFuture termVector = client().termVectors( new TermVectorsRequest(indexOrAlias(), "0").selectedFields(new String[] { "existingfield" }) @@ -125,7 +125,7 @@ public void testExistingFieldButNotInDocNPE() throws Exception { assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setMapping(mapping)); // when indexing a field that simply has a question mark, the term vectors will be null - client().prepareIndex("test").setId("0").setSource("anotherexistingfield", 1).execute().actionGet(); + prepareIndex("test").setId("0").setSource("anotherexistingfield", 1).get(); refresh(); ActionFuture termVectors = client().termVectors( new TermVectorsRequest(indexOrAlias(), "0").selectedFields(randomBoolean() ? new String[] { "existingfield" } : null) @@ -163,7 +163,7 @@ public void testNotIndexedField() throws Exception { List indexBuilders = new ArrayList<>(); for (int i = 0; i < 6; i++) { - indexBuilders.add(client().prepareIndex().setIndex("test").setId(String.valueOf(i)).setSource("field" + i, i)); + indexBuilders.add(prepareIndex("test").setId(String.valueOf(i)).setSource("field" + i, i)); } indexRandom(true, indexBuilders); @@ -205,8 +205,7 @@ public void testSimpleTermVectors() throws IOException { ) ); for (int i = 0; i < 10; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setSource( jsonBuilder().startObject() .field("field", "the quick brown fox jumps over the lazy dog") @@ -214,8 +213,7 @@ public void testSimpleTermVectors() throws IOException { // 31the34 35lazy39 40dog43 .endObject() ) - .execute() - .actionGet(); + .get(); refresh(); } for (int i = 0; i < 10; i++) { @@ -224,7 +222,7 @@ public void testSimpleTermVectors() throws IOException { .setOffsets(true) .setPositions(true) .setSelectedFields(); - TermVectorsResponse response = resp.execute().actionGet(); + TermVectorsResponse response = resp.get(); assertThat(response.getIndex(), equalTo("test")); assertThat("doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true)); Fields fields = response.getFields(); @@ -308,8 +306,7 @@ public void testRandomSingleTermVectors() throws IOException { ) ); for (int i = 0; i < 10; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setSource( jsonBuilder().startObject() .field("field", "the quick brown fox jumps over the lazy dog") @@ -317,8 +314,7 @@ public void testRandomSingleTermVectors() throws IOException { // 31the34 35lazy39 40dog43 .endObject() ) - .execute() - .actionGet(); + .get(); refresh(); } String[] values = { "brown", "dog", "fox", "jumps", "lazy", "over", "quick", "the" }; @@ -335,7 +331,7 @@ public void testRandomSingleTermVectors() throws IOException { .setOffsets(isOffsetRequested) .setPositions(isPositionsRequested) .setSelectedFields(); - TermVectorsResponse response = resp.execute().actionGet(); + TermVectorsResponse response = resp.get(); assertThat(infoString + "doc id: " + i + " doesn't exists but should", 
response.isExists(), equalTo(true));
             Fields fields = response.getFields();
             assertThat(fields.size(), equalTo(ft.storeTermVectors() ? 1 : 0));
@@ -470,7 +466,7 @@ public void testSimpleTermVectorsWithGenerate() throws IOException {
         ensureGreen();
         for (int i = 0; i < 10; i++) {
-            client().prepareIndex("test").setId(Integer.toString(i)).setSource(source).execute().actionGet();
+            prepareIndex("test").setId(Integer.toString(i)).setSource(source).get();
             refresh();
         }
@@ -480,8 +476,7 @@ public void testSimpleTermVectorsWithGenerate() throws IOException {
                 .setOffsets(true)
                 .setPositions(true)
                 .setSelectedFields(fieldNames)
-                .execute()
-                .actionGet();
+                .get();
             assertThat("doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
             Fields fields = response.getFields();
             assertThat(fields.size(), equalTo(fieldNames.length));
@@ -551,7 +546,7 @@ public void testDuelWithAndWithoutTermVectors() throws IOException, ExecutionExc
         List<IndexRequestBuilder> indexBuilders = new ArrayList<>();
         for (String indexName : indexNames) {
             for (int id = 0; id < content.length; id++) {
-                indexBuilders.add(client().prepareIndex().setIndex(indexName).setId(String.valueOf(id)).setSource("field1", content[id]));
+                indexBuilders.add(prepareIndex(indexName).setId(String.valueOf(id)).setSource("field1", content[id]));
             }
         }
         indexRandom(true, indexBuilders);
@@ -628,7 +623,7 @@ public void testSimpleWildCards() throws IOException {
         assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setMapping(mapping));
         ensureGreen();
-        client().prepareIndex("test").setId("0").setSource(source).get();
+        prepareIndex("test").setId("0").setSource(source).get();
         refresh();
         TermVectorsResponse response = client().prepareTermVectors(indexOrAlias(), "0").setSelectedFields("field*").get();
@@ -652,7 +647,7 @@ public void testArtificialVsExisting() throws ExecutionException, InterruptedExc
         List<IndexRequestBuilder> indexBuilders = new ArrayList<>();
         for (int i = 0; i < content.length; i++) {
-            indexBuilders.add(client().prepareIndex().setIndex("test").setId(String.valueOf(i)).setSource("field1", content[i]));
+            indexBuilders.add(prepareIndex("test").setId(String.valueOf(i)).setSource("field1", content[i]));
         }
         indexRandom(true, indexBuilders);
@@ -740,7 +735,7 @@ public void testPerFieldAnalyzer() throws IOException {
         ensureGreen();
         // index a single document with prepared source
-        client().prepareIndex("test").setId("0").setSource(source).get();
+        prepareIndex("test").setId("0").setSource(source).get();
         refresh();
         // create random per_field_analyzer and selected fields
@@ -814,7 +809,7 @@ public void testTermVectorsWithVersion() {
         assertThat(response.isExists(), equalTo(false));
         logger.info("--> index doc 1");
-        client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get();
+        prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get();
         // From translog:
@@ -860,7 +855,7 @@ public void testTermVectorsWithVersion() {
         }
         logger.info("--> index doc 1 again, so increasing the version");
-        client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get();
+        prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get();
         // From translog:
@@ -923,7 +918,7 @@ public void testFilterLength() throws ExecutionException, InterruptedException,
             }
             tags.add(tag);
         }
-        indexRandom(true, client().prepareIndex("test").setId("1").setSource("tags", tags));
+        indexRandom(true, prepareIndex("test").setId("1").setSource("tags", tags));
         logger.info("Checking best tags by longest to shortest size ...");
         TermVectorsRequest.FilterSettings filterSettings = new TermVectorsRequest.FilterSettings();
@@ -959,7 +954,7 @@ public void testFilterTermFreq() throws ExecutionException, InterruptedException
             }
             uniqueTags.add(tag);
         }
-        indexRandom(true, client().prepareIndex("test").setId("1").setSource("tags", tags));
+        indexRandom(true, prepareIndex("test").setId("1").setSource("tags", tags));
         logger.info("Checking best tags by highest to lowest term freq ...");
         TermVectorsRequest.FilterSettings filterSettings = new TermVectorsRequest.FilterSettings();
@@ -990,7 +985,7 @@ public void testFilterDocFreq() throws ExecutionException, InterruptedException,
         List<String> tags = new ArrayList<>();
         for (int i = 0; i < numDocs; i++) {
             tags.add("tag_" + i);
-            builders.add(client().prepareIndex("test").setId(i + "").setSource("tags", tags));
+            builders.add(prepareIndex("test").setId(i + "").setSource("tags", tags));
         }
         indexRandom(true, builders);
@@ -1016,7 +1011,7 @@ public void testArtificialDocWithPreference() throws InterruptedException, IOExc
         ensureGreen();
         // index document
-        indexRandom(true, client().prepareIndex("test").setId("1").setSource("field1", "random permutation"));
+        indexRandom(true, prepareIndex("test").setId("1").setSource("field1", "random permutation"));
         // Get search shards
         ClusterSearchShardsResponse searchShardsResponse = clusterAdmin().prepareSearchShards("test").get();
@@ -1052,7 +1047,7 @@ public void testTermVectorsWithIgnoredField() throws IOException, InterruptedExc
         ensureGreen();
         // add a doc with a bad long field
-        indexRandom(true, client().prepareIndex("index").setId("1").setSource("{\"field\":\"foo\"}", XContentType.JSON));
+        indexRandom(true, prepareIndex("index").setId("1").setSource("{\"field\":\"foo\"}", XContentType.JSON));
         // do a tv request for all fields, _ignored should be returned
         TermVectorsResponse resp = client().prepareTermVectors("index", "1").setSelectedFields("*").get();
@@ -1089,10 +1084,7 @@ public void testWithKeywordAndNormalizer() throws IOException, ExecutionExceptio
         for (String indexName : indexNames) {
             for (int id = 0; id < content.length; id++) {
                 indexBuilders.add(
-                    client().prepareIndex()
-                        .setIndex(indexName)
-                        .setId(String.valueOf(id))
-                        .setSource("field1", content[id], "field2", content[id])
+                    prepareIndex(indexName).setId(String.valueOf(id)).setSource("field1", content[id], "field2", content[id])
                 );
             }
         }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java
index 875b29a3de771..9beffa7f964d9 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java
@@ -69,7 +69,7 @@ public void testMissingIndexThrowsMissingIndex() throws Exception {
         TermVectorsRequestBuilder requestBuilder = client().prepareTermVectors("testX", Integer.toString(1));
         MultiTermVectorsRequestBuilder mtvBuilder = client().prepareMultiTermVectors();
         mtvBuilder.add(requestBuilder.request());
-        MultiTermVectorsResponse response = mtvBuilder.execute().actionGet();
+        MultiTermVectorsResponse response = mtvBuilder.get();
         assertThat(response.getResponses().length, equalTo(1));
         assertThat(response.getResponses()[0].getFailure().getCause(), instanceOf(IndexNotFoundException.class));
         assertThat(response.getResponses()[0].getFailure().getCause().getMessage(), equalTo("no such index [testX]"));
@@ -84,7 +84,7 @@ public void testMultiTermVectorsWithVersion() throws Exception {
         assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(false));
         for (int i = 0; i < 3; i++) {
-            client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get();
+            prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get();
         }
         // Version from translog
@@ -133,7 +133,7 @@ public void testMultiTermVectorsWithVersion() throws Exception {
         assertThat(response.getResponses()[2].getFailure().getCause().getCause(), instanceOf(VersionConflictEngineException.class));
         for (int i = 0; i < 3; i++) {
-            client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get();
+            prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get();
         }
         // Version from translog
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java
index 2074e38f891c0..8ede5dc5ef29f 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java
@@ -677,7 +677,7 @@ public void testDeleteAliases() throws Exception {
         assertFalse(indicesAdmin().prepareGetAliases("foo").setIndices("bar_bar").get().getAliases().isEmpty());
         IllegalArgumentException iae = expectThrows(
             IllegalArgumentException.class,
-            () -> indicesAdmin().prepareAliases().addAliasAction(AliasActions.remove().index("foo").alias("foo")).execute().actionGet()
+            () -> indicesAdmin().prepareAliases().addAliasAction(AliasActions.remove().index("foo").alias("foo")).get()
         );
         assertEquals(
             "The provided expression [foo] matches an alias, specify the corresponding concrete indices instead.",
@@ -1090,13 +1090,13 @@ public void testAliasesCanBeAddedToIndicesOnly() throws Exception {
         IllegalArgumentException iae = expectThrows(
             IllegalArgumentException.class,
-            () -> indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index("week_20").alias("tmp")).execute().actionGet()
+            () -> indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index("week_20").alias("tmp")).get()
         );
         assertEquals(
             "The provided expression [week_20] matches an alias, specify the corresponding concrete indices instead.",
             iae.getMessage()
         );
-        assertAcked(indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index("2017-05-20").alias("tmp")).execute().get());
+        assertAcked(indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index("2017-05-20").alias("tmp")));
     }
     // Before 2.0 alias filters were parsed at alias creation time, in order
@@ -1137,7 +1137,7 @@ public void testAliasFilterWithNowInRangeFilterAndQuery() {
         final int numDocs = scaledRandomIntBetween(5, 52);
         for (int i = 1; i <= numDocs; i++) {
-            client().prepareIndex("my-index").setSource("timestamp", "2016-12-12").get();
+            prepareIndex("my-index").setSource("timestamp", "2016-12-12").get();
             if (i % 2 == 0) {
                 refresh();
                 assertHitCount(prepareSearch("filter1"), i);
@@ -1159,7 +1159,7 @@ public void testAliasesWithBlocks() {
                 () -> assertAcked(indicesAdmin().prepareAliases().addAlias("test", "alias1").addAlias("test", "alias2"))
             );
             assertAliasesVersionIncreases("test", () -> assertAcked(indicesAdmin().prepareAliases().removeAlias("test", "alias1")));
-            assertThat(indicesAdmin().prepareGetAliases("alias2").execute().actionGet().getAliases().get("test").size(), equalTo(1));
+            assertThat(indicesAdmin().prepareGetAliases("alias2").get().getAliases().get("test").size(), equalTo(1));
             assertFalse(indicesAdmin().prepareGetAliases("alias2").get().getAliases().isEmpty());
         } finally {
             disableIndexBlock("test", block);
@@ -1177,7 +1177,7 @@ public void testAliasesWithBlocks() {
                 "test",
                 () -> assertBlocked(indicesAdmin().prepareAliases().removeAlias("test", "alias2"), INDEX_READ_ONLY_BLOCK)
             );
-            assertThat(indicesAdmin().prepareGetAliases("alias2").execute().actionGet().getAliases().get("test").size(), equalTo(1));
+            assertThat(indicesAdmin().prepareGetAliases("alias2").get().getAliases().get("test").size(), equalTo(1));
             assertFalse(indicesAdmin().prepareGetAliases("alias2").get().getAliases().isEmpty());
         } finally {
@@ -1213,7 +1213,7 @@ public void testAliasActionRemoveIndex() throws InterruptedException, ExecutionE
         IllegalArgumentException iae = expectThrows(
             IllegalArgumentException.class,
-            () -> indicesAdmin().prepareAliases().removeIndex("foo").execute().actionGet()
+            () -> indicesAdmin().prepareAliases().removeIndex("foo").get()
         );
         assertEquals(
             "The provided expression [foo] matches an alias, specify the corresponding concrete indices instead.",
@@ -1233,7 +1233,7 @@ public void testAliasActionRemoveIndex() throws InterruptedException, ExecutionE
     public void testRemoveIndexAndReplaceWithAlias() throws InterruptedException, ExecutionException {
         assertAcked(indicesAdmin().prepareCreate("test"));
-        indexRandom(true, client().prepareIndex("test_2").setId("test").setSource("test", "test"));
+        indexRandom(true, prepareIndex("test_2").setId("test").setSource("test", "test"));
         assertAliasesVersionIncreases(
             "test_2",
             () -> assertAcked(indicesAdmin().prepareAliases().addAlias("test_2", "test").removeIndex("test"))
@@ -1359,7 +1359,7 @@ public void testCreateIndexAndAliasWithSameNameFails() {
         final String indexName = "index-name";
         final IllegalArgumentException iae = expectThrows(
             IllegalArgumentException.class,
-            () -> indicesAdmin().prepareCreate(indexName).addAlias(new Alias(indexName)).execute().actionGet()
+            () -> indicesAdmin().prepareCreate(indexName).addAlias(new Alias(indexName)).get()
         );
         assertEquals("alias name [" + indexName + "] self-conflicts with index name", iae.getMessage());
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java
index 9b78bb9369fd7..6206f2357218c 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java
@@ -107,7 +107,7 @@ public void testIndexReadWriteMetadataBlocks() {
     private void canCreateIndex(String index) {
         try {
-            CreateIndexResponse r = indicesAdmin().prepareCreate(index).execute().actionGet();
+            CreateIndexResponse r = indicesAdmin().prepareCreate(index).get();
             assertThat(r, notNullValue());
         } catch (ClusterBlockException e) {
             fail();
@@ -116,7 +116,7 @@ private void canCreateIndex(String index) {
     private void canNotCreateIndex(String index) {
         try {
-            indicesAdmin().prepareCreate(index).execute().actionGet();
+            indicesAdmin().prepareCreate(index).get();
             fail();
         } catch (ClusterBlockException e) {
             // all is well
@@ -125,9 +125,9 @@ private void canNotCreateIndex(String index) {
     private void canIndexDocument(String index) {
         try {
-            IndexRequestBuilder builder = client().prepareIndex(index);
+            IndexRequestBuilder builder = prepareIndex(index);
             builder.setSource("foo", "bar");
-            DocWriteResponse r = builder.execute().actionGet();
+            DocWriteResponse r = builder.get();
             assertThat(r, notNullValue());
         } catch (ClusterBlockException e) {
             fail();
@@ -136,9 +136,9 @@ private void canIndexDocument(String index) {
     private void canNotIndexDocument(String index) {
         try {
-            IndexRequestBuilder builder = client().prepareIndex(index);
+            IndexRequestBuilder builder = prepareIndex(index);
             builder.setSource("foo", "bar");
-            builder.execute().actionGet();
+            builder.get();
             fail();
         } catch (ClusterBlockException e) {
             // all is well
@@ -250,9 +250,7 @@ public void testAddIndexBlock() throws Exception {
             randomBoolean(),
             false,
             randomBoolean(),
-            IntStream.range(0, nbDocs)
-                .mapToObj(i -> client().prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i))
-                .collect(toList())
+            IntStream.range(0, nbDocs).mapToObj(i -> prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)).collect(toList())
         );
         final APIBlock block = randomAddableBlock();
@@ -278,7 +276,7 @@ public void testSameBlockTwice() throws Exception {
             false,
             randomBoolean(),
             IntStream.range(0, randomIntBetween(1, 10))
-                .mapToObj(i -> client().prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i))
+                .mapToObj(i -> prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i))
                 .collect(toList())
         );
     }
@@ -323,9 +321,7 @@ public void testConcurrentAddBlock() throws InterruptedException {
             randomBoolean(),
             false,
             randomBoolean(),
-            IntStream.range(0, nbDocs)
-                .mapToObj(i -> client().prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i))
-                .collect(toList())
+            IntStream.range(0, nbDocs).mapToObj(i -> prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)).collect(toList())
         );
         ensureYellowAndNoInitializingShards(indexName);
@@ -404,7 +400,7 @@ public void testAddBlockWhileDeletingIndices() throws Exception {
             false,
             randomBoolean(),
             IntStream.range(0, 10)
-                .mapToObj(n -> client().prepareIndex(indexName).setId(String.valueOf(n)).setSource("num", n))
+                .mapToObj(n -> prepareIndex(indexName).setId(String.valueOf(n)).setSource("num", n))
                 .collect(toList())
         );
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java
index a6609e70f963d..214e3f73144d9 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java
@@ -56,17 +56,13 @@ public void testSimpleLocalHealth() {
     public void testHealth() {
         logger.info("--> running cluster health on an index that does not exists");
-        ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth("test1")
-            .setWaitForYellowStatus()
-            .setTimeout("1s")
-            .execute()
-            .actionGet();
+        ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth("test1").setWaitForYellowStatus().setTimeout("1s").get();
         assertThat(healthResponse.isTimedOut(), equalTo(true));
         assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.RED));
         assertThat(healthResponse.getIndices().isEmpty(), equalTo(true));
         logger.info("--> running cluster wide health");
-        healthResponse = clusterAdmin().prepareHealth().setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+        healthResponse = clusterAdmin().prepareHealth().setWaitForGreenStatus().setTimeout("10s").get();
         assertThat(healthResponse.isTimedOut(), equalTo(false));
         assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.GREEN));
         assertThat(healthResponse.getIndices().isEmpty(), equalTo(true));
@@ -75,13 +71,13 @@ public void testHealth() {
         createIndex("test1");
         logger.info("--> running cluster health on an index that does exists");
-        healthResponse = clusterAdmin().prepareHealth("test1").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+        healthResponse = clusterAdmin().prepareHealth("test1").setWaitForGreenStatus().setTimeout("10s").get();
         assertThat(healthResponse.isTimedOut(), equalTo(false));
         assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.GREEN));
         assertThat(healthResponse.getIndices().get("test1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
         logger.info("--> running cluster health on an index that does exists and an index that doesn't exists");
-        healthResponse = clusterAdmin().prepareHealth("test1", "test2").setWaitForYellowStatus().setTimeout("1s").execute().actionGet();
+        healthResponse = clusterAdmin().prepareHealth("test1", "test2").setWaitForYellowStatus().setTimeout("1s").get();
         assertThat(healthResponse.isTimedOut(), equalTo(true));
         assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.RED));
         assertThat(healthResponse.getIndices().get("test1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java
index c437f2b5a4c8c..746ddc56870ae 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java
@@ -67,7 +67,7 @@ public void testTwoNodesNoMasterBlock() throws Exception {
         String node1Name = internalCluster().startNode(settings);
         logger.info("--> should be blocked, no master...");
-        ClusterState state = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState();
+        ClusterState state = clusterAdmin().prepareState().setLocal(true).get().getState();
         assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true));
         assertThat(state.nodes().getSize(), equalTo(1)); // verify that we still see the local node in the cluster state
@@ -77,16 +77,15 @@ public void testTwoNodesNoMasterBlock() throws Exception {
         ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth()
             .setWaitForEvents(Priority.LANGUID)
             .setWaitForNodes("2")
-            .execute()
-            .actionGet();
+            .get();
         assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
-        state = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState();
+        state = clusterAdmin().prepareState().setLocal(true).get().getState();
         assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false));
-        state = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState();
+        state = clusterAdmin().prepareState().setLocal(true).get().getState();
         assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false));
-        state = clusterAdmin().prepareState().execute().actionGet().getState();
+        state = clusterAdmin().prepareState().get().getState();
         assertThat(state.nodes().getSize(), equalTo(2));
         assertThat(state.metadata().indices().containsKey("test"), equalTo(false));
@@ -94,11 +93,11 @@ public void testTwoNodesNoMasterBlock() throws Exception {
         NumShards numShards = getNumShards("test");
         logger.info("--> indexing some data");
         for (int i = 0; i < 100; i++) {
-            client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").execute().actionGet();
+            prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get();
         }
         // make sure that all shards recovered before trying to flush
         assertThat(
-            clusterAdmin().prepareHealth("test").setWaitForActiveShards(numShards.totalNumShards).execute().actionGet().getActiveShards(),
+            clusterAdmin().prepareHealth("test").setWaitForActiveShards(numShards.totalNumShards).get().getActiveShards(),
             equalTo(numShards.totalNumShards)
         );
         // flush for simpler debugging
@@ -107,7 +106,7 @@ public void testTwoNodesNoMasterBlock() throws Exception {
         logger.info("--> verify we get the data back");
         for (int i = 0; i < 10; i++) {
             assertThat(
-                prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value,
+                prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(100L)
             );
         }
@@ -121,11 +120,11 @@ public void testTwoNodesNoMasterBlock() throws Exception {
         internalCluster().stopNode(masterNode);
         assertBusy(() -> {
-            ClusterState clusterState = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState();
+            ClusterState clusterState = clusterAdmin().prepareState().setLocal(true).get().getState();
             assertTrue(clusterState.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID));
         });
-        state = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState();
+        state = clusterAdmin().prepareState().setLocal(true).get().getState();
         assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true));
         // verify that both nodes are still in the cluster state but there is no master
         assertThat(state.nodes().getSize(), equalTo(2));
@@ -138,16 +137,15 @@ public void testTwoNodesNoMasterBlock() throws Exception {
             .setWaitForEvents(Priority.LANGUID)
             .setWaitForYellowStatus()
             .setWaitForNodes("2")
-            .execute()
-            .actionGet();
+            .get();
         assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
-        state = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState();
+        state = clusterAdmin().prepareState().setLocal(true).get().getState();
         assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false));
-        state = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState();
+        state = clusterAdmin().prepareState().setLocal(true).get().getState();
         assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false));
-        state = clusterAdmin().prepareState().execute().actionGet().getState();
+        state = clusterAdmin().prepareState().get().getState();
         assertThat(state.nodes().getSize(), equalTo(2));
         assertThat(state.metadata().indices().containsKey("test"), equalTo(true));
@@ -172,7 +170,7 @@ public void testTwoNodesNoMasterBlock() throws Exception {
         internalCluster().stopNode(otherNode);
         assertBusy(() -> {
-            ClusterState state1 = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState();
+            ClusterState state1 = clusterAdmin().prepareState().setLocal(true).get().getState();
             assertThat(state1.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true));
         });
@@ -184,16 +182,15 @@ public void testTwoNodesNoMasterBlock() throws Exception {
             .setWaitForEvents(Priority.LANGUID)
             .setWaitForNodes("2")
            .setWaitForGreenStatus()
-            .execute()
-            .actionGet();
+            .get();
         assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
-        state = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState();
+        state = clusterAdmin().prepareState().setLocal(true).get().getState();
         assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false));
-        state = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState();
+        state = clusterAdmin().prepareState().setLocal(true).get().getState();
         assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false));
-        state = clusterAdmin().prepareState().execute().actionGet().getState();
+        state = clusterAdmin().prepareState().get().getState();
         assertThat(state.nodes().getSize(), equalTo(2));
         assertThat(state.metadata().indices().containsKey("test"), equalTo(true));
@@ -218,7 +215,7 @@ public void testThreeNodesNoMasterBlock() throws Exception {
         assertBusy(() -> {
             for (Client client : clients()) {
-                ClusterState state1 = client.admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+                ClusterState state1 = client.admin().cluster().prepareState().setLocal(true).get().getState();
                 assertThat(state1.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true));
             }
         });
@@ -230,27 +227,26 @@ public void testThreeNodesNoMasterBlock() throws Exception {
         ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth()
             .setWaitForEvents(Priority.LANGUID)
             .setWaitForNodes("3")
-            .execute()
-            .actionGet();
+            .get();
         assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
-        state = clusterAdmin().prepareState().execute().actionGet().getState();
+        state = clusterAdmin().prepareState().get().getState();
         assertThat(state.nodes().getSize(), equalTo(3));
         createIndex("test");
         NumShards numShards = getNumShards("test");
         logger.info("--> indexing some data");
         for (int i = 0; i < 100; i++) {
-            client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").execute().actionGet();
+            prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get();
         }
         ensureGreen();
         // make sure that all shards recovered before trying to flush
         assertThat(
-            clusterAdmin().prepareHealth("test").setWaitForActiveShards(numShards.totalNumShards).execute().actionGet().isTimedOut(),
+            clusterAdmin().prepareHealth("test").setWaitForActiveShards(numShards.totalNumShards).get().isTimedOut(),
             equalTo(false)
         );
         // flush for simpler debugging
-        indicesAdmin().prepareFlush().execute().actionGet();
+        indicesAdmin().prepareFlush().get();
         refresh();
         logger.info("--> verify we get the data back");
@@ -269,7 +265,7 @@ public void testThreeNodesNoMasterBlock() throws Exception {
         logger.info("--> verify that there is no master anymore on remaining node");
         // spin here to wait till the state is set
         assertBusy(() -> {
-            ClusterState st = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState();
+            ClusterState st = clusterAdmin().prepareState().setLocal(true).get().getState();
             assertThat(st.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true));
         });
@@ -279,7 +275,7 @@ public void testThreeNodesNoMasterBlock() throws Exception {
         internalCluster().validateClusterFormed();
         ensureGreen();
-        state = clusterAdmin().prepareState().execute().actionGet().getState();
+        state = clusterAdmin().prepareState().get().getState();
         assertThat(state.nodes().getSize(), equalTo(3));
         logger.info("--> verify we the data back");
@@ -346,8 +342,7 @@ public void onFailure(Exception e) {
                 .admin()
                 .cluster()
                 .prepareState()
-                .execute()
-                .actionGet()
+                .get()
                 .getState()
                 .nodes()
                 .getMasterNode();
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java
index d8acd45e8525f..23c13a3dbf579 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java
@@ -72,7 +72,7 @@ public void testNoMasterActions() throws Exception {
         final List<String> nodes = internalCluster().startNodes(3, settings);
         createIndex("test");
-        clusterAdmin().prepareHealth("test").setWaitForGreenStatus().execute().actionGet();
+        clusterAdmin().prepareHealth("test").setWaitForGreenStatus().get();
         final NetworkDisruption disruptionScheme = new NetworkDisruption(
             new IsolateAllNodes(new HashSet<>(nodes)),
@@ -84,7 +84,7 @@ public void testNoMasterActions() throws Exception {
         final Client clientToMasterlessNode = client();
         assertBusy(() -> {
-            ClusterState state = clientToMasterlessNode.admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+            ClusterState state = clientToMasterlessNode.admin().cluster().prepareState().setLocal(true).get().getState();
             assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID));
         });
@@ -224,8 +224,8 @@ public void testNoMasterActionsWriteMasterBlock() throws Exception {
         prepareCreate("test1").setSettings(indexSettings(1, 2)).get();
         prepareCreate("test2").setSettings(indexSettings(3, 0)).get();
         clusterAdmin().prepareHealth("_all").setWaitForGreenStatus().get();
-        client().prepareIndex("test1").setId("1").setSource("field", "value1").get();
-        client().prepareIndex("test2").setId("1").setSource("field", "value1").get();
+        prepareIndex("test1").setId("1").setSource("field", "value1").get();
+        prepareIndex("test2").setId("1").setSource("field", "value1").get();
         refresh();
         ensureSearchable("test1", "test2");
@@ -300,7 +300,7 @@ public void testNoMasterActionsMetadataWriteMasterBlock() throws Exception {
         prepareCreate("test1").setSettings(indexSettings(1, 1)).get();
         clusterAdmin().prepareHealth("_all").setWaitForGreenStatus().get();
-        client().prepareIndex("test1").setId("1").setSource("field", "value1").get();
+        prepareIndex("test1").setId("1").setSource("field", "value1").get();
         refresh();
         ensureGreen("test1");
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java
index 11b3027c23550..f53e559bfda5d 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java
@@ -205,8 +205,7 @@ private void ensureRed(String indexName) throws Exception {
             ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(indexName)
                 .setWaitForStatus(ClusterHealthStatus.RED)
                 .setWaitForEvents(Priority.LANGUID)
-                .execute()
-                .actionGet();
+                .get();
             assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.RED));
         });
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleDataNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleDataNodesIT.java
index 8618104fadc26..770ca21fd6898 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleDataNodesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleDataNodesIT.java
@@ -46,13 +46,7 @@ public void testIndexingBeforeAndAfterDataNodesStart() {
         internalCluster().startNode(nonDataNode());
         assertThat(
-            clusterAdmin().prepareHealth()
-                .setWaitForEvents(Priority.LANGUID)
-                .setWaitForNodes("2")
-                .setLocal(true)
-                .execute()
-                .actionGet()
-                .isTimedOut(),
+            clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").setLocal(true).get().isTimedOut(),
             equalTo(false)
         );
@@ -67,13 +61,7 @@ public void testIndexingBeforeAndAfterDataNodesStart() {
         // now, start a node data, and see that it gets with shards
         internalCluster().startNode(dataNode());
         assertThat(
-            clusterAdmin().prepareHealth()
-                .setWaitForEvents(Priority.LANGUID)
-                .setWaitForNodes("3")
-                .setLocal(true)
-                .execute()
-                .actionGet()
-                .isTimedOut(),
+            clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").setLocal(true).get().isTimedOut(),
             equalTo(false)
         );
@@ -87,10 +75,7 @@ public void testShardsAllocatedAfterDataNodesStart() {
             new CreateIndexRequest("test").settings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0))
                 .waitForActiveShards(ActiveShardCount.NONE)
         ).actionGet();
-        final ClusterHealthResponse healthResponse1 = clusterAdmin().prepareHealth()
-            .setWaitForEvents(Priority.LANGUID)
-            .execute()
-            .actionGet();
+        final ClusterHealthResponse healthResponse1 = clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get();
         assertThat(healthResponse1.isTimedOut(), equalTo(false));
         assertThat(healthResponse1.getStatus(), equalTo(ClusterHealthStatus.RED));
         assertThat(healthResponse1.getActiveShards(), equalTo(0));
@@ -102,8 +87,7 @@ public void testShardsAllocatedAfterDataNodesStart() {
                 .setWaitForEvents(Priority.LANGUID)
                 .setWaitForNodes("2")
                 .setWaitForGreenStatus()
-                .execute()
-                .actionGet()
+                .get()
                 .isTimedOut(),
             equalTo(false)
         );
@@ -115,10 +99,7 @@ public void testAutoExpandReplicasAdjustedWhenDataNodeJoins() {
             new CreateIndexRequest("test").settings(Settings.builder().put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-all"))
                 .waitForActiveShards(ActiveShardCount.NONE)
         ).actionGet();
-        final ClusterHealthResponse healthResponse1 = clusterAdmin().prepareHealth()
-            .setWaitForEvents(Priority.LANGUID)
-            .execute()
-            .actionGet();
+        final ClusterHealthResponse healthResponse1 = clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get();
         assertThat(healthResponse1.isTimedOut(), equalTo(false));
         assertThat(healthResponse1.getStatus(), equalTo(ClusterHealthStatus.RED));
         assertThat(healthResponse1.getActiveShards(), equalTo(0));
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java
index 34682637b0632..93d714c79c391 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java
@@ -35,10 +35,7 @@ public void testSimpleOnlyMasterNodeElection() throws IOException {
         logger.info("--> start data node / non master node");
         internalCluster().startNode(Settings.builder().put(dataOnlyNode()).put("discovery.initial_state_timeout", "1s"));
         try {
-            assertThat(
-                clusterAdmin().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().getMasterNodeId(),
-                nullValue()
-            );
+            assertThat(clusterAdmin().prepareState().setMasterNodeTimeout("100ms").get().getState().nodes().getMasterNodeId(), nullValue());
             fail("should not be able to find master");
         } catch (MasterNotDiscoveredException e) {
             // all is well, no master elected
@@ -46,29 +43,11 @@ public void testSimpleOnlyMasterNodeElection() throws IOException {
         logger.info("--> start master node");
         final String masterNodeName = internalCluster().startMasterOnlyNode();
         assertThat(
-            internalCluster().nonMasterClient()
-                .admin()
-                .cluster()
-                .prepareState()
-                .execute()
-                .actionGet()
-                .getState()
-                .nodes()
-                .getMasterNode()
-                .getName(),
+            internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(),
             equalTo(masterNodeName)
         );
         assertThat(
-            internalCluster().masterClient()
-                .admin()
-                .cluster()
-                .prepareState()
-                .execute()
-                .actionGet()
-                .getState()
-                .nodes()
-                .getMasterNode()
-                .getName(),
+            internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(),
             equalTo(masterNodeName)
         );
@@ -77,10 +56,7 @@ public void testSimpleOnlyMasterNodeElection() throws IOException {
         internalCluster().stopCurrentMasterNode();
         try {
-            assertThat(
-                clusterAdmin().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().getMasterNodeId(),
-                nullValue()
-            );
+            assertThat(clusterAdmin().prepareState().setMasterNodeTimeout("100ms").get().getState().nodes().getMasterNodeId(), nullValue());
             fail("should not be able to find master");
         } catch (MasterNotDiscoveredException e) {
             // all is well, no master elected
@@ -91,29 +67,11 @@ public void testSimpleOnlyMasterNodeElection() throws IOException {
             Settings.builder().put(nonDataNode(masterNode())).put(masterDataPathSettings)
         );
         assertThat(
-            internalCluster().nonMasterClient()
-                .admin()
-                .cluster()
-                .prepareState()
-                .execute()
-                .actionGet()
-                .getState()
-                .nodes()
-                .getMasterNode()
-                .getName(),
+            internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(),
            equalTo(nextMasterEligibleNodeName)
         );
         assertThat(
-            internalCluster().masterClient()
-                .admin()
-                .cluster()
-                .prepareState()
-                .execute()
-                .actionGet()
-                .getState()
-                .nodes()
-                .getMasterNode()
-                .getName(),
+            internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(),
             equalTo(nextMasterEligibleNodeName)
         );
     }
@@ -123,10 +81,7 @@ public void testElectOnlyBetweenMasterNodes() throws Exception {
         logger.info("--> start data node / non master node");
         internalCluster().startNode(Settings.builder().put(dataOnlyNode()).put("discovery.initial_state_timeout", "1s"));
         try {
-            assertThat(
-                clusterAdmin().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().getMasterNodeId(),
-                nullValue()
-            );
+            assertThat(clusterAdmin().prepareState().setMasterNodeTimeout("100ms").get().getState().nodes().getMasterNodeId(), nullValue());
             fail("should not be able to find master");
         } catch (MasterNotDiscoveredException e) {
             // all is well, no master elected
@@ -134,71 +89,26 @@ public void testElectOnlyBetweenMasterNodes() throws Exception {
         logger.info("--> start master node (1)");
         final String masterNodeName = internalCluster().startMasterOnlyNode();
         assertThat(
-            internalCluster().nonMasterClient()
-                .admin()
-                .cluster()
-                .prepareState()
-                .execute()
-                .actionGet()
-                .getState()
-                .nodes()
-                .getMasterNode()
-                .getName(),
+            internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(),
             equalTo(masterNodeName)
         );
         assertThat(
-            internalCluster().masterClient()
-                .admin()
-                .cluster()
-                .prepareState()
-                .execute()
-                .actionGet()
-                .getState()
-                .nodes()
-                .getMasterNode()
-                .getName(),
+            internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(),
             equalTo(masterNodeName)
         );
         logger.info("--> start master node (2)");
         final String nextMasterEligableNodeName = internalCluster().startMasterOnlyNode();
         assertThat(
-            internalCluster().nonMasterClient()
-                .admin()
-                .cluster()
-                .prepareState()
-                .execute()
-                .actionGet()
-                .getState()
-                .nodes()
-                .getMasterNode()
-                .getName(),
+            internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(),
             equalTo(masterNodeName)
         );
         assertThat(
-            internalCluster().nonMasterClient()
-                .admin()
-                .cluster()
-                .prepareState()
-                .execute()
-                .actionGet()
-                .getState()
-                .nodes()
-                .getMasterNode()
-                .getName(),
+            internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(),
             equalTo(masterNodeName)
         );
         assertThat(
-            internalCluster().masterClient()
-                .admin()
-                .cluster()
-                .prepareState()
-                .execute()
-                .actionGet()
-                .getState()
-                .nodes()
-                .getMasterNode()
-                .getName(),
+            internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(),
             equalTo(masterNodeName)
         );
@@ -207,57 +117,21 @@ public void testElectOnlyBetweenMasterNodes() throws Exception {
         // removing the master from the voting configuration immediately triggers the master to step down
         assertBusy(() -> {
             assertThat(
-                internalCluster().nonMasterClient()
-                    .admin()
-                    .cluster()
-                    .prepareState()
-                    .execute()
-                    .actionGet()
-                    .getState()
-                    .nodes()
-                    .getMasterNode()
-                    .getName(),
+                internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(),
                 equalTo(nextMasterEligableNodeName)
             );
             assertThat(
-                internalCluster().masterClient()
-                    .admin()
-                    .cluster()
-                    .prepareState()
-                    .execute()
-                    .actionGet()
-                    .getState()
-                    .nodes()
-                    .getMasterNode()
-                    .getName(),
+                internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(),
                 equalTo(nextMasterEligableNodeName)
            );
         });
         internalCluster().stopNode(masterNodeName);
         assertThat(
-            internalCluster().nonMasterClient()
-                .admin()
-                .cluster()
-                .prepareState()
-                .execute()
-                .actionGet()
-                .getState()
-                .nodes()
-                .getMasterNode()
-                .getName(),
+            internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(),
             equalTo(nextMasterEligableNodeName)
         );
         assertThat(
-            internalCluster().masterClient()
-                .admin()
-                .cluster()
-                .prepareState()
-                .execute()
-                .actionGet()
-                .getState()
-                .nodes()
-                .getMasterNode()
-                .getName(),
+            internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(),
             equalTo(nextMasterEligableNodeName)
         );
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java
index 8775d8949dc55..64ac8318dce23 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java
@@ -31,25 +31,17 @@ public void testUpdateSettingsValidation() throws Exception {
             .setWaitForEvents(Priority.LANGUID)
             .setWaitForNodes("3")
             .setWaitForGreenStatus()
-            .execute()
-            .actionGet();
+            .get();
         assertThat(healthResponse.isTimedOut(), equalTo(false));
         assertThat(healthResponse.getIndices().get("test").getActiveShards(), equalTo(test.totalNumShards));
         setReplicaCount(0, "test");
-        healthResponse = clusterAdmin().prepareHealth("test")
-            .setWaitForEvents(Priority.LANGUID)
-            .setWaitForGreenStatus()
-            .execute()
-            .actionGet();
+        healthResponse = clusterAdmin().prepareHealth("test").setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get();
         assertThat(healthResponse.isTimedOut(), equalTo(false));
         assertThat(healthResponse.getIndices().get("test").getActiveShards(), equalTo(test.numPrimaries));
         try {
-            indicesAdmin().prepareUpdateSettings("test")
-                .setSettings(Settings.builder().put("index.refresh_interval", ""))
-                .execute()
-                .actionGet();
+            indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.refresh_interval", "")).get();
             fail();
         } catch (IllegalArgumentException ex) {
             logger.info("Error message: [{}]", ex.getMessage());
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java
index f55ac7172266d..c4f06cc90fdf3 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java
@@ -81,7 +81,7 @@ public void testSimpleAwareness() throws Exception {
         assertThat("Cluster health request timed out", clusterHealth.isTimedOut(), equalTo(false));
         logger.info("--> checking current state");
-        ClusterState clusterState = clusterAdmin().prepareState().execute().actionGet().getState();
+        ClusterState clusterState = clusterAdmin().prepareState().get().getState();
         // check that closed indices are effectively closed
         final List<String> notClosedIndices = indicesToClose.stream()
@@ -114,7 +114,7 @@ public void testAwarenessZones() {
         String A_1 = nodes.get(3);
         logger.info("--> waiting for nodes to form a cluster");
-        ClusterHealthResponse health = clusterAdmin().prepareHealth().setWaitForNodes("4").execute().actionGet();
+        ClusterHealthResponse health = clusterAdmin().prepareHealth().setWaitForNodes("4").get();
         assertThat(health.isTimedOut(), equalTo(false));
         createIndex("test", 5, 1);
@@ -129,11 +129,10 @@ public void testAwarenessZones() {
             .setWaitForEvents(Priority.LANGUID)
             .setWaitForGreenStatus()
             .setWaitForNoRelocatingShards(true)
-            .execute()
-            .actionGet();
+            .get();
         assertThat(health.isTimedOut(), equalTo(false));
-        ClusterState clusterState = clusterAdmin().prepareState().execute().actionGet().getState();
+        ClusterState clusterState = clusterAdmin().prepareState().get().getState();
         Map<String, Integer> counts = computeShardCounts(clusterState);
         assertThat(counts.get(A_1), anyOf(equalTo(2), equalTo(3)));
@@ -168,10 +167,9 @@ public void testAwarenessZonesIncrementalNodes() {
             .setWaitForGreenStatus()
             .setWaitForNodes("2")
             .setWaitForNoRelocatingShards(true)
-            .execute()
-            .actionGet();
+            .get();
         assertThat(health.isTimedOut(), equalTo(false));
-        ClusterState clusterState = clusterAdmin().prepareState().execute().actionGet().getState();
+        ClusterState clusterState = clusterAdmin().prepareState().get().getState();
         Map<String, Integer> counts = computeShardCounts(clusterState);
         assertThat(counts.get(A_0), equalTo(5));
@@ -184,8 +182,7 @@ public void testAwarenessZonesIncrementalNodes() {
             .setWaitForEvents(Priority.LANGUID)
             .setWaitForGreenStatus()
             .setWaitForNodes("3")
-            .execute()
-            .actionGet();
+            .get();
         assertThat(health.isTimedOut(), equalTo(false));
         clusterAdmin().prepareReroute().get();
         health = clusterAdmin().prepareHealth()
@@ -195,11 +192,10 @@ public void testAwarenessZonesIncrementalNodes() {
             .setWaitForNodes("3")
             .setWaitForActiveShards(10)
             .setWaitForNoRelocatingShards(true)
-            .execute()
-            .actionGet();
+            .get();
         assertThat(health.isTimedOut(), equalTo(false));
-        clusterState = clusterAdmin().prepareState().execute().actionGet().getState();
+        clusterState = clusterAdmin().prepareState().get().getState();
         counts = computeShardCounts(clusterState);
         assertThat(counts.get(A_0), equalTo(5));
@@ -212,8 +208,7 @@ public void testAwarenessZonesIncrementalNodes() {
             .setWaitForEvents(Priority.LANGUID)
             .setWaitForGreenStatus()
             .setWaitForNodes("4")
-            .execute()
-            .actionGet();
+            .get();
         assertThat(health.isTimedOut(), equalTo(false));
         clusterAdmin().prepareReroute().get();
         health = clusterAdmin().prepareHealth()
@@ -223,11 +218,10 @@ public void testAwarenessZonesIncrementalNodes() {
             .setWaitForNodes("4")
             .setWaitForActiveShards(10)
             .setWaitForNoRelocatingShards(true)
-            .execute()
-            .actionGet();
+            .get();
         assertThat(health.isTimedOut(), equalTo(false));
-        clusterState = clusterAdmin().prepareState().execute().actionGet().getState();
+        clusterState = clusterAdmin().prepareState().get().getState();
         counts = computeShardCounts(clusterState);
         assertThat(counts.get(A_0), equalTo(5));
@@ -242,11 +236,10 @@ public void testAwarenessZonesIncrementalNodes() {
             .setWaitForNodes("4")
             .setWaitForActiveShards(10)
             .setWaitForNoRelocatingShards(true)
-            .execute()
-            .actionGet();
+            .get();
         assertThat(health.isTimedOut(), equalTo(false));
-        clusterState = clusterAdmin().prepareState().execute().actionGet().getState();
+        clusterState = clusterAdmin().prepareState().get().getState();
         counts = computeShardCounts(clusterState);
         assertThat(counts.get(A_0), equalTo(3));
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java
index 6175395803e88..3b9d3e133b63a 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java
@@ -89,14 +89,13 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception {
         indicesAdmin().prepareCreate("test")
             .setWaitForActiveShards(ActiveShardCount.NONE)
             .setSettings(Settings.builder().put("index.number_of_shards", 1))
-            .execute()
-            .actionGet();
+            .get();
         if (randomBoolean()) {
             indicesAdmin().prepareClose("test").setWaitForActiveShards(ActiveShardCount.NONE).get();
         }
-        ClusterState state = clusterAdmin().prepareState().execute().actionGet().getState();
+        ClusterState state = clusterAdmin().prepareState().get().getState();
         assertThat(state.getRoutingNodes().unassigned().size(), equalTo(2));
         logger.info("--> explicitly allocate shard 1, *under dry_run*");
@@ -104,8 +103,7 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception {
             .setExplain(randomBoolean())
             .add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
             .setDryRun(true)
-            .execute()
-            .actionGet()
+            .get()
             .getState();
         assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
         assertThat(
@@ -114,15 +112,14 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception {
         );
         logger.info("--> get the state, verify nothing changed because of the dry run");
-        state = clusterAdmin().prepareState().execute().actionGet().getState();
+        state = clusterAdmin().prepareState().get().getState();
         assertThat(state.getRoutingNodes().unassigned().size(), equalTo(2));
         logger.info("--> explicitly allocate shard 1, actually allocating, no dry run");
         state = clusterAdmin().prepareReroute()
             .setExplain(randomBoolean())
             .add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
-            .execute()
-            .actionGet()
+            .get()
             .getState();
         assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
         assertThat(
@@ -134,12 +131,11 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception {
             .setIndices("test")
             .setWaitForEvents(Priority.LANGUID)
             .setWaitForYellowStatus()
-            .execute()
-            .actionGet();
+            .get();
         assertThat(healthResponse.isTimedOut(), equalTo(false));
         logger.info("--> get the state, verify shard 1 primary allocated");
-        state = clusterAdmin().prepareState().execute().actionGet().getState();
+        state = clusterAdmin().prepareState().get().getState();
         assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
         assertThat(
             state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
@@ -150,8 +146,7 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception {
         state = clusterAdmin().prepareReroute()
             .setExplain(randomBoolean())
             .add(new MoveAllocationCommand("test", 0, node_1, node_2))
-            .execute()
-            .actionGet()
+            .get()
             .getState();
         assertThat(
@@ -168,12 +163,11 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception {
             .setWaitForEvents(Priority.LANGUID)
             .setWaitForYellowStatus()
             .setWaitForNoRelocatingShards(true)
-            .execute()
-            .actionGet();
+            .get();
         assertThat(healthResponse.isTimedOut(), equalTo(false));
         logger.info("--> get the state, verify shard 1 primary moved from node1 to node2");
-        state = clusterAdmin().prepareState().execute().actionGet().getState();
+        state = clusterAdmin().prepareState().get().getState();
         assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
         assertThat(
             state.getRoutingNodes().node(state.nodes().resolveNode(node_2).getId()).iterator().next().state(),
@@ -208,7 +202,7 @@ public void testDelayWithALargeAmountOfShards() throws Exception {
         internalCluster().startNode(commonSettings);
         assertThat(cluster().size(), equalTo(4));
-        ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("4").execute().actionGet();
+        ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("4").get();
         assertThat(healthResponse.isTimedOut(), equalTo(false));
         logger.info("--> create indices");
@@ -238,30 +232,28 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc
         String node_1 = internalCluster().startNode(commonSettings);
         internalCluster().startNode(commonSettings);
         assertThat(cluster().size(), equalTo(2));
-        ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("2").execute().actionGet();
+        ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("2").get();
         assertThat(healthResponse.isTimedOut(), equalTo(false));
         logger.info("--> create an index with 1 shard, 1 replica, nothing should allocate");
         indicesAdmin().prepareCreate("test")
             .setWaitForActiveShards(ActiveShardCount.NONE)
             .setSettings(Settings.builder().put("index.number_of_shards", 1))
-            .execute()
-            .actionGet();
+            .get();
         final boolean closed = randomBoolean();
         if (closed) {
             indicesAdmin().prepareClose("test").setWaitForActiveShards(ActiveShardCount.NONE).get();
         }
-        ClusterState state = clusterAdmin().prepareState().execute().actionGet().getState();
+        ClusterState state = clusterAdmin().prepareState().get().getState();
         assertThat(state.getRoutingNodes().unassigned().size(), equalTo(2));
         logger.info("--> explicitly allocate shard 1, actually allocating, no dry run");
         state = clusterAdmin().prepareReroute()
             .setExplain(randomBoolean())
             .add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
-            .execute()
-            .actionGet()
+            .get()
             .getState();
         assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
         assertThat(
@@ -273,12 +265,11 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc
             .setIndices("test")
             .setWaitForEvents(Priority.LANGUID)
             .setWaitForYellowStatus()
-            .execute()
-            .actionGet();
+            .get();
         assertThat(healthResponse.isTimedOut(), equalTo(false));
         logger.info("--> get the state, verify shard 1 primary allocated");
-        state = clusterAdmin().prepareState().execute().actionGet().getState();
+        state = clusterAdmin().prepareState().get().getState();
         assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
         assertThat(
             state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
@@ -286,7 +277,7 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc
         );
         if (closed == false) {
-            client().prepareIndex("test").setId("1").setSource("field", "value").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
+            prepareIndex("test").setId("1").setSource("field", "value").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
         }
         final Index index = resolveIndex("test");
@@ -306,15 +297,14 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc
         // TODO can we get around this? the cluster is RED, so what do we wait for?
         clusterAdmin().prepareReroute().get();
         assertThat(
-            clusterAdmin().prepareHealth().setIndices("test").setWaitForNodes("2").execute().actionGet().getStatus(),
+            clusterAdmin().prepareHealth().setIndices("test").setWaitForNodes("2").get().getStatus(),
             equalTo(ClusterHealthStatus.RED)
         );
         logger.info("--> explicitly allocate primary");
         state = clusterAdmin().prepareReroute()
             .setExplain(randomBoolean())
             .add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
-            .execute()
-            .actionGet()
+            .get()
             .getState();
         assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
         assertThat(
@@ -325,7 +315,7 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc
         logger.info("--> get the state, verify shard 1 primary allocated");
         final String nodeToCheck = node_1;
         assertBusy(() -> {
-            ClusterState clusterState = clusterAdmin().prepareState().execute().actionGet().getState();
+            ClusterState clusterState = clusterAdmin().prepareState().get().getState();
             String nodeId = clusterState.nodes().resolveNode(nodeToCheck).getId();
             assertThat(clusterState.getRoutingNodes().node(nodeId).iterator().next().state(), equalTo(ShardRoutingState.STARTED));
         });
@@ -338,7 +328,7 @@ public void testRerouteExplain() {
         String node_1 = internalCluster().startNode(commonSettings);
         assertThat(cluster().size(), equalTo(1));
-        ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("1").execute().actionGet();
+        ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("1").get();
         assertThat(healthResponse.isTimedOut(), equalTo(false));
         logger.info("--> create an index with 1 shard");
@@ -355,12 +345,12 @@ public void testRerouteExplain() {
         logger.info("--> starting a second node");
         String node_2 = internalCluster().startNode(commonSettings);
         assertThat(cluster().size(), equalTo(2));
-        healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("2").execute().actionGet();
+        healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("2").get();
         assertThat(healthResponse.isTimedOut(), equalTo(false));
         logger.info("--> try to move the shard from node1 to node2");
         MoveAllocationCommand cmd = new MoveAllocationCommand("test", 0, node_1, node_2);
-        ClusterRerouteResponse resp = clusterAdmin().prepareReroute().add(cmd).setExplain(true).execute().actionGet();
+        ClusterRerouteResponse resp = clusterAdmin().prepareReroute().add(cmd).setExplain(true).get();
         RoutingExplanations e = resp.getExplanations();
         assertThat(e.explanations().size(), equalTo(1));
         RerouteExplanation explanation = e.explanations().get(0);
@@ -379,12 +369,12 @@ public void testMessageLogging() {
         final String nodeName1 = internalCluster().startNode(settings);
         assertThat(cluster().size(), equalTo(1));
-        ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("1").execute().actionGet();
+        ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("1").get();
         assertThat(healthResponse.isTimedOut(), equalTo(false));
         final String nodeName2 = internalCluster().startNode(settings);
         assertThat(cluster().size(), equalTo(2));
-        healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("2").execute().actionGet();
+        healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("2").get();
         assertThat(healthResponse.isTimedOut(), equalTo(false));
         final String indexName = "test_index";
@@ -395,8 +385,7 @@ public void testMessageLogging() {
                     .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 2)
                     .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
             )
-            .execute()
-            .actionGet();
+            .get();
         MockLogAppender dryRunMockLog = new MockLogAppender();
         dryRunMockLog.addExpectation(
@@ -414,8 +403,7 @@ public void testMessageLogging() {
             .setExplain(randomBoolean())
             .setDryRun(true)
             .add(dryRunAllocation)
-            .execute()
-            .actionGet();
+            .get();
         // during a dry run, messages exist but are not logged or exposed
         assertThat(dryRunResponse.getExplanations().getYesDecisionMessages(), hasSize(1));
@@ -449,8 +437,7 @@ public void testMessageLogging() {
             .setExplain(true) // so we get a NO decision back rather than an exception
             .add(yesDecisionAllocation)
             .add(noDecisionAllocation)
-            .execute()
-            .actionGet();
+            .get();
         assertThat(response.getExplanations().getYesDecisionMessages(), hasSize(1));
         assertThat(response.getExplanations().getYesDecisionMessages().get(0), containsString("allocated an empty primary"));
@@ -472,7 +459,7 @@ public void testClusterRerouteWithBlocks() {
         ensureGreen("test-blocks");
         logger.info("--> check that the index has 1 shard");
-        ClusterState state = clusterAdmin().prepareState().execute().actionGet().getState();
+        ClusterState state = clusterAdmin().prepareState().get().getState();
         List<ShardRouting> shards = state.routingTable().allShards("test-blocks");
         assertThat(shards, hasSize(1));
@@ -506,8 +493,7 @@ public void testClusterRerouteWithBlocks() {
                 .setIndices("test-blocks")
                 .setWaitForYellowStatus()
                 .setWaitForNoRelocatingShards(true)
-                .execute()
-                .actionGet();
+                .get();
             assertThat(healthResponse.isTimedOut(), equalTo(false));
         } finally {
             disableIndexBlock("test-blocks", blockSetting);
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java
index f2fb19825371f..04fba1f46074f 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java
@@ -48,13 +48,10 @@ public void testDecommissionNodeNoReplicas() {
         ensureGreen("test");
         logger.info("--> index some data");
         for (int i = 0; i < 100; i++) {
-            client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
+            prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get();
         }
-        indicesAdmin().prepareRefresh().execute().actionGet();
-        assertThat(
-            prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value,
-            equalTo(100L)
-        );
+        indicesAdmin().prepareRefresh().get();
+        assertThat(prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(100L));
         final boolean closed = randomBoolean();
         if (closed) {
@@ -67,7 +64,7 @@ public void testDecommissionNodeNoReplicas() {
         ensureGreen("test");
         logger.info("--> verify all are allocated on node1 now");
-        ClusterState clusterState = clusterAdmin().prepareState().execute().actionGet().getState();
+        ClusterState clusterState = clusterAdmin().prepareState().get().getState();
         for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
             for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) {
                 final IndexShardRoutingTable indexShardRoutingTable = indexRoutingTable.shard(shardId);
@@ -81,11 +78,8 @@ public void testDecommissionNodeNoReplicas() {
             assertAcked(indicesAdmin().prepareOpen("test"));
         }
-        indicesAdmin().prepareRefresh().execute().actionGet();
-        assertThat(
-            prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value,
-            equalTo(100L)
-        );
+        indicesAdmin().prepareRefresh().get();
+        assertThat(prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(100L));
     }
     public void testAutoExpandReplicasToFilteredNodes() {
@@ -97,7 +91,7 @@ public void testAutoExpandReplicasToFilteredNodes() {
         logger.info("--> creating an index with auto-expand replicas");
         createIndex("test", Settings.builder().put(AutoExpandReplicas.SETTING.getKey(), "0-all").build());
-        ClusterState clusterState = clusterAdmin().prepareState().execute().actionGet().getState();
+        ClusterState clusterState = clusterAdmin().prepareState().get().getState();
         assertThat(clusterState.metadata().index("test").getNumberOfReplicas(), equalTo(1));
         ensureGreen("test");
@@ -110,7 +104,7 @@ public void testAutoExpandReplicasToFilteredNodes() {
         ensureGreen("test");
         logger.info("--> verify all are allocated on node1 now");
-        clusterState = clusterAdmin().prepareState().execute().actionGet().getState();
+        clusterState = clusterAdmin().prepareState().get().getState();
         assertThat(clusterState.metadata().index("test").getNumberOfReplicas(), equalTo(0));
         for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
             for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) {
@@ -135,13 +129,10 @@ public void testDisablingAllocationFiltering() {
         logger.info("--> index some data");
         for (int i = 0; i < 100; i++) {
-            client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
+            prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get();
         }
-        indicesAdmin().prepareRefresh().execute().actionGet();
-        assertThat(
-            prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value,
-            equalTo(100L)
-        );
+        indicesAdmin().prepareRefresh().get();
+        assertThat(prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(100L));
         final boolean closed = randomBoolean();
         if (closed) {
@@ -149,7 +140,7 @@ public void testDisablingAllocationFiltering() {
             ensureGreen("test");
         }
-        ClusterState clusterState = clusterAdmin().prepareState().execute().actionGet().getState();
+        ClusterState clusterState = clusterAdmin().prepareState().get().getState();
         IndexRoutingTable indexRoutingTable = clusterState.routingTable().index("test");
         int numShardsOnNode1 = 0;
         for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) {
@@ -172,7 +163,7 @@ public void testDisablingAllocationFiltering() {
         ensureGreen("test");
         logger.info("--> verify all shards are allocated on node_1 now");
-        clusterState = clusterAdmin().prepareState().execute().actionGet().getState();
+        clusterState = clusterAdmin().prepareState().get().getState();
         indexRoutingTable = clusterState.routingTable().index("test");
         for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) {
             final IndexShardRoutingTable indexShardRoutingTable = indexRoutingTable.shard(shardId);
@@ -187,7 +178,7 @@ public void testDisablingAllocationFiltering() {
         ensureGreen("test");
         logger.info("--> verify that there are shards allocated on both nodes now");
-        clusterState = clusterAdmin().prepareState().execute().actionGet().getState();
+        clusterState = clusterAdmin().prepareState().get().getState();
         assertThat(clusterState.routingTable().index("test").numberOfNodesShardsAreAllocatedOn(), equalTo(2));
     }
@@ -202,8 +193,7 @@ public void testInvalidIPFilterClusterSettings() {
             IllegalArgumentException.class,
             () -> clusterAdmin().prepareUpdateSettings()
                 .setPersistentSettings(Settings.builder().put(filterSetting.getKey() + ipKey, "192.168.1.1."))
-                .execute()
-                .actionGet()
+                .get()
         );
         assertEquals("invalid IP address [192.168.1.1.] for [" + filterSetting.getKey() + ipKey + "]", e.getMessage());
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java
index 55814dba33562..f66430871c9d8 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java
@@ -33,7 +33,7 @@ public void testSaneAllocation() {
         }
         ensureGreen("test");
-        ClusterState state = clusterAdmin().prepareState().execute().actionGet().getState();
+        ClusterState state = clusterAdmin().prepareState().get().getState();
         assertThat(state.getRoutingNodes().unassigned().size(), equalTo(0));
         for (RoutingNode node : state.getRoutingNodes()) {
             if (node.isEmpty() == false) {
@@ -42,7 +42,7 @@ public void testSaneAllocation() {
         }
         setReplicaCount(0, "test");
         ensureGreen("test");
-        state = clusterAdmin().prepareState().execute().actionGet().getState();
+        state = clusterAdmin().prepareState().get().getState();
         assertThat(state.getRoutingNodes().unassigned().size(), equalTo(0));
         for (RoutingNode node : state.getRoutingNodes()) {
@@ -60,7 +60,7 @@ public void testSaneAllocation() {
         setReplicaCount(1, "test");
         ensureGreen("test");
-        state = clusterAdmin().prepareState().execute().actionGet().getState();
+        state = clusterAdmin().prepareState().get().getState();
         assertThat(state.getRoutingNodes().unassigned().size(), equalTo(0));
         for (RoutingNode node : state.getRoutingNodes()) {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java
index 80bba57270aa5..3869952bf3b7e 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java
@@ -284,7 +284,7 @@ public void testDelayedMappingPropagationOnPrimary() throws Exception {
         // this request does not change the cluster state, because mapping is already created,
         // we don't await and cancel committed publication
-        ActionFuture docIndexResponse = client().prepareIndex("index").setId("1").setSource("field", 42).execute();
+        ActionFuture docIndexResponse = prepareIndex("index").setId("1").setSource("field", 42).execute();
         // Wait a bit to make sure that the reason why we did not get a response
         // is that cluster state processing is blocked and not just that it takes
@@ -373,7 +373,7 @@ public void testDelayedMappingPropagationOnReplica() throws Exception {
             assertEquals(minVersion, maxVersion);
         });
-        final ActionFuture docIndexResponse = client().prepareIndex("index").setId("1").setSource("field", 42).execute();
+        final ActionFuture docIndexResponse = prepareIndex("index").setId("1").setSource("field", 42).execute();
         assertBusy(() -> assertTrue(client().prepareGet("index", "1").get().isExists()));
@@ -383,7 +383,7 @@ public void testDelayedMappingPropagationOnReplica() throws Exception {
         // this request does not change the cluster state, because the mapping is dynamic,
         // we need to await and cancel committed publication
         ActionFuture dynamicMappingsFut = executeAndCancelCommittedPublication(
-            client().prepareIndex("index").setId("2").setSource("field2", 42)
+            prepareIndex("index").setId("2").setSource("field2", 42)
         );
         // ...and wait for second mapping to be available on master
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java
index 265cc9ee364db..00e171a7a132a 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java
@@ -137,7 +137,7 @@ public void testBootstrapNotBootstrappedCluster() throws Exception {
                 .build()
         );
         assertBusy(() -> {
-            ClusterState state = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState();
+            ClusterState state = clusterAdmin().prepareState().setLocal(true).get().getState();
             assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID));
         });
@@ -242,14 +242,7 @@ public void test3MasterNodes2Failed() throws Exception {
         logger.info("--> ensure NO_MASTER_BLOCK on data-only node");
         assertBusy(() -> {
-            ClusterState state = internalCluster().client(dataNode)
-                .admin()
-                .cluster()
-                .prepareState()
-                .setLocal(true)
-                .execute()
-                .actionGet()
-                .getState();
+            ClusterState state = internalCluster().client(dataNode).admin().cluster().prepareState().setLocal(true).get().getState();
             assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID));
         });
@@ -295,14 +288,7 @@ public void test3MasterNodes2Failed() throws Exception {
         logger.info("--> ensure there is no NO_MASTER_BLOCK and unsafe-bootstrap is reflected in cluster state");
         assertBusy(() -> {
-            ClusterState state = internalCluster().client(dataNode2)
-                .admin()
-                .cluster()
-                .prepareState()
-                .setLocal(true)
-                .execute()
-                .actionGet()
-                .getState();
+            ClusterState state = internalCluster().client(dataNode2).admin().cluster().prepareState().setLocal(true).get().getState();
             assertFalse(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID));
             assertTrue(state.metadata().persistentSettings().getAsBoolean(UnsafeBootstrapMasterCommand.UNSAFE_BOOTSTRAP.getKey(), false));
         });
@@ -347,7 +333,7 @@ public void testNoInitialBootstrapAfterDetach() throws Exception {
                 .build()
         );
-        ClusterState state = internalCluster().client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+        ClusterState state = internalCluster().client().admin().cluster().prepareState().setLocal(true).get().getState();
         assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID));
         internalCluster().stopNode(node);
@@ -359,7 +345,7 @@ public void testCanRunUnsafeBootstrapAfterErroneousDetachWithoutLoosingMetadata(
         Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode);
         updateClusterSettings(Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "1234kb"));
-        ClusterState state = 
internalCluster().client().admin().cluster().prepareState().execute().actionGet().getState(); + ClusterState state = internalCluster().client().admin().cluster().prepareState().get().getState(); assertThat(state.metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), equalTo("1234kb")); internalCluster().stopCurrentMasterNode(); @@ -373,7 +359,7 @@ public void testCanRunUnsafeBootstrapAfterErroneousDetachWithoutLoosingMetadata( internalCluster().startMasterOnlyNode(masterNodeDataPathSettings); ensureGreen(); - state = internalCluster().client().admin().cluster().prepareState().execute().actionGet().getState(); + state = internalCluster().client().admin().cluster().prepareState().get().getState(); assertThat(state.metadata().settings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), equalTo("1234kb")); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java index ae8662dc5517d..012cb826a4403 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java @@ -154,7 +154,7 @@ private int indexDocs(String indexName, Object... source) throws InterruptedExce final int numExtraDocs = between(10, 100); IndexRequestBuilder[] builders = new IndexRequestBuilder[numExtraDocs]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex(indexName).setSource(source); + builders[i] = prepareIndex(indexName).setSource(source); } indexRandom(true, false, true, Arrays.asList(builders)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java index 609a47815b07e..543b0be8ae48d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java @@ -138,7 +138,7 @@ private void indexRandomData() throws Exception { int numDocs = scaledRandomIntBetween(100, 1000); IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test").setSource("field", "value"); + builders[i] = prepareIndex("test").setSource("field", "value"); } // we want to test both full divergent copies of the shard in terms of segments, and // a case where they are the same (using sync flush), index Random does all this goodness diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index 2f3618f1d6aa7..b65e715b454dc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -91,10 +91,9 @@ public void testBulkWeirdScenario() throws Exception { ensureGreen(); BulkResponse bulkResponse = client().prepareBulk() - .add(client().prepareIndex().setIndex("test").setId("1").setSource("field1", "value1")) + .add(prepareIndex("test").setId("1").setSource("field1", "value1")) 
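Alongside the .get() cleanup, call sites switch from client().prepareIndex("idx") (and the older client().prepareIndex().setIndex("idx") form) to a bare prepareIndex("idx"). The diff shows only call sites, so the helper's exact shape is an assumption; a plausible sketch of such a convenience on the shared test base:

import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.client.internal.Client;

// Hypothetical shape of the test-base convenience used throughout this diff;
// treat the body as an assumption, not the actual ESIntegTestCase source.
public abstract class PrepareIndexHelperSketch {
    protected abstract Client client(); // the shared test client

    protected IndexRequestBuilder prepareIndex(String index) {
        return client().prepareIndex(index); // pre-targets the index, matching the old call sites
    }
}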
.add(client().prepareUpdate().setIndex("test").setId("1").setDoc("field2", "value2")) - .execute() - .actionGet(); + .get(); assertThat(bulkResponse.hasFailures(), equalTo(false)); assertThat(bulkResponse.getItems().length, equalTo(2)); @@ -113,7 +112,7 @@ public void testBulkWeirdScenario() throws Exception { // returns data paths settings of in-sync shard copy private Settings createStaleReplicaScenario(String master) throws Exception { - client().prepareIndex("test").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); + prepareIndex("test").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); refresh(); ClusterState state = clusterAdmin().prepareState().all().get().getState(); List shards = state.routingTable().allShards("test"); @@ -437,7 +436,7 @@ public void testRemoveAllocationIdOnWriteAfterNodeLeave() throws Exception { ensureYellow("test"); assertEquals(2, clusterAdmin().prepareState().get().getState().metadata().index("test").inSyncAllocationIds(0).size()); logger.info("--> indexing..."); - client().prepareIndex("test").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); + prepareIndex("test").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); assertEquals(1, clusterAdmin().prepareState().get().getState().metadata().index("test").inSyncAllocationIds(0).size()); internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { @Override @@ -464,7 +463,7 @@ public void testNotWaitForQuorumCopies() throws Exception { logger.info("--> creating index with 1 primary and 2 replicas"); createIndex("test", randomIntBetween(1, 3), 2); ensureGreen("test"); - client().prepareIndex("test").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); + prepareIndex("test").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); logger.info("--> removing 2 nodes from cluster"); internalCluster().stopNode(nodes.get(1)); internalCluster().stopNode(nodes.get(2)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java index a12f7feb05b48..3418874bd5902 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java @@ -14,10 +14,10 @@ import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.refresh.TransportUnpromotableShardRefreshAction; -import org.elasticsearch.action.search.ClosePointInTimeAction; import org.elasticsearch.action.search.ClosePointInTimeRequest; -import org.elasticsearch.action.search.OpenPointInTimeAction; import org.elasticsearch.action.search.OpenPointInTimeRequest; +import org.elasticsearch.action.search.TransportClosePointInTimeAction; +import org.elasticsearch.action.search.TransportOpenPointInTimeAction; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; @@ -522,7 +522,7 @@ public void testSearchRouting() throws Exception { // do nothing } } - String pitId = client().execute(OpenPointInTimeAction.INSTANCE, openRequest).actionGet().getPointInTimeId(); + String 
pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openRequest).actionGet().getPointInTimeId(); try { final var profileResults = prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)) .setProfile(true) @@ -533,7 +533,7 @@ public void testSearchRouting() throws Exception { assertThat(profileKey, in(searchShardProfileKeys)); } } finally { - client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(pitId)); + client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)); } } // search-shards API @@ -697,7 +697,7 @@ public void testRefreshFailsIfUnpromotableDisconnects() throws Exception { }); } - RefreshResponse response = indicesAdmin().prepareRefresh(INDEX_NAME).execute().actionGet(); + RefreshResponse response = indicesAdmin().prepareRefresh(INDEX_NAME).get(); assertThat( "each unpromotable replica shard should be added to the shard failures", response.getFailedShards(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java index 3e38ef22834d5..31e45e64d8afe 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java @@ -60,10 +60,7 @@ public void testFloodStageExceeded() throws Exception { getTestFileStore(dataNodeName).setTotalSpace(1L); refreshClusterInfo(); assertBusy(() -> { - assertBlocked( - client().prepareIndex().setIndex(indexName).setId("1").setSource("f", "g"), - IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK - ); + assertBlocked(prepareIndex(indexName).setId("1").setSource("f", "g"), IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK); assertThat(getIndexBlock(indexName, IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE), equalTo("true")); }); @@ -115,10 +112,7 @@ public void testRemoveExistingIndexBlocksWhenDiskThresholdMonitorIsDisabled() th getTestFileStore(dataNodeName).setTotalSpace(1L); refreshClusterInfo(); assertBusy(() -> { - assertBlocked( - client().prepareIndex().setIndex(indexName).setId("1").setSource("f", "g"), - IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK - ); + assertBlocked(prepareIndex(indexName).setId("1").setSource("f", "g"), IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK); assertThat(getIndexBlock(indexName, IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE), equalTo("true")); }); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index ead474e6eea24..b1ac5b02f7dd2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -182,7 +182,7 @@ private SmallestShards createReasonableSizedShards(final String indexName) throw while (true) { final IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[scaledRandomIntBetween(100, 10000)]; for (int i = 0; i < indexRequestBuilders.length; i++) { - indexRequestBuilders[i] = client().prepareIndex(indexName).setSource("field", randomAlphaOfLength(10)); + indexRequestBuilders[i] = 
prepareIndex(indexName).setSource("field", randomAlphaOfLength(10)); } indexRandom(true, indexRequestBuilders); forceMerge(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java index 965674b772998..fd5e54631fd7a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java @@ -194,7 +194,7 @@ public void testAutomaticReleaseOfIndexBlock() throws Exception { assertThat("node2 has 2 shards", shardCountByNodeId.get(nodeIds.get(2)), equalTo(2)); } - client().prepareIndex("test").setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); assertSearchHits(prepareSearch("test"), "1"); // Move all nodes above the low watermark so no shard movement can occur, and at least one node above the flood stage watermark so @@ -208,19 +208,13 @@ public void testAutomaticReleaseOfIndexBlock() throws Exception { ); assertBusy( - () -> assertBlocked( - client().prepareIndex().setIndex("test").setId("1").setSource("foo", "bar"), - IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK - ) + () -> assertBlocked(prepareIndex("test").setId("1").setSource("foo", "bar"), IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK) ); assertFalse(clusterAdmin().prepareHealth("test").setWaitForEvents(Priority.LANGUID).get().isTimedOut()); // Cannot add further documents - assertBlocked( - client().prepareIndex().setIndex("test").setId("2").setSource("foo", "bar"), - IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK - ); + assertBlocked(prepareIndex("test").setId("2").setSource("foo", "bar"), IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK); assertSearchHits(prepareSearch("test"), "1"); logger.info("--> index is confirmed read-only, releasing disk space"); @@ -231,11 +225,7 @@ public void testAutomaticReleaseOfIndexBlock() throws Exception { // Attempt to create a new document until DiskUsageMonitor unblocks the index assertBusy(() -> { try { - client().prepareIndex("test") - .setId("3") - .setSource("foo", "bar") - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get(); + prepareIndex("test").setId("3").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); } catch (ClusterBlockException e) { throw new AssertionError("retrying", e); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java index 7e3adf8e0283f..9818b0a89bc8e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java @@ -212,8 +212,7 @@ public void testClusterSettingsUpdateResponse() { ClusterUpdateSettingsResponse response1 = clusterAdmin().prepareUpdateSettings() .setTransientSettings(transientSettings1) .setPersistentSettings(persistentSettings1) - .execute() - .actionGet(); + .get(); assertAcked(response1); assertThat(response1.getTransientSettings().get(key1), notNullValue()); @@ -227,8 +226,7 @@ public void 
testClusterSettingsUpdateResponse() { ClusterUpdateSettingsResponse response2 = clusterAdmin().prepareUpdateSettings() .setTransientSettings(transientSettings2) .setPersistentSettings(persistentSettings2) - .execute() - .actionGet(); + .get(); assertAcked(response2); assertThat(response2.getTransientSettings().get(key1), notNullValue()); @@ -242,8 +240,7 @@ ClusterUpdateSettingsResponse response3 = clusterAdmin().prepareUpdateSettings() .setTransientSettings(transientSettings3) .setPersistentSettings(persistentSettings3) - .execute() - .actionGet(); + .get(); assertAcked(response3); assertThat(response3.getTransientSettings().get(key1), nullValue()); @@ -502,7 +499,7 @@ public void testClusterUpdateSettingsWithBlocks() { } // It should work now - ClusterUpdateSettingsResponse response = request.execute().actionGet(); + ClusterUpdateSettingsResponse response = request.get(); assertAcked(response); assertThat(response.getTransientSettings().get(key1), notNullValue()); @@ -515,10 +512,7 @@ public void testMissingUnits() { assertAcked(prepareCreate("test")); try { - indicesAdmin().prepareUpdateSettings("test") - .setSettings(Settings.builder().put("index.refresh_interval", "10")) - .execute() - .actionGet(); + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.refresh_interval", "10")).get(); fail("Expected IllegalArgumentException"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("[index.refresh_interval] with value [10]")); @@ -542,7 +536,7 @@ private void testLoggerLevelUpdate(final BiConsumer - final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> throwBuilder.execute().actionGet()); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> throwBuilder.get()); assertEquals("Unknown level constant [BOOM].", e.getMessage()); try { @@ -550,7 +544,7 @@ private void testLoggerLevelUpdate(final BiConsumer assertThat( - clusterAdmin().prepareHealth() - .setWaitForEvents(Priority.LANGUID) - .setWaitForNodes(">=2") - .setLocal(true) - .execute() - .actionGet() - .isTimedOut(), + clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes(">=2").setLocal(true).get().isTimedOut(), equalTo(false) ); dataNodes = clusterAdmin().prepareState().get().getState().getNodes().getDataNodes().size(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionCleanSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionCleanSettingsIT.java index 2561799b475ad..5ea78a6b1e3a0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionCleanSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionCleanSettingsIT.java @@ -57,7 +57,7 @@ public void testSearchWithRelocationAndSlowClusterStateProcessing() throws Excep final String node_2 = internalCluster().startDataOnlyNode(); List indexRequestBuilderList = new ArrayList<>(); for (int i = 0; i < 100; i++) { - indexRequestBuilderList.add(client().prepareIndex().setIndex("test").setSource("{\"int_field\":1}", XContentType.JSON)); + indexRequestBuilderList.add(prepareIndex("test").setSource("{\"int_field\":1}", XContentType.JSON)); } indexRandom(true, indexRequestBuilderList); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index 586e95484afa4..4aabf0ac66a32 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++
b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -482,8 +482,7 @@ public void testRestartNodeWhileIndexing() throws Exception { while (stopped.get() == false && docID.get() < 5000) { String id = Integer.toString(docID.incrementAndGet()); try { - DocWriteResponse response = client().prepareIndex(index) - .setId(id) + DocWriteResponse response = prepareIndex(index).setId(id) .setSource(Map.of("f" + randomIntBetween(1, 10), randomNonNegativeLong()), XContentType.JSON) .get(); assertThat(response.getResult(), is(oneOf(CREATED, UPDATED))); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java index 8a83dcf808007..af254d42ec3ee 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java @@ -240,9 +240,9 @@ public void testMappingTimeout() throws Exception { disruption.startDisrupting(); BulkRequestBuilder bulk = client().prepareBulk(); - bulk.add(client().prepareIndex("test").setId("2").setSource("{ \"f\": 1 }", XContentType.JSON)); - bulk.add(client().prepareIndex("test").setId("3").setSource("{ \"g\": 1 }", XContentType.JSON)); - bulk.add(client().prepareIndex("test").setId("4").setSource("{ \"f\": 1 }", XContentType.JSON)); + bulk.add(prepareIndex("test").setId("2").setSource("{ \"f\": 1 }", XContentType.JSON)); + bulk.add(prepareIndex("test").setId("3").setSource("{ \"g\": 1 }", XContentType.JSON)); + bulk.add(prepareIndex("test").setId("4").setSource("{ \"f\": 1 }", XContentType.JSON)); BulkResponse bulkResponse = bulk.get(); assertTrue(bulkResponse.hasFailures()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java index bf5970f5ea402..526921fdc95ba 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java @@ -222,7 +222,7 @@ public void testMasterFailOverDuringShardSnapshots() throws Exception { final String indexName = "index-one"; createIndex(indexName); - client().prepareIndex(indexName).setSource("foo", "bar").get(); + prepareIndex(indexName).setSource("foo", "bar").get(); blockDataNode(repoName, dataNode); @@ -272,7 +272,7 @@ private void createRandomIndex(String idxName) throws InterruptedException { final int numdocs = randomIntBetween(10, 100); IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex(idxName).setId(Integer.toString(i)).setSource("field1", "bar " + i); + builders[i] = prepareIndex(idxName).setId(Integer.toString(i)).setSource("field1", "bar " + i); } indexRandom(true, builders); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java index ba086cb4e9788..407b1aae40600 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java @@ -74,22 +74,21 @@ public Path nodeConfigPath(int 
nodeOrdinal) { return null; } }; - try ( - InternalTestCluster other = new InternalTestCluster( - randomLong(), - createTempDir(), - false, - false, - 1, - 1, - internalCluster().getClusterName(), - configurationSource, - 0, - "other", - Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class), - Function.identity() - ) - ) { + final InternalTestCluster other = new InternalTestCluster( + randomLong(), + createTempDir(), + false, + false, + 1, + 1, + internalCluster().getClusterName(), + configurationSource, + 0, + "other", + Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class), + Function.identity() + ); + try { other.beforeTest(random()); final ClusterState first = internalCluster().getInstance(ClusterService.class).state(); final ClusterState second = other.getInstance(ClusterService.class).state(); @@ -97,6 +96,8 @@ public Path nodeConfigPath(int nodeOrdinal) { assertThat(second.nodes().getSize(), equalTo(1)); assertThat(first.nodes().getMasterNodeId(), not(equalTo(second.nodes().getMasterNodeId()))); assertThat(first.metadata().clusterUUID(), not(equalTo(second.metadata().clusterUUID()))); + } finally { + other.close(); } } @@ -140,27 +141,27 @@ public Path nodeConfigPath(int nodeOrdinal) { return null; } }; - try ( - InternalTestCluster other = new InternalTestCluster( - randomLong(), - createTempDir(), - false, - false, - 1, - 1, - internalCluster().getClusterName(), - configurationSource, - 0, - "other", - Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class), - Function.identity() - ); - var ignored = mockAppender.capturing(JoinHelper.class) - ) { + final InternalTestCluster other = new InternalTestCluster( + randomLong(), + createTempDir(), + false, + false, + 1, + 1, + internalCluster().getClusterName(), + configurationSource, + 0, + "other", + Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class), + Function.identity() + ); + try (var ignored = mockAppender.capturing(JoinHelper.class)) { other.beforeTest(random()); final ClusterState first = internalCluster().getInstance(ClusterService.class).state(); assertThat(first.nodes().getSize(), equalTo(1)); assertBusy(mockAppender::assertAllExpectationsMatched); + } finally { + other.close(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/document/AliasedIndexDocumentActionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/document/AliasedIndexDocumentActionsIT.java index c475961336b51..a928b1a2eaecc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/document/AliasedIndexDocumentActionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/document/AliasedIndexDocumentActionsIT.java @@ -17,7 +17,7 @@ public class AliasedIndexDocumentActionsIT extends DocumentActionsIT { protected void createIndex() { logger.info("Creating index [test1] with alias [test]"); try { - indicesAdmin().prepareDelete("test1").execute().actionGet(); + indicesAdmin().prepareDelete("test1").get(); } catch (Exception e) { // ignore } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java index 1ee91c5cd5f3b..d3001f485846e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java @@ -56,9 +56,7 @@ public void testIndexActions() throws Exception { 
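The SingleNodeDiscoveryIT hunks above trade try-with-resources over the second InternalTestCluster for explicit construction plus try/finally. The diff does not state the motivation; one plausible reading is that moving construction out of the try header separates a constructor failure from cleanup of a successfully built cluster, while the explicit finally still guarantees close(). The general shape, with placeholder names:

// Generic form of the pattern the diff moves to (names are placeholders):
// build the resource first, then guarantee close() even when the work in try throws.
final class TryFinallySketch {
    static void runWithSecondCluster() throws Exception {
        final AutoCloseable other = newClusterSketch(); // construction outside the try
        try {
            // ... exercise the cluster: beforeTest(random()), cluster-state assertions ...
        } finally {
            other.close(); // mirrors the explicit finally blocks added in the diff
        }
    }

    private static AutoCloseable newClusterSketch() {
        return () -> { /* stand-in for InternalTestCluster.close() */ };
    }
}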
logger.info("Running Cluster Health"); ensureGreen(); logger.info("Indexing [type1/1]"); - DocWriteResponse indexResponse = client().prepareIndex() - .setIndex("test") - .setId("1") + DocWriteResponse indexResponse = prepareIndex("test").setId("1") .setSource(source("1", "test")) .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .get(); @@ -89,7 +87,7 @@ public void testIndexActions() throws Exception { logger.info("Get [type1/1]"); for (int i = 0; i < 5; i++) { - getResult = client().prepareGet("test", "1").execute().actionGet(); + getResult = client().prepareGet("test", "1").get(); assertThat(getResult.getIndex(), equalTo(getConcreteIndexName())); assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(Strings.toString(source("1", "test")))); assertThat("cycle(map) #" + i, (String) getResult.getSourceAsMap().get("name"), equalTo("test")); @@ -100,10 +98,10 @@ public void testIndexActions() throws Exception { logger.info("Get [type1/1] with script"); for (int i = 0; i < 5; i++) { - getResult = client().prepareGet("test", "1").setStoredFields("name").execute().actionGet(); + getResult = client().prepareGet("test", "1").setStoredFields("name").get(); assertThat(getResult.getIndex(), equalTo(getConcreteIndexName())); assertThat(getResult.isExists(), equalTo(true)); - assertThat(getResult.getSourceAsBytes(), nullValue()); + assertThat(getResult.getSourceAsBytesRef(), nullValue()); assertThat(getResult.getField("name").getValues().get(0).toString(), equalTo("test")); } @@ -114,7 +112,7 @@ public void testIndexActions() throws Exception { } logger.info("Delete [type1/1]"); - DeleteResponse deleteResponse = client().prepareDelete("test", "1").execute().actionGet(); + DeleteResponse deleteResponse = client().prepareDelete("test", "1").get(); assertThat(deleteResponse.getIndex(), equalTo(getConcreteIndexName())); assertThat(deleteResponse.getId(), equalTo("1")); logger.info("Refreshing"); @@ -132,7 +130,7 @@ public void testIndexActions() throws Exception { client().index(new IndexRequest("test").id("2").source(source("2", "test2"))).actionGet(); logger.info("Flushing"); - FlushResponse flushResult = indicesAdmin().prepareFlush("test").execute().actionGet(); + FlushResponse flushResult = indicesAdmin().prepareFlush("test").get(); assertThat(flushResult.getSuccessfulShards(), equalTo(numShards.totalNumShards)); assertThat(flushResult.getFailedShards(), equalTo(0)); logger.info("Refreshing"); @@ -181,14 +179,13 @@ public void testBulk() throws Exception { ensureGreen(); BulkResponse bulkResponse = client().prepareBulk() - .add(client().prepareIndex().setIndex("test").setId("1").setSource(source("1", "test"))) - .add(client().prepareIndex().setIndex("test").setId("2").setSource(source("2", "test")).setCreate(true)) - .add(client().prepareIndex().setIndex("test").setSource(source("3", "test"))) - .add(client().prepareIndex().setIndex("test").setCreate(true).setSource(source("4", "test"))) + .add(prepareIndex("test").setId("1").setSource(source("1", "test"))) + .add(prepareIndex("test").setId("2").setSource(source("2", "test")).setCreate(true)) + .add(prepareIndex("test").setSource(source("3", "test"))) + .add(prepareIndex("test").setCreate(true).setSource(source("4", "test"))) .add(client().prepareDelete().setIndex("test").setId("1")) - .add(client().prepareIndex().setIndex("test").setSource("{ xxx }", XContentType.JSON)) // failure - .execute() - .actionGet(); + .add(prepareIndex("test").setSource("{ xxx }", XContentType.JSON)) // failure + .get(); assertThat(bulkResponse.hasFailures(), 
equalTo(true)); assertThat(bulkResponse.getItems().length, equalTo(6)); @@ -223,7 +220,7 @@ public void testBulk() throws Exception { assertThat(bulkResponse.getItems()[5].getIndex(), equalTo(getConcreteIndexName())); waitForRelocation(ClusterHealthStatus.GREEN); - RefreshResponse refreshResponse = indicesAdmin().prepareRefresh("test").execute().actionGet(); + RefreshResponse refreshResponse = indicesAdmin().prepareRefresh("test").get(); assertNoFailures(refreshResponse); assertThat(refreshResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/document/ShardInfoIT.java b/server/src/internalClusterTest/java/org/elasticsearch/document/ShardInfoIT.java index 6571b9a6c928c..3aa97f79a82da 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/document/ShardInfoIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/document/ShardInfoIT.java @@ -33,7 +33,7 @@ public class ShardInfoIT extends ESIntegTestCase { public void testIndexAndDelete() throws Exception { prepareIndex(1); - DocWriteResponse indexResponse = client().prepareIndex("idx").setSource("{}", XContentType.JSON).get(); + DocWriteResponse indexResponse = prepareIndex("idx").setSource("{}", XContentType.JSON).get(); assertShardInfo(indexResponse); DeleteResponse deleteResponse = client().prepareDelete("idx", indexResponse.getId()).get(); assertShardInfo(deleteResponse); @@ -49,7 +49,7 @@ public void testBulkWithIndexAndDeleteItems() throws Exception { prepareIndex(1); BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); for (int i = 0; i < 10; i++) { - bulkRequestBuilder.add(client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("idx").setSource("{}", XContentType.JSON)); } BulkResponse bulkResponse = bulkRequestBuilder.get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java index 30940c1e154b0..8c6abc3e14cd8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java @@ -84,7 +84,7 @@ public Settings onNodeStopped(String nodeName) { internalCluster().startNode(dataPathSettings); logger.info("--> indexing a simple document"); - client().prepareIndex(indexName).setId("1").setSource("field1", "value1").get(); + prepareIndex(indexName).setId("1").setSource("field1", "value1").get(); logger.info("--> restarting the node without the data role"); ex = expectThrows( @@ -152,7 +152,7 @@ public void testFailsToStartIfUpgradedTooFar() { public void testUpgradeDataFolder() throws IOException, InterruptedException { String node = internalCluster().startNode(); prepareCreate("test").get(); - indexRandom(true, client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("test").setId("1").setSource("{}", XContentType.JSON)); String nodeId = clusterAdmin().prepareState().get().getState().nodes().getMasterNodeId(); final Settings dataPathSettings = internalCluster().dataPathSettings(node); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeRepurposeCommandIT.java b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeRepurposeCommandIT.java index 06bb86b8e072d..2a5295caf31b2 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeRepurposeCommandIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeRepurposeCommandIT.java @@ -35,7 +35,7 @@ public void testRepurpose() throws Exception { prepareCreate(indexName, indexSettings(1, 0)).get(); logger.info("--> indexing a simple document"); - client().prepareIndex(indexName).setId("1").setSource("field1", "value1").get(); + prepareIndex(indexName).setId("1").setSource("field1", "value1").get(); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/explain/ExplainActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/explain/ExplainActionIT.java index fe62452fece47..cdb418182fff2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/explain/ExplainActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/explain/ExplainActionIT.java @@ -42,7 +42,7 @@ public void testSimple() throws Exception { assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSettings(Settings.builder().put("index.refresh_interval", -1))); ensureGreen("test"); - client().prepareIndex("test").setId("1").setSource("field", "value1").get(); + prepareIndex("test").setId("1").setSource("field", "value1").get(); ExplainResponse response = client().prepareExplain(indexOrAlias(), "1").setQuery(QueryBuilders.matchAllQuery()).get(); assertNotNull(response); @@ -99,8 +99,7 @@ public void testExplainWithFields() throws Exception { ); ensureGreen("test"); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject().startObject("obj1").field("field1", "value1").field("field2", "value2").endObject().endObject() ) @@ -158,8 +157,7 @@ public void testExplainWithSource() throws Exception { assertAcked(prepareCreate("test").addAlias(new Alias("alias"))); ensureGreen("test"); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject().startObject("obj1").field("field1", "value1").field("field2", "value2").endObject().endObject() ) @@ -177,8 +175,8 @@ public void testExplainWithSource() throws Exception { assertThat(response.getExplanation().getValue(), equalTo(1.0f)); assertThat(response.getGetResult().isExists(), equalTo(true)); assertThat(response.getGetResult().getId(), equalTo("1")); - assertThat(response.getGetResult().getSource().size(), equalTo(1)); - assertThat(((Map) response.getGetResult().getSource().get("obj1")).get("field1").toString(), equalTo("value1")); + assertThat(response.getGetResult().sourceAsMap().size(), equalTo(1)); + assertThat(((Map) response.getGetResult().sourceAsMap().get("obj1")).get("field1").toString(), equalTo("value1")); response = client().prepareExplain(indexOrAlias(), "1") .setQuery(QueryBuilders.matchAllQuery()) @@ -186,7 +184,7 @@ public void testExplainWithSource() throws Exception { .get(); assertNotNull(response); assertTrue(response.isMatch()); - assertThat(((Map) response.getGetResult().getSource().get("obj1")).get("field1").toString(), equalTo("value1")); + assertThat(((Map) response.getGetResult().sourceAsMap().get("obj1")).get("field1").toString(), equalTo("value1")); } public void testExplainWithFilteredAlias() { @@ -196,7 +194,7 @@ public void testExplainWithFilteredAlias() { ); ensureGreen("test"); - client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value1").get(); + prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value1").get(); 
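Two accessor renames ride along in these files: GetResult.getSource() becomes sourceAsMap() (ExplainActionIT here) and getSourceAsBytes() becomes getSourceAsBytesRef() (DocumentActionsIT earlier). A usage sketch of the renamed accessors, with return types inferred from the assertions in this diff rather than confirmed against the class itself:

import java.util.Map;

import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.index.get.GetResult;

// Usage sketch based only on the assertions in this diff.
class GetResultAccessorSketch {
    static void inspect(GetResult getResult) {
        Map<String, Object> source = getResult.sourceAsMap();           // parsed _source, replaces getSource()
        Object field1 = ((Map<?, ?>) source.get("obj1")).get("field1"); // same navigation as testExplainWithSource
        BytesReference raw = getResult.getSourceAsBytesRef();           // replaces getSourceAsBytes(); the diff
                                                                        // asserts it is null when only stored fields are fetched
    }
}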
refresh(); ExplainResponse response = client().prepareExplain("alias1", "1").setQuery(QueryBuilders.matchAllQuery()).get(); @@ -213,7 +211,7 @@ public void testExplainWithFilteredAliasFetchSource() { ); ensureGreen("test"); - client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value1").get(); + prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value1").get(); refresh(); ExplainResponse response = client().prepareExplain("alias1", "1") @@ -229,8 +227,8 @@ public void testExplainWithFilteredAliasFetchSource() { assertThat(response.getGetResult(), notNullValue()); assertThat(response.getGetResult().getIndex(), equalTo("test")); assertThat(response.getGetResult().getId(), equalTo("1")); - assertThat(response.getGetResult().getSource(), notNullValue()); - assertThat(response.getGetResult().getSource().get("field1"), equalTo("value1")); + assertThat(response.getGetResult().sourceAsMap(), notNullValue()); + assertThat(response.getGetResult().sourceAsMap().get("field1"), equalTo("value1")); } public void testExplainDateRangeInQueryString() { @@ -240,7 +238,7 @@ public void testExplainDateRangeInQueryString() { String aMonthAgo = DateTimeFormatter.ISO_LOCAL_DATE.format(now.minusMonths(1)); String aMonthFromNow = DateTimeFormatter.ISO_LOCAL_DATE.format(now.plusMonths(1)); - client().prepareIndex("test").setId("1").setSource("past", aMonthAgo, "future", aMonthFromNow).get(); + prepareIndex("test").setId("1").setSource("past", aMonthAgo, "future", aMonthFromNow).get(); refresh(); @@ -290,12 +288,12 @@ public void testQueryRewrite() { .get(); ensureGreen("twitter"); - client().prepareIndex("twitter").setId("1").setSource("user", "user1", "followers", new String[] { "user2", "user3" }).get(); - client().prepareIndex("twitter").setId("2").setSource("user", "user2", "followers", new String[] { "user1" }).get(); + prepareIndex("twitter").setId("1").setSource("user", "user1", "followers", new String[] { "user2", "user3" }).get(); + prepareIndex("twitter").setId("2").setSource("user", "user2", "followers", new String[] { "user1" }).get(); refresh(); TermsQueryBuilder termsLookupQuery = QueryBuilders.termsLookupQuery("user", new TermsLookup("twitter", "2", "followers")); - ExplainResponse response = client().prepareExplain("twitter", "1").setQuery(termsLookupQuery).execute().actionGet(); + ExplainResponse response = client().prepareExplain("twitter", "1").setQuery(termsLookupQuery).get(); Explanation explanation = response.getExplanation(); assertNotNull(explanation); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java index fe447eca6e8fd..24bf198b7b42f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java @@ -18,7 +18,7 @@ import java.util.stream.Collectors; import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasKey; public class ClusterFeaturesIT extends ESIntegTestCase { @@ -29,7 +29,7 @@ public void testClusterHasFeatures() { FeatureService service = internalCluster().getCurrentMasterNodeInstance(FeatureService.class); - assertThat(service.getNodeFeatures(), hasItem(FeatureService.FEATURES_SUPPORTED.id())); + assertThat(service.getNodeFeatures(), hasKey(FeatureService.FEATURES_SUPPORTED.id())); // check the nodes 
all have a feature in their cluster state (there should always be features_supported) var response = clusterAdmin().state(new ClusterStateRequest().clear().nodes(true)).actionGet(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index d92664f55416a..600219da3d90f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -89,11 +89,10 @@ public void testMappingMetadataParsed() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); logger.info("--> verify meta _routing required exists"); - MappingMetadata mappingMd = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test").mapping(); + MappingMetadata mappingMd = clusterAdmin().prepareState().get().getState().metadata().index("test").mapping(); assertThat(mappingMd.routingRequired(), equalTo(true)); logger.info("--> restarting nodes..."); @@ -103,7 +102,7 @@ public void testMappingMetadataParsed() throws Exception { ensureYellow(); logger.info("--> verify meta _routing required exists"); - mappingMd = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test").mapping(); + mappingMd = clusterAdmin().prepareState().get().getState().metadata().index("test").mapping(); assertThat(mappingMd.routingRequired(), equalTo(true)); } @@ -119,7 +118,7 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> waiting for green status"); ensureGreen(); - ClusterStateResponse stateResponse = clusterAdmin().prepareState().execute().actionGet(); + ClusterStateResponse stateResponse = clusterAdmin().prepareState().get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN)); assertThat(stateResponse.getState().routingTable().index("test").size(), equalTo(test.numPrimaries)); assertThat( @@ -128,12 +127,12 @@ public void testSimpleOpenClose() throws Exception { ); logger.info("--> indexing a simple document"); - client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); logger.info("--> closing test index..."); assertAcked(indicesAdmin().prepareClose("test")); - stateResponse = clusterAdmin().prepareState().execute().actionGet(); + stateResponse = clusterAdmin().prepareState().get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test"), notNullValue()); @@ -142,14 +141,14 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> trying to index into a closed index ..."); try { - client().prepareIndex("test").setId("1").setSource("field1", "value1").execute().actionGet(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); fail(); } catch (IndexClosedException e) { // all is well } logger.info("--> creating another index (test2) by indexing into it"); - client().prepareIndex("test2").setId("1").setSource("field1", "value1").execute().actionGet(); + prepareIndex("test2").setId("1").setSource("field1", "value1").get(); logger.info("--> verifying that the state is green"); ensureGreen(); @@ -159,7 +158,7 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> verifying that 
the state is green"); ensureGreen(); - stateResponse = clusterAdmin().prepareState().execute().actionGet(); + stateResponse = clusterAdmin().prepareState().get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN)); assertThat(stateResponse.getState().routingTable().index("test").size(), equalTo(test.numPrimaries)); assertThat( @@ -168,12 +167,12 @@ public void testSimpleOpenClose() throws Exception { ); logger.info("--> trying to get the indexed document on the first index"); - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.isExists(), equalTo(true)); logger.info("--> closing test index..."); assertAcked(indicesAdmin().prepareClose("test")); - stateResponse = clusterAdmin().prepareState().execute().actionGet(); + stateResponse = clusterAdmin().prepareState().get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test"), notNullValue()); @@ -182,25 +181,25 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> waiting for two nodes and green status"); ensureGreen(); - stateResponse = clusterAdmin().prepareState().execute().actionGet(); + stateResponse = clusterAdmin().prepareState().get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test"), notNullValue()); logger.info("--> trying to index into a closed index ..."); try { - client().prepareIndex("test").setId("1").setSource("field1", "value1").execute().actionGet(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); fail(); } catch (IndexClosedException e) { // all is well } logger.info("--> opening index..."); - indicesAdmin().prepareOpen("test").execute().actionGet(); + indicesAdmin().prepareOpen("test").get(); logger.info("--> waiting for green status"); ensureGreen(); - stateResponse = clusterAdmin().prepareState().execute().actionGet(); + stateResponse = clusterAdmin().prepareState().get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN)); assertThat(stateResponse.getState().routingTable().index("test").size(), equalTo(test.numPrimaries)); assertThat( @@ -209,11 +208,11 @@ public void testSimpleOpenClose() throws Exception { ); logger.info("--> trying to get the indexed document on the first round (before close and shutdown)"); - getResponse = client().prepareGet("test", "1").execute().actionGet(); + getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.isExists(), equalTo(true)); logger.info("--> indexing a simple document"); - client().prepareIndex("test").setId("2").setSource("field1", "value1").execute().actionGet(); + prepareIndex("test").setId("2").setSource("field1", "value1").get(); } public void testJustMasterNode() throws Exception { @@ -223,7 +222,7 @@ public void testJustMasterNode() throws Exception { internalCluster().startNode(nonDataNode()); logger.info("--> create an index"); - indicesAdmin().prepareCreate("test").setWaitForActiveShards(ActiveShardCount.NONE).execute().actionGet(); + indicesAdmin().prepareCreate("test").setWaitForActiveShards(ActiveShardCount.NONE).get(); logger.info("--> restarting master node"); internalCluster().fullRestart(new RestartCallback() { @@ -234,15 
+233,11 @@ public Settings onNodeStopped(String nodeName) { }); logger.info("--> waiting for test index to be created"); - ClusterHealthResponse health = clusterAdmin().prepareHealth() - .setWaitForEvents(Priority.LANGUID) - .setIndices("test") - .execute() - .actionGet(); + ClusterHealthResponse health = clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setIndices("test").get(); assertThat(health.isTimedOut(), equalTo(false)); logger.info("--> verify we have an index"); - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().setIndices("test").execute().actionGet(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().setIndices("test").get(); assertThat(clusterStateResponse.getState().metadata().hasIndex("test"), equalTo(true)); } @@ -254,9 +249,9 @@ public void testJustMasterNodeAndJustDataNode() { internalCluster().startDataOnlyNode(); logger.info("--> create an index"); - indicesAdmin().prepareCreate("test").execute().actionGet(); + indicesAdmin().prepareCreate("test").get(); - client().prepareIndex("test").setSource("field1", "value1").execute().actionGet(); + prepareIndex("test").setSource("field1", "value1").get(); } public void testTwoNodesSingleDoc() throws Exception { @@ -266,15 +261,14 @@ public void testTwoNodesSingleDoc() throws Exception { internalCluster().startNodes(2); logger.info("--> indexing a simple document"); - client().prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); logger.info("--> waiting for green status"); ClusterHealthResponse health = clusterAdmin().prepareHealth() .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForNodes("2") - .execute() - .actionGet(); + .get(); assertThat(health.isTimedOut(), equalTo(false)); logger.info("--> verify 1 doc in the index"); @@ -285,20 +279,15 @@ public void testTwoNodesSingleDoc() throws Exception { logger.info("--> closing test index..."); assertAcked(indicesAdmin().prepareClose("test")); - ClusterStateResponse stateResponse = clusterAdmin().prepareState().execute().actionGet(); + ClusterStateResponse stateResponse = clusterAdmin().prepareState().get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test"), notNullValue()); logger.info("--> opening the index..."); - indicesAdmin().prepareOpen("test").execute().actionGet(); + indicesAdmin().prepareOpen("test").get(); logger.info("--> waiting for green status"); - health = clusterAdmin().prepareHealth() - .setWaitForEvents(Priority.LANGUID) - .setWaitForGreenStatus() - .setWaitForNodes("2") - .execute() - .actionGet(); + health = clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").get(); assertThat(health.isTimedOut(), equalTo(false)); logger.info("--> verify 1 doc in the index"); @@ -339,7 +328,7 @@ public Settings onNodeStopped(final String nodeName) throws Exception { final String otherNode = nodes.get(0); logger.info("--> delete index and verify it is deleted"); final Client client = client(otherNode); - client.admin().indices().prepareDelete(indexName).execute().actionGet(); + client.admin().indices().prepareDelete(indexName).get(); assertFalse(indexExists(indexName, client)); logger.info("--> index deleted"); return super.onNodeStopped(nodeName); @@ -376,7 +365,7 @@ 
public void testRecoverBrokenIndexMetadata() throws Exception { logger.info("--> starting one node"); internalCluster().startNode(); logger.info("--> indexing a simple document"); - client().prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); logger.info("--> waiting for green status"); if (usually()) { ensureYellow(); @@ -453,7 +442,7 @@ public void testRecoverMissingAnalyzer() throws Exception { } }""").get(); logger.info("--> indexing a simple document"); - client().prepareIndex("test").setId("1").setSource("field1", "value one").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("field1", "value one").setRefreshPolicy(IMMEDIATE).get(); logger.info("--> waiting for green status"); if (usually()) { ensureYellow(); @@ -501,7 +490,7 @@ public void testRecoverMissingAnalyzer() throws Exception { public void testArchiveBrokenClusterSettings() throws Exception { logger.info("--> starting one node"); internalCluster().startNode(); - client().prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); logger.info("--> waiting for green status"); if (usually()) { ensureYellow(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/QuorumGatewayIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/QuorumGatewayIT.java index a77201e1e141a..15a72e3534b50 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/QuorumGatewayIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/QuorumGatewayIT.java @@ -42,11 +42,11 @@ public void testQuorumRecovery() throws Exception { final NumShards test = getNumShards("test"); logger.info("--> indexing..."); - client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); // We don't check for failures in the flush response: if we do we might get the following: // FlushNotAllowedEngineException[[test][1] recovery is in progress, flush [COMMIT_TRANSLOG] is not allowed] flush(); - client().prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).get(); + prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).get(); refresh(); for (int i = 0; i < 10; i++) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java index ec87a0d1fa2fa..b55dd5e207c41 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java @@ -37,8 +37,7 @@ public Set waitForNoBlocksOnNode(TimeValue timeout, Client nodeCli .cluster() .prepareState() .setLocal(true) - .execute() - .actionGet() + .get() .getState() .blocks() .global(ClusterBlockLevel.METADATA_WRITE); @@ -56,81 +55,33 @@ public void testRecoverAfterDataNodes() { logger.info("--> start master_node (1)"); Client master1 = startNode(Settings.builder().put(RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 
2).put(masterOnlyNode())); assertThat( - master1.admin() - .cluster() - .prepareState() - .setLocal(true) - .execute() - .actionGet() - .getState() - .blocks() - .global(ClusterBlockLevel.METADATA_WRITE), + master1.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); logger.info("--> start data_node (1)"); Client data1 = startNode(Settings.builder().put(RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 2).put(dataOnlyNode())); assertThat( - master1.admin() - .cluster() - .prepareState() - .setLocal(true) - .execute() - .actionGet() - .getState() - .blocks() - .global(ClusterBlockLevel.METADATA_WRITE), + master1.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); assertThat( - data1.admin() - .cluster() - .prepareState() - .setLocal(true) - .execute() - .actionGet() - .getState() - .blocks() - .global(ClusterBlockLevel.METADATA_WRITE), + data1.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); logger.info("--> start master_node (2)"); Client master2 = startNode(Settings.builder().put(RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 2).put(masterOnlyNode())); assertThat( - master2.admin() - .cluster() - .prepareState() - .setLocal(true) - .execute() - .actionGet() - .getState() - .blocks() - .global(ClusterBlockLevel.METADATA_WRITE), + master2.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); assertThat( - data1.admin() - .cluster() - .prepareState() - .setLocal(true) - .execute() - .actionGet() - .getState() - .blocks() - .global(ClusterBlockLevel.METADATA_WRITE), + data1.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); assertThat( - master2.admin() - .cluster() - .prepareState() - .setLocal(true) - .execute() - .actionGet() - .getState() - .blocks() - .global(ClusterBlockLevel.METADATA_WRITE), + master2.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index 81149efb1596f..f05a83e861e52 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -96,31 +96,21 @@ public void testOneNodeRecoverFromGateway() throws Exception { ); assertAcked(prepareCreate("test").setMapping(mapping)); - client().prepareIndex("test") - .setId("10990239") + prepareIndex("test").setId("10990239") .setSource(jsonBuilder().startObject().startArray("appAccountIds").value(14).value(179).endArray().endObject()) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("10990473") + .get(); + prepareIndex("test").setId("10990473") .setSource(jsonBuilder().startObject().startArray("appAccountIds").value(14).endArray().endObject()) - .execute() - .actionGet(); - client().prepareIndex("test") - 
.setId("10990513") + .get(); + prepareIndex("test").setId("10990513") .setSource(jsonBuilder().startObject().startArray("appAccountIds").value(14).value(179).endArray().endObject()) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("10990695") + .get(); + prepareIndex("test").setId("10990695") .setSource(jsonBuilder().startObject().startArray("appAccountIds").value(14).endArray().endObject()) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("11026351") + .get(); + prepareIndex("test").setId("11026351") .setSource(jsonBuilder().startObject().startArray("appAccountIds").value(14).endArray().endObject()) - .execute() - .actionGet(); + .get(); refresh(); assertHitCount(prepareSearch().setSize(0).setQuery(termQuery("appAccountIds", 179)), 2); @@ -134,7 +124,7 @@ public void testOneNodeRecoverFromGateway() throws Exception { ensureYellow(); primaryTerms = assertAndCapturePrimaryTerms(primaryTerms); - indicesAdmin().prepareRefresh().execute().actionGet(); + indicesAdmin().prepareRefresh().get(); assertHitCount(prepareSearch().setSize(0).setQuery(termQuery("appAccountIds", 179)), 2); internalCluster().fullRestart(); @@ -143,7 +133,7 @@ public void testOneNodeRecoverFromGateway() throws Exception { ensureYellow(); primaryTerms = assertAndCapturePrimaryTerms(primaryTerms); - indicesAdmin().prepareRefresh().execute().actionGet(); + indicesAdmin().prepareRefresh().get(); assertHitCount(prepareSearch().setSize(0).setQuery(termQuery("appAccountIds", 179)), 2); } @@ -275,17 +265,9 @@ public void testSingleNodeNoFlush() throws Exception { public void testSingleNodeWithFlush() throws Exception { internalCluster().startNode(); - client().prepareIndex("test") - .setId("1") - .setSource(jsonBuilder().startObject().field("field", "value1").endObject()) - .execute() - .actionGet(); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); flush(); - client().prepareIndex("test") - .setId("2") - .setSource(jsonBuilder().startObject().field("field", "value2").endObject()) - .execute() - .actionGet(); + prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).get(); refresh(); assertHitCount(prepareSearch().setSize(0).setQuery(matchAllQuery()), 2); @@ -320,17 +302,9 @@ public void testTwoNodeFirstNodeCleared() throws Exception { final String firstNode = internalCluster().startNode(); internalCluster().startNode(); - client().prepareIndex("test") - .setId("1") - .setSource(jsonBuilder().startObject().field("field", "value1").endObject()) - .execute() - .actionGet(); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); flush(); - client().prepareIndex("test") - .setId("2") - .setSource(jsonBuilder().startObject().field("field", "value2").endObject()) - .execute() - .actionGet(); + prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).get(); refresh(); logger.info("Running Cluster Health (wait for the shards to startup)"); @@ -378,18 +352,10 @@ public void testLatestVersionLoaded() throws Exception { Settings node2DataPathSettings = internalCluster().dataPathSettings(nodes.get(1)); assertAcked(indicesAdmin().prepareCreate("test")); - client().prepareIndex("test") - .setId("1") - .setSource(jsonBuilder().startObject().field("field", "value1").endObject()) - .execute() - .actionGet(); - indicesAdmin().prepareFlush().execute().actionGet(); - 
client().prepareIndex("test") - .setId("2") - .setSource(jsonBuilder().startObject().field("field", "value2").endObject()) - .execute() - .actionGet(); - indicesAdmin().prepareRefresh().execute().actionGet(); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); + indicesAdmin().prepareFlush().get(); + prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).get(); + indicesAdmin().prepareRefresh().get(); logger.info("--> running cluster_health (wait for the shards to startup)"); ensureGreen(); @@ -405,16 +371,12 @@ public void testLatestVersionLoaded() throws Exception { internalCluster().stopRandomDataNode(); logger.info("--> one node is closed - start indexing data into the second one"); - client().prepareIndex("test") - .setId("3") - .setSource(jsonBuilder().startObject().field("field", "value3").endObject()) - .execute() - .actionGet(); + prepareIndex("test").setId("3").setSource(jsonBuilder().startObject().field("field", "value3").endObject()).get(); // TODO: remove once refresh doesn't fail immediately if there a master block: // https://github.com/elastic/elasticsearch/issues/9997 // clusterAdmin().prepareHealth("test").setWaitForYellowStatus().get(); logger.info("--> refreshing all indices after indexing is complete"); - indicesAdmin().prepareRefresh().execute().actionGet(); + indicesAdmin().prepareRefresh().get(); logger.info("--> checking if documents exist, there should be 3"); for (int i = 0; i < 10; i++) { @@ -442,9 +404,8 @@ public void testLatestVersionLoaded() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); - indicesAdmin().prepareAliases().addAlias("test", "test_alias", QueryBuilders.termQuery("field", "value")).execute().actionGet(); + .get(); + indicesAdmin().prepareAliases().addAlias("test", "test_alias", QueryBuilders.termQuery("field", "value")).get(); logger.info("--> stopping the second node"); internalCluster().stopRandomDataNode(); @@ -465,7 +426,7 @@ public void testLatestVersionLoaded() throws Exception { assertHitCount(prepareSearch().setSize(0).setQuery(matchAllQuery()), 3); } - ClusterState state = clusterAdmin().prepareState().execute().actionGet().getState(); + ClusterState state = clusterAdmin().prepareState().get().getState(); assertThat(state.metadata().templates().get("template_1").patterns(), equalTo(Collections.singletonList("te*"))); assertThat(state.metadata().index("test").getAliases().get("test_alias"), notNullValue()); assertThat(state.metadata().index("test").getAliases().get("test_alias").filter(), notNullValue()); @@ -495,7 +456,7 @@ public void testReuseInFileBasedPeerRecovery() throws Exception { logger.info("--> indexing docs"); int numDocs = randomIntBetween(1, 1024); for (int i = 0; i < numDocs; i++) { - client(primaryNode).prepareIndex("test").setSource("field", "value").execute().actionGet(); + client(primaryNode).prepareIndex("test").setSource("field", "value").get(); } client(primaryNode).admin().indices().prepareFlush("test").setForce(true).get(); @@ -528,7 +489,7 @@ public void testReuseInFileBasedPeerRecovery() throws Exception { public Settings onNodeStopped(String nodeName) throws Exception { // index some more documents; we expect to reuse the files that already exist on the replica for (int i = 0; i < moreDocs; i++) { - client(primaryNode).prepareIndex("test").setSource("field", "value").execute().actionGet(); + client(primaryNode).prepareIndex("test").setSource("field", "value").get(); 
} // prevent a sequence-number-based recovery from being possible diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java index 8cbce0cc098ed..e7988d447571a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java @@ -89,7 +89,7 @@ public void testPreferCopyCanPerformNoopRecovery() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(100, 500)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(100, 500)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); indicesAdmin().prepareFlush(indexName).get(); if (randomBoolean()) { @@ -97,7 +97,7 @@ public void testPreferCopyCanPerformNoopRecovery() throws Exception { randomBoolean(), false, randomBoolean(), - IntStream.range(0, between(0, 80)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(0, 80)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); } ensureActivePeerRecoveryRetentionLeasesAdvanced(indexName); @@ -152,7 +152,7 @@ public void testRecentPrimaryInformation() throws Exception { randomBoolean(), false, randomBoolean(), - IntStream.range(0, between(10, 100)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(10, 100)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); internalCluster().stopNode(nodeWithReplica); if (randomBoolean()) { @@ -160,7 +160,7 @@ public void testRecentPrimaryInformation() throws Exception { randomBoolean(), false, randomBoolean(), - IntStream.range(0, between(10, 100)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(10, 100)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); } CountDownLatch blockRecovery = new CountDownLatch(1); @@ -184,7 +184,7 @@ public void testRecentPrimaryInformation() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(50, 200)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(50, 200)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); indicesAdmin().prepareFlush(indexName).get(); assertBusy(() -> { @@ -235,14 +235,14 @@ public void testFullClusterRestartPerformNoopRecovery() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(200, 500)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(200, 500)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); indicesAdmin().prepareFlush(indexName).get(); indexRandom( randomBoolean(), false, randomBoolean(), - IntStream.range(0, between(0, 80)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(0, 80)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); if (randomBoolean()) { indicesAdmin().prepareForceMerge(indexName).get(); @@ -281,7 +281,7 @@ public void testPreferCopyWithHighestMatchingOperations() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(200, 
500)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(200, 500)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); indicesAdmin().prepareFlush(indexName).get(); String nodeWithLowerMatching = randomFrom(internalCluster().nodesInclude(indexName)); @@ -293,7 +293,7 @@ public void testPreferCopyWithHighestMatchingOperations() throws Exception { randomBoolean(), false, randomBoolean(), - IntStream.range(0, between(1, 100)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(1, 100)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); ensureActivePeerRecoveryRetentionLeasesAdvanced(indexName); String nodeWithHigherMatching = randomFrom(internalCluster().nodesInclude(indexName)); @@ -304,7 +304,7 @@ public void testPreferCopyWithHighestMatchingOperations() throws Exception { randomBoolean(), false, randomBoolean(), - IntStream.range(0, between(1, 100)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(1, 100)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); } @@ -338,7 +338,7 @@ public void testDoNotCancelRecoveryForBrokenNode() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(200, 500)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(200, 500)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); indicesAdmin().prepareFlush(indexName).get(); String brokenNode = internalCluster().startDataOnlyNode(); @@ -384,7 +384,7 @@ public void testPeerRecoveryForClosedIndices() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, randomIntBetween(1, 100)).mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).toList() + IntStream.range(0, randomIntBetween(1, 100)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).toList() ); ensureActivePeerRecoveryRetentionLeasesAdvanced(indexName); assertAcked(indicesAdmin().prepareClose(indexName)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java index 313d1e686e1fd..fffa0ad05496b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ReleasableLock; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; @@ -59,9 +60,9 @@ /** * A legacy version of {@link ReplicaShardAllocatorIT#testPreferCopyCanPerformNoopRecovery()} verifying * that the {@link ReplicaShardAllocator} prefers copies with matching sync_id. 
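Just below, the free-text "TODO: Remove this test in 9.0" javadoc line gives way to the org.elasticsearch.core.UpdateForV9 annotation imported earlier in this hunk, turning the removal reminder into something tooling can search for. As a rough illustration (not the actual declaration), such a marker annotation needs very little:

    import java.lang.annotation.Documented;
    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;

    // A grep-able stand-in for version-scoped TODOs; retention can stay SOURCE
    // because the marker only matters to humans and static tooling.
    @Documented
    @Retention(RetentionPolicy.SOURCE)
    @Target({ ElementType.TYPE, ElementType.METHOD, ElementType.FIELD, ElementType.CONSTRUCTOR })
    @interface UpdateForV9Sketch {
    }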
- * TODO: Remove this test in 9.0 */ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +@UpdateForV9 // remove this test in v9 public class ReplicaShardAllocatorSyncIdIT extends ESIntegTestCase { private static final AtomicBoolean allowFlush = new AtomicBoolean(); @@ -170,7 +171,7 @@ public void testPreferCopyCanPerformNoopRecovery() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(100, 500)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(100, 500)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); if (randomBoolean()) { indicesAdmin().prepareFlush(indexName).get(); @@ -227,7 +228,7 @@ public void testFullClusterRestartPerformNoopRecovery() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(200, 500)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(200, 500)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); if (randomBoolean()) { indicesAdmin().prepareFlush(indexName).get(); @@ -265,7 +266,7 @@ public void testSimulateRecoverySourceOnOldNode() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(200, 500)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(200, 500)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); } if (randomBoolean()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java index 129b83f664927..3a12856fb92b5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java @@ -81,7 +81,7 @@ public void testSimpleGet() { assertThat(response.isExists(), equalTo(false)); logger.info("--> index doc 1"); - client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); + prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); logger.info("--> non realtime get 1"); response = client().prepareGet(indexOrAlias(), "1").setRealtime(false).get(); @@ -100,7 +100,7 @@ public void testSimpleGet() { assertThat(response.getIndex(), equalTo("test")); Set fields = new HashSet<>(response.getFields().keySet()); assertThat(fields, equalTo(Collections.emptySet())); - assertThat(response.getSourceAsBytes(), nullValue()); + assertThat(response.getSourceAsBytesRef(), nullValue()); logger.info("--> realtime get 1 (no source, explicit)"); response = client().prepareGet(indexOrAlias(), "1").setFetchSource(false).get(); @@ -108,7 +108,7 @@ public void testSimpleGet() { assertThat(response.getIndex(), equalTo("test")); fields = new HashSet<>(response.getFields().keySet()); assertThat(fields, equalTo(Collections.emptySet())); - assertThat(response.getSourceAsBytes(), nullValue()); + assertThat(response.getSourceAsBytesRef(), nullValue()); logger.info("--> realtime get 1 (no type)"); response = client().prepareGet(indexOrAlias(), "1").get(); @@ -121,7 +121,7 @@ public void testSimpleGet() { response = client().prepareGet(indexOrAlias(), "1").setStoredFields("field1").get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getIndex(), equalTo("test")); - assertThat(response.getSourceAsBytes(), 
nullValue()); + assertThat(response.getSourceAsBytesRef(), nullValue()); assertThat(response.getField("field1").getValues().get(0).toString(), equalTo("value1")); assertThat(response.getField("field2"), nullValue()); @@ -155,7 +155,7 @@ public void testSimpleGet() { response = client().prepareGet(indexOrAlias(), "1").setStoredFields("field1").get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getIndex(), equalTo("test")); - assertThat(response.getSourceAsBytes(), nullValue()); + assertThat(response.getSourceAsBytesRef(), nullValue()); assertThat(response.getField("field1").getValues().get(0).toString(), equalTo("value1")); assertThat(response.getField("field2"), nullValue()); @@ -163,12 +163,12 @@ public void testSimpleGet() { response = client().prepareGet(indexOrAlias(), "1").setStoredFields("field1").setFetchSource(true).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getIndex(), equalTo("test")); - assertThat(response.getSourceAsBytes(), not(nullValue())); + assertThat(response.getSourceAsBytesRef(), not(nullValue())); assertThat(response.getField("field1").getValues().get(0).toString(), equalTo("value1")); assertThat(response.getField("field2"), nullValue()); logger.info("--> update doc 1"); - client().prepareIndex("test").setId("1").setSource("field1", "value1_1", "field2", "value2_1").get(); + prepareIndex("test").setId("1").setSource("field1", "value1_1", "field2", "value2_1").get(); logger.info("--> realtime get 1"); response = client().prepareGet(indexOrAlias(), "1").get(); @@ -178,7 +178,7 @@ public void testSimpleGet() { assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2_1")); logger.info("--> update doc 1 again"); - client().prepareIndex("test").setId("1").setSource("field1", "value1_2", "field2", "value2_2").get(); + prepareIndex("test").setId("1").setSource("field1", "value1_2", "field2", "value2_2").get(); response = client().prepareGet(indexOrAlias(), "1").get(); assertThat(response.isExists(), equalTo(true)); @@ -202,10 +202,7 @@ public void testGetWithAliasPointingToMultipleIndices() { } else { indicesAdmin().prepareCreate("index3").addAlias(new Alias("alias1").indexRouting("1").writeIndex(true)).get(); } - DocWriteResponse indexResponse = client().prepareIndex("index1") - .setId("id") - .setSource(Collections.singletonMap("foo", "bar")) - .get(); + DocWriteResponse indexResponse = prepareIndex("index1").setId("id").setSource(Collections.singletonMap("foo", "bar")).get(); assertThat(indexResponse.status().getStatus(), equalTo(RestStatus.CREATED.getStatus())); IllegalArgumentException exception = expectThrows( @@ -232,7 +229,7 @@ public void testSimpleMultiGet() throws Exception { assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(false)); for (int i = 0; i < 10; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); } response = client().prepareMultiGet() @@ -271,7 +268,7 @@ public void testSimpleMultiGet() throws Exception { .get(); assertThat(response.getResponses().length, equalTo(2)); - assertThat(response.getResponses()[0].getResponse().getSourceAsBytes(), nullValue()); + assertThat(response.getResponses()[0].getResponse().getSourceAsBytesRef(), nullValue()); assertThat(response.getResponses()[0].getResponse().getField("field").getValues().get(0).toString(), equalTo("value1")); } @@ -294,7 +291,7 @@ public void 
testGetDocWithMultivaluedFields() throws Exception { assertThat(response.isExists(), equalTo(false)); assertThat(response.isExists(), equalTo(false)); - client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().array("field", "1", "2").endObject()).get(); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().array("field", "1", "2").endObject()).get(); response = client().prepareGet("test", "1").setStoredFields("field").get(); assertThat(response.isExists(), equalTo(true)); @@ -325,7 +322,7 @@ public void testGetWithVersion() { assertThat(response.isExists(), equalTo(false)); logger.info("--> index doc 1"); - client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); + prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); // From translog: @@ -369,7 +366,7 @@ public void testGetWithVersion() { } logger.info("--> index doc 1 again, so increasing the version"); - client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); + prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); // From translog: @@ -424,7 +421,7 @@ public void testMultiGetWithVersion() throws Exception { assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(false)); for (int i = 0; i < 3; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); } // Version from translog @@ -474,7 +471,7 @@ public void testMultiGetWithVersion() throws Exception { assertThat(response.getResponses()[2].getFailure().getFailure(), instanceOf(VersionConflictEngineException.class)); for (int i = 0; i < 3; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); } // Version from translog @@ -547,8 +544,7 @@ public void testGetFieldsNonLeafField() throws Exception { .setSettings(Settings.builder().put("index.refresh_interval", -1)) ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().startObject("field1").field("field2", "value1").endObject().endObject()) .get(); @@ -625,7 +621,7 @@ public void testGetFieldsComplexField() throws Exception { logger.info("indexing documents"); - client().prepareIndex("my-index").setId("1").setSource(source, XContentType.JSON).get(); + prepareIndex("my-index").setId("1").setSource(source, XContentType.JSON).get(); logger.info("checking real time retrieval"); @@ -720,7 +716,7 @@ public void testUngeneratedFieldsThatAreAlwaysStored() throws IOException { assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSource(createIndexSource, XContentType.JSON)); ensureGreen(); - client().prepareIndex("test").setId("1").setRouting("routingValue").setId("1").setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId("1").setRouting("routingValue").setId("1").setSource("{}", XContentType.JSON).get(); String[] fieldsList = { "_routing" }; // before refresh - document is only in translog @@ -749,7 +745,7 @@ public void testUngeneratedFieldsNotPartOfSourceStored() throws IOException { "text": "some text." 
} """; - client().prepareIndex("test").setId("1").setSource(doc, XContentType.JSON).setRouting("1").get(); + prepareIndex("test").setId("1").setSource(doc, XContentType.JSON).setRouting("1").get(); String[] fieldsList = { "_routing" }; // before refresh - document is only in translog assertGetFieldsAlwaysWorks(indexOrAlias(), "1", fieldsList, "1"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/get/GetFromTranslogActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/get/GetFromTranslogActionIT.java index 33038a0fb32a0..c9432ede04911 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/get/GetFromTranslogActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/get/GetFromTranslogActionIT.java @@ -44,11 +44,7 @@ public void testGetFromTranslog() throws Exception { // There hasn't been any switches from unsafe to safe map assertThat(response.segmentGeneration(), equalTo(-1L)); - var indexResponse = client().prepareIndex("test") - .setId("1") - .setSource("field1", "value1") - .setRefreshPolicy(RefreshPolicy.NONE) - .get(); + var indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(RefreshPolicy.NONE).get(); response = getFromTranslog(indexOrAlias(), "1"); assertNotNull(response.getResult()); assertThat(response.getResult().isExists(), equalTo(true)); @@ -61,7 +57,7 @@ public void testGetFromTranslog() throws Exception { assertThat(response.getResult().isExists(), equalTo(false)); assertThat(response.segmentGeneration(), equalTo(-1L)); - indexResponse = client().prepareIndex("test").setSource("field1", "value2").get(); + indexResponse = prepareIndex("test").setSource("field1", "value2").get(); response = getFromTranslog(indexOrAlias(), indexResponse.getId()); assertNotNull(response.getResult()); assertThat(response.getResult().isExists(), equalTo(true)); @@ -74,11 +70,11 @@ public void testGetFromTranslog() throws Exception { assertThat(response.segmentGeneration(), equalTo(-1L)); // After two refreshes the LiveVersionMap switches back to append-only and stops tracking IDs // Refreshing with empty LiveVersionMap doesn't cause the switch, see {@link LiveVersionMap.Maps#shouldInheritSafeAccess()}. - client().prepareIndex("test").setSource("field1", "value3").get(); + prepareIndex("test").setSource("field1", "value3").get(); refresh("test"); refresh("test"); // An optimized index operation marks the maps as unsafe - client().prepareIndex("test").setSource("field1", "value4").get(); + prepareIndex("test").setSource("field1", "value4").get(); response = getFromTranslog(indexOrAlias(), "non-existent"); assertNull(response.getResult()); assertThat(response.segmentGeneration(), greaterThan(0L)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/get/ShardMultiGetFomTranslogActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/get/ShardMultiGetFomTranslogActionIT.java index fd14b18bb8a3b..2d8a48cf48668 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/get/ShardMultiGetFomTranslogActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/get/ShardMultiGetFomTranslogActionIT.java @@ -48,7 +48,7 @@ public void testShardMultiGetFromTranslog() throws Exception { // Do a single get to enable storing locations in translog. Otherwise, we could get unwanted refreshes that // prune the LiveVersionMap and would make the test fail/flaky. 
- var indexResponse = client().prepareIndex("test").setId("0").setSource("field1", "value2").get(); + var indexResponse = prepareIndex("test").setId("0").setSource("field1", "value2").get(); client().prepareGet("test", indexResponse.getId()).get(); var mgetIds = List.of("1", "2", "3"); @@ -107,7 +107,7 @@ public void testShardMultiGetFromTranslog() throws Exception { } assertThat(response.segmentGeneration(), equalTo(-1L)); - indexResponse = client().prepareIndex("test").setSource("field1", "value2").get(); + indexResponse = prepareIndex("test").setSource("field1", "value2").get(); response = getFromTranslog(indexOrAlias(), List.of(indexResponse.getId())); multiGetShardResponse = response.multiGetShardResponse(); assertThat(getLocations(multiGetShardResponse).size(), equalTo(1)); @@ -131,11 +131,11 @@ public void testShardMultiGetFromTranslog() throws Exception { assertThat(response.segmentGeneration(), equalTo(-1L)); // After two refreshes the LiveVersionMap switches back to append-only and stops tracking IDs // Refreshing with empty LiveVersionMap doesn't cause the switch, see {@link LiveVersionMap.Maps#shouldInheritSafeAccess()}. - client().prepareIndex("test").setSource("field1", "value3").get(); + prepareIndex("test").setSource("field1", "value3").get(); refresh("test"); refresh("test"); // An optimized index operation marks the maps as unsafe - client().prepareIndex("test").setSource("field1", "value4").get(); + prepareIndex("test").setSource("field1", "value4").get(); response = getFromTranslog(indexOrAlias(), List.of("non-existent")); multiGetShardResponse = response.multiGetShardResponse(); assertThat(getLocations(multiGetShardResponse).size(), equalTo(1)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthCancellationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthCancellationIT.java index 7a9fd0b6ccf60..eda8a4eb9e459 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthCancellationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthCancellationIT.java @@ -90,7 +90,7 @@ public void testCancellation() throws Exception { } final ClusterService clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); - final PlainActionFuture findHealthNodeFuture = PlainActionFuture.newFuture(); + final PlainActionFuture findHealthNodeFuture = new PlainActionFuture<>(); // the health node might take a bit of time to be assigned by the persistent task framework so we wait until we have a health // node in the cluster before proceeding with the test // proceeding with the execution before the health node assignment would yield a non-deterministic behaviour as we diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/HealthMetadataServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/HealthMetadataServiceIT.java index 0717d4b306ed2..660d6028486a0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/HealthMetadataServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/HealthMetadataServiceIT.java @@ -45,210 +45,207 @@ public void setup() { } public void testEachMasterPublishesTheirThresholds() throws Exception { - try (InternalTestCluster internalCluster = internalCluster()) { - int numberOfNodes = 3; - Map watermarkByNode = new HashMap<>(); - Map maxHeadroomByNode = new HashMap<>(); - Map shardLimitsPerNode = new HashMap<>(); - for (int i = 0; i < 
numberOfNodes; i++) { - ByteSizeValue randomBytes = ByteSizeValue.ofBytes(randomLongBetween(6, 19)); - String customWatermark = percentageMode ? randomIntBetween(86, 94) + "%" : randomBytes.toString(); - ByteSizeValue customMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE; - var customShardLimits = new HealthMetadata.ShardLimits(randomIntBetween(1, 1000), randomIntBetween(1001, 2000)); - String nodeName = startNode(internalCluster, customWatermark, customMaxHeadroom.toString(), customShardLimits); - watermarkByNode.put(nodeName, customWatermark); - maxHeadroomByNode.put(nodeName, customMaxHeadroom); - shardLimitsPerNode.put(nodeName, customShardLimits); - } - ensureStableCluster(numberOfNodes); + final InternalTestCluster internalCluster = internalCluster(); + int numberOfNodes = 3; + Map watermarkByNode = new HashMap<>(); + Map maxHeadroomByNode = new HashMap<>(); + Map shardLimitsPerNode = new HashMap<>(); + for (int i = 0; i < numberOfNodes; i++) { + ByteSizeValue randomBytes = ByteSizeValue.ofBytes(randomLongBetween(6, 19)); + String customWatermark = percentageMode ? randomIntBetween(86, 94) + "%" : randomBytes.toString(); + ByteSizeValue customMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE; + var customShardLimits = new HealthMetadata.ShardLimits(randomIntBetween(1, 1000), randomIntBetween(1001, 2000)); + String nodeName = startNode(internalCluster, customWatermark, customMaxHeadroom.toString(), customShardLimits); + watermarkByNode.put(nodeName, customWatermark); + maxHeadroomByNode.put(nodeName, customMaxHeadroom); + shardLimitsPerNode.put(nodeName, customShardLimits); + } + ensureStableCluster(numberOfNodes); - String electedMaster = internalCluster.getMasterName(); - { - var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); - var diskMetadata = healthMetadata.getDiskMetadata(); - assertThat(diskMetadata.describeHighWatermark(), equalTo(watermarkByNode.get(electedMaster))); - assertThat(diskMetadata.highMaxHeadroom(), equalTo(maxHeadroomByNode.get(electedMaster))); + String electedMaster = internalCluster.getMasterName(); + { + var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); + var diskMetadata = healthMetadata.getDiskMetadata(); + assertThat(diskMetadata.describeHighWatermark(), equalTo(watermarkByNode.get(electedMaster))); + assertThat(diskMetadata.highMaxHeadroom(), equalTo(maxHeadroomByNode.get(electedMaster))); - var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); - assertEquals(shardLimitsMetadata, shardLimitsPerNode.get(electedMaster)); - } + var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); + assertEquals(shardLimitsMetadata, shardLimitsPerNode.get(electedMaster)); + } - // Stop the master to ensure another node will become master with a different watermark - internalCluster.stopNode(electedMaster); - ensureStableCluster(numberOfNodes - 1); - electedMaster = internalCluster.getMasterName(); - { - var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); - var diskMetadata = healthMetadata.getDiskMetadata(); - assertThat(diskMetadata.describeHighWatermark(), equalTo(watermarkByNode.get(electedMaster))); - assertThat(diskMetadata.highMaxHeadroom(), equalTo(maxHeadroomByNode.get(electedMaster))); - - var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); - assertEquals(shardLimitsMetadata, shardLimitsPerNode.get(electedMaster)); - } + // Stop the master to 
ensure another node will become master with a different watermark + internalCluster.stopNode(electedMaster); + ensureStableCluster(numberOfNodes - 1); + electedMaster = internalCluster.getMasterName(); + { + var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); + var diskMetadata = healthMetadata.getDiskMetadata(); + assertThat(diskMetadata.describeHighWatermark(), equalTo(watermarkByNode.get(electedMaster))); + assertThat(diskMetadata.highMaxHeadroom(), equalTo(maxHeadroomByNode.get(electedMaster))); + + var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); + assertEquals(shardLimitsMetadata, shardLimitsPerNode.get(electedMaster)); + } - // restart the whole cluster - internalCluster.fullRestart(); - ensureStableCluster(internalCluster.numDataAndMasterNodes()); - String electedMasterAfterRestart = internalCluster.getMasterName(); - { - var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); - var diskMetadata = healthMetadata.getDiskMetadata(); - assertThat(diskMetadata.describeHighWatermark(), equalTo(watermarkByNode.get(electedMasterAfterRestart))); - assertThat(diskMetadata.highMaxHeadroom(), equalTo(maxHeadroomByNode.get(electedMasterAfterRestart))); - - var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); - assertEquals(shardLimitsMetadata, shardLimitsPerNode.get(electedMasterAfterRestart)); - } + // restart the whole cluster + internalCluster.fullRestart(); + ensureStableCluster(internalCluster.numDataAndMasterNodes()); + String electedMasterAfterRestart = internalCluster.getMasterName(); + { + var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); + var diskMetadata = healthMetadata.getDiskMetadata(); + assertThat(diskMetadata.describeHighWatermark(), equalTo(watermarkByNode.get(electedMasterAfterRestart))); + assertThat(diskMetadata.highMaxHeadroom(), equalTo(maxHeadroomByNode.get(electedMasterAfterRestart))); + + var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); + assertEquals(shardLimitsMetadata, shardLimitsPerNode.get(electedMasterAfterRestart)); } } public void testWatermarkSettingUpdate() throws Exception { - try (InternalTestCluster internalCluster = internalCluster()) { - int numberOfNodes = 3; - ByteSizeValue randomBytes = ByteSizeValue.ofBytes(randomLongBetween(6, 19)); - String initialWatermark = percentageMode ? randomIntBetween(86, 94) + "%" : randomBytes.toString(); - ByteSizeValue initialMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE; - HealthMetadata.ShardLimits initialShardLimits = new HealthMetadata.ShardLimits( - randomIntBetween(1, 1000), - randomIntBetween(1001, 2000) - ); - for (int i = 0; i < numberOfNodes; i++) { - startNode(internalCluster, initialWatermark, initialMaxHeadroom.toString(), initialShardLimits); - } + final InternalTestCluster internalCluster = internalCluster(); + int numberOfNodes = 3; + ByteSizeValue randomBytes = ByteSizeValue.ofBytes(randomLongBetween(6, 19)); + String initialWatermark = percentageMode ? randomIntBetween(86, 94) + "%" : randomBytes.toString(); + ByteSizeValue initialMaxHeadroom = percentageMode ? 
randomBytes : ByteSizeValue.MINUS_ONE; + HealthMetadata.ShardLimits initialShardLimits = new HealthMetadata.ShardLimits( + randomIntBetween(1, 1000), + randomIntBetween(1001, 2000) + ); + for (int i = 0; i < numberOfNodes; i++) { + startNode(internalCluster, initialWatermark, initialMaxHeadroom.toString(), initialShardLimits); + } - randomBytes = ByteSizeValue.ofBytes(randomLongBetween(101, 200)); - String updatedLowWatermark = percentageMode ? randomIntBetween(40, 59) + "%" : randomBytes.toString(); - ByteSizeValue updatedLowMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE; - randomBytes = ByteSizeValue.ofBytes(randomLongBetween(50, 100)); - String updatedHighWatermark = percentageMode ? randomIntBetween(60, 90) + "%" : randomBytes.toString(); - ByteSizeValue updatedHighMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE; - randomBytes = ByteSizeValue.ofBytes(randomLongBetween(5, 10)); - String updatedFloodStageWatermark = percentageMode ? randomIntBetween(91, 95) + "%" : randomBytes.toString(); - ByteSizeValue updatedFloodStageMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE; - HealthMetadata.ShardLimits updatedShardLimits = new HealthMetadata.ShardLimits( - randomIntBetween(3000, 4000), - randomIntBetween(4001, 5000) - ); + randomBytes = ByteSizeValue.ofBytes(randomLongBetween(101, 200)); + String updatedLowWatermark = percentageMode ? randomIntBetween(40, 59) + "%" : randomBytes.toString(); + ByteSizeValue updatedLowMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE; + randomBytes = ByteSizeValue.ofBytes(randomLongBetween(50, 100)); + String updatedHighWatermark = percentageMode ? randomIntBetween(60, 90) + "%" : randomBytes.toString(); + ByteSizeValue updatedHighMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE; + randomBytes = ByteSizeValue.ofBytes(randomLongBetween(5, 10)); + String updatedFloodStageWatermark = percentageMode ? randomIntBetween(91, 95) + "%" : randomBytes.toString(); + ByteSizeValue updatedFloodStageMaxHeadroom = percentageMode ? 
randomBytes : ByteSizeValue.MINUS_ONE; + HealthMetadata.ShardLimits updatedShardLimits = new HealthMetadata.ShardLimits( + randomIntBetween(3000, 4000), + randomIntBetween(4001, 5000) + ); - ensureStableCluster(numberOfNodes); - { - var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); - var diskMetadata = healthMetadata.getDiskMetadata(); - assertThat(diskMetadata.describeHighWatermark(), equalTo(initialWatermark)); - assertThat(diskMetadata.highMaxHeadroom(), equalTo(initialMaxHeadroom)); + ensureStableCluster(numberOfNodes); + { + var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); + var diskMetadata = healthMetadata.getDiskMetadata(); + assertThat(diskMetadata.describeHighWatermark(), equalTo(initialWatermark)); + assertThat(diskMetadata.highMaxHeadroom(), equalTo(initialMaxHeadroom)); - var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); - assertEquals(shardLimitsMetadata, initialShardLimits); - } - var settingsBuilder = Settings.builder() - .put(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), updatedLowWatermark) - .put(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), updatedHighWatermark) - .put(CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), updatedFloodStageWatermark) - .put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), updatedShardLimits.maxShardsPerNode()) - .put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE_FROZEN.getKey(), updatedShardLimits.maxShardsPerNodeFrozen()); - - if (percentageMode) { - settingsBuilder.put(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_MAX_HEADROOM_SETTING.getKey(), updatedLowMaxHeadroom) - .put(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_MAX_HEADROOM_SETTING.getKey(), updatedHighMaxHeadroom) - .put(CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_MAX_HEADROOM_SETTING.getKey(), updatedFloodStageMaxHeadroom); - } - updateSettings(internalCluster, settingsBuilder); - - assertBusy(() -> { - var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); - var diskMetadata = healthMetadata.getDiskMetadata(); - assertThat(diskMetadata.describeHighWatermark(), equalTo(updatedHighWatermark)); - assertThat(diskMetadata.highMaxHeadroom(), equalTo(updatedHighMaxHeadroom)); - assertThat(diskMetadata.describeFloodStageWatermark(), equalTo(updatedFloodStageWatermark)); - assertThat(diskMetadata.floodStageMaxHeadroom(), equalTo(updatedFloodStageMaxHeadroom)); - - var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); - assertEquals(shardLimitsMetadata, updatedShardLimits); - }); - - var electedMaster = internalCluster.getMasterName(); - - // Force a master fail-over but, since the settings were manually changed, we should return the manually set values - internalCluster.stopNode(electedMaster); - ensureStableCluster(numberOfNodes - 1); - - assertBusy(() -> { - var healthMetadata = HealthMetadata.getFromClusterState( - internalCluster.clusterService(internalCluster.getMasterName()).state() - ); - var diskMetadata = healthMetadata.getDiskMetadata(); - assertThat(diskMetadata.describeHighWatermark(), equalTo(updatedHighWatermark)); - assertThat(diskMetadata.highMaxHeadroom(), equalTo(updatedHighMaxHeadroom)); - assertThat(diskMetadata.describeFloodStageWatermark(), equalTo(updatedFloodStageWatermark)); - assertThat(diskMetadata.floodStageMaxHeadroom(), equalTo(updatedFloodStageMaxHeadroom)); - - var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); - 
assertEquals(shardLimitsMetadata, updatedShardLimits); - }); - - // restart the whole cluster - internalCluster.fullRestart(); - ensureStableCluster(internalCluster.numDataAndMasterNodes()); - String electedMasterAfterRestart = internalCluster.getMasterName(); - assertBusy(() -> { - var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService(electedMasterAfterRestart).state()); - var diskMetadata = healthMetadata.getDiskMetadata(); - assertThat(diskMetadata.describeHighWatermark(), equalTo(updatedHighWatermark)); - assertThat(diskMetadata.highMaxHeadroom(), equalTo(updatedHighMaxHeadroom)); - assertThat(diskMetadata.describeFloodStageWatermark(), equalTo(updatedFloodStageWatermark)); - assertThat(diskMetadata.floodStageMaxHeadroom(), equalTo(updatedFloodStageMaxHeadroom)); - - var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); - assertEquals(shardLimitsMetadata, updatedShardLimits); - }); + var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); + assertEquals(shardLimitsMetadata, initialShardLimits); + } + var settingsBuilder = Settings.builder() + .put(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), updatedLowWatermark) + .put(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), updatedHighWatermark) + .put(CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), updatedFloodStageWatermark) + .put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), updatedShardLimits.maxShardsPerNode()) + .put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE_FROZEN.getKey(), updatedShardLimits.maxShardsPerNodeFrozen()); + + if (percentageMode) { + settingsBuilder.put(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_MAX_HEADROOM_SETTING.getKey(), updatedLowMaxHeadroom) + .put(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_MAX_HEADROOM_SETTING.getKey(), updatedHighMaxHeadroom) + .put(CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_MAX_HEADROOM_SETTING.getKey(), updatedFloodStageMaxHeadroom); } + updateSettings(internalCluster, settingsBuilder); + + assertBusy(() -> { + var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); + var diskMetadata = healthMetadata.getDiskMetadata(); + assertThat(diskMetadata.describeHighWatermark(), equalTo(updatedHighWatermark)); + assertThat(diskMetadata.highMaxHeadroom(), equalTo(updatedHighMaxHeadroom)); + assertThat(diskMetadata.describeFloodStageWatermark(), equalTo(updatedFloodStageWatermark)); + assertThat(diskMetadata.floodStageMaxHeadroom(), equalTo(updatedFloodStageMaxHeadroom)); + + var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); + assertEquals(shardLimitsMetadata, updatedShardLimits); + }); + + var electedMaster = internalCluster.getMasterName(); + + // Force a master fail-over but, since the settings were manually changed, we should return the manually set values + internalCluster.stopNode(electedMaster); + ensureStableCluster(numberOfNodes - 1); + + assertBusy(() -> { + var healthMetadata = HealthMetadata.getFromClusterState( + internalCluster.clusterService(internalCluster.getMasterName()).state() + ); + var diskMetadata = healthMetadata.getDiskMetadata(); + assertThat(diskMetadata.describeHighWatermark(), equalTo(updatedHighWatermark)); + assertThat(diskMetadata.highMaxHeadroom(), equalTo(updatedHighMaxHeadroom)); + assertThat(diskMetadata.describeFloodStageWatermark(), equalTo(updatedFloodStageWatermark)); + assertThat(diskMetadata.floodStageMaxHeadroom(), equalTo(updatedFloodStageMaxHeadroom)); + + var shardLimitsMetadata = 
healthMetadata.getShardLimitsMetadata(); + assertEquals(shardLimitsMetadata, updatedShardLimits); + }); + + // restart the whole cluster + internalCluster.fullRestart(); + ensureStableCluster(internalCluster.numDataAndMasterNodes()); + String electedMasterAfterRestart = internalCluster.getMasterName(); + assertBusy(() -> { + var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService(electedMasterAfterRestart).state()); + var diskMetadata = healthMetadata.getDiskMetadata(); + assertThat(diskMetadata.describeHighWatermark(), equalTo(updatedHighWatermark)); + assertThat(diskMetadata.highMaxHeadroom(), equalTo(updatedHighMaxHeadroom)); + assertThat(diskMetadata.describeFloodStageWatermark(), equalTo(updatedFloodStageWatermark)); + assertThat(diskMetadata.floodStageMaxHeadroom(), equalTo(updatedFloodStageMaxHeadroom)); + + var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); + assertEquals(shardLimitsMetadata, updatedShardLimits); + }); } public void testHealthNodeToggleEnabled() throws Exception { - try (InternalTestCluster internalCluster = internalCluster()) { - int numberOfNodes = 3; - Map watermarkByNode = new HashMap<>(); - Map maxHeadroomByNode = new HashMap<>(); - Map shardLimitsPerNode = new HashMap<>(); - for (int i = 0; i < numberOfNodes; i++) { - ByteSizeValue randomBytes = ByteSizeValue.ofBytes(randomLongBetween(6, 19)); - String customWatermark = percentageMode ? randomIntBetween(86, 94) + "%" : randomBytes.toString(); - ByteSizeValue customMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE; - var customShardLimits = new HealthMetadata.ShardLimits(randomIntBetween(1, 1000), randomIntBetween(1001, 2000)); - String nodeName = startNode(internalCluster, customWatermark, customMaxHeadroom.toString(), customShardLimits); - watermarkByNode.put(nodeName, customWatermark); - maxHeadroomByNode.put(nodeName, customMaxHeadroom); - shardLimitsPerNode.put(nodeName, customShardLimits); - } + final InternalTestCluster internalCluster = internalCluster(); + int numberOfNodes = 3; + Map watermarkByNode = new HashMap<>(); + Map maxHeadroomByNode = new HashMap<>(); + Map shardLimitsPerNode = new HashMap<>(); + for (int i = 0; i < numberOfNodes; i++) { + ByteSizeValue randomBytes = ByteSizeValue.ofBytes(randomLongBetween(6, 19)); + String customWatermark = percentageMode ? randomIntBetween(86, 94) + "%" : randomBytes.toString(); + ByteSizeValue customMaxHeadroom = percentageMode ? 
randomBytes : ByteSizeValue.MINUS_ONE; + var customShardLimits = new HealthMetadata.ShardLimits(randomIntBetween(1, 1000), randomIntBetween(1001, 2000)); + String nodeName = startNode(internalCluster, customWatermark, customMaxHeadroom.toString(), customShardLimits); + watermarkByNode.put(nodeName, customWatermark); + maxHeadroomByNode.put(nodeName, customMaxHeadroom); + shardLimitsPerNode.put(nodeName, customShardLimits); + } - String electedMaster = internalCluster.getMasterName(); - { - var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); - var diskMetadata = healthMetadata.getDiskMetadata(); - assertThat(diskMetadata.describeHighWatermark(), equalTo(watermarkByNode.get(electedMaster))); - assertThat(diskMetadata.highMaxHeadroom(), equalTo(maxHeadroomByNode.get(electedMaster))); + String electedMaster = internalCluster.getMasterName(); + { + var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); + var diskMetadata = healthMetadata.getDiskMetadata(); + assertThat(diskMetadata.describeHighWatermark(), equalTo(watermarkByNode.get(electedMaster))); + assertThat(diskMetadata.highMaxHeadroom(), equalTo(maxHeadroomByNode.get(electedMaster))); - var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); - assertEquals(shardLimitsMetadata, shardLimitsPerNode.get(electedMaster)); - } + var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); + assertEquals(shardLimitsMetadata, shardLimitsPerNode.get(electedMaster)); + } - // toggle the health metadata service so we can check that the posted settings are still from the master node - updateSettings(internalCluster, Settings.builder().put(HealthNodeTaskExecutor.ENABLED_SETTING.getKey(), false)); + // toggle the health metadata service so we can check that the posted settings are still from the master node + updateSettings(internalCluster, Settings.builder().put(HealthNodeTaskExecutor.ENABLED_SETTING.getKey(), false)); - updateSettings(internalCluster, Settings.builder().put(HealthNodeTaskExecutor.ENABLED_SETTING.getKey(), true)); + updateSettings(internalCluster, Settings.builder().put(HealthNodeTaskExecutor.ENABLED_SETTING.getKey(), true)); - electedMaster = internalCluster.getMasterName(); - ensureStableCluster(numberOfNodes); - { - var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); - var diskMetadata = healthMetadata.getDiskMetadata(); - assertThat(diskMetadata.describeHighWatermark(), equalTo(watermarkByNode.get(electedMaster))); - assertThat(diskMetadata.highMaxHeadroom(), equalTo(maxHeadroomByNode.get(electedMaster))); + electedMaster = internalCluster.getMasterName(); + ensureStableCluster(numberOfNodes); + { + var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); + var diskMetadata = healthMetadata.getDiskMetadata(); + assertThat(diskMetadata.describeHighWatermark(), equalTo(watermarkByNode.get(electedMaster))); + assertThat(diskMetadata.highMaxHeadroom(), equalTo(maxHeadroomByNode.get(electedMaster))); - var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); - assertEquals(shardLimitsMetadata, shardLimitsPerNode.get(electedMaster)); - } + var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); + assertEquals(shardLimitsMetadata, shardLimitsPerNode.get(electedMaster)); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java index 2e741d6691d24..14697cc6533c1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java @@ -52,35 +52,34 @@ protected Settings nodeSettings(int ordinal, Settings otherSettings) { } public void testThatHealthNodeDataIsFetchedAndPassedToIndicators() throws Exception { - try (InternalTestCluster internalCluster = internalCluster()) { - ensureStableCluster(internalCluster.getNodeNames().length); - waitForAllNodesToReportHealth(); - for (String node : internalCluster.getNodeNames()) { - HealthService healthService = internalCluster.getInstance(HealthService.class, node); - AtomicBoolean onResponseCalled = new AtomicBoolean(false); - ActionListener> listener = new ActionListener<>() { - @Override - public void onResponse(List resultList) { - /* - * The following is really just asserting that the TestHealthIndicatorService's calculate method was called. The - * assertions that it actually got the HealthInfo data are in the calculate method of TestHealthIndicatorService. - */ - assertNotNull(resultList); - assertThat(resultList.size(), equalTo(1)); - HealthIndicatorResult testIndicatorResult = resultList.get(0); - assertThat(testIndicatorResult.status(), equalTo(HealthStatus.RED)); - assertThat(testIndicatorResult.symptom(), equalTo(TestHealthIndicatorService.SYMPTOM)); - onResponseCalled.set(true); - } - - @Override - public void onFailure(Exception e) { - throw new RuntimeException(e); - } - }; - healthService.getHealth(internalCluster.client(node), TestHealthIndicatorService.NAME, true, 1000, listener); - assertBusy(() -> assertThat(onResponseCalled.get(), equalTo(true))); - } + final InternalTestCluster internalCluster = internalCluster(); + ensureStableCluster(internalCluster.getNodeNames().length); + waitForAllNodesToReportHealth(); + for (String node : internalCluster.getNodeNames()) { + HealthService healthService = internalCluster.getInstance(HealthService.class, node); + AtomicBoolean onResponseCalled = new AtomicBoolean(false); + ActionListener> listener = new ActionListener<>() { + @Override + public void onResponse(List resultList) { + /* + * The following is really just asserting that the TestHealthIndicatorService's calculate method was called. The + * assertions that it actually got the HealthInfo data are in the calculate method of TestHealthIndicatorService. 
+ */ + assertNotNull(resultList); + assertThat(resultList.size(), equalTo(1)); + HealthIndicatorResult testIndicatorResult = resultList.get(0); + assertThat(testIndicatorResult.status(), equalTo(HealthStatus.RED)); + assertThat(testIndicatorResult.symptom(), equalTo(TestHealthIndicatorService.SYMPTOM)); + onResponseCalled.set(true); + } + + @Override + public void onFailure(Exception e) { + throw new RuntimeException(e); + } + }; + healthService.getHealth(internalCluster.client(node), TestHealthIndicatorService.NAME, true, 1000, listener); + assertBusy(() -> assertThat(onResponseCalled.get(), equalTo(true))); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/UpdateHealthInfoCacheIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/UpdateHealthInfoCacheIT.java index 88852b3e00f23..02816688f1bbb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/UpdateHealthInfoCacheIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/UpdateHealthInfoCacheIT.java @@ -21,7 +21,6 @@ import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.junit.annotations.TestLogging; -import java.io.IOException; import java.util.Collection; import java.util.List; import java.util.Map; @@ -35,78 +34,66 @@ public class UpdateHealthInfoCacheIT extends ESIntegTestCase { private static final DiskHealthInfo GREEN = new DiskHealthInfo(HealthStatus.GREEN, null); public void testNodesReportingHealth() throws Exception { - try (InternalTestCluster internalCluster = internalCluster()) { - decreasePollingInterval(internalCluster); - String[] nodeIds = getNodes(internalCluster).keySet().toArray(new String[0]); - DiscoveryNode healthNode = waitAndGetHealthNode(internalCluster); - assertThat(healthNode, notNullValue()); - assertBusy(() -> assertResultsCanBeFetched(internalCluster, healthNode, List.of(nodeIds), null)); - } catch (IOException e) { - throw new RuntimeException("Failed to close internal cluster: " + e.getMessage(), e); - } + final InternalTestCluster internalCluster = internalCluster(); + decreasePollingInterval(internalCluster); + String[] nodeIds = getNodes(internalCluster).keySet().toArray(new String[0]); + DiscoveryNode healthNode = waitAndGetHealthNode(internalCluster); + assertThat(healthNode, notNullValue()); + assertBusy(() -> assertResultsCanBeFetched(internalCluster, healthNode, List.of(nodeIds), null)); } public void testNodeLeavingCluster() throws Exception { - try (InternalTestCluster internalCluster = internalCluster()) { - decreasePollingInterval(internalCluster); - Collection<DiscoveryNode> nodes = getNodes(internalCluster).values(); - DiscoveryNode healthNode = waitAndGetHealthNode(internalCluster); - assertThat(healthNode, notNullValue()); - DiscoveryNode nodeToLeave = nodes.stream().filter(node -> { - boolean isMaster = node.getName().equals(internalCluster.getMasterName()); - boolean isHealthNode = node.getId().equals(healthNode.getId()); - // We have dedicated tests for master and health node - return isMaster == false && isHealthNode == false; - }).findAny().orElseThrow(); - internalCluster.stopNode(nodeToLeave.getName()); - assertBusy( - () -> assertResultsCanBeFetched( - internalCluster, - healthNode, - nodes.stream().filter(node -> node.equals(nodeToLeave) == false).map(DiscoveryNode::getId).toList(), - nodeToLeave.getId() - ) - ); - } catch (IOException e) { - throw new RuntimeException("Failed to close internal cluster: " + e.getMessage(), e); - }
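The testNodeLeavingCluster hunk above repeats the fix applied throughout these health tests: internalCluster() returns the framework-managed cluster, so the old try-with-resources block, and the IOException catch it forced, would close a cluster the test does not own. A minimal sketch of the corrected shape, assuming an ESIntegTestCase subclass; the class and test names here are hypothetical:

    import org.elasticsearch.test.ESIntegTestCase;
    import org.elasticsearch.test.InternalTestCluster;

    public class SharedClusterUsageIT extends ESIntegTestCase {
        public void testUsesSharedCluster() throws Exception {
            // The test framework owns this cluster's lifecycle; tests must not close() it.
            final InternalTestCluster internalCluster = internalCluster();
            ensureStableCluster(internalCluster.getNodeNames().length);
        }
    }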
+ final InternalTestCluster internalCluster = internalCluster(); + decreasePollingInterval(internalCluster); + Collection<DiscoveryNode> nodes = getNodes(internalCluster).values(); + DiscoveryNode healthNode = waitAndGetHealthNode(internalCluster); + assertThat(healthNode, notNullValue()); + DiscoveryNode nodeToLeave = nodes.stream().filter(node -> { + boolean isMaster = node.getName().equals(internalCluster.getMasterName()); + boolean isHealthNode = node.getId().equals(healthNode.getId()); + // We have dedicated tests for master and health node + return isMaster == false && isHealthNode == false; + }).findAny().orElseThrow(); + internalCluster.stopNode(nodeToLeave.getName()); + assertBusy( + () -> assertResultsCanBeFetched( + internalCluster, + healthNode, + nodes.stream().filter(node -> node.equals(nodeToLeave) == false).map(DiscoveryNode::getId).toList(), + nodeToLeave.getId() + ) + ); } @TestLogging(value = "org.elasticsearch.health.node:DEBUG", reason = "https://github.com/elastic/elasticsearch/issues/97213") public void testHealthNodeFailOver() throws Exception { - try (InternalTestCluster internalCluster = internalCluster()) { - decreasePollingInterval(internalCluster); - String[] nodeIds = getNodes(internalCluster).keySet().toArray(new String[0]); - DiscoveryNode healthNodeToBeShutDown = waitAndGetHealthNode(internalCluster); - assertThat(healthNodeToBeShutDown, notNullValue()); - internalCluster.restartNode(healthNodeToBeShutDown.getName()); - ensureStableCluster(nodeIds.length); - DiscoveryNode newHealthNode = waitAndGetHealthNode(internalCluster); - assertThat(newHealthNode, notNullValue()); - logger.info("Previous health node {}, new health node {}.", healthNodeToBeShutDown, newHealthNode); - assertBusy(() -> assertResultsCanBeFetched(internalCluster, newHealthNode, List.of(nodeIds), null)); - } catch (IOException e) { - throw new RuntimeException("Failed to close internal cluster: " + e.getMessage(), e); - } + final InternalTestCluster internalCluster = internalCluster(); + decreasePollingInterval(internalCluster); + String[] nodeIds = getNodes(internalCluster).keySet().toArray(new String[0]); + DiscoveryNode healthNodeToBeShutDown = waitAndGetHealthNode(internalCluster); + assertThat(healthNodeToBeShutDown, notNullValue()); + internalCluster.restartNode(healthNodeToBeShutDown.getName()); + ensureStableCluster(nodeIds.length); + DiscoveryNode newHealthNode = waitAndGetHealthNode(internalCluster); + assertThat(newHealthNode, notNullValue()); + logger.info("Previous health node {}, new health node {}.", healthNodeToBeShutDown, newHealthNode); + assertBusy(() -> assertResultsCanBeFetched(internalCluster, newHealthNode, List.of(nodeIds), null)); } @TestLogging(value = "org.elasticsearch.health.node:DEBUG", reason = "https://github.com/elastic/elasticsearch/issues/97213") public void testMasterFailure() throws Exception { - try (InternalTestCluster internalCluster = internalCluster()) { - decreasePollingInterval(internalCluster); - String[] nodeIds = getNodes(internalCluster).keySet().toArray(new String[0]); - DiscoveryNode healthNodeBeforeIncident = waitAndGetHealthNode(internalCluster); - assertThat(healthNodeBeforeIncident, notNullValue()); - String masterName = internalCluster.getMasterName(); - logger.info("Restarting elected master node {}.", masterName); - internalCluster.restartNode(masterName); - ensureStableCluster(nodeIds.length); - DiscoveryNode newHealthNode = waitAndGetHealthNode(internalCluster); - assertThat(newHealthNode, notNullValue());
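The tests in this file converge asynchronously, which is why nearly every check is wrapped in assertBusy. A compact sketch of the idiom, assuming ESTestCase's assertBusy, which retries the lambda until it stops throwing AssertionError or the timeout (10 seconds by default) elapses; waitAndGetHealthNode is the helper these tests already use:

    // Poll until the health node task has been (re-)assigned after the restart.
    assertBusy(() -> {
        DiscoveryNode healthNode = waitAndGetHealthNode(internalCluster);
        assertThat(healthNode, notNullValue());
    });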
- assertBusy(() -> assertResultsCanBeFetched(internalCluster, newHealthNode, List.of(nodeIds), null)); - } catch (IOException e) { - throw new RuntimeException("Failed to close internal cluster: " + e.getMessage(), e); - } + final InternalTestCluster internalCluster = internalCluster(); + decreasePollingInterval(internalCluster); + String[] nodeIds = getNodes(internalCluster).keySet().toArray(new String[0]); + DiscoveryNode healthNodeBeforeIncident = waitAndGetHealthNode(internalCluster); + assertThat(healthNodeBeforeIncident, notNullValue()); + String masterName = internalCluster.getMasterName(); + logger.info("Restarting elected master node {}.", masterName); + internalCluster.restartNode(masterName); + ensureStableCluster(nodeIds.length); + DiscoveryNode newHealthNode = waitAndGetHealthNode(internalCluster); + assertThat(newHealthNode, notNullValue()); + assertBusy(() -> assertResultsCanBeFetched(internalCluster, newHealthNode, List.of(nodeIds), null)); } /** diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceIT.java index 1cab207fda30c..2f9ef3c80d23c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceIT.java @@ -18,7 +18,6 @@ import org.elasticsearch.health.HealthStatus; import org.elasticsearch.health.node.selection.HealthNode; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalTestCluster; import java.util.List; import java.util.Map; @@ -30,41 +29,39 @@ public class DiskHealthIndicatorServiceIT extends ESIntegTestCase { public void testGreen() throws Exception { - try (InternalTestCluster internalCluster = internalCluster()) { - internalCluster.startMasterOnlyNode(); - internalCluster.startDataOnlyNode(); - ensureStableCluster(internalCluster.getNodeNames().length); - waitForAllNodesToReportHealth(); - for (String node : internalCluster.getNodeNames()) { - HealthService healthService = internalCluster.getInstance(HealthService.class, node); - List<HealthIndicatorResult> resultList = getHealthServiceResults(healthService, node); - assertNotNull(resultList); - assertThat(resultList.size(), equalTo(1)); - HealthIndicatorResult testIndicatorResult = resultList.get(0); - assertThat(testIndicatorResult.status(), equalTo(HealthStatus.GREEN)); - assertThat(testIndicatorResult.symptom(), equalTo("The cluster has enough available disk space.")); - } + final var internalCluster = internalCluster(); + internalCluster.startMasterOnlyNode(); + internalCluster.startDataOnlyNode(); + ensureStableCluster(internalCluster.getNodeNames().length); + waitForAllNodesToReportHealth(); + for (String node : internalCluster.getNodeNames()) { + HealthService healthService = internalCluster.getInstance(HealthService.class, node); + List<HealthIndicatorResult> resultList = getHealthServiceResults(healthService, node); + assertNotNull(resultList); + assertThat(resultList.size(), equalTo(1)); + HealthIndicatorResult testIndicatorResult = resultList.get(0); + assertThat(testIndicatorResult.status(), equalTo(HealthStatus.GREEN)); + assertThat(testIndicatorResult.symptom(), equalTo("The cluster has enough available disk space.")); } } public void testRed() throws Exception { - try (InternalTestCluster internalCluster = internalCluster()) { - internalCluster.startMasterOnlyNode(getVeryLowWatermarksSettings()); - internalCluster.startDataOnlyNode(getVeryLowWatermarksSettings());
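In the testRed hunk that starts here, both nodes are started with getVeryLowWatermarksSettings(), a private helper of this class whose body lies outside the diff context. A plausible sketch of such a helper, using the real disk-watermark setting keys; the exact percentage values are an assumption:

    private Settings getVeryLowWatermarksSettings() {
        return Settings.builder()
            // Watermarks low enough that any node immediately trips the disk indicator.
            .put("cluster.routing.allocation.disk.watermark.low", "1%")
            .put("cluster.routing.allocation.disk.watermark.high", "1%")
            .put("cluster.routing.allocation.disk.watermark.flood_stage", "1%")
            .build();
    }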
- ensureStableCluster(internalCluster.getNodeNames().length); - waitForAllNodesToReportHealth(); - for (String node : internalCluster.getNodeNames()) { - HealthService healthService = internalCluster.getInstance(HealthService.class, node); - List<HealthIndicatorResult> resultList = getHealthServiceResults(healthService, node); - assertNotNull(resultList); - assertThat(resultList.size(), equalTo(1)); - HealthIndicatorResult testIndicatorResult = resultList.get(0); - assertThat(testIndicatorResult.status(), equalTo(HealthStatus.RED)); - assertThat( - testIndicatorResult.symptom(), - equalTo("2 nodes with roles: [data, master] are out of disk or running low on disk space.") - ); - } + final var internalCluster = internalCluster(); + internalCluster.startMasterOnlyNode(getVeryLowWatermarksSettings()); + internalCluster.startDataOnlyNode(getVeryLowWatermarksSettings()); + ensureStableCluster(internalCluster.getNodeNames().length); + waitForAllNodesToReportHealth(); + for (String node : internalCluster.getNodeNames()) { + HealthService healthService = internalCluster.getInstance(HealthService.class, node); + List<HealthIndicatorResult> resultList = getHealthServiceResults(healthService, node); + assertNotNull(resultList); + assertThat(resultList.size(), equalTo(1)); + HealthIndicatorResult testIndicatorResult = resultList.get(0); + assertThat(testIndicatorResult.status(), equalTo(HealthStatus.RED)); + assertThat( + testIndicatorResult.symptom(), + equalTo("2 nodes with roles: [data, master] are out of disk or running low on disk space.") + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java index 24372978834c6..443d0c384a058 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.action.ingest.GetPipelineResponse; import org.elasticsearch.action.ingest.PutPipelineRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -39,6 +38,7 @@ import java.util.Objects; import java.util.function.BiConsumer; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; @@ -73,7 +73,7 @@ public void testFinalPipelineCantChangeDestination() { final IllegalStateException e = expectThrows( IllegalStateException.class, - () -> client().prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get() + () -> prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get() ); assertThat( e, @@ -93,7 +93,7 @@ public void testFinalPipelineCantRerouteDestination() { final IllegalStateException e = expectThrows( IllegalStateException.class, - () -> client().prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get() + () -> prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get() ); assertThat( e, @@ -118,15 +118,15 @@ public void testFinalPipelineOfOldDestinationIsNotInvoked() { {"processors": [{"final": {"exists":"no_such_field"}}]}"""); clusterAdmin().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody,
XContentType.JSON)).actionGet(); - DocWriteResponse indexResponse = client().prepareIndex("index") - .setId("1") + DocWriteResponse indexResponse = prepareIndex("index").setId("1") .setSource(Map.of("field", "value")) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); assertEquals(RestStatus.CREATED, indexResponse.status()); - SearchResponse target = prepareSearch("target").get(); - assertEquals(1, target.getHits().getTotalHits().value); - assertFalse(target.getHits().getAt(0).getSourceAsMap().containsKey("final")); + assertResponse(prepareSearch("target"), response -> { + assertEquals(1, response.getHits().getTotalHits().value); + assertFalse(response.getHits().getAt(0).getSourceAsMap().containsKey("final")); + }); } public void testFinalPipelineOfNewDestinationIsInvoked() { @@ -144,15 +144,15 @@ public void testFinalPipelineOfNewDestinationIsInvoked() { {"processors": [{"final": {}}]}"""); clusterAdmin().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); - DocWriteResponse indexResponse = client().prepareIndex("index") - .setId("1") + DocWriteResponse indexResponse = prepareIndex("index").setId("1") .setSource(Map.of("field", "value")) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); assertEquals(RestStatus.CREATED, indexResponse.status()); - SearchResponse target = prepareSearch("target").get(); - assertEquals(1, target.getHits().getTotalHits().value); - assertEquals(true, target.getHits().getAt(0).getSourceAsMap().get("final")); + assertResponse(prepareSearch("target"), response -> { + assertEquals(1, response.getHits().getTotalHits().value); + assertEquals(true, response.getHits().getAt(0).getSourceAsMap().get("final")); + }); } public void testDefaultPipelineOfNewDestinationIsNotInvoked() { @@ -170,15 +170,15 @@ public void testDefaultPipelineOfNewDestinationIsNotInvoked() { {"processors": [{"final": {}}]}"""); clusterAdmin().putPipeline(new PutPipelineRequest("target_default_pipeline", targetPipeline, XContentType.JSON)).actionGet(); - DocWriteResponse indexResponse = client().prepareIndex("index") - .setId("1") + DocWriteResponse indexResponse = prepareIndex("index").setId("1") .setSource(Map.of("field", "value")) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); assertEquals(RestStatus.CREATED, indexResponse.status()); - SearchResponse target = prepareSearch("target").get(); - assertEquals(1, target.getHits().getTotalHits().value); - assertFalse(target.getHits().getAt(0).getSourceAsMap().containsKey("final")); + assertResponse(prepareSearch("target"), response -> { + assertEquals(1, response.getHits().getTotalHits().value); + assertFalse(response.getHits().getAt(0).getSourceAsMap().containsKey("final")); + }); } public void testDefaultPipelineOfRerouteDestinationIsInvoked() { @@ -196,15 +196,15 @@ public void testDefaultPipelineOfRerouteDestinationIsInvoked() { {"processors": [{"final": {}}]}"""); clusterAdmin().putPipeline(new PutPipelineRequest("target_default_pipeline", targetPipeline, XContentType.JSON)).actionGet(); - DocWriteResponse indexResponse = client().prepareIndex("index") - .setId("1") + DocWriteResponse indexResponse = prepareIndex("index").setId("1") .setSource(Map.of("field", "value")) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); assertEquals(RestStatus.CREATED, indexResponse.status()); - SearchResponse target = prepareSearch("target").get(); - assertEquals(1, target.getHits().getTotalHits().value); - 
assertTrue(target.getHits().getAt(0).getSourceAsMap().containsKey("final")); + assertResponse(prepareSearch("target"), response -> { + assertEquals(1, response.getHits().getTotalHits().value); + assertTrue(response.getHits().getAt(0).getSourceAsMap().containsKey("final")); + }); } public void testAvoidIndexingLoop() { @@ -224,8 +224,7 @@ public void testAvoidIndexingLoop() { IllegalStateException exception = expectThrows( IllegalStateException.class, - () -> client().prepareIndex("index") - .setId("1") + () -> prepareIndex("index").setId("1") .setSource(Map.of("dest", "index")) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get() @@ -243,7 +242,7 @@ public void testFinalPipeline() { // this asserts that the final_pipeline was used, without us having to actually create the pipeline etc. final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> client().prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get() + () -> prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get() ); assertThat(e, hasToString(containsString("pipeline with id [final_pipeline] does not exist"))); } @@ -257,7 +256,7 @@ public void testRequestPipelineAndFinalPipeline() { clusterAdmin().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); final Settings settings = Settings.builder().put(IndexSettings.FINAL_PIPELINE.getKey(), "final_pipeline").build(); createIndex("index", settings); - final IndexRequestBuilder index = client().prepareIndex("index").setId("1"); + final IndexRequestBuilder index = prepareIndex("index").setId("1"); index.setSource(Map.of("field", "value")); index.setPipeline("request_pipeline"); index.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); @@ -285,7 +284,7 @@ public void testDefaultAndFinalPipeline() { .put(IndexSettings.FINAL_PIPELINE.getKey(), "final_pipeline") .build(); createIndex("index", settings); - final IndexRequestBuilder index = client().prepareIndex("index").setId("1"); + final IndexRequestBuilder index = prepareIndex("index").setId("1"); index.setSource(Map.of("field", "value")); index.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); final DocWriteResponse response = index.get(); @@ -332,7 +331,7 @@ public void testDefaultAndFinalPipelineFromTemplates() { .setOrder(finalPipelineOrder) .setSettings(finalPipelineSettings) .get(); - final IndexRequestBuilder index = client().prepareIndex("index").setId("1"); + final IndexRequestBuilder index = prepareIndex("index").setId("1"); index.setSource(Map.of("field", "value")); index.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); final DocWriteResponse response = index.get(); @@ -370,7 +369,7 @@ public void testHighOrderFinalPipelinePreferred() throws IOException { // this asserts that the high_order_final_pipeline was selected, without us having to actually create the pipeline etc. 
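The hunk that follows repeats the mechanical rewrite applied across every file in this patch: the client().prepareIndex("index") chain becomes the test-base shorthand prepareIndex("index"), which the patch treats as equivalent. Side by side, under that assumption:

    // before: go through the node client explicitly
    client().prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get();

    // after: ESIntegTestCase shorthand, same request
    prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get();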
final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> client().prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get() + () -> prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get() ); assertThat(e, hasToString(containsString("pipeline with id [high_order_final_pipeline] does not exist"))); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java index 41bdf944edd59..14d9cf9e56eae 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.cluster.metadata.MappingMetadata; @@ -27,6 +26,7 @@ import java.util.Map; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -37,40 +37,51 @@ public class HiddenIndexIT extends ESIntegTestCase { public void testHiddenIndexSearch() { assertAcked(indicesAdmin().prepareCreate("hidden-index").setSettings(Settings.builder().put("index.hidden", true).build()).get()); - client().prepareIndex("hidden-index").setSource("foo", "bar").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + prepareIndex("hidden-index").setSource("foo", "bar").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); // default not visible to wildcard expansion - SearchResponse searchResponse = prepareSearch(randomFrom("*", "_all", "h*", "*index")).setSize(1000) - .setQuery(QueryBuilders.matchAllQuery()) - .get(); - boolean matchedHidden = Arrays.stream(searchResponse.getHits().getHits()).anyMatch(hit -> "hidden-index".equals(hit.getIndex())); - assertFalse(matchedHidden); + assertResponse( + prepareSearch(randomFrom("*", "_all", "h*", "*index")).setSize(1000).setQuery(QueryBuilders.matchAllQuery()), + response -> { + boolean matchedHidden = Arrays.stream(response.getHits().getHits()).anyMatch(hit -> "hidden-index".equals(hit.getIndex())); + assertFalse(matchedHidden); + } + ); // direct access allowed - searchResponse = prepareSearch("hidden-index").setSize(1000).setQuery(QueryBuilders.matchAllQuery()).get(); - matchedHidden = Arrays.stream(searchResponse.getHits().getHits()).anyMatch(hit -> "hidden-index".equals(hit.getIndex())); - assertTrue(matchedHidden); + assertResponse(prepareSearch("hidden-index").setSize(1000).setQuery(QueryBuilders.matchAllQuery()), response -> { + boolean matchedHidden = Arrays.stream(response.getHits().getHits()).anyMatch(hit -> "hidden-index".equals(hit.getIndex())); + assertTrue(matchedHidden); + }); // with indices option to include hidden - searchResponse = prepareSearch(randomFrom("*", "_all", "h*", "*index")).setSize(1000) - .setQuery(QueryBuilders.matchAllQuery()) - .setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_HIDDEN) - .get(); - 
matchedHidden = Arrays.stream(searchResponse.getHits().getHits()).anyMatch(hit -> "hidden-index".equals(hit.getIndex())); - assertTrue(matchedHidden); + assertResponse( + prepareSearch(randomFrom("*", "_all", "h*", "*index")).setSize(1000) + .setQuery(QueryBuilders.matchAllQuery()) + .setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_HIDDEN), + response -> { + boolean matchedHidden = Arrays.stream(response.getHits().getHits()).anyMatch(hit -> "hidden-index".equals(hit.getIndex())); + assertTrue(matchedHidden); + } + ); // implicit based on use of pattern starting with . and a wildcard assertAcked(indicesAdmin().prepareCreate(".hidden-index").setSettings(Settings.builder().put("index.hidden", true).build()).get()); - client().prepareIndex(".hidden-index").setSource("foo", "bar").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); - searchResponse = prepareSearch(randomFrom(".*", ".hidden-*")).setSize(1000).setQuery(QueryBuilders.matchAllQuery()).get(); - matchedHidden = Arrays.stream(searchResponse.getHits().getHits()).anyMatch(hit -> ".hidden-index".equals(hit.getIndex())); - assertTrue(matchedHidden); + prepareIndex(".hidden-index").setSource("foo", "bar").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + assertResponse(prepareSearch(randomFrom(".*", ".hidden-*")).setSize(1000).setQuery(QueryBuilders.matchAllQuery()), response -> { + boolean matchedHidden = Arrays.stream(response.getHits().getHits()).anyMatch(hit -> ".hidden-index".equals(hit.getIndex())); + assertTrue(matchedHidden); + }); // make index not hidden updateIndexSettings(Settings.builder().put("index.hidden", false), "hidden-index"); - searchResponse = prepareSearch(randomFrom("*", "_all", "h*", "*index")).setSize(1000).setQuery(QueryBuilders.matchAllQuery()).get(); - matchedHidden = Arrays.stream(searchResponse.getHits().getHits()).anyMatch(hit -> "hidden-index".equals(hit.getIndex())); - assertTrue(matchedHidden); + assertResponse( + prepareSearch(randomFrom("*", "_all", "h*", "*index")).setSize(1000).setQuery(QueryBuilders.matchAllQuery()), + response -> { + boolean matchedHidden = Arrays.stream(response.getHits().getHits()).anyMatch(hit -> "hidden-index".equals(hit.getIndex())); + assertTrue(matchedHidden); + } + ); } public void testGlobalTemplatesDoNotApply() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/IndexRequestBuilderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/IndexRequestBuilderIT.java index 31368a3cfb8fd..71f0c75efa026 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/IndexRequestBuilderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/IndexRequestBuilderIT.java @@ -28,13 +28,12 @@ public void testSetSource() throws InterruptedException, ExecutionException { Map<String, Object> map = new HashMap<>(); map.put("test_field", "foobar"); IndexRequestBuilder[] builders = new IndexRequestBuilder[] { - client().prepareIndex("test").setSource((Object) "test_field",
(Object) "foobar"), + prepareIndex("test").setSource("{\"test_field\" : \"foobar\"}", XContentType.JSON), + prepareIndex("test").setSource(new BytesArray("{\"test_field\" : \"foobar\"}"), XContentType.JSON), + prepareIndex("test").setSource(new BytesArray("{\"test_field\" : \"foobar\"}"), XContentType.JSON), + prepareIndex("test").setSource(BytesReference.toBytes(new BytesArray("{\"test_field\" : \"foobar\"}")), XContentType.JSON), + prepareIndex("test").setSource(map) }; indexRandom(true, builders); ElasticsearchAssertions.assertHitCount( prepareSearch("test").setQuery(QueryBuilders.termQuery("test_field", "foobar")), @@ -44,7 +43,7 @@ public void testSetSource() throws InterruptedException, ExecutionException { public void testOddNumberOfSourceObjects() { try { - client().prepareIndex("test").setSource("test_field", "foobar", new Object()); + prepareIndex("test").setSource("test_field", "foobar", new Object()); fail("Expected IllegalArgumentException"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("The number of object passed must be even but was [3]")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/IndexSortIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/IndexSortIT.java index 9d94b9d0b41b0..fae08f8d5577e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/IndexSortIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/IndexSortIT.java @@ -70,8 +70,7 @@ public void testIndexSort() { .putList("index.sort.field", "date", "numeric_dv", "keyword_dv") ).setMapping(TEST_MAPPING).get(); for (int i = 0; i < 20; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setSource("numeric_dv", randomInt(), "keyword_dv", randomAlphaOfLengthBetween(10, 20)) .get(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/WaitUntilRefreshIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/WaitUntilRefreshIT.java index ec03a740f8ade..ca2d2c60e23e1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/WaitUntilRefreshIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/WaitUntilRefreshIT.java @@ -56,11 +56,7 @@ public void createTestIndex() { } public void testIndex() { - DocWriteResponse index = client().prepareIndex("test") - .setId("1") - .setSource("foo", "bar") - .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) - .get(); + DocWriteResponse index = prepareIndex("test").setId("1").setSource("foo", "bar").setRefreshPolicy(RefreshPolicy.WAIT_UNTIL).get(); assertEquals(RestStatus.CREATED, index.status()); assertFalse("request shouldn't have forced a refresh", index.forcedRefresh()); assertSearchHits(prepareSearch("test").setQuery(matchQuery("foo", "bar")), "1"); @@ -68,7 +64,7 @@ public void testIndex() { public void testDelete() throws InterruptedException, ExecutionException { // Index normally - indexRandom(true, client().prepareIndex("test").setId("1").setSource("foo", "bar")); + indexRandom(true, prepareIndex("test").setId("1").setSource("foo", "bar")); assertSearchHits(prepareSearch("test").setQuery(matchQuery("foo", "bar")), "1"); // Now delete with blockUntilRefresh @@ -80,7 +76,7 @@ public void testDelete() throws InterruptedException, ExecutionException { public void testUpdate() throws InterruptedException, ExecutionException { // Index normally - indexRandom(true, client().prepareIndex("test").setId("1").setSource("foo", "bar")); + 
indexRandom(true, prepareIndex("test").setId("1").setSource("foo", "bar")); assertSearchHits(prepareSearch("test").setQuery(matchQuery("foo", "bar")), "1"); // Update with RefreshPolicy.WAIT_UNTIL @@ -115,7 +111,7 @@ public void testUpdate() throws InterruptedException, ExecutionException { public void testBulk() { // Index by bulk with RefreshPolicy.WAIT_UNTIL BulkRequestBuilder bulk = client().prepareBulk().setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); - bulk.add(client().prepareIndex("test").setId("1").setSource("foo", "bar")); + bulk.add(prepareIndex("test").setId("1").setSource("foo", "bar")); assertBulkSuccess(bulk.get()); assertSearchHits(prepareSearch("test").setQuery(matchQuery("foo", "bar")), "1"); @@ -143,16 +139,16 @@ public void testBulk() { */ public void testNoRefreshInterval() throws InterruptedException, ExecutionException { updateIndexSettings(Settings.builder().put("index.refresh_interval", -1), "test"); - ActionFuture<DocWriteResponse> index = client().prepareIndex("test") - .setId("1") + ActionFuture<DocWriteResponse> index = prepareIndex("test").setId("1") .setSource("foo", "bar") .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) .execute(); while (false == index.isDone()) { indicesAdmin().prepareRefresh("test").get(); } - assertEquals(RestStatus.CREATED, index.get().status()); - assertFalse("request shouldn't have forced a refresh", index.get().forcedRefresh()); + var response = index.get(); + assertEquals(RestStatus.CREATED, response.status()); + assertFalse("request shouldn't have forced a refresh", response.forcedRefresh()); assertSearchHits(prepareSearch("test").setQuery(matchQuery("foo", "bar")), "1"); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java index 71ae1704b5fed..b7d4ce18b15fe 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java @@ -41,7 +41,7 @@ public void testMergesHappening() throws Exception { .source(jsonBuilder().startObject().field("l", randomLong()).endObject()) ); } - BulkResponse response = request.execute().actionGet(); + BulkResponse response = request.get(); refresh(); assertNoFailures(response); IndicesStatsResponse stats = indicesAdmin().prepareStats("test").setSegments(true).setMerge(true).get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java index f297b61e7087d..ee165d1870571 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.index.engine; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; @@ -21,7 +20,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.xcontent.XContentType; import org.junit.After; import org.junit.Before; @@ -32,6 +30,8 @@ import
java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -108,24 +108,20 @@ public void testMaxDocsLimit() throws Exception { ); assertThat(deleteError.getMessage(), containsString("Number of documents in the index can't exceed [" + maxDocs.get() + "]")); indicesAdmin().prepareRefresh("test").get(); - SearchResponse searchResponse = prepareSearch("test").setQuery(new MatchAllQueryBuilder()) - .setTrackTotalHitsUpTo(Integer.MAX_VALUE) - .setSize(0) - .get(); - ElasticsearchAssertions.assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) maxDocs.get())); + assertNoFailuresAndResponse( + prepareSearch("test").setQuery(new MatchAllQueryBuilder()).setTrackTotalHitsUpTo(Integer.MAX_VALUE).setSize(0), + response -> assertThat(response.getHits().getTotalHits().value, equalTo((long) maxDocs.get())) + ); if (randomBoolean()) { indicesAdmin().prepareFlush("test").get(); } internalCluster().fullRestart(); internalCluster().ensureAtLeastNumDataNodes(2); ensureGreen("test"); - searchResponse = prepareSearch("test").setQuery(new MatchAllQueryBuilder()) - .setTrackTotalHitsUpTo(Integer.MAX_VALUE) - .setSize(0) - .get(); - ElasticsearchAssertions.assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) maxDocs.get())); + assertNoFailuresAndResponse( + prepareSearch("test").setQuery(new MatchAllQueryBuilder()).setTrackTotalHitsUpTo(Integer.MAX_VALUE).setSize(0), + response -> assertThat(response.getHits().getTotalHits().value, equalTo((long) maxDocs.get())) + ); } public void testMaxDocsLimitConcurrently() throws Exception { @@ -135,12 +131,10 @@ public void testMaxDocsLimitConcurrently() throws Exception { assertThat(indexingResult.numFailures, greaterThan(0)); assertThat(indexingResult.numSuccess, both(greaterThan(0)).and(lessThanOrEqualTo(maxDocs.get()))); indicesAdmin().prepareRefresh("test").get(); - SearchResponse searchResponse = prepareSearch("test").setQuery(new MatchAllQueryBuilder()) - .setTrackTotalHitsUpTo(Integer.MAX_VALUE) - .setSize(0) - .get(); - ElasticsearchAssertions.assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) indexingResult.numSuccess)); + assertHitCountAndNoFailures( + prepareSearch("test").setQuery(new MatchAllQueryBuilder()).setTrackTotalHitsUpTo(Integer.MAX_VALUE).setSize(0), + indexingResult.numSuccess + ); int totalSuccess = indexingResult.numSuccess; while (totalSuccess < maxDocs.get()) { indexingResult = indexDocs(between(1, 10), between(1, 8)); @@ -152,12 +146,10 @@ public void testMaxDocsLimitConcurrently() throws Exception { assertThat(indexingResult.numSuccess, equalTo(0)); } indicesAdmin().prepareRefresh("test").get(); - searchResponse = prepareSearch("test").setQuery(new MatchAllQueryBuilder()) - .setTrackTotalHitsUpTo(Integer.MAX_VALUE) - .setSize(0) - .get(); - ElasticsearchAssertions.assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) totalSuccess)); + assertHitCountAndNoFailures( + prepareSearch("test").setQuery(new 
MatchAllQueryBuilder()).setTrackTotalHitsUpTo(Integer.MAX_VALUE).setSize(0), + totalSuccess + ); } record IndexingResult(int numSuccess, int numFailures) {} @@ -173,7 +165,7 @@ static IndexingResult indexDocs(int numRequests, int numThreads) throws Exceptio phaser.arriveAndAwaitAdvance(); while (completedRequests.incrementAndGet() <= numRequests) { try { - final DocWriteResponse resp = client().prepareIndex("test").setSource("{}", XContentType.JSON).get(); + final DocWriteResponse resp = prepareIndex("test").setSource("{}", XContentType.JSON).get(); numSuccess.incrementAndGet(); assertThat(resp.status(), equalTo(RestStatus.CREATED)); } catch (IllegalArgumentException e) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/fielddata/FieldDataLoadingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/fielddata/FieldDataLoadingIT.java index 334462f3b757d..55e90d4398201 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/fielddata/FieldDataLoadingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/fielddata/FieldDataLoadingIT.java @@ -35,7 +35,7 @@ public void testEagerGlobalOrdinalsFieldDataLoading() throws Exception { ); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("name", "name").get(); + prepareIndex("test").setId("1").setSource("name", "name").get(); indicesAdmin().prepareRefresh("test").get(); ClusterStatsResponse response = clusterAdmin().prepareClusterStats().get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/CopyToMapperIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/CopyToMapperIntegrationIT.java index 9fd1e788eca8c..c1f06aeceebde 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/CopyToMapperIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/CopyToMapperIntegrationIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.index.mapper; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.AggregationBuilders; @@ -22,6 +21,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; @@ -32,25 +32,25 @@ public void testDynamicTemplateCopyTo() throws Exception { int recordCount = between(1, 200); for (int i = 0; i < recordCount * 2; i++) { - client().prepareIndex("test-idx").setId(Integer.toString(i)).setSource("test_field", "test " + i, "even", i % 2 == 0).get(); + prepareIndex("test-idx").setId(Integer.toString(i)).setSource("test_field", "test " + i, "even", i % 2 == 0).get(); } - indicesAdmin().prepareRefresh("test-idx").execute().actionGet(); + indicesAdmin().prepareRefresh("test-idx").get(); SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values()); - SearchResponse response = prepareSearch("test-idx").setQuery(QueryBuilders.termQuery("even", true)) - .addAggregation(AggregationBuilders.terms("test").field("test_field").size(recordCount * 2).collectMode(aggCollectionMode)) - .addAggregation( - AggregationBuilders.terms("test_raw").field("test_field_raw").size(recordCount 
* 2).collectMode(aggCollectionMode) - ) - .execute() - .actionGet(); - - assertThat(response.getHits().getTotalHits().value, equalTo((long) recordCount)); - - assertThat(((Terms) response.getAggregations().get("test")).getBuckets().size(), equalTo(recordCount + 1)); - assertThat(((Terms) response.getAggregations().get("test_raw")).getBuckets().size(), equalTo(recordCount)); - + assertResponse( + prepareSearch("test-idx").setQuery(QueryBuilders.termQuery("even", true)) + .addAggregation(AggregationBuilders.terms("test").field("test_field").size(recordCount * 2).collectMode(aggCollectionMode)) + .addAggregation( + AggregationBuilders.terms("test_raw").field("test_field_raw").size(recordCount * 2).collectMode(aggCollectionMode) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo((long) recordCount)); + + assertThat(((Terms) response.getAggregations().get("test")).getBuckets().size(), equalTo(recordCount + 1)); + assertThat(((Terms) response.getAggregations().get("test_raw")).getBuckets().size(), equalTo(recordCount)); + } + ); } public void testDynamicObjectCopyTo() throws Exception { @@ -65,8 +65,8 @@ public void testDynamicObjectCopyTo() throws Exception { .endObject() ); assertAcked(indicesAdmin().prepareCreate("test-idx").setMapping(mapping)); - client().prepareIndex("test-idx").setId("1").setSource("foo", "bar").get(); - indicesAdmin().prepareRefresh("test-idx").execute().actionGet(); + prepareIndex("test-idx").setId("1").setSource("foo", "bar").get(); + indicesAdmin().prepareRefresh("test-idx").get(); assertHitCount(prepareSearch("test-idx").setQuery(QueryBuilders.termQuery("root.top.child", "bar")), 1L); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java index 38349e14bdf05..868540ac3e3f8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java @@ -64,9 +64,9 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { public void testConflictingDynamicMappings() { // we don't use indexRandom because the order of requests is important here createIndex("index"); - client().prepareIndex("index").setId("1").setSource("foo", 3).get(); + prepareIndex("index").setId("1").setSource("foo", 3).get(); try { - client().prepareIndex("index").setId("2").setSource("foo", "bar").get(); + prepareIndex("index").setId("2").setSource("foo", "bar").get(); fail("Indexing request should have failed!"); } catch (DocumentParsingException e) { // general case, the parsing code complains that it can't parse "bar" as a "long" @@ -82,10 +82,10 @@ public void testConflictingDynamicMappingsBulk() { // we don't use indexRandom because the order of requests is important here createIndex("index"); - client().prepareIndex("index").setId("1").setSource("foo", 3).get(); - BulkResponse bulkResponse = client().prepareBulk().add(client().prepareIndex("index").setId("1").setSource("foo", 3)).get(); + prepareIndex("index").setId("1").setSource("foo", 3).get(); + BulkResponse bulkResponse = client().prepareBulk().add(prepareIndex("index").setId("1").setSource("foo", 3)).get(); assertFalse(bulkResponse.hasFailures()); - bulkResponse =
client().prepareBulk().add(prepareIndex("index").setId("2").setSource("foo", "bar")).get(); assertTrue(bulkResponse.hasFailures()); } @@ -112,7 +112,7 @@ public void run() { startLatch.await(); assertEquals( DocWriteResponse.Result.CREATED, - client().prepareIndex("index").setId(id).setSource("field" + id, "bar").get().getResult() + prepareIndex("index").setId(id).setSource("field" + id, "bar").get().getResult() ); } catch (Exception e) { error.compareAndSet(null, e); @@ -163,7 +163,7 @@ public void testPreflightCheckAvoidsMaster() throws InterruptedException, IOExce XContentType.JSON ) .get(); - client().prepareIndex("index").setId("1").setSource("nested1", Map.of("foo", "bar"), "nested2", Map.of("foo", "bar")).get(); + prepareIndex("index").setId("1").setSource("nested1", Map.of("foo", "bar"), "nested2", Map.of("foo", "bar")).get(); final CountDownLatch masterBlockedLatch = new CountDownLatch(1); final CountDownLatch indexingCompletedLatch = new CountDownLatch(1); @@ -184,9 +184,7 @@ public void onFailure(Exception e) { }); masterBlockedLatch.await(); - final IndexRequestBuilder indexRequestBuilder = client().prepareIndex("index") - .setId("2") - .setSource("nested3", Map.of("foo", "bar")); + final IndexRequestBuilder indexRequestBuilder = prepareIndex("index").setId("2").setSource("nested3", Map.of("foo", "bar")); try { assertThat( expectThrows(IllegalArgumentException.class, () -> indexRequestBuilder.get(TimeValue.timeValueSeconds(10))).getMessage(), @@ -200,7 +198,7 @@ public void onFailure(Exception e) { public void testTotalFieldsLimitForDynamicMappingsUpdateCheckedAtDocumentParseTime() throws InterruptedException { createIndex("index", Settings.builder().put(INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), 2).build()); ensureGreen("index"); - client().prepareIndex("index").setId("1").setSource("field1", "value1").get(); + prepareIndex("index").setId("1").setSource("field1", "value1").get(); final CountDownLatch masterBlockedLatch = new CountDownLatch(1); final CountDownLatch indexingCompletedLatch = new CountDownLatch(1); @@ -221,7 +219,7 @@ public void onFailure(Exception e) { }); masterBlockedLatch.await(); - final IndexRequestBuilder indexRequestBuilder = client().prepareIndex("index").setId("2").setSource("field2", "value2"); + final IndexRequestBuilder indexRequestBuilder = prepareIndex("index").setId("2").setSource("field2", "value2"); try { Exception e = expectThrows(DocumentParsingException.class, () -> indexRequestBuilder.get(TimeValue.timeValueSeconds(10))); assertThat(e.getMessage(), Matchers.containsString("failed to parse")); @@ -265,8 +263,7 @@ public void testTotalFieldsLimitWithRuntimeFields() { { // introduction of a new object with 2 new sub-fields fails - final IndexRequestBuilder indexRequestBuilder = client().prepareIndex("index1") - .setId("1") + final IndexRequestBuilder indexRequestBuilder = prepareIndex("index1").setId("1") .setSource("field3", "value3", "my_object2", Map.of("new_field1", "value1", "new_field2", "value2")); Exception exc = expectThrows(DocumentParsingException.class, () -> indexRequestBuilder.get(TimeValue.timeValueSeconds(10))); assertThat(exc.getMessage(), Matchers.containsString("failed to parse")); @@ -279,7 +276,7 @@ public void testTotalFieldsLimitWithRuntimeFields() { { // introduction of a new single field succeeds - client().prepareIndex("index1").setId("2").setSource("field3", "value3", "new_field4", 100).get(); + prepareIndex("index1").setId("2").setSource("field3", "value3", "new_field4", 100).get(); } { @@ -294,8 +291,7 @@ 
public void testTotalFieldsLimitWithRuntimeFields() { """, XContentType.JSON)); // introduction of a new object with 2 new sub-fields succeeds - client().prepareIndex("index1") - .setId("1") + prepareIndex("index1").setId("1") .setSource("field3", "value3", "my_object2", Map.of("new_field1", "value1", "new_field2", "value2")); } } @@ -304,7 +300,7 @@ public void testMappingVersionAfterDynamicMappingUpdate() throws Exception { createIndex("test"); final ClusterService clusterService = internalCluster().clusterService(); final long previousVersion = clusterService.state().metadata().index("test").getMappingVersion(); - client().prepareIndex("test").setId("1").setSource("field", "text").get(); + prepareIndex("test").setId("1").setSource("field", "text").get(); assertBusy(() -> assertThat(clusterService.state().metadata().index("test").getMappingVersion(), equalTo(1 + previousVersion))); } @@ -507,7 +503,7 @@ public void testDynamicRuntimeObjectFields() { Exception exception = expectThrows( DocumentParsingException.class, - () -> client().prepareIndex("test").setSource("obj.runtime", "value").get() + () -> prepareIndex("test").setSource("obj.runtime", "value").get() ); assertThat( exception.getMessage(), @@ -539,8 +535,7 @@ public void testDynamicRuntimeObjectFields() { // target the same shard where we are sure the mapping update has been applied assertEquals( RestStatus.CREATED, - client().prepareIndex("test") - .setSource("obj.runtime.dynamic.number", 1) + prepareIndex("test").setSource("obj.runtime.dynamic.number", 1) .setId("id") .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get() @@ -552,7 +547,7 @@ public void testDynamicRuntimeObjectFields() { // a doc with the same field but a different type causes a conflict Exception e = expectThrows( DocumentParsingException.class, - () -> client().prepareIndex("test").setId("id").setSource("obj.runtime.dynamic.number", "string").get() + () -> prepareIndex("test").setId("id").setSource("obj.runtime.dynamic.number", "string").get() ); assertThat( e.getMessage(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java index 25c33ee66bad4..a22910ab9c4eb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.unit.DistanceUnit; @@ -46,10 +45,10 @@ public void testMultiFields() throws Exception { assertThat(titleFields.get("not_analyzed"), notNullValue()); assertThat(((Map<String, Object>) titleFields.get("not_analyzed")).get("type").toString(), equalTo("keyword")); - client().prepareIndex("my-index").setId("1").setSource("title", "Multi fields").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("my-index").setId("1").setSource("title", "Multi fields").setRefreshPolicy(IMMEDIATE).get(); - assertHitCount(client().prepareSearch("my-index").setQuery(matchQuery("title", "multi")), 1); - assertHitCount(client().prepareSearch("my-index").setQuery(matchQuery("title.not_analyzed", "Multi fields")), 1); +
assertHitCount(prepareSearch("my-index").setQuery(matchQuery("title", "multi")), 1); + assertHitCount(prepareSearch("my-index").setQuery(matchQuery("title.not_analyzed", "Multi fields")), 1); assertAcked(indicesAdmin().preparePutMapping("my-index").setSource(createPutMappingSource())); @@ -65,9 +64,9 @@ public void testMultiFields() throws Exception { assertThat(titleFields.get("uncased"), notNullValue()); assertThat(((Map<String, Object>) titleFields.get("uncased")).get("analyzer").toString(), equalTo("whitespace")); - client().prepareIndex("my-index").setId("1").setSource("title", "Multi fields").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("my-index").setId("1").setSource("title", "Multi fields").setRefreshPolicy(IMMEDIATE).get(); - assertHitCount(client().prepareSearch("my-index").setQuery(matchQuery("title.uncased", "Multi")), 1); + assertHitCount(prepareSearch("my-index").setQuery(matchQuery("title.uncased", "Multi")), 1); } @SuppressWarnings("unchecked") @@ -89,13 +88,13 @@ public void testGeoPointMultiField() throws Exception { assertThat(bField.get("type").toString(), equalTo("keyword")); GeoPoint point = new GeoPoint(51, 19); - client().prepareIndex("my-index").setId("1").setSource("a", point.toString()).setRefreshPolicy(IMMEDIATE).get(); - SearchResponse countResponse = prepareSearch("my-index").setSize(0) - .setQuery(constantScoreQuery(geoDistanceQuery("a").point(51, 19).distance(50, DistanceUnit.KILOMETERS))) - .get(); - assertThat(countResponse.getHits().getTotalHits().value, equalTo(1L)); - countResponse = prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", point.geohash())).get(); - assertThat(countResponse.getHits().getTotalHits().value, equalTo(1L)); + prepareIndex("my-index").setId("1").setSource("a", point.toString()).setRefreshPolicy(IMMEDIATE).get(); + assertHitCount( + prepareSearch("my-index").setSize(0) + .setQuery(constantScoreQuery(geoDistanceQuery("a").point(51, 19).distance(50, DistanceUnit.KILOMETERS))), + 1L + ); + assertHitCount(prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", point.geohash())), 1L); } @SuppressWarnings("unchecked") @@ -115,9 +114,8 @@ public void testCompletionMultiField() throws Exception { assertThat(bField.size(), equalTo(1)); assertThat(bField.get("type").toString(), equalTo("keyword")); - client().prepareIndex("my-index").setId("1").setSource("a", "complete me").setRefreshPolicy(IMMEDIATE).get(); - SearchResponse countResponse = prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", "complete me")).get(); - assertThat(countResponse.getHits().getTotalHits().value, equalTo(1L)); + prepareIndex("my-index").setId("1").setSource("a", "complete me").setRefreshPolicy(IMMEDIATE).get(); + assertHitCount(prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", "complete me")), 1L); } @SuppressWarnings("unchecked") @@ -137,9 +135,8 @@ public void testIpMultiField() throws Exception { assertThat(bField.size(), equalTo(1)); assertThat(bField.get("type").toString(), equalTo("keyword")); - client().prepareIndex("my-index").setId("1").setSource("a", "127.0.0.1").setRefreshPolicy(IMMEDIATE).get(); - SearchResponse countResponse = prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", "127.0.0.1")).get(); - assertThat(countResponse.getHits().getTotalHits().value, equalTo(1L)); + prepareIndex("my-index").setId("1").setSource("a", "127.0.0.1").setRefreshPolicy(IMMEDIATE).get(); + assertHitCount(prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", "127.0.0.1")), 1L); }
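The rewrites above fold the fetch-then-assert pattern into the ElasticsearchAssertions helpers that the added imports bring in: assertHitCount runs the search and verifies the total in one call, while assertResponse hands the SearchResponse to a consumer so the framework can release it afterwards, instead of leaving a raw reference in the test. The two shapes as used in this patch:

    import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
    import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;

    // hit-count-only checks collapse into a single call
    assertHitCount(prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", "127.0.0.1")), 1L);

    // richer checks receive the response in a consumer
    assertResponse(prepareSearch("target"), response -> assertEquals(1, response.getHits().getTotalHits().value));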
private XContentBuilder createMappingSource(String fieldType) throws IOException { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java index bb20ddd321d7c..66a35328954e1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java @@ -32,7 +32,7 @@ public void setUp() throws Exception { super.setUp(); createIndex("test"); ensureGreen(); - client().prepareIndex("index").setId("1").setSource("field", "value").get(); + prepareIndex("index").setId("1").setSource("field", "value").get(); refresh(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/search/MatchPhraseQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/search/MatchPhraseQueryIT.java index 7751d5e7783b9..70983e5abfb96 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/search/MatchPhraseQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/search/MatchPhraseQueryIT.java @@ -55,8 +55,8 @@ public void testZeroTermsQuery() throws ExecutionException, InterruptedException private List<IndexRequestBuilder> getIndexRequests() { List<IndexRequestBuilder> requests = new ArrayList<>(); - requests.add(client().prepareIndex(INDEX).setSource("name", "the beatles")); - requests.add(client().prepareIndex(INDEX).setSource("name", "led zeppelin")); + requests.add(prepareIndex(INDEX).setSource("name", "the beatles")); + requests.add(prepareIndex(INDEX).setSource("name", "led zeppelin")); return requests; } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java index d1122004ccce2..9c4473297ba7b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java @@ -57,7 +57,7 @@ public void testGlobalCheckpointSyncWithAsyncDurability() throws Exception { for (int j = 0; j < 10; j++) { final String id = Integer.toString(j); - client().prepareIndex("test").setId(id).setSource("{\"foo\": " + id + "}", XContentType.JSON).get(); + prepareIndex("test").setId(id).setSource("{\"foo\": " + id + "}", XContentType.JSON).get(); } assertBusy(() -> { @@ -157,7 +157,7 @@ private void runGlobalCheckpointSyncTest( } for (int j = 0; j < numberOfDocuments; j++) { final String id = Integer.toString(index * numberOfDocuments + j); - client().prepareIndex("test").setId(id).setSource("{\"foo\": " + id + "}", XContentType.JSON).get(); + prepareIndex("test").setId(id).setSource("{\"foo\": " + id + "}", XContentType.JSON).get(); } try { barrier.await(); @@ -223,7 +223,7 @@ public void testPersistGlobalCheckpoint() throws Exception { } int numDocs = randomIntBetween(1, 20); for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); } ensureGreen("test"); assertBusy(() -> { @@ -252,7 +252,7 @@ public void testPersistLocalCheckpoint() { logger.info("numDocs {}", numDocs); long maxSeqNo = 0; for (int i = 0; i < numDocs; i++) { - maxSeqNo =
client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get().getSeqNo(); + maxSeqNo = prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get().getSeqNo(); logger.info("got {}", maxSeqNo); } for (IndicesService indicesService : internalCluster().getDataNodeInstances(IndicesService.class)) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/GlobalCheckpointListenersIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/GlobalCheckpointListenersIT.java index 70adc75574437..b38198a98b5a5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/GlobalCheckpointListenersIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/GlobalCheckpointListenersIT.java @@ -63,7 +63,7 @@ public void accept(final long g, final Exception e) { } }, null); - client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); assertBusy(() -> assertThat(globalCheckpoint.get(), equalTo((long) index))); // adding a listener expecting a lower global checkpoint should fire immediately final AtomicLong immediateGlobalCheckpint = new AtomicLong(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java index f473015f864db..52bb5159c9b7d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -146,7 +146,7 @@ public void testLockTryingToDelete() throws Exception { public void testDurableFlagHasEffect() { createIndex("test"); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService test = indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); @@ -169,7 +169,7 @@ public void testDurableFlagHasEffect() { setDurability(shard, Translog.Durability.REQUEST); assertFalse(needsSync.test(translog)); setDurability(shard, Translog.Durability.ASYNC); - client().prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).get(); assertTrue(needsSync.test(translog)); setDurability(shard, Translog.Durability.REQUEST); client().prepareDelete("test", "1").get(); @@ -181,7 +181,7 @@ public void testDurableFlagHasEffect() { setDurability(shard, Translog.Durability.REQUEST); assertNoFailures( client().prepareBulk() - .add(client().prepareIndex("test").setId("3").setSource("{}", XContentType.JSON)) + .add(prepareIndex("test").setId("3").setSource("{}", XContentType.JSON)) .add(client().prepareDelete("test", "1")) .get() ); @@ -190,7 +190,7 @@ public void testDurableFlagHasEffect() { setDurability(shard, Translog.Durability.ASYNC); assertNoFailures( client().prepareBulk() - .add(client().prepareIndex("test").setId("4").setSource("{}", XContentType.JSON)) + .add(prepareIndex("test").setId("4").setSource("{}", XContentType.JSON)) .add(client().prepareDelete("test", "3")) .get() ); @@ -220,7 +220,7 @@ public void testIndexDirIsDeletedWhenShardRemoved() throws Exception { Settings 
idxSettings = Settings.builder().put(IndexMetadata.SETTING_DATA_PATH, idxPath).build(); createIndex("test", idxSettings); ensureGreen("test"); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); assertHitCount(client().prepareSearch("test"), 1L); indicesAdmin().prepareDelete("test").get(); assertAllIndicesRemovedAndDeletionCompleted(Collections.singleton(getInstanceFromNode(IndicesService.class))); @@ -230,7 +230,7 @@ public void testIndexDirIsDeletedWhenShardRemoved() throws Exception { public void testExpectedShardSizeIsPresent() throws InterruptedException { assertAcked(indicesAdmin().prepareCreate("test").setSettings(indexSettings(1, 0))); for (int i = 0; i < 50; i++) { - client().prepareIndex("test").setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setSource("{}", XContentType.JSON).get(); } ensureGreen("test"); InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) getInstanceFromNode(ClusterInfoService.class); @@ -253,7 +253,7 @@ public void testIndexCanChangeCustomDataPath() throws Exception { logger.info("--> creating index [{}] with data_path [{}]", index, indexDataPath); createIndex(index, Settings.builder().put(IndexMetadata.SETTING_DATA_PATH, indexDataPath.toAbsolutePath().toString()).build()); - client().prepareIndex(index).setId("1").setSource("foo", "bar").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(index).setId("1").setSource("foo", "bar").setRefreshPolicy(IMMEDIATE).get(); ensureGreen(index); assertHitCount(client().prepareSearch(index).setSize(0), 1L); @@ -328,11 +328,7 @@ public void testMaybeFlush() throws Exception { .build() ) .get(); - client().prepareIndex("test") - .setId("0") - .setSource("{}", XContentType.JSON) - .setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE) - .get(); + prepareIndex("test").setId("0").setSource("{}", XContentType.JSON).setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get(); assertFalse(shard.shouldPeriodicallyFlush()); shard.applyIndexOperationOnPrimary( Versions.MATCH_ANY, @@ -347,11 +343,7 @@ public void testMaybeFlush() throws Exception { final Translog translog = getTranslog(shard); assertEquals(2, translog.stats().getUncommittedOperations()); assertThat(shard.flushStats().getTotal(), equalTo(0L)); - client().prepareIndex("test") - .setId("2") - .setSource("{}", XContentType.JSON) - .setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE) - .get(); + prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get(); assertThat(shard.getLastKnownGlobalCheckpoint(), equalTo(2L)); assertBusy(() -> { // this is async assertFalse(shard.shouldPeriodicallyFlush()); @@ -454,11 +446,7 @@ public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception { settings = Settings.builder().put("index.translog.generation_threshold_size", "117b").build(); } indicesAdmin().prepareUpdateSettings("test").setSettings(settings).get(); - client().prepareIndex("test") - .setId("0") - .setSource("{}", XContentType.JSON) - .setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE) - .get(); + prepareIndex("test").setId("0").setSource("{}", XContentType.JSON).setRefreshPolicy(randomBoolean() ? 
IMMEDIATE : NONE).get(); assertFalse(shard.shouldPeriodicallyFlush()); final AtomicBoolean running = new AtomicBoolean(true); final int numThreads = randomIntBetween(2, 4); @@ -481,7 +469,7 @@ public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception { final CheckedRunnable check; if (flush) { final FlushStats initialStats = shard.flushStats(); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); check = () -> { assertFalse(shard.shouldPeriodicallyFlush()); final FlushStats currentStats = shard.flushStats(); @@ -506,7 +494,7 @@ public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception { }; } else { final long generation = getTranslog(shard).currentFileGeneration(); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); check = () -> { assertFalse(shard.shouldRollTranslogGeneration()); assertEquals(generation + 1, getTranslog(shard).currentFileGeneration()); @@ -527,7 +515,7 @@ public void testFlushStats() throws Exception { indicesAdmin().prepareUpdateSettings("test").setSettings(settings).get(); final int numDocs = between(10, 100); for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); } // A flush stats may include the new total count but the old period count - assert eventually. assertBusy(() -> { @@ -538,7 +526,7 @@ public void testFlushStats() throws Exception { settings = Settings.builder().put("index.translog.flush_threshold_size", (String) null).build(); indicesAdmin().prepareUpdateSettings("test").setSettings(settings).get(); - client().prepareIndex("test").setId(UUIDs.randomBase64UUID()).setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId(UUIDs.randomBase64UUID()).setSource("{}", XContentType.JSON).get(); indicesAdmin().prepareFlush("test").setForce(randomBoolean()).setWaitIfOngoing(true).get(); final FlushStats flushStats = indicesAdmin().prepareStats("test").clear().setFlush(true).get().getTotal().flush; assertThat(flushStats.getTotal(), greaterThan(flushStats.getPeriodic())); @@ -550,9 +538,9 @@ public void testShardHasMemoryBufferOnTranslogRecover() throws Throwable { IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService indexService = indicesService.indexService(resolveIndex("test")); IndexShard shard = indexService.getShardOrNull(0); - client().prepareIndex("test").setId("0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); + prepareIndex("test").setId("0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); client().prepareDelete("test", "0").get(); - client().prepareIndex("test").setId("1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); CheckedFunction wrapper = directoryReader -> directoryReader; shard.close("simon says", false); @@ -666,7 +654,7 @@ public void testInvalidateIndicesRequestCacheWhenRollbackEngine() throws Excepti final SearchRequest countRequest = new SearchRequest("test").source(new SearchSourceBuilder().size(0)); final long numDocs = between(10, 20); for (int i = 0; i < numDocs; i++) { - 
client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); if (randomBoolean()) { shard.refresh("test"); } @@ -688,7 +676,7 @@ public void testInvalidateIndicesRequestCacheWhenRollbackEngine() throws Excepti final long moreDocs = between(10, 20); for (int i = 0; i < moreDocs; i++) { - client().prepareIndex("test").setId(Long.toString(i + numDocs)).setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId(Long.toString(i + numDocs)).setSource("{}", XContentType.JSON).get(); if (randomBoolean()) { shard.refresh("test"); } @@ -712,7 +700,7 @@ public void testShardChangesWithDefaultDocType() throws Exception { int numOps = between(1, 10); for (int i = 0; i < numOps; i++) { if (randomBoolean()) { - client().prepareIndex("index").setId(randomFrom("1", "2")).setSource("{}", XContentType.JSON).get(); + prepareIndex("index").setId(randomFrom("1", "2")).setSource("{}", XContentType.JSON).get(); } else { client().prepareDelete("index", randomFrom("1", "2")).get(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java index d57cbe50074ac..6c691c0a14440 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java @@ -119,7 +119,7 @@ public void testCorruptIndex() throws Exception { final int numExtraDocs = between(10, 100); IndexRequestBuilder[] builders = new IndexRequestBuilder[numExtraDocs]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex(indexName).setSource("foo", "bar"); + builders[i] = prepareIndex(indexName).setSource("foo", "bar"); } numDocs += numExtraDocs; @@ -282,7 +282,7 @@ public void testCorruptTranslogTruncation() throws Exception { logger.info("--> indexing [{}] docs to be kept", numDocsToKeep); IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocsToKeep]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex(indexName).setSource("foo", "bar"); + builders[i] = prepareIndex(indexName).setSource("foo", "bar"); } indexRandom(false, false, false, Arrays.asList(builders)); flush(indexName); @@ -293,7 +293,7 @@ public void testCorruptTranslogTruncation() throws Exception { logger.info("--> indexing [{}] more doc to be truncated", numDocsToTruncate); builders = new IndexRequestBuilder[numDocsToTruncate]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex(indexName).setSource("foo", "bar"); + builders[i] = prepareIndex(indexName).setSource("foo", "bar"); } indexRandom(false, false, false, Arrays.asList(builders)); @@ -472,7 +472,7 @@ public void testCorruptTranslogTruncationOfReplica() throws Exception { logger.info("--> indexing [{}] docs to be kept", numDocsToKeep); IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocsToKeep]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex(indexName).setSource("foo", "bar"); + builders[i] = prepareIndex(indexName).setSource("foo", "bar"); } indexRandom(false, false, false, Arrays.asList(builders)); flush(indexName); @@ -482,7 +482,7 @@ public void testCorruptTranslogTruncationOfReplica() throws Exception { logger.info("--> indexing 
[{}] more docs to be truncated", numDocsToTruncate); builders = new IndexRequestBuilder[numDocsToTruncate]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex(indexName).setSource("foo", "bar"); + builders[i] = prepareIndex(indexName).setSource("foo", "bar"); } indexRandom(false, false, false, Arrays.asList(builders)); final int totalDocs = numDocsToKeep + numDocsToTruncate; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/SearchIdleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/SearchIdleIT.java index 22bb5974ad550..e1ab2bdc2369e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/SearchIdleIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/SearchIdleIT.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.MultiGetRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; @@ -43,6 +42,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoSearchHits; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; public class SearchIdleIT extends ESSingleNodeTestCase { @@ -92,10 +92,10 @@ private void runTestAutomaticRefresh(final IntToLongFunction count) throws Inter int numDocs = scaledRandomIntBetween(25, 100); totalNumDocs.set(numDocs); CountDownLatch indexingDone = new CountDownLatch(numDocs); - client().prepareIndex("test").setId("0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); + prepareIndex("test").setId("0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); indexingDone.countDown(); // one doc is indexed above blocking IndexShard shard = indexService.getShard(0); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); shard.scheduledRefresh(future); boolean hasRefreshed = future.actionGet(); if (randomTimeValue == TimeValue.ZERO) { @@ -125,7 +125,7 @@ private void runTestAutomaticRefresh(final IntToLongFunction count) throws Inter started.await(); assertThat(count.applyAsLong(totalNumDocs.get()), equalTo(1L)); for (int i = 1; i < numDocs; i++) { - client().prepareIndex("test").setId("" + i).setSource("{\"foo\" : \"bar\"}", XContentType.JSON).execute(new ActionListener<>() { + prepareIndex("test").setId("" + i).setSource("{\"foo\" : \"bar\"}", XContentType.JSON).execute(new ActionListener<>() { @Override public void onResponse(DocWriteResponse indexResponse) { indexingDone.countDown(); @@ -154,7 +154,7 @@ public void testPendingRefreshWithIntervalChange() throws Exception { IndexService indexService = createIndex("test", builder.build()); assertFalse(indexService.getIndexSettings().isExplicitRefresh()); ensureGreen(); - client().prepareIndex("test").setId("0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); + prepareIndex("test").setId("0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); IndexShard shard = indexService.getShard(0); scheduleRefresh(shard, false); assertTrue(shard.isSearchIdle()); @@ -162,7 +162,7 @@ public void testPendingRefreshWithIntervalChange() 
throws Exception { // async on purpose to make sure it happens concurrently indicesAdmin().prepareRefresh().execute(ActionListener.running(refreshLatch::countDown)); assertHitCount(client().prepareSearch(), 1); - client().prepareIndex("test").setId("1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); + prepareIndex("test").setId("1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); scheduleRefresh(shard, false); assertTrue(shard.hasRefreshPending()); @@ -179,7 +179,7 @@ public void testPendingRefreshWithIntervalChange() throws Exception { // We need to ensure a `scheduledRefresh` triggered by the internal refresh setting update is executed before we index a new doc; // otherwise, it will compete to call `Engine#maybeRefresh` with the `scheduledRefresh` that we are going to verify. ensureNoPendingScheduledRefresh(indexService.getThreadPool()); - client().prepareIndex("test").setId("2").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); + prepareIndex("test").setId("2").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); scheduleRefresh(shard, true); assertFalse(shard.hasRefreshPending()); assertTrue(shard.isSearchIdle()); @@ -193,7 +193,7 @@ public void testPendingRefreshWithIntervalChange() throws Exception { } private static void scheduleRefresh(IndexShard shard, boolean expectRefresh) { - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); shard.scheduledRefresh(future); assertThat(future.actionGet(), equalTo(expectRefresh)); } @@ -279,15 +279,13 @@ public void testSearchIdleBoolQueryMatchOneIndex() throws InterruptedException { assertEquals( RestStatus.CREATED, - client().prepareIndex(idleIndex) - .setSource("keyword", "idle", "@timestamp", "2021-05-10T19:00:03.765Z", "routing_field", "aaa") + prepareIndex(idleIndex).setSource("keyword", "idle", "@timestamp", "2021-05-10T19:00:03.765Z", "routing_field", "aaa") .get() .status() ); assertEquals( RestStatus.CREATED, - client().prepareIndex(activeIndex) - .setSource("keyword", "active", "@timestamp", "2021-05-12T20:07:12.112Z", "routing_field", "aaa") + prepareIndex(activeIndex).setSource("keyword", "active", "@timestamp", "2021-05-12T20:07:12.112Z", "routing_field", "aaa") .get() .status() ); @@ -306,18 +304,20 @@ public void testSearchIdleBoolQueryMatchOneIndex() throws InterruptedException { assertIdleShard(activeIndexStatsBefore); // WHEN - final SearchResponse searchResponse = client().prepareSearch("test*") - .setQuery(new RangeQueryBuilder("@timestamp").from("2021-05-12T20:00:00.000Z").to("2021-05-12T21:00:00.000Z")) - .setPreFilterShardSize(5) - .get(); - - // THEN - assertEquals(RestStatus.OK, searchResponse.status()); - assertEquals(idleIndexShardsCount + activeIndexShardsCount - 1, searchResponse.getSkippedShards()); - assertEquals(0, searchResponse.getFailedShards()); - Arrays.stream(searchResponse.getHits().getHits()).forEach(searchHit -> assertEquals("test2", searchHit.getIndex())); - // NOTE: we need an empty result from at least one shard - assertEquals(1, searchResponse.getHits().getHits().length); + assertResponse( + client().prepareSearch("test*") + .setQuery(new RangeQueryBuilder("@timestamp").from("2021-05-12T20:00:00.000Z").to("2021-05-12T21:00:00.000Z")) + .setPreFilterShardSize(5), + response -> { + // THEN + assertEquals(RestStatus.OK, response.status()); + assertEquals(idleIndexShardsCount + activeIndexShardsCount - 1, response.getSkippedShards()); + assertEquals(0, response.getFailedShards()); + 
Arrays.stream(response.getHits().getHits()).forEach(searchHit -> assertEquals("test2", searchHit.getIndex())); + // NOTE: we need an empty result from at least one shard + assertEquals(1, response.getHits().getHits().length); + } + ); final IndicesStatsResponse idleIndexStatsAfter = indicesAdmin().prepareStats(idleIndex).get(); assertIdleShardsRefreshStats(idleIndexStatsBefore, idleIndexStatsAfter); } @@ -351,11 +351,8 @@ public void testSearchIdleExistsQueryMatchOneIndex() throws InterruptedException "type=keyword" ); - assertEquals(RestStatus.CREATED, client().prepareIndex(idleIndex).setSource("keyword", "idle").get().status()); - assertEquals( - RestStatus.CREATED, - client().prepareIndex(activeIndex).setSource("keyword", "active", "unmapped", "bbb").get().status() - ); + assertEquals(RestStatus.CREATED, prepareIndex(idleIndex).setSource("keyword", "idle").get().status()); + assertEquals(RestStatus.CREATED, prepareIndex(activeIndex).setSource("keyword", "active", "unmapped", "bbb").get().status()); assertEquals(RestStatus.OK, indicesAdmin().prepareRefresh(idleIndex, activeIndex).get().getStatus()); waitUntil( @@ -371,18 +368,15 @@ public void testSearchIdleExistsQueryMatchOneIndex() throws InterruptedException assertIdleShard(activeIndexStatsBefore); // WHEN - final SearchResponse searchResponse = client().prepareSearch("test*") - .setQuery(new ExistsQueryBuilder("unmapped")) - .setPreFilterShardSize(5) - .get(); - - // THEN - assertEquals(RestStatus.OK, searchResponse.status()); - assertEquals(idleIndexShardsCount, searchResponse.getSkippedShards()); - assertEquals(0, searchResponse.getFailedShards()); - Arrays.stream(searchResponse.getHits().getHits()).forEach(searchHit -> assertEquals("test2", searchHit.getIndex())); - // NOTE: we need an empty result from at least one shard - assertEquals(1, searchResponse.getHits().getHits().length); + assertResponse(client().prepareSearch("test*").setQuery(new ExistsQueryBuilder("unmapped")).setPreFilterShardSize(5), response -> { + // THEN + assertEquals(RestStatus.OK, response.status()); + assertEquals(idleIndexShardsCount, response.getSkippedShards()); + assertEquals(0, response.getFailedShards()); + Arrays.stream(response.getHits().getHits()).forEach(searchHit -> assertEquals("test2", searchHit.getIndex())); + // NOTE: we need an empty result from at least one shard + assertEquals(1, response.getHits().getHits().length); + }); final IndicesStatsResponse idleIndexStatsAfter = indicesAdmin().prepareStats(idleIndex).get(); assertIdleShardsRefreshStats(idleIndexStatsBefore, idleIndexStatsAfter); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java index 8de218f8a29c8..ec79b53ccd174 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -158,7 +158,7 @@ public void testCorruptFileAndRecover() throws InterruptedException, IOException disableAllocation("test"); IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test").setSource("field", "value"); + builders[i] = prepareIndex("test").setSource("field", "value"); } indexRandom(true, builders); ensureGreen(); @@ -269,7 +269,7 @@ public void testCorruptPrimaryNoReplica() throws ExecutionException, Interrupted ensureGreen(); 
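A note on the recurring rewrite in the hunks above and below: the bare prepareIndex(...) calls are presumably the convenience helper this PR migrates tests onto, a shared test-base method forwarding to client().prepareIndex(...). A minimal sketch of the assumed helper follows; its exact home and modifiers are not shown in this patch, so treat it as an assumption rather than the verbatim implementation:

    // Assumed shape of the shared test-base helper; it forwards to the node
    // client, so the shorter call sites build the same IndexRequestBuilder.
    protected IndexRequestBuilder prepareIndex(String index) {
        return client().prepareIndex(index);
    }

With that in scope, builders[i] = prepareIndex("test").setSource("field", "value") is interchangeable with the removed client().prepareIndex("test") form.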
IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test").setSource("field", "value"); + builders[i] = prepareIndex("test").setSource("field", "value"); } indexRandom(true, builders); ensureGreen(); @@ -402,11 +402,11 @@ public void testCorruptionOnNetworkLayer() throws InterruptedException { ensureGreen(); IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test").setSource("field", "value"); + builders[i] = prepareIndex("test").setSource("field", "value"); } indexRandom(true, builders); ensureGreen(); - assertAllSuccessful(indicesAdmin().prepareFlush().setForce(true).execute().actionGet()); + assertAllSuccessful(indicesAdmin().prepareFlush().setForce(true).get()); // we have to flush at least once here since we don't corrupt the translog assertHitCount(prepareSearch().setSize(0), numDocs); @@ -541,11 +541,11 @@ public void testCorruptFileThenSnapshotAndRestore() throws InterruptedException, ensureGreen(); IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test").setSource("field", "value"); + builders[i] = prepareIndex("test").setSource("field", "value"); } indexRandom(true, builders); ensureGreen(); - assertAllSuccessful(indicesAdmin().prepareFlush().setForce(true).execute().actionGet()); + assertAllSuccessful(indicesAdmin().prepareFlush().setForce(true).get()); // we have to flush at least once here since we don't corrupt the translog assertHitCount(prepareSearch().setSize(0), numDocs); @@ -608,11 +608,11 @@ public void testReplicaCorruption() throws Exception { ensureGreen(); IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test").setSource("field", "value"); + builders[i] = prepareIndex("test").setSource("field", "value"); } indexRandom(true, builders); ensureGreen(); - assertAllSuccessful(indicesAdmin().prepareFlush().setForce(true).execute().actionGet()); + assertAllSuccessful(indicesAdmin().prepareFlush().setForce(true).get()); // we have to flush at least once here since we don't corrupt the translog assertHitCount(prepareSearch().setSize(0), numDocs); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java index b8ecbc2e750af..d8d9ef47d4451 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java @@ -59,7 +59,7 @@ public void testCorruptTranslogFiles() throws Exception { IndexRequestBuilder[] builders = new IndexRequestBuilder[scaledRandomIntBetween(100, 1000)]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test").setSource("foo", "bar"); + builders[i] = prepareIndex("test").setSource("foo", "bar"); } indexRandom(false, false, false, Arrays.asList(builders)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/ExceptionRetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/ExceptionRetryIT.java index 19efcd9e3f31f..423e5c14c472a 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/index/store/ExceptionRetryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/ExceptionRetryIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.bulk.TransportShardBulkAction; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.plugins.Plugin; @@ -39,7 +38,8 @@ import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -94,9 +94,9 @@ public void testRetryDueToExceptionOnNetworkLayer() throws ExecutionException, I bulkBuilder.add(client.prepareIndex("index").setSource(doc)); } - BulkResponse response = bulkBuilder.get(); - if (response.hasFailures()) { - for (BulkItemResponse singleIndexRespons : response.getItems()) { + BulkResponse bulkResponse = bulkBuilder.get(); + if (bulkResponse.hasFailures()) { + for (BulkItemResponse singleIndexRespons : bulkResponse.getItems()) { if (singleIndexRespons.isFailed()) { fail("None of the bulk items should fail but got " + singleIndexRespons.getFailureMessage()); } @@ -104,41 +104,42 @@ public void testRetryDueToExceptionOnNetworkLayer() throws ExecutionException, I } refresh(); - SearchResponse searchResponse = prepareSearch("index").setSize(numDocs * 2).addStoredField("_id").get(); - - Set uniqueIds = new HashSet<>(); - long dupCounter = 0; - boolean found_duplicate_already = false; - for (int i = 0; i < searchResponse.getHits().getHits().length; i++) { - if (uniqueIds.add(searchResponse.getHits().getHits()[i].getId()) == false) { - if (found_duplicate_already == false) { - SearchResponse dupIdResponse = prepareSearch("index").setQuery( - termQuery("_id", searchResponse.getHits().getHits()[i].getId()) - ).setExplain(true).get(); - assertThat(dupIdResponse.getHits().getTotalHits().value, greaterThan(1L)); - logger.info("found a duplicate id:"); - for (SearchHit hit : dupIdResponse.getHits()) { - logger.info("Doc {} was found on shard {}", hit.getId(), hit.getShard().getShardId()); + assertNoFailuresAndResponse(prepareSearch("index").setSize(numDocs * 2).addStoredField("_id"), response -> { + Set uniqueIds = new HashSet<>(); + long dupCounter = 0; + boolean found_duplicate_already = false; + for (int i = 0; i < response.getHits().getHits().length; i++) { + if (uniqueIds.add(response.getHits().getHits()[i].getId()) == false) { + if (found_duplicate_already == false) { + assertResponse( + prepareSearch("index").setQuery(termQuery("_id", response.getHits().getHits()[i].getId())).setExplain(true), + dupIdResponse -> { + assertThat(dupIdResponse.getHits().getTotalHits().value, greaterThan(1L)); + logger.info("found a duplicate id:"); + for (SearchHit hit : dupIdResponse.getHits()) { + logger.info("Doc {} was found on 
shard {}", hit.getId(), hit.getShard().getShardId()); + } + logger.info("will not print anymore in case more duplicates are found."); + } + ); + found_duplicate_already = true; } - logger.info("will not print anymore in case more duplicates are found."); - found_duplicate_already = true; + dupCounter++; } - dupCounter++; } - } - assertNoFailures(searchResponse); - assertThat(dupCounter, equalTo(0L)); - assertHitCount(searchResponse, numDocs); - IndicesStatsResponse index = indicesAdmin().prepareStats("index").clear().setSegments(true).get(); - IndexStats indexStats = index.getIndex("index"); - long maxUnsafeAutoIdTimestamp = Long.MIN_VALUE; - for (IndexShardStats indexShardStats : indexStats) { - for (ShardStats shardStats : indexShardStats) { - SegmentsStats segments = shardStats.getStats().getSegments(); - maxUnsafeAutoIdTimestamp = Math.max(maxUnsafeAutoIdTimestamp, segments.getMaxUnsafeAutoIdTimestamp()); + assertThat(dupCounter, equalTo(0L)); + assertHitCount(response, numDocs); + IndicesStatsResponse index = indicesAdmin().prepareStats("index").clear().setSegments(true).get(); + IndexStats indexStats = index.getIndex("index"); + long maxUnsafeAutoIdTimestamp = Long.MIN_VALUE; + for (IndexShardStats indexShardStats : indexStats) { + for (ShardStats shardStats : indexShardStats) { + SegmentsStats segments = shardStats.getStats().getSegments(); + maxUnsafeAutoIdTimestamp = Math.max(maxUnsafeAutoIdTimestamp, segments.getMaxUnsafeAutoIdTimestamp()); + } } - } - assertTrue("exception must have been thrown otherwise setup is broken", exceptionThrown.get()); - assertTrue("maxUnsafeAutoIdTimestamp must be > than 0 we have at least one retry", maxUnsafeAutoIdTimestamp > -1); + assertTrue("exception must have been thrown otherwise setup is broken", exceptionThrown.get()); + assertTrue("maxUnsafeAutoIdTimestamp must be >= 0 since we have at least one retry", maxUnsafeAutoIdTimestamp > -1); + }); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java index 95846fcb55594..143ffedeefc55 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; @@ -28,6 +27,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -42,7 +42,7 @@ protected int numberOfReplicas() { public void testSimpleStats() throws Exception { // clear all stats first - indicesAdmin().prepareStats().clear().execute().actionGet(); + indicesAdmin().prepareStats().clear().get(); final int numNodes = cluster().numDataNodes();
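The .execute().actionGet() to .get() rewrites throughout this file are behavior-preserving: on an action request builder, get() is the one-call shorthand that executes the request and blocks for its response. A minimal sketch of the equivalence, reusing the stats request from the hunk above:

    // Both statements send the same request and block for the same response;
    // get() is shorthand for execute().actionGet().
    IndicesStatsResponse viaExecute = indicesAdmin().prepareStats().clear().execute().actionGet();
    IndicesStatsResponse viaGet = indicesAdmin().prepareStats().clear().get();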
assertThat(numNodes, greaterThanOrEqualTo(2)); final int shardsIdx1 = randomIntBetween(1, 10); // we make sure each node gets at least a single shard... @@ -66,20 +66,26 @@ public void testSimpleStats() throws Exception { long startTime = System.currentTimeMillis(); for (int i = 0; i < suggestAllIdx; i++) { - SearchResponse suggestResponse = addSuggestions(internalCluster().coordOnlyNodeClient().prepareSearch(), i).get(); - assertAllSuccessful(suggestResponse); + assertResponse( + addSuggestions(internalCluster().coordOnlyNodeClient().prepareSearch(), i), + response -> assertAllSuccessful(response) + ); } for (int i = 0; i < suggestIdx1; i++) { - SearchResponse suggestResponse = addSuggestions(internalCluster().coordOnlyNodeClient().prepareSearch("test1"), i).get(); - assertAllSuccessful(suggestResponse); + assertResponse( + addSuggestions(internalCluster().coordOnlyNodeClient().prepareSearch("test1"), i), + response -> assertAllSuccessful(response) + ); } for (int i = 0; i < suggestIdx2; i++) { - SearchResponse suggestResponse = addSuggestions(internalCluster().coordOnlyNodeClient().prepareSearch("test2"), i).get(); - assertAllSuccessful(suggestResponse); + assertResponse( + addSuggestions(internalCluster().coordOnlyNodeClient().prepareSearch("test2"), i), + response -> assertAllSuccessful(response) + ); } long endTime = System.currentTimeMillis(); - IndicesStatsResponse indicesStats = indicesAdmin().prepareStats().execute().actionGet(); + IndicesStatsResponse indicesStats = indicesAdmin().prepareStats().get(); final SearchStats.Stats suggest = indicesStats.getTotal().getSearch().getTotal(); // check current @@ -105,7 +111,7 @@ public void testSimpleStats() throws Exception { // the upperbound is num shards * total time since we do searches in parallel assertThat(suggest.getSuggestTimeInMillis(), lessThanOrEqualTo(totalShards * (endTime - startTime))); - NodesStatsResponse nodeStats = clusterAdmin().prepareNodesStats().execute().actionGet(); + NodesStatsResponse nodeStats = clusterAdmin().prepareNodesStats().get(); Set nodeIdsWithIndex = nodeIdsWithIndex("test1", "test2"); int num = 0; for (NodeStats stat : nodeStats.getNodes()) { @@ -138,7 +144,7 @@ private SearchRequestBuilder addSuggestions(SearchRequestBuilder request, int i) } private Set nodeIdsWithIndex(String... indices) { - ClusterState state = clusterAdmin().prepareState().execute().actionGet().getState(); + ClusterState state = clusterAdmin().prepareState().get().getState(); GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true); Set nodes = new HashSet<>(); for (ShardIterator shardIterator : allAssignedShardsGrouped) { @@ -153,7 +159,7 @@ private Set nodeIdsWithIndex(String... indices) { } protected int numAssignedShards(String... 
indices) { - ClusterState state = clusterAdmin().prepareState().execute().actionGet().getState(); + ClusterState state = clusterAdmin().prepareState().get().getState(); GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true); return allAssignedShardsGrouped.size(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indexing/IndexActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indexing/IndexActionIT.java index e3c66f3dabfdf..5f1b1ab81b9da 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indexing/IndexActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indexing/IndexActionIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.DocumentParsingException; @@ -28,6 +27,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicIntegerArray; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -47,7 +47,7 @@ public void testAutoGenerateIdNoDuplicates() throws Exception { logger.info("indexing [{}] docs", numOfDocs); List builders = new ArrayList<>(numOfDocs); for (int j = 0; j < numOfDocs; j++) { - builders.add(client().prepareIndex("test").setSource("field", "value_" + j)); + builders.add(prepareIndex("test").setSource("field", "value_" + j)); } indexRandom(true, builders); logger.info("verifying indexed content"); @@ -55,17 +55,18 @@ public void testAutoGenerateIdNoDuplicates() throws Exception { for (int j = 0; j < numOfChecks; j++) { try { logger.debug("running search with all types"); - SearchResponse response = prepareSearch("test").get(); - if (response.getHits().getTotalHits().value != numOfDocs) { - final String message = "Count is " - + response.getHits().getTotalHits().value - + " but " - + numOfDocs - + " was expected. " - + ElasticsearchAssertions.formatShardStatus(response); - logger.error("{}. search response: \n{}", message, response); - fail(message); - } + assertResponse(prepareSearch("test"), response -> { + if (response.getHits().getTotalHits().value != numOfDocs) { + final String message = "Count is " + + response.getHits().getTotalHits().value + + " but " + + numOfDocs + + " was expected. " + + ElasticsearchAssertions.formatShardStatus(response); + logger.error("{}. search response: \n{}", message, response); + fail(message); + } + }); } catch (Exception e) { logger.error("search for all docs types failed", e); if (firstError == null) { @@ -74,17 +75,18 @@ public void testAutoGenerateIdNoDuplicates() throws Exception { } try { logger.debug("running search with a specific type"); - SearchResponse response = prepareSearch("test").get(); - if (response.getHits().getTotalHits().value != numOfDocs) { - final String message = "Count is " - + response.getHits().getTotalHits().value - + " but " - + numOfDocs - + " was expected. " - + ElasticsearchAssertions.formatShardStatus(response); - logger.error("{}. 
search response: \n{}", message, response); - fail(message); - } + assertResponse(prepareSearch("test"), response -> { + if (response.getHits().getTotalHits().value != numOfDocs) { + final String message = "Count is " + + response.getHits().getTotalHits().value + + " but " + + numOfDocs + + " was expected. " + + ElasticsearchAssertions.formatShardStatus(response); + logger.error("{}. search response: \n{}", message, response); + fail(message); + } + }); } catch (Exception e) { logger.error("search for all docs of a specific type failed", e); if (firstError == null) { @@ -103,15 +105,15 @@ public void testCreatedFlag() throws Exception { createIndex("test"); ensureGreen(); - DocWriteResponse indexResponse = client().prepareIndex("test").setId("1").setSource("field1", "value1_1").execute().actionGet(); + DocWriteResponse indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_1").get(); assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); - indexResponse = client().prepareIndex("test").setId("1").setSource("field1", "value1_2").execute().actionGet(); + indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_2").get(); assertEquals(DocWriteResponse.Result.UPDATED, indexResponse.getResult()); - client().prepareDelete("test", "1").execute().actionGet(); + client().prepareDelete("test", "1").get(); - indexResponse = client().prepareIndex("test").setId("1").setSource("field1", "value1_2").execute().actionGet(); + indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_2").get(); assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); } @@ -120,14 +122,14 @@ public void testCreatedFlagWithFlush() throws Exception { createIndex("test"); ensureGreen(); - DocWriteResponse indexResponse = client().prepareIndex("test").setId("1").setSource("field1", "value1_1").execute().actionGet(); + DocWriteResponse indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_1").get(); assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); - client().prepareDelete("test", "1").execute().actionGet(); + client().prepareDelete("test", "1").get(); flush(); - indexResponse = client().prepareIndex("test").setId("1").setSource("field1", "value1_2").execute().actionGet(); + indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_2").get(); assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); } @@ -169,13 +171,11 @@ public void testCreatedFlagWithExternalVersioning() throws Exception { createIndex("test"); ensureGreen(); - DocWriteResponse indexResponse = client().prepareIndex("test") - .setId("1") + DocWriteResponse indexResponse = prepareIndex("test").setId("1") .setSource("field1", "value1_1") .setVersion(123) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); } @@ -183,10 +183,7 @@ public void testCreateFlagWithBulk() { createIndex("test"); ensureGreen(); - BulkResponse bulkResponse = client().prepareBulk() - .add(client().prepareIndex("test").setId("1").setSource("field1", "value1_1")) - .execute() - .actionGet(); + BulkResponse bulkResponse = client().prepareBulk().add(prepareIndex("test").setId("1").setSource("field1", "value1_1")).get(); assertThat(bulkResponse.hasFailures(), equalTo(false)); assertThat(bulkResponse.getItems().length, equalTo(1)); IndexResponse indexResponse = bulkResponse.getItems()[0].getResponse(); @@ -208,7 +205,7 @@ public 
void testCreateIndexWithLongName() { } try { - client().prepareIndex(randomAlphaOfLengthBetween(min, max).toLowerCase(Locale.ROOT)).setSource("foo", "bar").get(); + prepareIndex(randomAlphaOfLengthBetween(min, max).toLowerCase(Locale.ROOT)).setSource("foo", "bar").get(); fail("exception should have been thrown on too-long index name"); } catch (InvalidIndexNameException e) { assertThat( @@ -220,7 +217,7 @@ public void testCreateIndexWithLongName() { try { // Catch chars that are more than a single byte - client().prepareIndex( + prepareIndex( randomAlphaOfLength(MetadataCreateIndexService.MAX_INDEX_NAME_BYTES - 1).toLowerCase(Locale.ROOT) + "Ϟ".toLowerCase( Locale.ROOT ) @@ -263,9 +260,7 @@ public void testInvalidIndexName() { } public void testDocumentWithBlankFieldName() { - Exception e = expectThrows(DocumentParsingException.class, () -> { - client().prepareIndex("test").setId("1").setSource("", "value1_2").execute().actionGet(); - }); + Exception e = expectThrows(DocumentParsingException.class, () -> prepareIndex("test").setId("1").setSource("", "value1_2").get()); assertThat(e.getMessage(), containsString("failed to parse")); assertThat(e.getCause().getMessage(), containsString("field name cannot be an empty string")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java index 9cef9becd6aaf..28a5ad9c29126 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java @@ -79,7 +79,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { final String node2 = getLocalNodeId(server_2); // explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join) - clusterAdmin().prepareReroute().execute().actionGet(); + clusterAdmin().prepareReroute().get(); clusterHealth = clusterAdmin().health( new ClusterHealthRequest(new String[] {}).waitForGreenStatus().waitForNodes("2").waitForNoRelocatingShards(true) @@ -120,7 +120,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { final String node3 = getLocalNodeId(server_3); // explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join) - clusterAdmin().prepareReroute().execute().actionGet(); + clusterAdmin().prepareReroute().get(); clusterHealth = clusterAdmin().prepareHealth() .setWaitForGreenStatus() @@ -203,7 +203,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { logger.info("Deleting index [test]"); // last, lets delete the index - AcknowledgedResponse deleteIndexResponse = indicesAdmin().prepareDelete("test").execute().actionGet(); + AcknowledgedResponse deleteIndexResponse = indicesAdmin().prepareDelete("test").get(); assertThat(deleteIndexResponse.isAcknowledged(), equalTo(true)); clusterState = clusterAdmin().prepareState().get().getState(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java index 211e34c99ec23..67e8d2fd75d65 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java @@ -12,12 +12,6 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; -import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.get.MultiGetResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.index.IndexNotFoundException; @@ -29,8 +23,10 @@ import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; import java.util.Locale; +import java.util.function.Consumer; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -51,21 +47,25 @@ public void setNow() { * of failing when index resolution with `now` is one day off, this method wraps calls with the assumption that * the day did not change during the test run. */ - public R dateSensitiveGet(ActionRequestBuilder builder) { + public void dateSensitiveGet( + ActionRequestBuilder builder, + Consumer consumer + ) { Runnable dayChangeAssumption = () -> assumeTrue( "day changed between requests", ZonedDateTime.now(ZoneOffset.UTC).getDayOfYear() == now.getDayOfYear() ); - R response; try { - response = builder.get(); + assertResponse(builder, response -> { + dayChangeAssumption.run(); + consumer.accept(response); + }); } catch (IndexNotFoundException e) { // index resolver throws this if it does not find the exact index due to day changes dayChangeAssumption.run(); throw e; } - dayChangeAssumption.run(); - return response; + } public void testIndexNameDateMathExpressions() { @@ -74,61 +74,69 @@ public void testIndexNameDateMathExpressions() { String index3 = ".marvel-" + DateTimeFormatter.ofPattern("yyyy.MM.dd", Locale.ROOT).format(now.minusDays(2)); createIndex(index1, index2, index3); - GetSettingsResponse getSettingsResponse = dateSensitiveGet(indicesAdmin().prepareGetSettings(index1, index2, index3)); - assertEquals(index1, getSettingsResponse.getSetting(index1, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); - assertEquals(index2, getSettingsResponse.getSetting(index2, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); - assertEquals(index3, getSettingsResponse.getSetting(index3, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); + dateSensitiveGet(indicesAdmin().prepareGetSettings(index1, index2, index3), response -> { + assertEquals(index1, response.getSetting(index1, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); + assertEquals(index2, response.getSetting(index2, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); + assertEquals(index3, response.getSetting(index3, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); + }); String dateMathExp1 = "<.marvel-{now/d}>"; String dateMathExp2 = "<.marvel-{now/d-1d}>"; String dateMathExp3 = "<.marvel-{now/d-2d}>"; - client().prepareIndex(dateMathExp1).setId("1").setSource("{}", XContentType.JSON).get(); - 
client().prepareIndex(dateMathExp2).setId("2").setSource("{}", XContentType.JSON).get(); - client().prepareIndex(dateMathExp3).setId("3").setSource("{}", XContentType.JSON).get(); + prepareIndex(dateMathExp1).setId("1").setSource("{}", XContentType.JSON).get(); + prepareIndex(dateMathExp2).setId("2").setSource("{}", XContentType.JSON).get(); + prepareIndex(dateMathExp3).setId("3").setSource("{}", XContentType.JSON).get(); refresh(); - SearchResponse searchResponse = dateSensitiveGet(prepareSearch(dateMathExp1, dateMathExp2, dateMathExp3)); - assertHitCount(searchResponse, 3); - assertSearchHits(searchResponse, "1", "2", "3"); - - GetResponse getResponse = dateSensitiveGet(client().prepareGet(dateMathExp1, "1")); - assertThat(getResponse.isExists(), is(true)); - assertThat(getResponse.getId(), equalTo("1")); - - getResponse = dateSensitiveGet(client().prepareGet(dateMathExp2, "2")); - assertThat(getResponse.isExists(), is(true)); - assertThat(getResponse.getId(), equalTo("2")); - - getResponse = dateSensitiveGet(client().prepareGet(dateMathExp3, "3")); - assertThat(getResponse.isExists(), is(true)); - assertThat(getResponse.getId(), equalTo("3")); - - MultiGetResponse mgetResponse = dateSensitiveGet( - client().prepareMultiGet().add(dateMathExp1, "1").add(dateMathExp2, "2").add(dateMathExp3, "3") - ); - assertThat(mgetResponse.getResponses()[0].getResponse().isExists(), is(true)); - assertThat(mgetResponse.getResponses()[0].getResponse().getId(), equalTo("1")); - assertThat(mgetResponse.getResponses()[1].getResponse().isExists(), is(true)); - assertThat(mgetResponse.getResponses()[1].getResponse().getId(), equalTo("2")); - assertThat(mgetResponse.getResponses()[2].getResponse().isExists(), is(true)); - assertThat(mgetResponse.getResponses()[2].getResponse().getId(), equalTo("3")); - - IndicesStatsResponse indicesStatsResponse = dateSensitiveGet(indicesAdmin().prepareStats(dateMathExp1, dateMathExp2, dateMathExp3)); - assertThat(indicesStatsResponse.getIndex(index1), notNullValue()); - assertThat(indicesStatsResponse.getIndex(index2), notNullValue()); - assertThat(indicesStatsResponse.getIndex(index3), notNullValue()); - - DeleteResponse deleteResponse = dateSensitiveGet(client().prepareDelete(dateMathExp1, "1")); - assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); - assertThat(deleteResponse.getId(), equalTo("1")); - - deleteResponse = dateSensitiveGet(client().prepareDelete(dateMathExp2, "2")); - assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); - assertThat(deleteResponse.getId(), equalTo("2")); - - deleteResponse = dateSensitiveGet(client().prepareDelete(dateMathExp3, "3")); - assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); - assertThat(deleteResponse.getId(), equalTo("3")); + dateSensitiveGet(prepareSearch(dateMathExp1, dateMathExp2, dateMathExp3), response -> { + assertHitCount(response, 3); + assertSearchHits(response, "1", "2", "3"); + }); + + dateSensitiveGet(client().prepareGet(dateMathExp1, "1"), response -> { + assertThat(response.isExists(), is(true)); + assertThat(response.getId(), equalTo("1")); + }); + + dateSensitiveGet(client().prepareGet(dateMathExp2, "2"), response -> { + assertThat(response.isExists(), is(true)); + assertThat(response.getId(), equalTo("2")); + }); + + dateSensitiveGet(client().prepareGet(dateMathExp3, "3"), response -> { + assertThat(response.isExists(), is(true)); + assertThat(response.getId(), equalTo("3")); + }); + + 
dateSensitiveGet(client().prepareMultiGet().add(dateMathExp1, "1").add(dateMathExp2, "2").add(dateMathExp3, "3"), response -> { + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getId(), equalTo("1")); + assertThat(response.getResponses()[1].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[1].getResponse().getId(), equalTo("2")); + assertThat(response.getResponses()[2].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[2].getResponse().getId(), equalTo("3")); + }); + + dateSensitiveGet(indicesAdmin().prepareStats(dateMathExp1, dateMathExp2, dateMathExp3), response -> { + assertThat(response.getIndex(index1), notNullValue()); + assertThat(response.getIndex(index2), notNullValue()); + assertThat(response.getIndex(index3), notNullValue()); + }); + + dateSensitiveGet(client().prepareDelete(dateMathExp1, "1"), response -> { + assertEquals(DocWriteResponse.Result.DELETED, response.getResult()); + assertThat(response.getId(), equalTo("1")); + }); + + dateSensitiveGet(client().prepareDelete(dateMathExp2, "2"), response -> { + assertEquals(DocWriteResponse.Result.DELETED, response.getResult()); + assertThat(response.getId(), equalTo("2")); + }); + + dateSensitiveGet(client().prepareDelete(dateMathExp3, "3"), response -> { + assertEquals(DocWriteResponse.Result.DELETED, response.getResult()); + assertThat(response.getId(), equalTo("3")); + }); } public void testAutoCreateIndexWithDateMathExpression() { @@ -139,19 +147,21 @@ public void testAutoCreateIndexWithDateMathExpression() { String dateMathExp1 = "<.marvel-{now/d}>"; String dateMathExp2 = "<.marvel-{now/d-1d}>"; String dateMathExp3 = "<.marvel-{now/d-2d}>"; - client().prepareIndex(dateMathExp1).setId("1").setSource("{}", XContentType.JSON).get(); - client().prepareIndex(dateMathExp2).setId("2").setSource("{}", XContentType.JSON).get(); - client().prepareIndex(dateMathExp3).setId("3").setSource("{}", XContentType.JSON).get(); + prepareIndex(dateMathExp1).setId("1").setSource("{}", XContentType.JSON).get(); + prepareIndex(dateMathExp2).setId("2").setSource("{}", XContentType.JSON).get(); + prepareIndex(dateMathExp3).setId("3").setSource("{}", XContentType.JSON).get(); refresh(); - SearchResponse searchResponse = dateSensitiveGet(prepareSearch(dateMathExp1, dateMathExp2, dateMathExp3)); - assertHitCount(searchResponse, 3); - assertSearchHits(searchResponse, "1", "2", "3"); + dateSensitiveGet(prepareSearch(dateMathExp1, dateMathExp2, dateMathExp3), response -> { + assertHitCount(response, 3); + assertSearchHits(response, "1", "2", "3"); + }); - IndicesStatsResponse indicesStatsResponse = dateSensitiveGet(indicesAdmin().prepareStats(dateMathExp1, dateMathExp2, dateMathExp3)); - assertThat(indicesStatsResponse.getIndex(index1), notNullValue()); - assertThat(indicesStatsResponse.getIndex(index2), notNullValue()); - assertThat(indicesStatsResponse.getIndex(index3), notNullValue()); + dateSensitiveGet(indicesAdmin().prepareStats(dateMathExp1, dateMathExp2, dateMathExp3), response -> { + assertThat(response.getIndex(index1), notNullValue()); + assertThat(response.getIndex(index2), notNullValue()); + assertThat(response.getIndex(index3), notNullValue()); + }); } public void testCreateIndexWithDateMathExpression() { @@ -164,15 +174,15 @@ public void testCreateIndexWithDateMathExpression() { String dateMathExp3 = "<.marvel-{now/d-2d}>"; createIndex(dateMathExp1, dateMathExp2, dateMathExp3); - GetSettingsResponse 
getSettingsResponse = dateSensitiveGet(indicesAdmin().prepareGetSettings(index1, index2, index3)); - assertEquals(dateMathExp1, getSettingsResponse.getSetting(index1, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); - assertEquals(dateMathExp2, getSettingsResponse.getSetting(index2, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); - assertEquals(dateMathExp3, getSettingsResponse.getSetting(index3, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); + dateSensitiveGet(indicesAdmin().prepareGetSettings(index1, index2, index3), response -> { + assertEquals(dateMathExp1, response.getSetting(index1, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); + assertEquals(dateMathExp2, response.getSetting(index2, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); + assertEquals(dateMathExp3, response.getSetting(index3, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); + }); ClusterState clusterState = clusterAdmin().prepareState().get().getState(); assertThat(clusterState.metadata().index(index1), notNullValue()); assertThat(clusterState.metadata().index(index2), notNullValue()); assertThat(clusterState.metadata().index(index3), notNullValue()); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java index db5578ee6e60b..3dd9feff9ce25 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java @@ -96,10 +96,10 @@ public void testDeletesAloneCanTriggerRefresh() throws Exception { IndexService indexService = createIndex("index", indexSettings(1, 0).put("index.refresh_interval", -1).build()); IndexShard shard = indexService.getShard(0); for (int i = 0; i < 100; i++) { - client().prepareIndex("index").setId(Integer.toString(i)).setSource("field", "value").get(); + prepareIndex("index").setId(Integer.toString(i)).setSource("field", "value").get(); } // Force merge so we know all merges are done before we start deleting: - ForceMergeResponse r = client().admin().indices().prepareForceMerge().setMaxNumSegments(1).execute().actionGet(); + ForceMergeResponse r = client().admin().indices().prepareForceMerge().setMaxNumSegments(1).get(); assertNoFailures(r); final RefreshStats refreshStats = shard.refreshStats(); for (int i = 0; i < 100; i++) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java index 7bedd163c2530..ce3fd98476725 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java @@ -23,7 +23,6 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; import org.elasticsearch.action.search.MultiSearchRequestBuilder; -import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -45,6 +44,7 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; 
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -308,7 +308,7 @@ public void testWildcardBehaviour() throws Exception { verify(getSettings(indices).setIndicesOptions(options), false); assertAcked(prepareCreate("foobar")); - client().prepareIndex("foobar").setId("1").setSource("k", "v").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("foobar").setId("1").setSource("k", "v").setRefreshPolicy(IMMEDIATE).get(); // Verify defaults for wildcards, with one wildcard expression and one existing index indices = new String[] { "foo*" }; @@ -394,7 +394,7 @@ public void testWildcardBehaviourSnapshotRestore() throws Exception { public void testAllMissingLenient() throws Exception { createIndex("test1"); - client().prepareIndex("test1").setId("1").setSource("k", "v").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test1").setId("1").setSource("k", "v").setRefreshPolicy(IMMEDIATE).get(); assertHitCount(prepareSearch("test2").setIndicesOptions(IndicesOptions.lenientExpandOpen()).setQuery(matchAllQuery()), 0L); assertHitCount(prepareSearch("test2", "test3").setQuery(matchAllQuery()).setIndicesOptions(IndicesOptions.lenientExpandOpen()), 0L); // you should still be able to run empty searches without things blowing up @@ -403,12 +403,12 @@ public void testAllMissingLenient() throws Exception { public void testAllMissingStrict() throws Exception { createIndex("test1"); - expectThrows(IndexNotFoundException.class, () -> prepareSearch("test2").setQuery(matchAllQuery()).execute().actionGet()); + expectThrows(IndexNotFoundException.class, () -> prepareSearch("test2").setQuery(matchAllQuery()).get()); - expectThrows(IndexNotFoundException.class, () -> prepareSearch("test2", "test3").setQuery(matchAllQuery()).execute().actionGet()); + expectThrows(IndexNotFoundException.class, () -> prepareSearch("test2", "test3").setQuery(matchAllQuery()).get()); // you should still be able to run empty searches without things blowing up - prepareSearch().setQuery(matchAllQuery()).execute().actionGet(); + prepareSearch().setQuery(matchAllQuery()).get(); } // For now don't handle closed indices @@ -674,10 +674,11 @@ private static void verify(ActionRequestBuilder requestBuilder, boolean fa private static void verify(ActionRequestBuilder requestBuilder, boolean fail, long expectedCount) { if (fail) { if (requestBuilder instanceof MultiSearchRequestBuilder multiSearchRequestBuilder) { - MultiSearchResponse multiSearchResponse = multiSearchRequestBuilder.get(); - assertThat(multiSearchResponse.getResponses().length, equalTo(1)); - assertThat(multiSearchResponse.getResponses()[0].isFailure(), is(true)); - assertThat(multiSearchResponse.getResponses()[0].getResponse(), nullValue()); + assertResponse(multiSearchRequestBuilder, multiSearchResponse -> { + assertThat(multiSearchResponse.getResponses().length, equalTo(1)); + assertThat(multiSearchResponse.getResponses()[0].isFailure(), is(true)); + assertThat(multiSearchResponse.getResponses()[0].getResponse(), nullValue()); + }); } else { try { requestBuilder.get(); @@ -688,9 +689,10 @@ private static void verify(ActionRequestBuilder requestBuilder, boolean fa if (requestBuilder instanceof SearchRequestBuilder searchRequestBuilder) { assertHitCount(searchRequestBuilder, expectedCount); } else if (requestBuilder instanceof 
MultiSearchRequestBuilder multiSearchRequestBuilder) { - MultiSearchResponse multiSearchResponse = multiSearchRequestBuilder.get(); - assertThat(multiSearchResponse.getResponses().length, equalTo(1)); - assertThat(multiSearchResponse.getResponses()[0].getResponse(), notNullValue()); + assertResponse(multiSearchRequestBuilder, response -> { + assertThat(response.getResponses().length, equalTo(1)); + assertThat(response.getResponses()[0].getResponse(), notNullValue()); + }); } else { requestBuilder.get(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java index b10d4147af25c..0b99e3ba3ffcf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.settings.Settings; @@ -35,7 +34,8 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.dateRange; import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -59,23 +59,8 @@ public void testCacheAggs() throws Exception { // This is not a random example: serialization with time zones writes shared strings // which used to not work well with the query cache because of the handles stream output // see #9500 - final SearchResponse r1 = client.prepareSearch("index") - .setSize(0) - .setSearchType(SearchType.QUERY_THEN_FETCH) - .addAggregation( - dateHistogram("histo").field("f").timeZone(ZoneId.of("+01:00")).minDocCount(0).calendarInterval(DateHistogramInterval.MONTH) - ) - .get(); - assertNoFailures(r1); - - // The cached is actually used - assertThat( - indicesAdmin().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), - greaterThan(0L) - ); - - for (int i = 0; i < 10; ++i) { - final SearchResponse r2 = client.prepareSearch("index") + assertNoFailuresAndResponse( + client.prepareSearch("index") .setSize(0) .setSearchType(SearchType.QUERY_THEN_FETCH) .addAggregation( @@ -83,21 +68,42 @@ public void testCacheAggs() throws Exception { .timeZone(ZoneId.of("+01:00")) .minDocCount(0) .calendarInterval(DateHistogramInterval.MONTH) - ) - .get(); - assertNoFailures(r2); - Histogram h1 = r1.getAggregations().get("histo"); - Histogram h2 = r2.getAggregations().get("histo"); - final List buckets1 = h1.getBuckets(); - final List buckets2 = h2.getBuckets(); - assertEquals(buckets1.size(), buckets2.size()); - for (int j = 0; j < buckets1.size(); ++j) { - final Bucket b1 = buckets1.get(j); - final Bucket b2 = buckets2.get(j); - assertEquals(b1.getKey(), b2.getKey()); - assertEquals(b1.getDocCount(), 
b2.getDocCount()); + ), + r1 -> { + // The cached is actually used + assertThat( + indicesAdmin().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), + greaterThan(0L) + ); + + for (int i = 0; i < 10; ++i) { + assertNoFailuresAndResponse( + client.prepareSearch("index") + .setSize(0) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .addAggregation( + dateHistogram("histo").field("f") + .timeZone(ZoneId.of("+01:00")) + .minDocCount(0) + .calendarInterval(DateHistogramInterval.MONTH) + ), + r2 -> { + Histogram h1 = r1.getAggregations().get("histo"); + Histogram h2 = r2.getAggregations().get("histo"); + final List buckets1 = h1.getBuckets(); + final List buckets2 = h2.getBuckets(); + assertEquals(buckets1.size(), buckets2.size()); + for (int j = 0; j < buckets1.size(); ++j) { + final Bucket b1 = buckets1.get(j); + final Bucket b2 = buckets2.get(j); + assertEquals(b1.getKey(), b2.getKey()); + assertEquals(b1.getDocCount(), b2.getDocCount()); + } + } + ); + } } - } + ); } public void testQueryRewrite() throws Exception { @@ -133,35 +139,43 @@ public void testQueryRewrite() throws Exception { assertCacheState(client, "index", 0, 0); - final SearchResponse r1 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-25")) - // to ensure that query is executed even if it rewrites to match_no_docs - .addAggregation(new GlobalAggregationBuilder("global")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r1); - assertThat(r1.getHits().getTotalHits().value, equalTo(7L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-25")) + // to ensure that query is executed even if it rewrites to match_no_docs + .addAggregation(new GlobalAggregationBuilder("global")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(7L)); + } + ); assertCacheState(client, "index", 0, 5); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) + .addAggregation(new GlobalAggregationBuilder("global")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(7L)); - final SearchResponse r2 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) - .addAggregation(new GlobalAggregationBuilder("global")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r2); - assertThat(r2.getHits().getTotalHits().value, equalTo(7L)); + } + ); assertCacheState(client, "index", 3, 7); - - final SearchResponse r3 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-21").lte("2016-03-27")) - .addAggregation(new GlobalAggregationBuilder("global")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r3); - assertThat(r3.getHits().getTotalHits().value, equalTo(7L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-21").lte("2016-03-27")) + 
.addAggregation(new GlobalAggregationBuilder("global")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(7L)); + } + ); assertCacheState(client, "index", 6, 9); } @@ -195,31 +209,40 @@ public void testQueryRewriteMissingValues() throws Exception { assertCacheState(client, "index", 0, 0); - final SearchResponse r1 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-28")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r1); - assertThat(r1.getHits().getTotalHits().value, equalTo(8L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-28")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(8L)); + } + ); assertCacheState(client, "index", 0, 1); - final SearchResponse r2 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-28")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r2); - assertThat(r2.getHits().getTotalHits().value, equalTo(8L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-28")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(8L)); + } + ); assertCacheState(client, "index", 1, 1); - final SearchResponse r3 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-28")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r3); - assertThat(r3.getHits().getTotalHits().value, equalTo(8L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-28")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(8L)); + } + ); assertCacheState(client, "index", 2, 1); } @@ -253,35 +276,44 @@ public void testQueryRewriteDates() throws Exception { assertCacheState(client, "index", 0, 0); - final SearchResponse r1 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("d").gte("2013-01-01T00:00:00").lte("now")) - // to ensure that query is executed even if it rewrites to match_no_docs - .addAggregation(new GlobalAggregationBuilder("global")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r1); - assertThat(r1.getHits().getTotalHits().value, equalTo(9L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("d").gte("2013-01-01T00:00:00").lte("now")) + // to ensure that query is executed even if it rewrites to match_no_docs + .addAggregation(new GlobalAggregationBuilder("global")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(9L)); + } + ); assertCacheState(client, 
"index", 0, 1); - final SearchResponse r2 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("d").gte("2013-01-01T00:00:00").lte("now")) - .addAggregation(new GlobalAggregationBuilder("global")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r2); - assertThat(r2.getHits().getTotalHits().value, equalTo(9L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("d").gte("2013-01-01T00:00:00").lte("now")) + .addAggregation(new GlobalAggregationBuilder("global")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(9L)); + } + ); assertCacheState(client, "index", 1, 1); - final SearchResponse r3 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("d").gte("2013-01-01T00:00:00").lte("now")) - .addAggregation(new GlobalAggregationBuilder("global")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r3); - assertThat(r3.getHits().getTotalHits().value, equalTo(9L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("d").gte("2013-01-01T00:00:00").lte("now")) + .addAggregation(new GlobalAggregationBuilder("global")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(9L)); + } + ); assertCacheState(client, "index", 2, 1); } @@ -324,13 +356,16 @@ public void testQueryRewriteDatesWithNow() throws Exception { assertCacheState(client, "index-2", 0, 0); assertCacheState(client, "index-3", 0, 0); - final SearchResponse r1 = client.prepareSearch("index-*") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("d").gte("now-7d/d").lte("now")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r1); - assertThat(r1.getHits().getTotalHits().value, equalTo(8L)); + assertResponse( + client.prepareSearch("index-*") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("d").gte("now-7d/d").lte("now")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(8L)); + } + ); assertCacheState(client, "index-1", 0, 1); assertCacheState(client, "index-2", 0, 1); // Because the query will INTERSECT with the 3rd index it will not be @@ -338,24 +373,30 @@ public void testQueryRewriteDatesWithNow() throws Exception { // cache miss or cache hit since queries containing now can't be cached assertCacheState(client, "index-3", 0, 0); - final SearchResponse r2 = client.prepareSearch("index-*") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("d").gte("now-7d/d").lte("now")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r2); - assertThat(r2.getHits().getTotalHits().value, equalTo(8L)); + assertResponse( + client.prepareSearch("index-*") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("d").gte("now-7d/d").lte("now")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(8L)); + } + ); assertCacheState(client, "index-1", 1, 1); assertCacheState(client, 
"index-2", 1, 1); assertCacheState(client, "index-3", 0, 0); - final SearchResponse r3 = client.prepareSearch("index-*") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("d").gte("now-7d/d").lte("now")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r3); - assertThat(r3.getHits().getTotalHits().value, equalTo(8L)); + assertResponse( + client.prepareSearch("index-*") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("d").gte("now-7d/d").lte("now")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(8L)); + } + ); assertCacheState(client, "index-1", 2, 1); assertCacheState(client, "index-2", 2, 1); assertCacheState(client, "index-3", 0, 0); @@ -391,70 +432,88 @@ public void testCanCache() throws Exception { assertCacheState(client, "index", 0, 0); // If size > 0 we should no cache by default - final SearchResponse r1 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(1) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-25")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r1); - assertThat(r1.getHits().getTotalHits().value, equalTo(7L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(1) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-25")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(7L)); + } + ); assertCacheState(client, "index", 0, 0); // If search type is DFS_QUERY_THEN_FETCH we should not cache - final SearchResponse r2 = client.prepareSearch("index") - .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r2); - assertThat(r2.getHits().getTotalHits().value, equalTo(7L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(7L)); + } + ); assertCacheState(client, "index", 0, 0); // If search type is DFS_QUERY_THEN_FETCH we should not cache even if // the cache flag is explicitly set on the request - final SearchResponse r3 = client.prepareSearch("index") - .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setSize(0) - .setRequestCache(true) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r3); - assertThat(r3.getHits().getTotalHits().value, equalTo(7L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setSize(0) + .setRequestCache(true) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(7L)); + } + ); assertCacheState(client, "index", 0, 0); // If the request has an non-filter aggregation containing now we should not cache - final SearchResponse r5 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - 
.setRequestCache(true) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) - .addAggregation(dateRange("foo").field("s").addRange("now-10y", "now")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r5); - assertThat(r5.getHits().getTotalHits().value, equalTo(7L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setRequestCache(true) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) + .addAggregation(dateRange("foo").field("s").addRange("now-10y", "now")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(7L)); + } + ); assertCacheState(client, "index", 0, 0); // If size > 1 and cache flag is set on the request we should cache - final SearchResponse r6 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(1) - .setRequestCache(true) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-21").lte("2016-03-27")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r6); - assertThat(r6.getHits().getTotalHits().value, equalTo(7L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(1) + .setRequestCache(true) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-21").lte("2016-03-27")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(7L)); + } + ); assertCacheState(client, "index", 0, 2); // If the request has a filter aggregation containing now we should cache since it gets rewritten - final SearchResponse r4 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setRequestCache(true) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) - .addAggregation(filter("foo", QueryBuilders.rangeQuery("s").from("now-10y").to("now"))) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r4); - assertThat(r4.getHits().getTotalHits().value, equalTo(7L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setRequestCache(true) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) + .addAggregation(filter("foo", QueryBuilders.rangeQuery("s").from("now-10y").to("now"))), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(7L)); + } + ); assertCacheState(client, "index", 0, 4); } @@ -476,32 +535,40 @@ public void testCacheWithFilteredAlias() { assertCacheState(client, "index", 0, 0); - SearchResponse r1 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("created_at").gte("now-7d/d")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r1); - assertThat(r1.getHits().getTotalHits().value, equalTo(1L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("created_at").gte("now-7d/d")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + } + ); assertCacheState(client, "index", 0, 1); - r1 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - 
.setQuery(QueryBuilders.rangeQuery("created_at").gte("now-7d/d")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r1); - assertThat(r1.getHits().getTotalHits().value, equalTo(1L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("created_at").gte("now-7d/d")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + } + ); assertCacheState(client, "index", 1, 1); - r1 = client.prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get(); - ElasticsearchAssertions.assertAllSuccessful(r1); - assertThat(r1.getHits().getTotalHits().value, equalTo(1L)); + assertResponse(client.prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0), response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + }); assertCacheState(client, "index", 1, 2); - r1 = client.prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get(); - ElasticsearchAssertions.assertAllSuccessful(r1); - assertThat(r1.getHits().getTotalHits().value, equalTo(1L)); + assertResponse(client.prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0), response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + }); assertCacheState(client, "index", 2, 2); } @@ -519,14 +586,13 @@ public void testProfileDisableCache() throws Exception { int expectedMisses = 0; for (int i = 0; i < 5; i++) { boolean profile = i % 2 == 0; - SearchResponse resp = client.prepareSearch("index") - .setRequestCache(true) - .setProfile(profile) - .setQuery(QueryBuilders.termQuery("k", "hello")) - .get(); - assertNoFailures(resp); - ElasticsearchAssertions.assertAllSuccessful(resp); - assertThat(resp.getHits().getTotalHits().value, equalTo(1L)); + assertNoFailuresAndResponse( + client.prepareSearch("index").setRequestCache(true).setProfile(profile).setQuery(QueryBuilders.termQuery("k", "hello")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + } + ); if (profile == false) { if (i == 1) { expectedMisses++; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java index d92bb64d4fd9a..0e6cd665b6951 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java @@ -86,7 +86,7 @@ public void testThatPreBuiltAnalyzersAreNotClosedOnIndexClose() throws Exception int amountOfIndicesToClose = randomInt(numIndices - 1); for (int i = 0; i < amountOfIndicesToClose; i++) { String indexName = indexNames.get(i); - indicesAdmin().prepareClose(indexName).execute().actionGet(); + indicesAdmin().prepareClose(indexName).get(); } ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java index 70234d81feadd..1d41641d027a5 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java @@ -49,7 +49,7 @@ public void testWaitIfOngoing() throws InterruptedException { final int numIters = scaledRandomIntBetween(10, 30); for (int i = 0; i < numIters; i++) { for (int j = 0; j < 10; j++) { - client().prepareIndex("test").setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setSource("{}", XContentType.JSON).get(); } final CountDownLatch latch = new CountDownLatch(10); final CopyOnWriteArrayList<Throwable> errors = new CopyOnWriteArrayList<>(); @@ -87,7 +87,7 @@ public void testRejectIllegalFlushParameters() { createIndex("test"); int numDocs = randomIntBetween(0, 10); for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test").setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setSource("{}", XContentType.JSON).get(); } assertThat( expectThrows( @@ -124,7 +124,7 @@ public void testFlushOnInactive() throws Exception { ensureGreen(indexName); int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { - client().prepareIndex(indexName).setSource("f", "v").get(); + prepareIndex(indexName).setSource("f", "v").get(); } if (randomBoolean()) { internalCluster().restartNode(randomFrom(dataNodes)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateIT.java index 7541bce29fbe9..ec5d9876b7703 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateIT.java @@ -58,21 +58,18 @@ public void testConcurrentDynamicMapping() throws Exception { for (int j = 0; j < numDocs; j++) { Map<String, Object> source = new HashMap<>(); source.put(fieldName, "test-user"); - client().prepareIndex("test") - .setId(Integer.toString(currentID++)) - .setSource(source) - .execute(new ActionListener<DocWriteResponse>() { - @Override - public void onResponse(DocWriteResponse response) { - latch.countDown(); - } + prepareIndex("test").setId(Integer.toString(currentID++)).setSource(source).execute(new ActionListener<DocWriteResponse>() { + @Override + public void onResponse(DocWriteResponse response) { + latch.countDown(); + } - @Override - public void onFailure(Exception e) { - throwable.add(e); - latch.countDown(); - } - }); + @Override + public void onFailure(Exception e) { + throwable.add(e); + latch.countDown(); + } + }); } latch.await(); assertThat(throwable, emptyIterable()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java index 7a9aa7a47215a..8d7311e4f7619 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java @@ -58,7 +58,7 @@ public void testBWCMalformedDynamicTemplate() { .put("index.version.created", IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.V_8_0_0)) ).setMapping(mapping) ); - client().prepareIndex(indexName).setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); + prepareIndex(indexName).setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); 
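Note on the pattern above: the client().prepareIndex(index) to prepareIndex(index) rewrites applied across these test files lean on a convenience method inherited from the shared integration-test base class. A minimal sketch of such a helper, assuming it simply delegates to the test client (the real method in the Elasticsearch test framework may differ in modifiers and location):

    // Sketch only, not part of this diff: the delegating helper the new call sites assume.
    protected IndexRequestBuilder prepareIndex(String index) {
        // Identical behaviour to the old client().prepareIndex(index); the helper
        // exists purely to trim boilerplate at each call site.
        return client().prepareIndex(index);
    }
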
assertNoFailures((indicesAdmin().prepareRefresh(indexName)).get()); assertHitCount(prepareSearch(indexName), 1); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java index 33b80a4a4f3b7..720f48754519b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java @@ -41,7 +41,7 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { public void testGetMappingsWhereThereAreNone() { createIndex("index"); - GetMappingsResponse response = indicesAdmin().prepareGetMappings().execute().actionGet(); + GetMappingsResponse response = indicesAdmin().prepareGetMappings().get(); assertThat(response.mappings().containsKey("index"), equalTo(true)); assertEquals(MappingMetadata.EMPTY_MAPPINGS, response.mappings().get("index")); } @@ -59,42 +59,41 @@ private XContentBuilder getMappingForType() throws IOException { } public void testSimpleGetMappings() throws Exception { - indicesAdmin().prepareCreate("indexa").setMapping(getMappingForType()).execute().actionGet(); - indicesAdmin().prepareCreate("indexb").setMapping(getMappingForType()).execute().actionGet(); + indicesAdmin().prepareCreate("indexa").setMapping(getMappingForType()).get(); + indicesAdmin().prepareCreate("indexb").setMapping(getMappingForType()).get(); ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth() .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() - .execute() - .actionGet(); + .get(); assertThat(clusterHealth.isTimedOut(), equalTo(false)); // Get all mappings - GetMappingsResponse response = indicesAdmin().prepareGetMappings().execute().actionGet(); + GetMappingsResponse response = indicesAdmin().prepareGetMappings().get(); assertThat(response.mappings().size(), equalTo(2)); assertThat(response.mappings().get("indexa"), notNullValue()); assertThat(response.mappings().get("indexb"), notNullValue()); // Get all mappings, via wildcard support - response = indicesAdmin().prepareGetMappings("*").execute().actionGet(); + response = indicesAdmin().prepareGetMappings("*").get(); assertThat(response.mappings().size(), equalTo(2)); assertThat(response.mappings().get("indexa"), notNullValue()); assertThat(response.mappings().get("indexb"), notNullValue()); // Get mappings in indexa - response = indicesAdmin().prepareGetMappings("indexa").execute().actionGet(); + response = indicesAdmin().prepareGetMappings("indexa").get(); assertThat(response.mappings().size(), equalTo(1)); assertThat(response.mappings().get("indexa"), notNullValue()); } public void testGetMappingsWithBlocks() throws IOException { - indicesAdmin().prepareCreate("test").setMapping(getMappingForType()).execute().actionGet(); + indicesAdmin().prepareCreate("test").setMapping(getMappingForType()).get(); ensureGreen(); for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { try { enableIndexBlock("test", block); - GetMappingsResponse response = indicesAdmin().prepareGetMappings().execute().actionGet(); + GetMappingsResponse response = indicesAdmin().prepareGetMappings().get(); assertThat(response.mappings().size(), equalTo(1)); assertNotNull(response.mappings().get("test")); } finally { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java index 0eca3d689903e..0439fe6f67fb5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; @@ -48,6 +47,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -62,9 +62,8 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { public void testDynamicUpdates() throws Exception { indicesAdmin().prepareCreate("test") .setSettings(indexSettings(1, 0).put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), Long.MAX_VALUE)) - .execute() - .actionGet(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); + .get(); + clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); updateClusterSettings( Settings.builder().put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMinutes(5)) ); @@ -75,8 +74,7 @@ public void testDynamicUpdates() throws Exception { String type = "type"; String fieldName = "field_" + type + "_" + rec; indexRequests.add( - client().prepareIndex("test") - .setId(Integer.toString(rec)) + prepareIndex("test").setId(Integer.toString(rec)) .setTimeout(TimeValue.timeValueMinutes(5)) .setSource(fieldName, "some_value") ); @@ -84,10 +82,9 @@ public void testDynamicUpdates() throws Exception { indexRandom(true, false, indexRequests); logger.info("checking all the documents are there"); - RefreshResponse refreshResponse = indicesAdmin().prepareRefresh().execute().actionGet(); + RefreshResponse refreshResponse = indicesAdmin().prepareRefresh().get(); assertThat(refreshResponse.getFailedShards(), equalTo(0)); - SearchResponse response = prepareSearch("test").setSize(0).execute().actionGet(); - assertThat(response.getHits().getTotalHits().value, equalTo((long) recCount)); + assertHitCount(prepareSearch("test").setSize(0), recCount); logger.info("checking all the fields are in the mappings"); @@ -103,30 +100,30 @@ public void testDynamicUpdates() throws Exception { public void testUpdateMappingWithoutType() { indicesAdmin().prepareCreate("test").setSettings(indexSettings(1, 0)).setMapping(""" {"properties":{"body":{"type":"text"}}} - """).execute().actionGet(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); + """).get(); + clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); 
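Note on the pattern above: the execute().actionGet() to get() rewrites in this file and the ones before it are behaviour-preserving. ActionRequestBuilder.get() is the blocking shorthand for execute().actionGet(), so the two forms below issue the same request and wait on the same future; the variable names are illustrative only, not taken from the diff:

    // Both statements block on the same ActionFuture and yield the same response.
    GetMappingsResponse viaExecute = indicesAdmin().prepareGetMappings("test").execute().actionGet();
    GetMappingsResponse viaGet = indicesAdmin().prepareGetMappings("test").get();
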
AcknowledgedResponse putMappingResponse = indicesAdmin().preparePutMapping("test").setSource(""" {"properties":{"date":{"type":"integer"}}} - """, XContentType.JSON).execute().actionGet(); + """, XContentType.JSON).get(); assertThat(putMappingResponse.isAcknowledged(), equalTo(true)); - GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("test").execute().actionGet(); + GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("test").get(); assertThat(getMappingsResponse.mappings().get("test").source().toString(), equalTo(""" {"_doc":{"properties":{"body":{"type":"text"},"date":{"type":"integer"}}}}""")); } public void testUpdateMappingWithoutTypeMultiObjects() { createIndex("test", 1, 0); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); + clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); AcknowledgedResponse putMappingResponse = indicesAdmin().preparePutMapping("test").setSource(""" - {"properties":{"date":{"type":"integer"}}}""", XContentType.JSON).execute().actionGet(); + {"properties":{"date":{"type":"integer"}}}""", XContentType.JSON).get(); assertThat(putMappingResponse.isAcknowledged(), equalTo(true)); - GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("test").execute().actionGet(); + GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("test").get(); assertThat(getMappingsResponse.mappings().get("test").source().toString(), equalTo(""" {"_doc":{"properties":{"date":{"type":"integer"}}}}""")); } @@ -134,13 +131,13 @@ public void testUpdateMappingWithoutTypeMultiObjects() { public void testUpdateMappingWithConflicts() { indicesAdmin().prepareCreate("test").setSettings(indexSettings(2, 0)).setMapping(""" {"properties":{"body":{"type":"text"}}} - """).execute().actionGet(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); + """).get(); + clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); try { indicesAdmin().preparePutMapping("test").setSource(""" {"_doc":{"properties":{"body":{"type":"integer"}}}} - """, XContentType.JSON).execute().actionGet(); + """, XContentType.JSON).get(); fail("Expected MergeMappingException"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("mapper [body] cannot be changed from type [text] to [integer]")); @@ -150,11 +147,11 @@ public void testUpdateMappingWithConflicts() { public void testUpdateMappingWithNormsConflicts() { indicesAdmin().prepareCreate("test").setMapping(""" {"properties":{"body":{"type":"text", "norms": false }}} - """).execute().actionGet(); + """).get(); try { indicesAdmin().preparePutMapping("test").setSource(""" {"_doc":{"properties":{"body":{"type":"text", "norms": true }}}} - """, XContentType.JSON).execute().actionGet(); + """, XContentType.JSON).get(); fail("Expected MergeMappingException"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("Cannot update parameter [norms] from [false] to [true]")); @@ -166,12 +163,12 @@ public void testUpdateMappingWithNormsConflicts() { */ public void testUpdateMappingNoChanges() { indicesAdmin().prepareCreate("test").setSettings(indexSettings(2, 0)).setMapping(""" - {"properties":{"body":{"type":"text"}}}""").execute().actionGet(); - 
clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); + {"properties":{"body":{"type":"text"}}}""").get(); + clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); AcknowledgedResponse putMappingResponse = indicesAdmin().preparePutMapping("test").setSource(""" {"_doc":{"properties":{"body":{"type":"text"}}}} - """, XContentType.JSON).execute().actionGet(); + """, XContentType.JSON).get(); // no changes, we return assertThat(putMappingResponse.isAcknowledged(), equalTo(true)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java index a1068654daef2..e726c8a08002a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; @@ -51,6 +50,7 @@ import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.equalTo; @@ -144,9 +144,9 @@ public void testMemoryBreaker() throws Exception { SearchRequestBuilder searchRequest = client.prepareSearch("cb-test").setQuery(matchAllQuery()).addSort("test", SortOrder.DESC); String errMsg = "Data too large, data for [test] would be"; - assertFailures(searchRequest, RestStatus.INTERNAL_SERVER_ERROR, containsString(errMsg)); + assertFailures(searchRequest, RestStatus.TOO_MANY_REQUESTS, containsString(errMsg)); errMsg = "which is larger than the limit of [100/100b]"; - assertFailures(searchRequest, RestStatus.INTERNAL_SERVER_ERROR, containsString(errMsg)); + assertFailures(searchRequest, RestStatus.TOO_MANY_REQUESTS, containsString(errMsg)); NodesStatsResponse stats = client.admin().cluster().prepareNodesStats().setBreaker(true).get(); long breaks = 0; @@ -210,9 +210,9 @@ public void testRamAccountingTermsEnum() throws Exception { SearchRequestBuilder searchRequest = client.prepareSearch("ramtest").setQuery(matchAllQuery()).addSort("test", SortOrder.DESC); String errMsg = "Data too large, data for [test] would be"; - assertFailures(searchRequest, RestStatus.INTERNAL_SERVER_ERROR, containsString(errMsg)); + assertFailures(searchRequest, RestStatus.TOO_MANY_REQUESTS, containsString(errMsg)); errMsg = "which is larger than the limit of [100/100b]"; - assertFailures(searchRequest, RestStatus.INTERNAL_SERVER_ERROR, containsString(errMsg)); + assertFailures(searchRequest, RestStatus.TOO_MANY_REQUESTS, containsString(errMsg)); NodesStatsResponse stats = 
client.admin().cluster().prepareNodesStats().setBreaker(true).get(); long breaks = 0; @@ -277,11 +277,10 @@ public void testAggTookTooMuch() throws Exception { // A terms aggregation on the "test" field should trip the bucket circuit breaker try { - SearchResponse resp = client.prepareSearch("cb-test") - .setQuery(matchAllQuery()) - .addAggregation(terms("my_terms").field("test")) - .get(); - assertTrue("there should be shard failures", resp.getFailedShards() > 0); + assertResponse( + client.prepareSearch("cb-test").setQuery(matchAllQuery()).addAggregation(terms("my_terms").field("test")), + response -> assertTrue("there should be shard failures", response.getFailedShards() > 0) + ); fail("aggregation should have tripped the breaker"); } catch (Exception e) { Throwable cause = e.getCause(); @@ -293,7 +292,7 @@ public void testAggTookTooMuch() throws Exception { /** Issues a cache clear and waits 30 seconds for the field data breaker to be cleared */ public void clearFieldData() throws Exception { - indicesAdmin().prepareClearCache().setFieldDataCache(true).execute().actionGet(); + indicesAdmin().prepareClearCache().setFieldDataCache(true).get(); assertBusy(() -> { NodesStatsResponse resp = clusterAdmin().prepareNodesStats().clear().setBreaker(true).get(new TimeValue(15, TimeUnit.SECONDS)); for (NodeStats nStats : resp.getNodes()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java index 5958f1ad57932..6a52159c71ab9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java @@ -61,7 +61,7 @@ protected boolean addMockInternalEngine() { } public void testBreakerWithRandomExceptions() throws IOException, InterruptedException, ExecutionException { - for (NodeStats node : clusterAdmin().prepareNodesStats().clear().setBreaker(true).execute().actionGet().getNodes()) { + for (NodeStats node : clusterAdmin().prepareNodesStats().clear().setBreaker(true).get().getNodes()) { assertThat("Breaker is not set to 0", node.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L)); } @@ -108,7 +108,7 @@ public void testBreakerWithRandomExceptions() throws IOException, InterruptedExc .put(EXCEPTION_LOW_LEVEL_RATIO_KEY, lowLevelRate) .put(MockEngineSupport.WRAP_READER_RATIO.getKey(), 1.0d); logger.info("creating index: [test] using settings: [{}]", settings.build()); - CreateIndexResponse response = indicesAdmin().prepareCreate("test").setSettings(settings).setMapping(mapping).execute().actionGet(); + CreateIndexResponse response = indicesAdmin().prepareCreate("test").setSettings(settings).setMapping(mapping).get(); final int numDocs; if (response.isShardsAcknowledged() == false) { /* some seeds just won't let you create the index at all and we enter a ping-pong mode @@ -126,8 +126,7 @@ public void testBreakerWithRandomExceptions() throws IOException, InterruptedExc } for (int i = 0; i < numDocs; i++) { try { - client().prepareIndex("test") - .setId("" + i) + prepareIndex("test").setId("" + i) .setTimeout(TimeValue.timeValueSeconds(1)) .setSource("test-str", randomUnicodeOfLengthBetween(5, 25), "test-num", i) .get(); @@ -146,7 +145,7 @@ public void testBreakerWithRandomExceptions() throws IOException, InterruptedExc 
refreshResponse.getTotalShards() ); final int numSearches = scaledRandomIntBetween(50, 150); - NodesStatsResponse resp = clusterAdmin().prepareNodesStats().clear().setBreaker(true).execute().actionGet(); + NodesStatsResponse resp = clusterAdmin().prepareNodesStats().clear().setBreaker(true).get(); for (NodeStats stats : resp.getNodes()) { assertThat("Breaker is set to 0", stats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L)); } @@ -172,7 +171,7 @@ public void testBreakerWithRandomExceptions() throws IOException, InterruptedExc // breaker adjustment code, it should show up here by the breaker // estimate being either positive or negative. ensureGreen("test"); // make sure all shards are there - there could be shards that are still starting up. - assertAllSuccessful(indicesAdmin().prepareClearCache("test").setFieldDataCache(true).execute().actionGet()); + assertAllSuccessful(indicesAdmin().prepareClearCache("test").setFieldDataCache(true).get()); // Since .cleanUp() is no longer called on cache clear, we need to call it on each node manually for (String node : internalCluster().getNodeNames()) { @@ -181,7 +180,7 @@ public void testBreakerWithRandomExceptions() throws IOException, InterruptedExc // Clean up the cache, ensuring that entries' listeners have been called fdCache.getCache().refresh(); } - NodesStatsResponse nodeStats = clusterAdmin().prepareNodesStats().clear().setBreaker(true).execute().actionGet(); + NodesStatsResponse nodeStats = clusterAdmin().prepareNodesStats().clear().setBreaker(true).get(); for (NodeStats stats : nodeStats.getNodes()) { assertThat( "Breaker reset to 0 last search success: " + success + " mapping: " + mapping, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java index cdd77d5864a7b..0fe5845e9ed32 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java @@ -40,11 +40,11 @@ public void testPrimaryRelocationWhileIndexing() throws Exception { @Override public void run() { while (finished.get() == false && numAutoGenDocs.get() < 10_000) { - DocWriteResponse indexResponse = client().prepareIndex("test").setId("id").setSource("field", "value").get(); + DocWriteResponse indexResponse = prepareIndex("test").setId("id").setSource("field", "value").get(); assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); DeleteResponse deleteResponse = client().prepareDelete("test", "id").get(); assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); - client().prepareIndex("test").setSource("auto", true).get(); + prepareIndex("test").setSource("auto", true).get(); numAutoGenDocs.incrementAndGet(); } } @@ -64,14 +64,12 @@ public void run() { logger.info("--> [iteration {}] relocating from {} to {} ", i, relocationSource.getName(), relocationTarget.getName()); clusterAdmin().prepareReroute() .add(new MoveAllocationCommand("test", 0, relocationSource.getId(), relocationTarget.getId())) - .execute() - .actionGet(); + .get(); ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() .setTimeout(TimeValue.timeValueSeconds(60)) .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) - .execute() - .actionGet(); + .get(); if (clusterHealthResponse.isTimedOut()) { 
final String hotThreads = clusterAdmin().prepareNodesHotThreads() .setIgnoreIdleThreads(false) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index aee3d3680155e..762bbdda77df1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -281,14 +281,10 @@ private void unthrottleRecovery() { */ public void startShardRecovery(String sourceNode, String targetNode) throws Exception { logger.info("--> updating cluster settings with moving shard from node `{}` to node `{}`", sourceNode, targetNode); - clusterAdmin().prepareReroute() - .add(new MoveAllocationCommand(INDEX_NAME, 0, sourceNode, targetNode)) - .execute() - .actionGet() - .getState(); + clusterAdmin().prepareReroute().add(new MoveAllocationCommand(INDEX_NAME, 0, sourceNode, targetNode)).get().getState(); logger.info("--> requesting shard recovery"); - indicesAdmin().prepareRecoveries(INDEX_NAME).execute().actionGet(); + indicesAdmin().prepareRecoveries(INDEX_NAME).get(); logger.info("--> waiting for recovery to begin on both the source and target nodes"); final Index index = resolveIndex(INDEX_NAME); @@ -353,7 +349,7 @@ public void testGatewayRecovery() throws Exception { ensureGreen(); logger.info("--> request recoveries"); - RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).execute().actionGet(); + RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).get(); assertThat(response.shardRecoveryStates().size(), equalTo(SHARD_COUNT_1)); assertThat(response.shardRecoveryStates().get(INDEX_NAME).size(), equalTo(1)); @@ -378,7 +374,7 @@ public void testGatewayRecoveryTestActiveOnly() throws Exception { ensureGreen(); logger.info("--> request recoveries"); - RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).setActiveOnly(true).execute().actionGet(); + RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).setActiveOnly(true).get(); List recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); assertThat(recoveryStates.size(), equalTo(0)); // Should not expect any responses back @@ -408,7 +404,7 @@ public void testReplicaRecovery() throws Exception { setReplicaCount(1, INDEX_NAME); ensureGreen(INDEX_NAME); - final RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).execute().actionGet(); + final RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).get(); // we should now have two total shards, one primary and one replica List recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); @@ -461,8 +457,14 @@ public void testCancelNewShardRecoveryAndUsesExistingShardCopy() throws Exceptio int numDocs = randomIntBetween(10, 200); final IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex(INDEX_NAME) - .setSource("foo-int", randomInt(), "foo-string", randomAlphaOfLength(32), "foo-float", randomFloat()); + docs[i] = prepareIndex(INDEX_NAME).setSource( + "foo-int", + randomInt(), + "foo-string", + randomAlphaOfLength(32), + "foo-float", + randomFloat() + ); } indexRandom(randomBoolean(), docs); @@ -502,7 +504,7 @@ public void testCancelNewShardRecoveryAndUsesExistingShardCopy() throws Exceptio public Settings onNodeStopped(String nodeName) 
throws Exception { safeAwait(phase1ReadyBlocked); // nodeB stopped, peer recovery from nodeA to nodeC, it will be cancelled after nodeB get started. - RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).execute().actionGet(); + RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).get(); List<RecoveryState> recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); List<RecoveryState> nodeCRecoveryStates = findRecoveriesForTargetNode(nodeC, recoveryStates); @@ -551,7 +553,7 @@ public void testRerouteRecovery() throws Exception { throttleRecovery10Seconds(shardSize); logger.info("--> move shard from: {} to: {}", nodeA, nodeB); - clusterAdmin().prepareReroute().add(new MoveAllocationCommand(INDEX_NAME, 0, nodeA, nodeB)).execute().actionGet().getState(); + clusterAdmin().prepareReroute().add(new MoveAllocationCommand(INDEX_NAME, 0, nodeA, nodeB)).get().getState(); logger.info("--> waiting for recovery to start both on source and target"); final Index index = resolveIndex(INDEX_NAME); @@ -563,7 +565,7 @@ public void testRerouteRecovery() throws Exception { }); logger.info("--> request recoveries"); - RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).execute().actionGet(); + RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).get(); List<RecoveryState> recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); List<RecoveryState> nodeARecoveryStates = findRecoveriesForTargetNode(nodeA, recoveryStates); @@ -600,7 +602,7 @@ public void testRerouteRecovery() throws Exception { // wait for it to be finished ensureGreen(); - response = indicesAdmin().prepareRecoveries(INDEX_NAME).execute().actionGet(); + response = indicesAdmin().prepareRecoveries(INDEX_NAME).get(); recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); assertThat(recoveryStates.size(), equalTo(1)); @@ -637,9 +639,9 @@ public void testRerouteRecovery() throws Exception { throttleRecovery10Seconds(shardSize); logger.info("--> move replica shard from: {} to: {}", nodeA, nodeC); - clusterAdmin().prepareReroute().add(new MoveAllocationCommand(INDEX_NAME, 0, nodeA, nodeC)).execute().actionGet().getState(); + clusterAdmin().prepareReroute().add(new MoveAllocationCommand(INDEX_NAME, 0, nodeA, nodeC)).get().getState(); - response = indicesAdmin().prepareRecoveries(INDEX_NAME).execute().actionGet(); + response = indicesAdmin().prepareRecoveries(INDEX_NAME).get(); recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); nodeARecoveryStates = findRecoveriesForTargetNode(nodeA, recoveryStates); @@ -664,7 +666,7 @@ public void testRerouteRecovery() throws Exception { internalCluster().stopNode(nodeA); ensureStableCluster(2); - response = indicesAdmin().prepareRecoveries(INDEX_NAME).execute().actionGet(); + response = indicesAdmin().prepareRecoveries(INDEX_NAME).get(); recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); nodeARecoveryStates = findRecoveriesForTargetNode(nodeA, recoveryStates); @@ -685,7 +687,7 @@ public void testRerouteRecovery() throws Exception { unthrottleRecovery(); ensureGreen(); - response = indicesAdmin().prepareRecoveries(INDEX_NAME).execute().actionGet(); + response = indicesAdmin().prepareRecoveries(INDEX_NAME).get(); recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); nodeARecoveryStates = findRecoveriesForTargetNode(nodeA, recoveryStates); @@ -853,20 +855,19 @@ public void testSnapshotRecovery() throws Exception { logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = createSnapshot(INDEX_NAME); -
indicesAdmin().prepareClose(INDEX_NAME).execute().actionGet(); + indicesAdmin().prepareClose(INDEX_NAME).get(); logger.info("--> restore"); RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, SNAP_NAME) .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); int totalShards = restoreSnapshotResponse.getRestoreInfo().totalShards(); assertThat(totalShards, greaterThan(0)); ensureGreen(); logger.info("--> request recoveries"); - RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).execute().actionGet(); + RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).get(); Repository repository = internalCluster().getAnyMasterNodeInstance(RepositoriesService.class).repository(REPO_NAME); final RepositoryData repositoryData = AbstractSnapshotIntegTestCase.getRepositoryData(repository); @@ -920,14 +921,20 @@ private IndicesStatsResponse createAndPopulateIndex(String name, int nodeCount, final IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex(name) - .setSource("foo-int", randomInt(), "foo-string", randomAlphaOfLength(32), "foo-float", randomFloat()); + docs[i] = prepareIndex(name).setSource( + "foo-int", + randomInt(), + "foo-string", + randomAlphaOfLength(32), + "foo-float", + randomFloat() + ); } indexRandom(true, docs); flush(); assertThat(prepareSearch(name).setSize(0).get().getHits().getTotalHits().value, equalTo((long) numDocs)); - return indicesAdmin().prepareStats(name).execute().actionGet(); + return indicesAdmin().prepareStats(name).get(); } private void validateIndexRecoveryState(RecoveryState.Index indexState) { @@ -986,7 +993,7 @@ public void testHistoryRetention() throws Exception { final List<IndexRequestBuilder> requests = new ArrayList<>(); final int replicatedDocCount = scaledRandomIntBetween(25, 250); while (requests.size() < replicatedDocCount) { - requests.add(client().prepareIndex(indexName).setSource("{}", XContentType.JSON)); + requests.add(prepareIndex(indexName).setSource("{}", XContentType.JSON)); } indexRandom(true, requests); if (randomBoolean()) { @@ -1008,7 +1015,7 @@ public void testHistoryRetention() throws Exception { final int numNewDocs = scaledRandomIntBetween(25, 250); for (int i = 0; i < numNewDocs; i++) { - client().prepareIndex(indexName).setSource("{}", XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + prepareIndex(indexName).setSource("{}", XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); } // Flush twice to update the safe commit's local checkpoint assertThat(indicesAdmin().prepareFlush(indexName).setForce(true).execute().get().getFailedShards(), equalTo(0)); @@ -1040,10 +1047,7 @@ public void testDoNotInfinitelyWaitForMapping() { indicesAdmin().preparePutMapping("test").setSource("test_field", "type=text,analyzer=test_analyzer").get(); int numDocs = between(1, 10); for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test") - .setId("u" + i) - .setSource(singletonMap("test_field", Integer.toString(i)), XContentType.JSON) - .get(); + prepareIndex("test").setId("u" + i).setSource(singletonMap("test_field", Integer.toString(i)), XContentType.JSON).get(); } Semaphore recoveryBlocked = new Semaphore(1); for (DiscoveryNode node : clusterService().state().nodes()) { @@ -1143,7 +1147,7 @@ public void testRecoverLocallyUpToGlobalCheckpoint() throws Exception { randomBoolean(), false, randomBoolean(), - IntStream.range(0, numDocs).mapToObj(n ->
client().prepareIndex(indexName).setSource("num", n)).collect(toList()) + IntStream.range(0, numDocs).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); indicesAdmin().prepareRefresh(indexName).get(); // avoid refresh when we are failing a shard String failingNode = randomFrom(nodes); @@ -1234,7 +1238,7 @@ public void testUsesFileBasedRecoveryIfRetentionLeaseMissing() throws Exception randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(0, 100)).mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList()) + IntStream.range(0, between(0, 100)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); ensureGreen(indexName); @@ -1298,7 +1302,7 @@ public void testUsesFileBasedRecoveryIfRetentionLeaseAheadOfGlobalCheckpoint() t randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(0, 100)).mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList()) + IntStream.range(0, between(0, 100)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); ensureGreen(indexName); @@ -1329,9 +1333,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(1, 100)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)) - .collect(toList()) + IntStream.range(0, between(1, 100)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); // We do not guarantee that the replica can recover locally all the way to its own global checkpoint before starting @@ -1384,7 +1386,7 @@ public void testUsesFileBasedRecoveryIfOperationsBasedRecoveryWouldBeUnreasonabl randomBoolean(), false, randomBoolean(), - IntStream.range(0, between(0, 100)).mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList()) + IntStream.range(0, between(0, 100)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); ensureGreen(indexName); @@ -1465,9 +1467,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, newDocCount) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)) - .collect(toList()) + IntStream.range(0, newDocCount).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); flush(indexName); @@ -1509,7 +1509,7 @@ public void testDoesNotCopyOperationsInSafeCommit() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(0, 100)).mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList()) + IntStream.range(0, between(0, 100)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); final ShardId shardId = new ShardId(resolveIndex(indexName), 0); @@ -1528,7 +1528,7 @@ public void testDoesNotCopyOperationsInSafeCommit() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(0, 100)).mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList()) + IntStream.range(0, between(0, 100)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); setReplicaCount(1, indexName); @@ -1585,9 +1585,7 @@ public void testRepeatedRecovery() throws Exception { randomBoolean(), false, randomBoolean(), - IntStream.range(0, randomIntBetween(0, 10)) - .mapToObj(n -> 
client().prepareIndex(indexName).setSource("num", n)) - .collect(toList()) + IntStream.range(0, randomIntBetween(0, 10)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); assertThat(indicesAdmin().prepareFlush(indexName).get().getFailedShards(), equalTo(0)); @@ -1615,9 +1613,7 @@ public void testRepeatedRecovery() throws Exception { randomBoolean(), false, randomBoolean(), - IntStream.range(0, randomIntBetween(0, 10)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)) - .collect(toList()) + IntStream.range(0, randomIntBetween(0, 10)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); logger.info("--> add replicas again"); @@ -1635,7 +1631,7 @@ public void testAllocateEmptyPrimaryResetsGlobalCheckpoint() throws Exception { .setSettings(indexSettings(1, 1).put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), randomBoolean())) ); final List<IndexRequestBuilder> indexRequests = IntStream.range(0, between(10, 500)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("foo", "bar")) + .mapToObj(n -> prepareIndex(indexName).setSource("foo", "bar")) .toList(); indexRandom(randomBoolean(), true, true, indexRequests); ensureGreen(); @@ -1681,9 +1677,10 @@ public void testPeerRecoveryTrimsLocalTranslog() throws Exception { indexers[i] = new Thread(() -> { while (stopped.get() == false) { try { - DocWriteResponse response = client().prepareIndex(indexName) - .setSource(Map.of("f" + randomIntBetween(1, 10), randomNonNegativeLong()), XContentType.JSON) - .get(); + DocWriteResponse response = prepareIndex(indexName).setSource( + Map.of("f" + randomIntBetween(1, 10), randomNonNegativeLong()), + XContentType.JSON + ).get(); assertThat(response.getResult(), is(oneOf(CREATED, UPDATED))); } catch (ElasticsearchException ignored) {} } @@ -1726,7 +1723,7 @@ public void testReservesBytesDuringPeerRecoveryPhaseOne() throws Exception { createIndex(indexName, indexSettings(1, 0).put("index.routing.allocation.include._name", String.join(",", dataNodes)).build()); ensureGreen(indexName); final List<IndexRequestBuilder> indexRequests = IntStream.range(0, between(10, 500)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("foo", "bar")) + .mapToObj(n -> prepareIndex(indexName).setSource("foo", "bar")) .toList(); indexRandom(randomBoolean(), true, true, indexRequests); assertThat(indicesAdmin().prepareFlush(indexName).get().getFailedShards(), equalTo(0)); @@ -1800,7 +1797,7 @@ public void testWaitForClusterStateToBeAppliedOnSourceNode() throws Exception { createIndex(indexName, indexSettings(1, 0).build()); ensureGreen(indexName); final List<IndexRequestBuilder> indexRequests = IntStream.range(0, between(10, 500)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("foo", "bar")) + .mapToObj(n -> prepareIndex(indexName).setSource("foo", "bar")) .toList(); indexRandom(randomBoolean(), true, true, indexRequests); assertThat(indicesAdmin().prepareFlush(indexName).get().getFailedShards(), equalTo(0)); @@ -1870,7 +1867,7 @@ public void testDeleteIndexDuringFinalization() throws Exception { createIndex(indexName, indexSettings(1, 0).build()); ensureGreen(indexName); final List<IndexRequestBuilder> indexRequests = IntStream.range(0, between(10, 500)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("foo", "bar")) + .mapToObj(n -> prepareIndex(indexName).setSource("foo", "bar")) .toList(); indexRandom(randomBoolean(), true, true, indexRequests); assertThat(indicesAdmin().prepareFlush(indexName).get().getFailedShards(), equalTo(0)); @@ -1900,7 +1897,7 @@ public void
testDeleteIndexDuringFinalization() throws Exception { // Process the TRANSLOG_OPS response on the replica (avoiding failing it due to a concurrent delete) but // before sending the response back send another document to the primary, advancing the GCP to prevent the replica // being marked as in-sync (NB below we delay the replica write until after the index is deleted) - client().prepareIndex(indexName).setSource("foo", "baz").execute(ActionListener.noop()); + prepareIndex(indexName).setSource("foo", "baz").execute(ActionListener.noop()); primaryIndexShard.addGlobalCheckpointListener( globalCheckpointBeforeRecovery + 1, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java index e34d5059b4991..30c57873fc6b1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java @@ -266,39 +266,10 @@ public void testFailingReposAreTreatedAsNonExistingShardSnapshots() throws Excep } } - public void testFetchingInformationFromAnIncompatibleMasterNodeReturnsAnEmptyList() { - String indexName = "test"; - createIndex(indexName, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build()); - ShardId shardId = getShardIdForIndex(indexName); - - for (int i = 0; i < randomIntBetween(1, 50); i++) { - index(indexName, Integer.toString(i), Collections.singletonMap("foo", "bar")); - } - - String snapshotName = "snap"; - String repositoryName = "repo"; - createRepository(repositoryName, "fs", randomRepoPath(), true); - createSnapshot(repositoryName, snapshotName, indexName); - - RepositoriesService repositoriesService = internalCluster().getAnyMasterNodeInstance(RepositoriesService.class); - ThreadPool threadPool = internalCluster().getAnyMasterNodeInstance(ThreadPool.class); - ClusterService clusterService = internalCluster().getAnyMasterNodeInstance(ClusterService.class); - ShardSnapshotsService shardSnapshotsService = new ShardSnapshotsService(client(), repositoriesService, threadPool, clusterService) { - @Override - protected boolean masterSupportsFetchingLatestSnapshots() { - return false; - } - }; - - PlainActionFuture<Optional<ShardSnapshot>> latestSnapshots = PlainActionFuture.newFuture(); - shardSnapshotsService.fetchLatestSnapshotsForShard(shardId, latestSnapshots); - assertThat(latestSnapshots.actionGet().isPresent(), is(equalTo(false))); - } - private Optional<ShardSnapshot> getLatestShardSnapshot(ShardId shardId) throws Exception { ShardSnapshotsService shardSnapshotsService = getShardSnapshotsService(); - PlainActionFuture<Optional<ShardSnapshot>> future = PlainActionFuture.newFuture(); + PlainActionFuture<Optional<ShardSnapshot>> future = new PlainActionFuture<>(); shardSnapshotsService.fetchLatestSnapshotsForShard(shardId, future); return future.get(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java index b3e0d258cb113..56fffa682a36d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java @@ -40,8 +40,7 @@ public void testSimpleUpdateNumberOfReplicas() throws Exception { ClusterHealthResponse clusterHealth =
clusterAdmin().prepareHealth() .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() - .execute() - .actionGet(); + .get(); logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); NumShards numShards = getNumShards("test"); @@ -53,8 +52,7 @@ public void testSimpleUpdateNumberOfReplicas() throws Exception { assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.totalNumShards)); for (int i = 0; i < 10; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setSource(jsonBuilder().startObject().field("value", "test" + i).endObject()) .get(); } @@ -73,8 +71,7 @@ public void testSimpleUpdateNumberOfReplicas() throws Exception { .setWaitForEvents(Priority.LANGUID) .setWaitForYellowStatus() .setWaitForActiveShards(numShards.numPrimaries * 2) - .execute() - .actionGet(); + .get(); logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); @@ -107,8 +104,7 @@ public void testSimpleUpdateNumberOfReplicas() throws Exception { .setWaitForGreenStatus() .setWaitForNoRelocatingShards(true) .setWaitForNodes(">=3") - .execute() - .actionGet(); + .get(); logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -130,8 +126,7 @@ public void testSimpleUpdateNumberOfReplicas() throws Exception { .setWaitForGreenStatus() .setWaitForNoRelocatingShards(true) .setWaitForNodes(">=3") - .execute() - .actionGet(); + .get(); logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -165,8 +160,7 @@ public void testAutoExpandNumberOfReplicas0ToData() throws IOException { .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 2) - .execute() - .actionGet(); + .get(); logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -181,8 +175,7 @@ public void testAutoExpandNumberOfReplicas0ToData() throws IOException { .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 2) - .execute() - .actionGet(); + .get(); logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -202,8 +195,7 @@ public void testAutoExpandNumberOfReplicas0ToData() throws IOException { .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 3) .setWaitForNodes(">=3") - .execute() - .actionGet(); + .get(); logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -229,8 +221,7 @@ public void testAutoExpandNumberOfReplicas0ToData() throws IOException { .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 2) .setWaitForNodes(">=2") - .execute() - .actionGet(); + .get(); logger.info("--> done 
cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -256,8 +247,7 @@ public void testAutoExpandNumberOfReplicas0ToData() throws IOException { .setWaitForGreenStatus() .setWaitForNodes(">=1") .setWaitForActiveShards(numShards.numPrimaries) - .execute() - .actionGet(); + .get(); logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -286,8 +276,7 @@ public void testAutoExpandNumberReplicas1ToData() throws IOException { .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 2) - .execute() - .actionGet(); + .get(); logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -302,8 +291,7 @@ public void testAutoExpandNumberReplicas1ToData() throws IOException { .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 2) - .execute() - .actionGet(); + .get(); logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -321,8 +309,7 @@ public void testAutoExpandNumberReplicas1ToData() throws IOException { .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 3) - .execute() - .actionGet(); + .get(); logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -348,8 +335,7 @@ public void testAutoExpandNumberReplicas1ToData() throws IOException { .setWaitForGreenStatus() .setWaitForNodes(">=2") .setWaitForActiveShards(numShards.numPrimaries * 2) - .execute() - .actionGet(); + .get(); logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -375,8 +361,7 @@ public void testAutoExpandNumberReplicas1ToData() throws IOException { .setWaitForYellowStatus() .setWaitForNodes(">=1") .setWaitForActiveShards(numShards.numPrimaries) - .execute() - .actionGet(); + .get(); logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); @@ -396,8 +381,7 @@ public void testAutoExpandNumberReplicas2() { .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 3) - .execute() - .actionGet(); + .get(); logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -418,8 +402,7 @@ public void testAutoExpandNumberReplicas2() { .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 4) - .execute() - .actionGet(); + .get(); logger.info("--> done cluster health, status {}", 
clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -444,8 +427,7 @@ public void testUpdateWithInvalidNumberOfReplicas() { try { indicesAdmin().prepareUpdateSettings("test") .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, value)) - .execute() - .actionGet(); + .get(); fail("should have thrown an exception about the replica shard count"); } catch (IllegalArgumentException e) { assertEquals("Failed to parse value [" + value + "] for setting [index.number_of_replicas] must be >= 0", e.getMessage()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java index e770127bf577c..563e6e0761cb1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java @@ -62,13 +62,10 @@ public void testInvalidDynamicUpdate() { createIndex("test"); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test") - .setSettings(Settings.builder().put("index.dummy", "boom")) - .execute() - .actionGet() + () -> indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.dummy", "boom")).get() ); assertEquals(exception.getCause().getMessage(), "this setting goes boom"); - IndexMetadata indexMetadata = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test"); + IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertNotEquals(indexMetadata.getSettings().get("index.dummy"), "invalid dynamic value"); } @@ -209,51 +206,35 @@ public void testUpdateDependentIndexSettings() { iae = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test") - .setSettings(Settings.builder().put("index.acc.test.pw", "asdf")) - .execute() - .actionGet() + () -> indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.acc.test.pw", "asdf")).get() ); assertEquals("missing required setting [index.acc.test.user] for setting [index.acc.test.pw]", iae.getMessage()); // user has no dependency - indicesAdmin().prepareUpdateSettings("test") - .setSettings(Settings.builder().put("index.acc.test.user", "asdf")) - .execute() - .actionGet(); + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.acc.test.user", "asdf")).get(); // now we are consistent - indicesAdmin().prepareUpdateSettings("test") - .setSettings(Settings.builder().put("index.acc.test.pw", "test")) - .execute() - .actionGet(); + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.acc.test.pw", "test")).get(); // now try to remove it and make sure it fails iae = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test") - .setSettings(Settings.builder().putNull("index.acc.test.user")) - .execute() - .actionGet() + () -> indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().putNull("index.acc.test.user")).get() ); assertEquals("missing required setting [index.acc.test.user] for setting [index.acc.test.pw]", iae.getMessage()); // now we are consistent indicesAdmin().prepareUpdateSettings("test") 
.setSettings(Settings.builder().putNull("index.acc.test.pw").putNull("index.acc.test.user")) - .execute() - .actionGet(); + .get(); } } public void testResetDefaultWithWildcard() { createIndex("test"); - indicesAdmin().prepareUpdateSettings("test") - .setSettings(Settings.builder().put("index.refresh_interval", -1)) - .execute() - .actionGet(); - IndexMetadata indexMetadata = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test"); + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.refresh_interval", -1)).get(); + IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertEquals(indexMetadata.getSettings().get("index.refresh_interval"), "-1"); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { IndexService indexService = service.indexService(resolveIndex("test")); @@ -261,8 +242,8 @@ public void testResetDefaultWithWildcard() { assertEquals(indexService.getIndexSettings().getRefreshInterval().millis(), -1); } } - indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().putNull("index.ref*")).execute().actionGet(); - indexMetadata = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test"); + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().putNull("index.ref*")).get(); + indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertNull(indexMetadata.getSettings().get("index.refresh_interval")); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { IndexService indexService = service.indexService(resolveIndex("test")); @@ -281,9 +262,8 @@ public void testResetDefault() { .put("index.translog.flush_threshold_size", "1024b") .put("index.translog.generation_threshold_size", "4096b") ) - .execute() - .actionGet(); - IndexMetadata indexMetadata = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test"); + .get(); + IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertEquals(indexMetadata.getSettings().get("index.refresh_interval"), "-1"); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { IndexService indexService = service.indexService(resolveIndex("test")); @@ -293,11 +273,8 @@ public void testResetDefault() { assertEquals(indexService.getIndexSettings().getGenerationThresholdSize().getBytes(), 4096); } } - indicesAdmin().prepareUpdateSettings("test") - .setSettings(Settings.builder().putNull("index.refresh_interval")) - .execute() - .actionGet(); - indexMetadata = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test"); + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().putNull("index.refresh_interval")).get(); + indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertNull(indexMetadata.getSettings().get("index.refresh_interval")); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { IndexService indexService = service.indexService(resolveIndex("test")); @@ -319,8 +296,7 @@ public void testOpenCloseUpdateSettings() throws Exception { .put("index.refresh_interval", -1) // this one can change .put("index.fielddata.cache", "none") ) // this one can't - .execute() - .actionGet() + .get() ); expectThrows( IllegalArgumentException.class, @@ -330,10 +306,9 @@ 
public void testOpenCloseUpdateSettings() throws Exception { .put("index.refresh_interval", -1) // this one can change .put("index.final", "no") ) // this one can't - .execute() - .actionGet() + .get() ); - IndexMetadata indexMetadata = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test"); + IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertThat(indexMetadata.getSettings().get("index.refresh_interval"), nullValue()); assertThat(indexMetadata.getSettings().get("index.fielddata.cache"), nullValue()); assertThat(indexMetadata.getSettings().get("index.final"), nullValue()); @@ -346,10 +321,9 @@ public void testOpenCloseUpdateSettings() throws Exception { indicesAdmin().prepareUpdateSettings("test") .setSettings(Settings.builder().put("index.refresh_interval", -1)) // this one can change - .execute() - .actionGet(); + .get(); - indexMetadata = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test"); + indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertThat(indexMetadata.getSettings().get("index.refresh_interval"), equalTo("-1")); // Now verify via dedicated get settings api: getSettingsResponse = indicesAdmin().prepareGetSettings("test").get(); @@ -362,18 +336,14 @@ public void testOpenCloseUpdateSettings() throws Exception { .setTimeout("30s") .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() - .execute() - .actionGet(); + .get(); assertThat(health.isTimedOut(), equalTo(false)); - indicesAdmin().prepareClose("test").execute().actionGet(); + indicesAdmin().prepareClose("test").get(); - indicesAdmin().prepareUpdateSettings("test") - .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) - .execute() - .actionGet(); + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)).get(); - indexMetadata = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test"); + indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertThat(indexMetadata.getNumberOfReplicas(), equalTo(1)); indicesAdmin().prepareUpdateSettings("test") @@ -382,10 +352,9 @@ public void testOpenCloseUpdateSettings() throws Exception { .put("index.refresh_interval", "1s") // this one can change .put("index.fielddata.cache", "none") ) // this one can't - .execute() - .actionGet(); + .get(); - indexMetadata = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test"); + indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertThat(indexMetadata.getSettings().get("index.refresh_interval"), equalTo("1s")); assertThat(indexMetadata.getSettings().get("index.fielddata.cache"), equalTo("none")); @@ -397,11 +366,10 @@ public void testOpenCloseUpdateSettings() throws Exception { .put("index.refresh_interval", -1) // this one can change .put("index.final", "no") ) // this one really can't - .execute() - .actionGet() + .get() ); assertThat(ex.getMessage(), containsString("final test setting [index.final], not updateable")); - indexMetadata = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test"); + indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertThat(indexMetadata.getSettings().get("index.refresh_interval"), equalTo("1s")); 
assertThat(indexMetadata.getSettings().get("index.final"), nullValue()); @@ -413,11 +381,11 @@ public void testOpenCloseUpdateSettings() throws Exception { public void testEngineGCDeletesSetting() throws Exception { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("f", 1).setVersionType(VersionType.EXTERNAL).setVersion(1).get(); + prepareIndex("test").setId("1").setSource("f", 1).setVersionType(VersionType.EXTERNAL).setVersion(1).get(); client().prepareDelete("test", "1").setVersionType(VersionType.EXTERNAL).setVersion(2).get(); // delete is still in cache this should fail assertRequestBuilderThrows( - client().prepareIndex("test").setId("1").setSource("f", 3).setVersionType(VersionType.EXTERNAL).setVersion(1), + prepareIndex("test").setId("1").setSource("f", 3).setVersionType(VersionType.EXTERNAL).setVersion(1), VersionConflictEngineException.class ); @@ -432,7 +400,7 @@ public void testEngineGCDeletesSetting() throws Exception { } // delete should not be in cache - client().prepareIndex("test").setId("1").setSource("f", 2).setVersionType(VersionType.EXTERNAL).setVersion(1); + prepareIndex("test").setId("1").setSource("f", 2).setVersionType(VersionType.EXTERNAL).setVersion(1); } public void testUpdateSettingsWithBlocks() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java index 433ddab21c34d..2b07f36551279 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java @@ -47,7 +47,7 @@ public void testCloseAllRequiresName() { } private void assertIndexIsClosed(String... 
indices) { - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().execute().actionGet(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().get(); for (String index : indices) { IndexMetadata indexMetadata = clusterStateResponse.getState().metadata().indices().get(index); assertNotNull(indexMetadata); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java index 91425067bd817..2ef7dc560b768 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java @@ -123,9 +123,7 @@ public void testCloseIndex() throws Exception { randomBoolean(), false, randomBoolean(), - IntStream.range(0, nbDocs) - .mapToObj(i -> client().prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)) - .collect(toList()) + IntStream.range(0, nbDocs).mapToObj(i -> prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)).collect(toList()) ); assertBusy(() -> closeIndices(indexName)); @@ -145,7 +143,7 @@ public void testCloseAlreadyClosedIndex() throws Exception { false, randomBoolean(), IntStream.range(0, randomIntBetween(1, 10)) - .mapToObj(i -> client().prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)) + .mapToObj(i -> prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)) .collect(toList()) ); } @@ -187,9 +185,7 @@ public void testConcurrentClose() throws InterruptedException { randomBoolean(), false, randomBoolean(), - IntStream.range(0, nbDocs) - .mapToObj(i -> client().prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)) - .collect(toList()) + IntStream.range(0, nbDocs).mapToObj(i -> prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)).collect(toList()) ); ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(indexName) @@ -258,7 +254,7 @@ public void testCloseWhileDeletingIndices() throws Exception { false, randomBoolean(), IntStream.range(0, 10) - .mapToObj(n -> client().prepareIndex(indexName).setId(String.valueOf(n)).setSource("num", n)) + .mapToObj(n -> prepareIndex(indexName).setId(String.valueOf(n)).setSource("num", n)) .collect(toList()) ); } @@ -361,9 +357,7 @@ public void testCloseIndexWaitForActiveShards() throws Exception { randomBoolean(), false, randomBoolean(), - IntStream.range(0, nbDocs) - .mapToObj(i -> client().prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)) - .collect(toList()) + IntStream.range(0, nbDocs).mapToObj(i -> prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)).collect(toList()) ); ensureGreen(indexName); @@ -388,9 +382,7 @@ public void testNoopPeerRecoveriesWhenIndexClosed() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, randomIntBetween(0, 50)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)) - .collect(toList()) + IntStream.range(0, randomIntBetween(0, 50)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); ensureGreen(indexName); @@ -425,9 +417,7 @@ public void testRecoverExistingReplica() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, randomIntBetween(0, 50)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)) - .collect(toList()) + IntStream.range(0, randomIntBetween(0, 
50)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); ensureGreen(indexName); indicesAdmin().prepareFlush(indexName).get(); @@ -468,9 +458,7 @@ public void testRelocatedClosedIndexIssue() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, randomIntBetween(0, 50)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)) - .collect(toList()) + IntStream.range(0, randomIntBetween(0, 50)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); assertAcked(indicesAdmin().prepareClose(indexName)); // move single shard to second node @@ -489,9 +477,7 @@ public void testResyncPropagatePrimaryTerm() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, randomIntBetween(0, 50)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)) - .collect(toList()) + IntStream.range(0, randomIntBetween(0, 50)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); ensureGreen(indexName); assertAcked(indicesAdmin().prepareClose(indexName)); @@ -520,9 +506,7 @@ public void testSearcherId() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, randomIntBetween(0, 50)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)) - .collect(toList()) + IntStream.range(0, randomIntBetween(0, 50)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); ensureGreen(indexName); assertAcked(indicesAdmin().prepareClose(indexName)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java index 53d3e62109536..77d38410d1ea9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java @@ -94,7 +94,7 @@ public void testCloseWhileRelocatingShards() throws Exception { createIndex(indexName); indexRandom( randomBoolean(), - IntStream.range(0, nbDocs).mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).toList() + IntStream.range(0, nbDocs).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).toList() ); } default -> { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java index 1ce0c0985b704..021515eb4cbcc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; @@ -39,8 +38,7 @@ import static org.elasticsearch.indices.state.CloseIndexIT.assertIndexIsOpened; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -50,47 +48,43 @@ public class OpenCloseIndexIT extends ESIntegTestCase { public void testSimpleCloseOpen() { Client client = client(); createIndex("test1"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("test1").execute().actionGet(); + AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("test1").get(); assertThat(closeIndexResponse.isAcknowledged(), equalTo(true)); assertIndexIsClosed("test1"); - OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test1").execute().actionGet(); + OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test1").get(); assertThat(openIndexResponse.isAcknowledged(), equalTo(true)); assertThat(openIndexResponse.isShardsAcknowledged(), equalTo(true)); assertIndexIsOpened("test1"); } public void testSimpleOpenMissingIndex() { - Exception e = expectThrows(IndexNotFoundException.class, () -> indicesAdmin().prepareOpen("test1").execute().actionGet()); + Exception e = expectThrows(IndexNotFoundException.class, () -> indicesAdmin().prepareOpen("test1").get()); assertThat(e.getMessage(), is("no such index [test1]")); } public void testOpenOneMissingIndex() { Client client = client(); createIndex("test1"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - Exception e = expectThrows( - IndexNotFoundException.class, - () -> client.admin().indices().prepareOpen("test1", "test2").execute().actionGet() - ); + Exception e = expectThrows(IndexNotFoundException.class, () -> client.admin().indices().prepareOpen("test1", "test2").get()); assertThat(e.getMessage(), is("no such index [test2]")); } public void testOpenOneMissingIndexIgnoreMissing() { Client client = client(); createIndex("test1"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); OpenIndexResponse openIndexResponse = client.admin() .indices() .prepareOpen("test1", "test2") .setIndicesOptions(IndicesOptions.lenientExpandOpen()) - .execute() - .actionGet(); + .get(); assertThat(openIndexResponse.isAcknowledged(), equalTo(true)); assertThat(openIndexResponse.isShardsAcknowledged(), equalTo(true)); assertIndexIsOpened("test1"); @@ -99,20 +93,20 @@ public void testOpenOneMissingIndexIgnoreMissing() { public void testCloseOpenMultipleIndices() { Client 
client = client(); createIndex("test1", "test2", "test3"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - AcknowledgedResponse closeIndexResponse1 = client.admin().indices().prepareClose("test1").execute().actionGet(); + AcknowledgedResponse closeIndexResponse1 = client.admin().indices().prepareClose("test1").get(); assertThat(closeIndexResponse1.isAcknowledged(), equalTo(true)); - AcknowledgedResponse closeIndexResponse2 = client.admin().indices().prepareClose("test2").execute().actionGet(); + AcknowledgedResponse closeIndexResponse2 = client.admin().indices().prepareClose("test2").get(); assertThat(closeIndexResponse2.isAcknowledged(), equalTo(true)); assertIndexIsClosed("test1", "test2"); assertIndexIsOpened("test3"); - OpenIndexResponse openIndexResponse1 = client.admin().indices().prepareOpen("test1").execute().actionGet(); + OpenIndexResponse openIndexResponse1 = client.admin().indices().prepareOpen("test1").get(); assertThat(openIndexResponse1.isAcknowledged(), equalTo(true)); assertThat(openIndexResponse1.isShardsAcknowledged(), equalTo(true)); - OpenIndexResponse openIndexResponse2 = client.admin().indices().prepareOpen("test2").execute().actionGet(); + OpenIndexResponse openIndexResponse2 = client.admin().indices().prepareOpen("test2").get(); assertThat(openIndexResponse2.isAcknowledged(), equalTo(true)); assertThat(openIndexResponse2.isShardsAcknowledged(), equalTo(true)); assertIndexIsOpened("test1", "test2", "test3"); @@ -121,15 +115,15 @@ public void testCloseOpenMultipleIndices() { public void testCloseOpenWildcard() { Client client = client(); createIndex("test1", "test2", "a"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("test*").execute().actionGet(); + AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("test*").get(); assertThat(closeIndexResponse.isAcknowledged(), equalTo(true)); assertIndexIsClosed("test1", "test2"); assertIndexIsOpened("a"); - OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test*").execute().actionGet(); + OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test*").get(); assertThat(openIndexResponse.isAcknowledged(), equalTo(true)); assertThat(openIndexResponse.isShardsAcknowledged(), equalTo(true)); assertIndexIsOpened("test1", "test2", "a"); @@ -138,14 +132,14 @@ public void testCloseOpenWildcard() { public void testCloseOpenAll() { Client client = client(); createIndex("test1", "test2", "test3"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("_all").execute().actionGet(); + AcknowledgedResponse closeIndexResponse = 
client.admin().indices().prepareClose("_all").get(); assertThat(closeIndexResponse.isAcknowledged(), equalTo(true)); assertIndexIsClosed("test1", "test2", "test3"); - OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("_all").execute().actionGet(); + OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("_all").get(); assertThat(openIndexResponse.isAcknowledged(), equalTo(true)); assertThat(openIndexResponse.isShardsAcknowledged(), equalTo(true)); assertIndexIsOpened("test1", "test2", "test3"); @@ -154,40 +148,37 @@ public void testCloseOpenAll() { public void testCloseOpenAllWildcard() { Client client = client(); createIndex("test1", "test2", "test3"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("*").execute().actionGet(); + AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("*").get(); assertThat(closeIndexResponse.isAcknowledged(), equalTo(true)); assertIndexIsClosed("test1", "test2", "test3"); - OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("*").execute().actionGet(); + OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("*").get(); assertThat(openIndexResponse.isAcknowledged(), equalTo(true)); assertThat(openIndexResponse.isShardsAcknowledged(), equalTo(true)); assertIndexIsOpened("test1", "test2", "test3"); } public void testOpenNoIndex() { - Exception e = expectThrows(ActionRequestValidationException.class, () -> indicesAdmin().prepareOpen().execute().actionGet()); + Exception e = expectThrows(ActionRequestValidationException.class, () -> indicesAdmin().prepareOpen().get()); assertThat(e.getMessage(), containsString("index is missing")); } public void testOpenNullIndex() { - Exception e = expectThrows( - ActionRequestValidationException.class, - () -> indicesAdmin().prepareOpen((String[]) null).execute().actionGet() - ); + Exception e = expectThrows(ActionRequestValidationException.class, () -> indicesAdmin().prepareOpen((String[]) null).get()); assertThat(e.getMessage(), containsString("index is missing")); } public void testOpenAlreadyOpenedIndex() { Client client = client(); createIndex("test1"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); // no problem if we try to open an index that's already in open state - OpenIndexResponse openIndexResponse1 = client.admin().indices().prepareOpen("test1").execute().actionGet(); + OpenIndexResponse openIndexResponse1 = client.admin().indices().prepareOpen("test1").get(); assertThat(openIndexResponse1.isAcknowledged(), equalTo(true)); assertThat(openIndexResponse1.isShardsAcknowledged(), equalTo(true)); assertIndexIsOpened("test1"); @@ -196,22 +187,17 @@ public void testOpenAlreadyOpenedIndex() { public void testSimpleCloseOpenAlias() { Client client = client(); createIndex("test1"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); + ClusterHealthResponse 
healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - AcknowledgedResponse aliasesResponse = client.admin() - .indices() - .prepareAliases() - .addAlias("test1", "test1-alias") - .execute() - .actionGet(); + AcknowledgedResponse aliasesResponse = client.admin().indices().prepareAliases().addAlias("test1", "test1-alias").get(); assertThat(aliasesResponse.isAcknowledged(), equalTo(true)); - AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("test1-alias").execute().actionGet(); + AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("test1-alias").get(); assertThat(closeIndexResponse.isAcknowledged(), equalTo(true)); assertIndexIsClosed("test1"); - OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test1-alias").execute().actionGet(); + OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test1-alias").get(); assertThat(openIndexResponse.isAcknowledged(), equalTo(true)); assertThat(openIndexResponse.isShardsAcknowledged(), equalTo(true)); assertIndexIsOpened("test1"); @@ -220,29 +206,19 @@ public void testSimpleCloseOpenAlias() { public void testCloseOpenAliasMultipleIndices() { Client client = client(); createIndex("test1", "test2"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - AcknowledgedResponse aliasesResponse1 = client.admin() - .indices() - .prepareAliases() - .addAlias("test1", "test-alias") - .execute() - .actionGet(); + AcknowledgedResponse aliasesResponse1 = client.admin().indices().prepareAliases().addAlias("test1", "test-alias").get(); assertThat(aliasesResponse1.isAcknowledged(), equalTo(true)); - AcknowledgedResponse aliasesResponse2 = client.admin() - .indices() - .prepareAliases() - .addAlias("test2", "test-alias") - .execute() - .actionGet(); + AcknowledgedResponse aliasesResponse2 = client.admin().indices().prepareAliases().addAlias("test2", "test-alias").get(); assertThat(aliasesResponse2.isAcknowledged(), equalTo(true)); - AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("test-alias").execute().actionGet(); + AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("test-alias").get(); assertThat(closeIndexResponse.isAcknowledged(), equalTo(true)); assertIndexIsClosed("test1", "test2"); - OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test-alias").execute().actionGet(); + OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test-alias").get(); assertThat(openIndexResponse.isAcknowledged(), equalTo(true)); assertThat(openIndexResponse.isShardsAcknowledged(), equalTo(true)); assertIndexIsOpened("test1", "test2"); @@ -285,7 +261,7 @@ public void testOpenCloseWithDocs() throws IOException, ExecutionException, Inte int docs = between(10, 100); IndexRequestBuilder[] builder = new IndexRequestBuilder[docs]; for (int i = 0; i < docs; i++) { - builder[i] = client().prepareIndex("test").setId("" + i).setSource("test", "init"); + builder[i] = prepareIndex("test").setId("" + i).setSource("test", "init"); } indexRandom(true, builder); if (randomBoolean()) { @@ -296,9 +272,7 @@ public void testOpenCloseWithDocs() throws IOException, 
ExecutionException, Inte // check the index still contains the records that we indexed indicesAdmin().prepareOpen("test").execute().get(); ensureGreen(); - SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("test", "init")).get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, docs); + assertHitCountAndNoFailures(prepareSearch().setQuery(QueryBuilders.matchQuery("test", "init")), docs); } public void testOpenCloseIndexWithBlocks() { @@ -307,7 +281,7 @@ public void testOpenCloseIndexWithBlocks() { int docs = between(10, 100); for (int i = 0; i < docs; i++) { - client().prepareIndex("test").setId("" + i).setSource("test", "init").execute().actionGet(); + prepareIndex("test").setId("" + i).setSource("test", "init").get(); } for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { @@ -315,12 +289,12 @@ public void testOpenCloseIndexWithBlocks() { enableIndexBlock("test", blockSetting); // Closing an index is not blocked - AcknowledgedResponse closeIndexResponse = indicesAdmin().prepareClose("test").execute().actionGet(); + AcknowledgedResponse closeIndexResponse = indicesAdmin().prepareClose("test").get(); assertAcked(closeIndexResponse); assertIndexIsClosed("test"); // Opening an index is not blocked - OpenIndexResponse openIndexResponse = indicesAdmin().prepareOpen("test").execute().actionGet(); + OpenIndexResponse openIndexResponse = indicesAdmin().prepareOpen("test").get(); assertAcked(openIndexResponse); assertThat(openIndexResponse.isShardsAcknowledged(), equalTo(true)); assertIndexIsOpened("test"); @@ -340,7 +314,7 @@ public void testOpenCloseIndexWithBlocks() { } } - AcknowledgedResponse closeIndexResponse = indicesAdmin().prepareClose("test").execute().actionGet(); + AcknowledgedResponse closeIndexResponse = indicesAdmin().prepareClose("test").get(); assertAcked(closeIndexResponse); assertIndexIsClosed("test"); @@ -363,7 +337,7 @@ public void testTranslogStats() throws Exception { final int nbDocs = randomIntBetween(0, 50); int uncommittedOps = 0; for (long i = 0; i < nbDocs; i++) { - final DocWriteResponse indexResponse = client().prepareIndex(indexName).setId(Long.toString(i)).setSource("field", i).get(); + final DocWriteResponse indexResponse = prepareIndex(indexName).setId(Long.toString(i)).setSource("field", i).get(); assertThat(indexResponse.status(), is(RestStatus.CREATED)); if (rarely()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java index dd22f50ab420b..b5448498f0ce9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java @@ -43,7 +43,7 @@ public void testSimpleOpenClose() { ); logger.info("--> indexing a simple document"); - client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); logger.info("--> closing test index..."); assertAcked(indicesAdmin().prepareClose("test")); @@ -54,7 +54,7 @@ public void testSimpleOpenClose() { logger.info("--> trying to index into a closed index ..."); try { - client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); fail(); } catch (IndexClosedException e) { // all is well @@ -76,7 +76,7 
@@ public void testSimpleOpenClose() { ); logger.info("--> indexing a simple document"); - client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); } public void testFastCloseAfterCreateContinuesCreateAfterOpen() { @@ -111,7 +111,7 @@ public void testFastCloseAfterCreateContinuesCreateAfterOpen() { ); logger.info("--> indexing a simple document"); - client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); } public void testConsistencyAfterIndexCreationFailure() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java index a5a9ca2862a0e..ec62a1cbbd9bf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -133,11 +133,11 @@ public void testFieldDataStats() { .setMapping("field", "type=text,fielddata=true", "field2", "type=text,fielddata=true") ); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("field", "value1", "field2", "value1").execute().actionGet(); - client().prepareIndex("test").setId("2").setSource("field", "value2", "field2", "value2").execute().actionGet(); - indicesAdmin().prepareRefresh().execute().actionGet(); + prepareIndex("test").setId("1").setSource("field", "value1", "field2", "value1").get(); + prepareIndex("test").setId("2").setSource("field", "value2", "field2", "value2").get(); + indicesAdmin().prepareRefresh().get(); - NodesStatsResponse nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).execute().actionGet(); + NodesStatsResponse nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).get(); assertThat( nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes() .get(1) @@ -146,14 +146,14 @@ public void testFieldDataStats() { .getMemorySizeInBytes(), equalTo(0L) ); - IndicesStatsResponse indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).execute().actionGet(); + IndicesStatsResponse indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).get(); assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0L)); // sort to load it to field data... - prepareSearch().addSort("field", SortOrder.ASC).execute().actionGet(); - prepareSearch().addSort("field", SortOrder.ASC).execute().actionGet(); + prepareSearch().addSort("field", SortOrder.ASC).get(); + prepareSearch().addSort("field", SortOrder.ASC).get(); - nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).execute().actionGet(); + nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).get(); assertThat( nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes() .get(1) @@ -162,18 +162,17 @@ public void testFieldDataStats() { .getMemorySizeInBytes(), greaterThan(0L) ); - indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).execute().actionGet(); + indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).get(); assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0L)); // sort to load it to field data... 
- prepareSearch().addSort("field2", SortOrder.ASC).execute().actionGet(); - prepareSearch().addSort("field2", SortOrder.ASC).execute().actionGet(); + prepareSearch().addSort("field2", SortOrder.ASC).get(); + prepareSearch().addSort("field2", SortOrder.ASC).get(); // now check the per field stats nodesStats = clusterAdmin().prepareNodesStats("data:true") .setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.FieldData, true).fieldDataFields("*")) - .execute() - .actionGet(); + .get(); assertThat( nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes() .get(1) @@ -207,7 +206,7 @@ public void testFieldDataStats() { ) ); - indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).setFieldDataFields("*").execute().actionGet(); + indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).setFieldDataFields("*").get(); assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0L)); assertThat(indicesStats.getTotal().getFieldData().getFields().get("field"), greaterThan(0L)); assertThat( @@ -215,8 +214,8 @@ public void testFieldDataStats() { lessThan(indicesStats.getTotal().getFieldData().getMemorySizeInBytes()) ); - indicesAdmin().prepareClearCache().setFieldDataCache(true).execute().actionGet(); - nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).execute().actionGet(); + indicesAdmin().prepareClearCache().setFieldDataCache(true).get(); + nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).get(); assertThat( nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes() .get(1) @@ -225,7 +224,7 @@ public void testFieldDataStats() { .getMemorySizeInBytes(), equalTo(0L) ); - indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).execute().actionGet(); + indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).get(); assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0L)); } @@ -237,12 +236,12 @@ public void testClearAllCaches() throws Exception { .setMapping("field", "type=text,fielddata=true") ); ensureGreen(); - clusterAdmin().prepareHealth().setWaitForGreenStatus().execute().actionGet(); - client().prepareIndex("test").setId("1").setSource("field", "value1").execute().actionGet(); - client().prepareIndex("test").setId("2").setSource("field", "value2").execute().actionGet(); - indicesAdmin().prepareRefresh().execute().actionGet(); + clusterAdmin().prepareHealth().setWaitForGreenStatus().get(); + prepareIndex("test").setId("1").setSource("field", "value1").get(); + prepareIndex("test").setId("2").setSource("field", "value2").get(); + indicesAdmin().prepareRefresh().get(); - NodesStatsResponse nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).execute().actionGet(); + NodesStatsResponse nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).get(); assertThat( nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes() .get(1) @@ -260,20 +259,15 @@ public void testClearAllCaches() throws Exception { equalTo(0L) ); - IndicesStatsResponse indicesStats = indicesAdmin().prepareStats("test") - .clear() - .setFieldData(true) - .setQueryCache(true) - .execute() - .actionGet(); + IndicesStatsResponse indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).setQueryCache(true).get(); 
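[Editor's note, not part of this PR: the bulk of the changes in these test files swap ".execute().actionGet()" for the equivalent ".get()" shorthand on request builders. A minimal sketch of why the two are interchangeable, assuming a builder shaped like the one below; the class body is illustrative, not copied from the Elasticsearch sources.]

    public abstract class ActionRequestBuilder<Request extends ActionRequest, Response extends ActionResponse> {
        protected final ElasticsearchClient client;
        protected final ActionType<Response> action;
        protected final Request request;

        protected ActionRequestBuilder(ElasticsearchClient client, ActionType<Response> action, Request request) {
            this.client = client;
            this.action = action;
            this.request = request;
        }

        // dispatch the request asynchronously and hand back a future
        public ActionFuture<Response> execute() {
            return client.execute(action, request);
        }

        // shorthand: dispatch and block on that same future in one call
        public Response get() {
            return execute().actionGet();
        }
    }

[So "builder.execute().actionGet()" and "builder.get()" block on the same future; the refactor is behavior-preserving.]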
assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0L)); assertThat(indicesStats.getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0L)); // sort to load it to field data and filter to load filter cache - prepareSearch().setPostFilter(QueryBuilders.termQuery("field", "value1")).addSort("field", SortOrder.ASC).execute().actionGet(); - prepareSearch().setPostFilter(QueryBuilders.termQuery("field", "value2")).addSort("field", SortOrder.ASC).execute().actionGet(); + prepareSearch().setPostFilter(QueryBuilders.termQuery("field", "value1")).addSort("field", SortOrder.ASC).get(); + prepareSearch().setPostFilter(QueryBuilders.termQuery("field", "value2")).addSort("field", SortOrder.ASC).get(); - nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).execute().actionGet(); + nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).get(); assertThat( nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes() .get(1) @@ -291,13 +285,13 @@ public void testClearAllCaches() throws Exception { greaterThan(0L) ); - indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).setQueryCache(true).execute().actionGet(); + indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).setQueryCache(true).get(); assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0L)); assertThat(indicesStats.getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0L)); - indicesAdmin().prepareClearCache().execute().actionGet(); + indicesAdmin().prepareClearCache().get(); Thread.sleep(100); // Make sure the filter cache entries have been removed... - nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).execute().actionGet(); + nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).get(); assertThat( nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes() .get(1) @@ -315,7 +309,7 @@ public void testClearAllCaches() throws Exception { equalTo(0L) ); - indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).setQueryCache(true).execute().actionGet(); + indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).setQueryCache(true).get(); assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0L)); assertThat(indicesStats.getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0L)); } @@ -335,8 +329,7 @@ public void testQueryCache() throws Exception { while (true) { IndexRequestBuilder[] builders = new IndexRequestBuilder[pageDocs]; for (int i = 0; i < pageDocs; ++i) { - builders[i] = client().prepareIndex("idx") - .setId(Integer.toString(counter++)) + builders[i] = prepareIndex("idx").setId(Integer.toString(counter++)) .setSource(jsonBuilder().startObject().field("common", "field").field("str_value", "s" + i).endObject()); } indexRandom(true, builders); @@ -383,8 +376,7 @@ public void testQueryCache() throws Exception { // index the data again... 
IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; ++i) { - builders[i] = client().prepareIndex("idx") - .setId(Integer.toString(i)) + builders[i] = prepareIndex("idx").setId(Integer.toString(i)) .setSource(jsonBuilder().startObject().field("common", "field").field("str_value", "s" + i).endObject()); } indexRandom(true, builders); @@ -494,13 +486,13 @@ public void testNonThrottleStats() throws Exception { sb.append(termUpto++); sb.append(" some random text that keeps repeating over and over again hambone"); } - client().prepareIndex("test").setId("" + termUpto).setSource("field" + (i % 10), sb.toString()).get(); + prepareIndex("test").setId("" + termUpto).setSource("field" + (i % 10), sb.toString()).get(); } refresh(); - stats = indicesAdmin().prepareStats().execute().actionGet(); + stats = indicesAdmin().prepareStats().get(); // nodesStats = clusterAdmin().prepareNodesStats().setIndices(true).get(); - stats = indicesAdmin().prepareStats().execute().actionGet(); + stats = indicesAdmin().prepareStats().get(); assertThat(stats.getPrimaries().getIndexing().getTotal().getThrottleTime().millis(), equalTo(0L)); } @@ -530,13 +522,13 @@ public void testThrottleStats() throws Exception { sb.append(' '); sb.append(termUpto++); } - client().prepareIndex("test").setId("" + termUpto).setSource("field" + (i % 10), sb.toString()).get(); + prepareIndex("test").setId("" + termUpto).setSource("field" + (i % 10), sb.toString()).get(); if (i % 2 == 0) { refresh(); } } refresh(); - stats = indicesAdmin().prepareStats().execute().actionGet(); + stats = indicesAdmin().prepareStats().get(); // nodesStats = clusterAdmin().prepareNodesStats().setIndices(true).get(); done = stats.getPrimaries().getIndexing().getTotal().getThrottleTime().millis() > 0; if (System.currentTimeMillis() - start > 300 * 1000) { // Wait 5 minutes for throttling to kick in @@ -556,9 +548,9 @@ public void testSimpleStats() throws Exception { createIndex("test1", "test2"); ensureGreen(); - client().prepareIndex("test1").setId(Integer.toString(1)).setSource("field", "value").execute().actionGet(); - client().prepareIndex("test1").setId(Integer.toString(2)).setSource("field", "value").execute().actionGet(); - client().prepareIndex("test2").setId(Integer.toString(1)).setSource("field", "value").execute().actionGet(); + prepareIndex("test1").setId(Integer.toString(1)).setSource("field", "value").get(); + prepareIndex("test1").setId(Integer.toString(2)).setSource("field", "value").get(); + prepareIndex("test2").setId(Integer.toString(1)).setSource("field", "value").get(); refresh(); NumShards test1 = getNumShards("test1"); @@ -567,7 +559,7 @@ public void testSimpleStats() throws Exception { long test2ExpectedWrites = test2.dataCopies; long totalExpectedWrites = test1ExpectedWrites + test2ExpectedWrites; - IndicesStatsResponse stats = indicesAdmin().prepareStats().execute().actionGet(); + IndicesStatsResponse stats = indicesAdmin().prepareStats().get(); assertThat(stats.getPrimaries().getDocs().getCount(), equalTo(3L)); assertThat(stats.getTotal().getDocs().getCount(), equalTo(totalExpectedWrites)); assertThat(stats.getPrimaries().getIndexing().getTotal().getIndexCount(), equalTo(3L)); @@ -601,7 +593,7 @@ public void testSimpleStats() throws Exception { assertThat(stats.getIndex("test2").getTotal().getSearch().getTotal().getQueryCurrent(), equalTo(0L)); // check flags - stats = indicesAdmin().prepareStats().clear().setFlush(true).setRefresh(true).setMerge(true).execute().actionGet(); + stats = 
indicesAdmin().prepareStats().clear().setFlush(true).setRefresh(true).setMerge(true).get(); assertThat(stats.getTotal().getDocs(), nullValue()); assertThat(stats.getTotal().getStore(), nullValue()); @@ -611,19 +603,19 @@ public void testSimpleStats() throws Exception { assertThat(stats.getTotal().getRefresh(), notNullValue()); // check get - GetResponse getResponse = client().prepareGet("test2", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test2", "1").get(); assertThat(getResponse.isExists(), equalTo(true)); - stats = indicesAdmin().prepareStats().execute().actionGet(); + stats = indicesAdmin().prepareStats().get(); assertThat(stats.getTotal().getGet().getCount(), equalTo(1L)); assertThat(stats.getTotal().getGet().getExistsCount(), equalTo(1L)); assertThat(stats.getTotal().getGet().getMissingCount(), equalTo(0L)); // missing get - getResponse = client().prepareGet("test2", "2").execute().actionGet(); + getResponse = client().prepareGet("test2", "2").get(); assertThat(getResponse.isExists(), equalTo(false)); - stats = indicesAdmin().prepareStats().execute().actionGet(); + stats = indicesAdmin().prepareStats().get(); assertThat(stats.getTotal().getGet().getCount(), equalTo(2L)); assertThat(stats.getTotal().getGet().getExistsCount(), equalTo(1L)); assertThat(stats.getTotal().getGet().getMissingCount(), equalTo(1L)); @@ -637,8 +629,7 @@ public void testSimpleStats() throws Exception { .setRefresh(true) .setMerge(true) .clear() // reset defaults - .execute() - .actionGet(); + .get(); assertThat(stats.getTotal().getDocs(), nullValue()); assertThat(stats.getTotal().getStore(), nullValue()); @@ -648,37 +639,31 @@ public void testSimpleStats() throws Exception { // index failed try { - client().prepareIndex("test1") - .setId(Integer.toString(1)) + prepareIndex("test1").setId(Integer.toString(1)) .setSource("field", "value") .setVersion(1) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); fail("Expected a version conflict"); } catch (VersionConflictEngineException e) {} try { - client().prepareIndex("test2") - .setId(Integer.toString(1)) + prepareIndex("test2").setId(Integer.toString(1)) .setSource("field", "value") .setVersion(1) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); fail("Expected a version conflict"); } catch (VersionConflictEngineException e) {} try { - client().prepareIndex("test2") - .setId(Integer.toString(1)) + prepareIndex("test2").setId(Integer.toString(1)) .setSource("field", "value") .setVersion(1) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); fail("Expected a version conflict"); } catch (VersionConflictEngineException e) {} - stats = indicesAdmin().prepareStats().execute().actionGet(); + stats = indicesAdmin().prepareStats().get(); assertThat(stats.getIndex("test2").getPrimaries().getIndexing().getTotal().getIndexFailedCount(), equalTo(2L)); assertThat(stats.getPrimaries().getIndexing().getTotal().getIndexFailedCount(), equalTo(3L)); } @@ -697,8 +682,7 @@ public void testMergeStats() { .setRefresh(true) .setMerge(true) .clear() // reset defaults - .execute() - .actionGet(); + .get(); assertThat(stats.getTotal().getDocs(), nullValue()); assertThat(stats.getTotal().getStore(), nullValue()); @@ -707,11 +691,11 @@ public void testMergeStats() { assertThat(stats.getTotal().getSearch(), nullValue()); for (int i = 0; i < 20; i++) { - client().prepareIndex("test_index").setId(Integer.toString(i)).setSource("field", "value").execute().actionGet(); - 
indicesAdmin().prepareFlush().execute().actionGet(); + prepareIndex("test_index").setId(Integer.toString(i)).setSource("field", "value").get(); + indicesAdmin().prepareFlush().get(); } - indicesAdmin().prepareForceMerge().setMaxNumSegments(1).execute().actionGet(); - stats = indicesAdmin().prepareStats().setMerge(true).execute().actionGet(); + indicesAdmin().prepareForceMerge().setMaxNumSegments(1).get(); + stats = indicesAdmin().prepareStats().setMerge(true).get(); assertThat(stats.getTotal().getMerge(), notNullValue()); assertThat(stats.getTotal().getMerge().getTotal(), greaterThan(0L)); @@ -738,7 +722,7 @@ public void testSegmentsStats() { assertThat(stats.getTotal().getSegments().getVersionMapMemoryInBytes(), greaterThan(0L)); indicesAdmin().prepareFlush().get(); - indicesAdmin().prepareForceMerge().setMaxNumSegments(1).execute().actionGet(); + indicesAdmin().prepareForceMerge().setMaxNumSegments(1).get(); indicesAdmin().prepareRefresh().get(); final boolean includeSegmentFileSizes = randomBoolean(); @@ -765,18 +749,18 @@ public void testAllFlags() throws Exception { ensureGreen(); - client().prepareIndex("test_index").setId(Integer.toString(1)).setSource("field", "value").execute().actionGet(); - client().prepareIndex("test_index").setId(Integer.toString(2)).setSource("field", "value").execute().actionGet(); - client().prepareIndex("test_index_2").setId(Integer.toString(1)).setSource("field", "value").execute().actionGet(); + prepareIndex("test_index").setId(Integer.toString(1)).setSource("field", "value").get(); + prepareIndex("test_index").setId(Integer.toString(2)).setSource("field", "value").get(); + prepareIndex("test_index_2").setId(Integer.toString(1)).setSource("field", "value").get(); - indicesAdmin().prepareRefresh().execute().actionGet(); + indicesAdmin().prepareRefresh().get(); IndicesStatsRequestBuilder builder = indicesAdmin().prepareStats(); Flag[] values = CommonStatsFlags.SHARD_LEVEL.getFlags(); for (Flag flag : values) { set(flag, builder, false); } - IndicesStatsResponse stats = builder.execute().actionGet(); + IndicesStatsResponse stats = builder.get(); for (Flag flag : values) { assertThat(isSet(flag, stats.getPrimaries()), equalTo(false)); assertThat(isSet(flag, stats.getTotal()), equalTo(false)); @@ -785,7 +769,7 @@ public void testAllFlags() throws Exception { for (Flag flag : values) { set(flag, builder, true); } - stats = builder.execute().actionGet(); + stats = builder.get(); for (Flag flag : values) { assertThat(isSet(flag, stats.getPrimaries()), equalTo(true)); assertThat(isSet(flag, stats.getTotal()), equalTo(true)); @@ -805,7 +789,7 @@ public void testAllFlags() throws Exception { for (Flag flag : flags) { // set the flags set(flag, builder, true); } - stats = builder.execute().actionGet(); + stats = builder.get(); for (Flag flag : flags) { // check the flags assertThat(isSet(flag, stats.getPrimaries()), equalTo(true)); assertThat(isSet(flag, stats.getTotal()), equalTo(true)); @@ -896,35 +880,35 @@ public void testMultiIndex() throws Exception { ensureGreen(); - client().prepareIndex("test1").setId(Integer.toString(1)).setSource("field", "value").execute().actionGet(); - client().prepareIndex("test1").setId(Integer.toString(2)).setSource("field", "value").execute().actionGet(); - client().prepareIndex("test2").setId(Integer.toString(1)).setSource("field", "value").execute().actionGet(); + prepareIndex("test1").setId(Integer.toString(1)).setSource("field", "value").get(); + prepareIndex("test1").setId(Integer.toString(2)).setSource("field", 
"value").get(); + prepareIndex("test2").setId(Integer.toString(1)).setSource("field", "value").get(); refresh(); int numShards1 = getNumShards("test1").totalNumShards; int numShards2 = getNumShards("test2").totalNumShards; IndicesStatsRequestBuilder builder = indicesAdmin().prepareStats(); - IndicesStatsResponse stats = builder.execute().actionGet(); + IndicesStatsResponse stats = builder.get(); assertThat(stats.getTotalShards(), equalTo(numShards1 + numShards2)); - stats = builder.setIndices("_all").execute().actionGet(); + stats = builder.setIndices("_all").get(); assertThat(stats.getTotalShards(), equalTo(numShards1 + numShards2)); - stats = builder.setIndices("_all").execute().actionGet(); + stats = builder.setIndices("_all").get(); assertThat(stats.getTotalShards(), equalTo(numShards1 + numShards2)); - stats = builder.setIndices("*").execute().actionGet(); + stats = builder.setIndices("*").get(); assertThat(stats.getTotalShards(), equalTo(numShards1 + numShards2)); - stats = builder.setIndices("test1").execute().actionGet(); + stats = builder.setIndices("test1").get(); assertThat(stats.getTotalShards(), equalTo(numShards1)); - stats = builder.setIndices("test1", "test2").execute().actionGet(); + stats = builder.setIndices("test1", "test2").get(); assertThat(stats.getTotalShards(), equalTo(numShards1 + numShards2)); - stats = builder.setIndices("*2").execute().actionGet(); + stats = builder.setIndices("*2").get(); assertThat(stats.getTotalShards(), equalTo(numShards2)); } @@ -953,37 +937,37 @@ public void testCompletionFieldsParam() throws Exception { }""")); ensureGreen(); - client().prepareIndex("test1").setId(Integer.toString(1)).setSource(""" + prepareIndex("test1").setId(Integer.toString(1)).setSource(""" {"bar":"bar","baz":"baz"}""", XContentType.JSON).get(); refresh(); IndicesStatsRequestBuilder builder = indicesAdmin().prepareStats(); - IndicesStatsResponse stats = builder.execute().actionGet(); + IndicesStatsResponse stats = builder.get(); assertThat(stats.getTotal().completion.getSizeInBytes(), greaterThan(0L)); assertThat(stats.getTotal().completion.getFields(), is(nullValue())); - stats = builder.setCompletionFields("bar.completion").execute().actionGet(); + stats = builder.setCompletionFields("bar.completion").get(); assertThat(stats.getTotal().completion.getSizeInBytes(), greaterThan(0L)); assertThat(stats.getTotal().completion.getFields().containsField("bar.completion"), is(true)); assertThat(stats.getTotal().completion.getFields().get("bar.completion"), greaterThan(0L)); assertThat(stats.getTotal().completion.getFields().containsField("baz.completion"), is(false)); - stats = builder.setCompletionFields("bar.completion", "baz.completion").execute().actionGet(); + stats = builder.setCompletionFields("bar.completion", "baz.completion").get(); assertThat(stats.getTotal().completion.getSizeInBytes(), greaterThan(0L)); assertThat(stats.getTotal().completion.getFields().containsField("bar.completion"), is(true)); assertThat(stats.getTotal().completion.getFields().get("bar.completion"), greaterThan(0L)); assertThat(stats.getTotal().completion.getFields().containsField("baz.completion"), is(true)); assertThat(stats.getTotal().completion.getFields().get("baz.completion"), greaterThan(0L)); - stats = builder.setCompletionFields("*").execute().actionGet(); + stats = builder.setCompletionFields("*").get(); assertThat(stats.getTotal().completion.getSizeInBytes(), greaterThan(0L)); assertThat(stats.getTotal().completion.getFields().containsField("bar.completion"), is(true)); 
assertThat(stats.getTotal().completion.getFields().get("bar.completion"), greaterThan(0L)); assertThat(stats.getTotal().completion.getFields().containsField("baz.completion"), is(true)); assertThat(stats.getTotal().completion.getFields().get("baz.completion"), greaterThan(0L)); - stats = builder.setCompletionFields("*r*").execute().actionGet(); + stats = builder.setCompletionFields("*r*").get(); assertThat(stats.getTotal().completion.getSizeInBytes(), greaterThan(0L)); assertThat(stats.getTotal().completion.getFields().containsField("bar.completion"), is(true)); assertThat(stats.getTotal().completion.getFields().get("bar.completion"), greaterThan(0L)); @@ -996,30 +980,30 @@ public void testGroupsParam() throws Exception { ensureGreen(); - client().prepareIndex("test1").setId(Integer.toString(1)).setSource("foo", "bar").execute().actionGet(); + prepareIndex("test1").setId(Integer.toString(1)).setSource("foo", "bar").get(); refresh(); - prepareSearch("_all").setStats("bar", "baz").execute().actionGet(); + prepareSearch("_all").setStats("bar", "baz").get(); IndicesStatsRequestBuilder builder = indicesAdmin().prepareStats(); - IndicesStatsResponse stats = builder.execute().actionGet(); + IndicesStatsResponse stats = builder.get(); assertThat(stats.getTotal().search.getTotal().getQueryCount(), greaterThan(0L)); assertThat(stats.getTotal().search.getGroupStats(), is(nullValue())); - stats = builder.setGroups("bar").execute().actionGet(); + stats = builder.setGroups("bar").get(); assertThat(stats.getTotal().search.getGroupStats().get("bar").getQueryCount(), greaterThan(0L)); assertThat(stats.getTotal().search.getGroupStats().containsKey("baz"), is(false)); - stats = builder.setGroups("bar", "baz").execute().actionGet(); + stats = builder.setGroups("bar", "baz").get(); assertThat(stats.getTotal().search.getGroupStats().get("bar").getQueryCount(), greaterThan(0L)); assertThat(stats.getTotal().search.getGroupStats().get("baz").getQueryCount(), greaterThan(0L)); - stats = builder.setGroups("*").execute().actionGet(); + stats = builder.setGroups("*").get(); assertThat(stats.getTotal().search.getGroupStats().get("bar").getQueryCount(), greaterThan(0L)); assertThat(stats.getTotal().search.getGroupStats().get("baz").getQueryCount(), greaterThan(0L)); - stats = builder.setGroups("*r").execute().actionGet(); + stats = builder.setGroups("*r").get(); assertThat(stats.getTotal().search.getGroupStats().get("bar").getQueryCount(), greaterThan(0L)); assertThat(stats.getTotal().search.getGroupStats().containsKey("baz"), is(false)); @@ -1135,8 +1119,8 @@ public void testFilterCacheStats() throws Exception { indexRandom( false, true, - client().prepareIndex("index").setId("1").setSource("foo", "bar"), - client().prepareIndex("index").setId("2").setSource("foo", "baz") + prepareIndex("index").setId("1").setSource("foo", "bar"), + prepareIndex("index").setId("2").setSource("foo", "baz") ); persistGlobalCheckpoint("index"); // Need to persist the global checkpoint for the soft-deletes retention MP. 
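[Editor's note, not part of this PR: in the template-test hunks further down, stored locals like "SearchResponse searchResponse = prepareSearch(...).get()" give way to "assertResponse(builder, consumer)" from ElasticsearchAssertions (see the static import added below). A minimal sketch of that pattern, assuming the helper exists to scope the response to the assertion lambda and release it afterwards; the body is illustrative.]

    import java.util.function.Consumer;

    public static <R extends ActionResponse> void assertResponse(ActionRequestBuilder<?, R> builder, Consumer<R> consumer) {
        R response = builder.get();
        try {
            consumer.accept(response); // run the caller's assertions while the response is live
        } finally {
            response.decRef(); // release ref-counted responses (e.g. SearchResponse) promptly
        }
    }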
refresh(); @@ -1201,8 +1185,8 @@ public void testFilterCacheStats() throws Exception { indexRandom( true, - client().prepareIndex("index").setId("1").setSource("foo", "bar"), - client().prepareIndex("index").setId("2").setSource("foo", "baz") + prepareIndex("index").setId("1").setSource("foo", "bar"), + prepareIndex("index").setId("2").setSource("foo", "baz") ); assertBusy(() -> { @@ -1299,7 +1283,7 @@ public void testConcurrentIndexingAndStatsRequests() throws BrokenBarrierExcepti } while (stop.get() == false) { final String id = Integer.toString(idGenerator.incrementAndGet()); - final DocWriteResponse response = client().prepareIndex("test").setId(id).setSource("{}", XContentType.JSON).get(); + final DocWriteResponse response = prepareIndex("test").setId(id).setSource("{}", XContentType.JSON).get(); assertThat(response.getResult(), equalTo(DocWriteResponse.Result.CREATED)); } }); @@ -1371,8 +1355,7 @@ public void testWriteLoadIsCaptured() throws Exception { final List<ActionFuture<DocWriteResponse>> indexRequestFutures = new ArrayList<>(numDocs); for (int i = 0; i < numDocs; i++) { indexRequestFutures.add( - client().prepareIndex(indexName) - .setId(Integer.toString(idGenerator.incrementAndGet())) + prepareIndex(indexName).setId(Integer.toString(idGenerator.incrementAndGet())) .setSource("{}", XContentType.JSON) .execute() ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java index 29c38c07fcbd7..0e385768fc256 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java @@ -33,9 +33,9 @@ public void testComponentTemplatesCanBeUpdatedAfterRestart() throws Exception { }"""), null), 3L, Collections.singletonMap("eggplant", "potato")); client().execute(PutComponentTemplateAction.INSTANCE, new PutComponentTemplateAction.Request("my-ct").componentTemplate(ct)).get(); - ComposableIndexTemplate cit = new ComposableIndexTemplate( - Collections.singletonList("coleslaw"), - new Template(null, new CompressedXContent(""" + ComposableIndexTemplate cit = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("coleslaw")) + .template(new Template(null, new CompressedXContent(""" { "dynamic": false, "properties": { @@ -43,12 +43,12 @@ public void testComponentTemplatesCanBeUpdatedAfterRestart() throws Exception { "type": "keyword" } } - }"""), null), - Collections.singletonList("my-ct"), - 4L, - 5L, - Collections.singletonMap("egg", "bread") - ); + }"""), null)) + .componentTemplates(Collections.singletonList("my-ct")) + .priority(4L) + .version(5L) + .metadata(Collections.singletonMap("egg", "bread")) + .build(); client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(cit) @@ -68,9 +68,9 @@ public void testComponentTemplatesCanBeUpdatedAfterRestart() throws Exception { }"""), null), 3L, Collections.singletonMap("eggplant", "potato")); client().execute(PutComponentTemplateAction.INSTANCE, new PutComponentTemplateAction.Request("my-ct").componentTemplate(ct2)).get(); - ComposableIndexTemplate cit2 = new ComposableIndexTemplate( - Collections.singletonList("coleslaw"), - new Template(null, new CompressedXContent(""" + ComposableIndexTemplate cit2 = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("coleslaw")) + 
.template(new Template(null, new CompressedXContent(""" { "dynamic": true, "properties": { @@ -78,12 +78,12 @@ public void testComponentTemplatesCanBeUpdatedAfterRestart() throws Exception { "type": "integer" } } - }"""), null), - Collections.singletonList("my-ct"), - 4L, - 5L, - Collections.singletonMap("egg", "bread") - ); + }"""), null)) + .componentTemplates(Collections.singletonList("my-ct")) + .priority(4L) + .version(5L) + .metadata(Collections.singletonMap("egg", "bread")) + .build(); client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(cit2) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/IndexTemplateBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/IndexTemplateBlocksIT.java index 22ae5d62dc297..48958e3e39b9b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/IndexTemplateBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/IndexTemplateBlocksIT.java @@ -44,13 +44,12 @@ public void testIndexTemplatesWithBlocks() throws IOException { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); try { setClusterReadOnly(true); - GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates("template_blocks").execute().actionGet(); + GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates("template_blocks").get(); assertThat(response.getIndexTemplates(), hasSize(1)); assertBlocked( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index 359b90a351b60..25cdd413aec2b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilities; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.common.ParsingException; @@ -49,6 +48,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -149,39 +149,38 @@ public void testSimpleIndexTemplateTests() throws Exception { assertThat(response.getIndexTemplates(), hasSize(2)); // index something into test_index, will match on both templates - client().prepareIndex("test_index").setId("1").setSource("field1", "value1", "field2", "value 2").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test_index").setId("1").setSource("field1", "value1", "field2", "value 2").setRefreshPolicy(IMMEDIATE).get(); ensureGreen(); - SearchResponse searchResponse = prepareSearch("test_index").setQuery(termQuery("field1", "value1")) - 
.addStoredField("field1") - .addStoredField("field2") - .execute() - .actionGet(); - - assertHitCount(searchResponse, 1); - assertThat(searchResponse.getHits().getAt(0).field("field1").getValue().toString(), equalTo("value1")); - // field2 is not stored. - assertThat(searchResponse.getHits().getAt(0).field("field2"), nullValue()); + assertResponse( + prepareSearch("test_index").setQuery(termQuery("field1", "value1")).addStoredField("field1").addStoredField("field2"), + searchResponse -> { + assertHitCount(searchResponse, 1); + assertThat(searchResponse.getHits().getAt(0).field("field1").getValue().toString(), equalTo("value1")); + // field2 is not stored. + assertThat(searchResponse.getHits().getAt(0).field("field2"), nullValue()); + } + ); - client().prepareIndex("text_index").setId("1").setSource("field1", "value1", "field2", "value 2").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("text_index").setId("1").setSource("field1", "value1", "field2", "value 2").setRefreshPolicy(IMMEDIATE).get(); ensureGreen(); // now only match on one template (template_1) - searchResponse = prepareSearch("text_index").setQuery(termQuery("field1", "value1")) - .addStoredField("field1") - .addStoredField("field2") - .execute() - .actionGet(); - if (searchResponse.getFailedShards() > 0) { - logger.warn("failed search {}", Arrays.toString(searchResponse.getShardFailures())); - } - assertHitCount(searchResponse, 1); - assertThat(searchResponse.getHits().getAt(0).field("field1").getValue().toString(), equalTo("value1")); - assertThat(searchResponse.getHits().getAt(0).field("field2").getValue().toString(), equalTo("value 2")); + assertResponse( + prepareSearch("text_index").setQuery(termQuery("field1", "value1")).addStoredField("field1").addStoredField("field2"), + searchResponse -> { + if (searchResponse.getFailedShards() > 0) { + logger.warn("failed search {}", Arrays.toString(searchResponse.getShardFailures())); + } + assertHitCount(searchResponse, 1); + assertThat(searchResponse.getHits().getAt(0).field("field1").getValue().toString(), equalTo("value1")); + assertThat(searchResponse.getHits().getAt(0).field("field2").getValue().toString(), equalTo("value 2")); + } + ); } public void testDeleteIndexTemplate() throws Exception { - final int existingTemplates = admin().cluster().prepareState().execute().actionGet().getState().metadata().templates().size(); + final int existingTemplates = admin().cluster().prepareState().get().getState().metadata().templates().size(); logger.info("--> put template_1 and template_2"); indicesAdmin().preparePutTemplate("template_1") .setPatterns(Collections.singletonList("te*")) @@ -203,8 +202,7 @@ public void testDeleteIndexTemplate() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); indicesAdmin().preparePutTemplate("template_2") .setPatterns(Collections.singletonList("test*")) @@ -222,13 +220,12 @@ public void testDeleteIndexTemplate() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); logger.info("--> explicitly delete template_1"); - indicesAdmin().prepareDeleteTemplate("template_1").execute().actionGet(); + indicesAdmin().prepareDeleteTemplate("template_1").get(); - ClusterState state = admin().cluster().prepareState().execute().actionGet().getState(); + ClusterState state = admin().cluster().prepareState().get().getState(); assertThat(state.metadata().templates().size(), equalTo(1 + existingTemplates)); assertThat(state.metadata().templates().containsKey("template_2"), equalTo(true)); @@ -255,19 +252,15 
@@ public void testDeleteIndexTemplate() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); logger.info("--> delete template*"); - indicesAdmin().prepareDeleteTemplate("template*").execute().actionGet(); - assertThat( - admin().cluster().prepareState().execute().actionGet().getState().metadata().templates().size(), - equalTo(existingTemplates) - ); + indicesAdmin().prepareDeleteTemplate("template*").get(); + assertThat(admin().cluster().prepareState().get().getState().metadata().templates().size(), equalTo(existingTemplates)); logger.info("--> delete * with no templates, make sure we don't get a failure"); - indicesAdmin().prepareDeleteTemplate("*").execute().actionGet(); - assertThat(admin().cluster().prepareState().execute().actionGet().getState().metadata().templates().size(), equalTo(0)); + indicesAdmin().prepareDeleteTemplate("*").get(); + assertThat(admin().cluster().prepareState().get().getState().metadata().templates().size(), equalTo(0)); } public void testThatGetIndexTemplatesWorks() throws Exception { @@ -293,11 +286,10 @@ public void testThatGetIndexTemplatesWorks() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); logger.info("--> get template template_1"); - GetIndexTemplatesResponse getTemplate1Response = indicesAdmin().prepareGetTemplates("template_1").execute().actionGet(); + GetIndexTemplatesResponse getTemplate1Response = indicesAdmin().prepareGetTemplates("template_1").get(); assertThat(getTemplate1Response.getIndexTemplates(), hasSize(1)); assertThat(getTemplate1Response.getIndexTemplates().get(0), is(notNullValue())); assertThat(getTemplate1Response.getIndexTemplates().get(0).patterns(), is(Collections.singletonList("te*"))); @@ -305,7 +297,7 @@ public void testThatGetIndexTemplatesWorks() throws Exception { assertThat(getTemplate1Response.getIndexTemplates().get(0).getVersion(), is(123)); logger.info("--> get non-existing-template"); - GetIndexTemplatesResponse getTemplate2Response = indicesAdmin().prepareGetTemplates("non-existing-template").execute().actionGet(); + GetIndexTemplatesResponse getTemplate2Response = indicesAdmin().prepareGetTemplates("non-existing-template").get(); assertThat(getTemplate2Response.getIndexTemplates(), hasSize(0)); } @@ -331,8 +323,7 @@ public void testThatGetIndexTemplatesWithSimpleRegexWorks() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); logger.info("--> put template_2"); indicesAdmin().preparePutTemplate("template_2") @@ -355,8 +346,7 @@ public void testThatGetIndexTemplatesWithSimpleRegexWorks() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); logger.info("--> put template3"); indicesAdmin().preparePutTemplate("template3") @@ -379,11 +369,10 @@ public void testThatGetIndexTemplatesWithSimpleRegexWorks() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); logger.info("--> get template template_*"); - GetIndexTemplatesResponse getTemplate1Response = indicesAdmin().prepareGetTemplates("template_*").execute().actionGet(); + GetIndexTemplatesResponse getTemplate1Response = indicesAdmin().prepareGetTemplates("template_*").get(); assertThat(getTemplate1Response.getIndexTemplates(), hasSize(2)); List<String> templateNames = new ArrayList<>(); @@ -392,7 +381,7 @@ public void testThatGetIndexTemplatesWithSimpleRegexWorks() throws Exception { assertThat(templateNames, containsInAnyOrder("template_1", "template_2")); logger.info("--> get all templates"); - 
getTemplate1Response = indicesAdmin().prepareGetTemplates("template*").execute().actionGet(); + getTemplate1Response = indicesAdmin().prepareGetTemplates("template*").get(); assertThat(getTemplate1Response.getIndexTemplates(), hasSize(3)); templateNames = new ArrayList<>(); @@ -402,7 +391,7 @@ public void testThatGetIndexTemplatesWithSimpleRegexWorks() throws Exception { assertThat(templateNames, containsInAnyOrder("template_1", "template_2", "template3")); logger.info("--> get templates template_1 and template_2"); - getTemplate1Response = indicesAdmin().prepareGetTemplates("template_1", "template_2").execute().actionGet(); + getTemplate1Response = indicesAdmin().prepareGetTemplates("template_1", "template_2").get(); assertThat(getTemplate1Response.getIndexTemplates(), hasSize(2)); templateNames = new ArrayList<>(); @@ -498,11 +487,11 @@ public void testIndexTemplateWithAliases() throws Exception { assertAcked(prepareCreate("test_index")); ensureGreen(); - client().prepareIndex("test_index").setId("1").setSource("type", "type1", "field", "A value").get(); - client().prepareIndex("test_index").setId("2").setSource("type", "type2", "field", "B value").get(); - client().prepareIndex("test_index").setId("3").setSource("type", "typeX", "field", "C value").get(); - client().prepareIndex("test_index").setId("4").setSource("type", "typeY", "field", "D value").get(); - client().prepareIndex("test_index").setId("5").setSource("type", "typeZ", "field", "E value").get(); + prepareIndex("test_index").setId("1").setSource("type", "type1", "field", "A value").get(); + prepareIndex("test_index").setId("2").setSource("type", "type2", "field", "B value").get(); + prepareIndex("test_index").setId("3").setSource("type", "typeX", "field", "C value").get(); + prepareIndex("test_index").setId("4").setSource("type", "typeY", "field", "D value").get(); + prepareIndex("test_index").setId("5").setSource("type", "typeZ", "field", "E value").get(); GetAliasesResponse getAliasesResponse = indicesAdmin().prepareGetAliases().setIndices("test_index").get(); assertThat(getAliasesResponse.getAliases().size(), equalTo(1)); @@ -514,20 +503,22 @@ public void testIndexTemplateWithAliases() throws Exception { assertHitCount(prepareSearch("simple_alias"), 5L); assertHitCount(prepareSearch("templated_alias-test_index"), 5L); - SearchResponse searchResponse = prepareSearch("filtered_alias").get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getSourceAsMap().get("type"), equalTo("type2")); + assertResponse(prepareSearch("filtered_alias"), response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("type"), equalTo("type2")); + }); // Search the complex filter alias - searchResponse = prepareSearch("complex_filtered_alias").get(); - assertHitCount(searchResponse, 3L); + assertResponse(prepareSearch("complex_filtered_alias"), response -> { + assertHitCount(response, 3L); - Set<String> types = new HashSet<>(); - for (SearchHit searchHit : searchResponse.getHits().getHits()) { - types.add(searchHit.getSourceAsMap().get("type").toString()); - } - assertThat(types.size(), equalTo(3)); - assertThat(types, containsInAnyOrder("typeX", "typeY", "typeZ")); + Set<String> types = new HashSet<>(); + for (SearchHit searchHit : response.getHits().getHits()) { + types.add(searchHit.getSourceAsMap().get("type").toString()); + } + assertThat(types.size(), equalTo(3)); + assertThat(types, containsInAnyOrder("typeX", "typeY", "typeZ")); + }); } public void 
testIndexTemplateWithAliasesInSource() { @@ -552,15 +543,16 @@ public void testIndexTemplateWithAliasesInSource() { assertThat(getAliasesResponse.getAliases().size(), equalTo(1)); assertThat(getAliasesResponse.getAliases().get("test_index").size(), equalTo(1)); - client().prepareIndex("test_index").setId("1").setSource("field", "value1").get(); - client().prepareIndex("test_index").setId("2").setSource("field", "value2").get(); + prepareIndex("test_index").setId("1").setSource("field", "value1").get(); + prepareIndex("test_index").setId("2").setSource("field", "value2").get(); refresh(); assertHitCount(prepareSearch("test_index"), 2L); - SearchResponse searchResponse = prepareSearch("my_alias").get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value2")); + assertResponse(prepareSearch("my_alias"), response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value2")); + }); } public void testIndexTemplateWithAliasesSource() { @@ -587,16 +579,17 @@ public void testIndexTemplateWithAliasesSource() { assertThat(getAliasesResponse.getAliases().size(), equalTo(1)); assertThat(getAliasesResponse.getAliases().get("test_index").size(), equalTo(3)); - client().prepareIndex("test_index").setId("1").setSource("field", "value1").get(); - client().prepareIndex("test_index").setId("2").setSource("field", "value2").get(); + prepareIndex("test_index").setId("1").setSource("field", "value1").get(); + prepareIndex("test_index").setId("2").setSource("field", "value2").get(); refresh(); assertHitCount(prepareSearch("test_index"), 2L); assertHitCount(prepareSearch("alias1"), 2L); - SearchResponse searchResponse = prepareSearch("alias2").get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value2")); + assertResponse(prepareSearch("alias2"), response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value2")); + }); } public void testDuplicateAlias() throws Exception { @@ -723,7 +716,7 @@ public void testStrictAliasParsingInIndicesCreatedViaTemplates() throws Exceptio .addAlias(new Alias("alias4").filter(termQuery("field", "value"))) .get(); - client().prepareIndex("a1").setId("test").setSource("{}", XContentType.JSON).get(); + prepareIndex("a1").setId("test").setSource("{}", XContentType.JSON).get(); BulkResponse response = client().prepareBulk().add(new IndexRequest("a2").id("test").source("{}", XContentType.JSON)).get(); assertThat(response.hasFailures(), is(false)); assertThat(response.getItems()[0].isFailed(), equalTo(false)); @@ -739,7 +732,7 @@ public void testStrictAliasParsingInIndicesCreatedViaTemplates() throws Exceptio // So the aliases defined in the index template for this index will not fail // even though the fields in the alias fields don't exist yet and indexing into // an index that doesn't exist yet will succeed - client().prepareIndex("b1").setId("test").setSource("{}", XContentType.JSON).get(); + prepareIndex("b1").setId("test").setSource("{}", XContentType.JSON).get(); response = client().prepareBulk().add(new IndexRequest("b2").id("test").source("{}", XContentType.JSON)).get(); assertThat(response.hasFailures(), is(false)); @@ -842,33 +835,31 @@ public void testMultipleTemplate() throws IOException { ) .get(); - client().prepareIndex("ax").setId("1").setSource("field1", "value1", "field2", 
"value2").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("ax").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex("bx").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("bx").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); ensureGreen(); // ax -> matches template - SearchResponse searchResponse = prepareSearch("ax").setQuery(termQuery("field1", "value1")) - .addStoredField("field1") - .addStoredField("field2") - .execute() - .actionGet(); - - assertHitCount(searchResponse, 1); - assertEquals("value1", searchResponse.getHits().getAt(0).field("field1").getValue().toString()); - assertNull(searchResponse.getHits().getAt(0).field("field2")); + assertResponse( + prepareSearch("ax").setQuery(termQuery("field1", "value1")).addStoredField("field1").addStoredField("field2"), + response -> { + assertHitCount(response, 1); + assertEquals("value1", response.getHits().getAt(0).field("field1").getValue().toString()); + assertNull(response.getHits().getAt(0).field("field2")); + } + ); // bx -> matches template - searchResponse = prepareSearch("bx").setQuery(termQuery("field1", "value1")) - .addStoredField("field1") - .addStoredField("field2") - .execute() - .actionGet(); - - assertHitCount(searchResponse, 1); - assertEquals("value1", searchResponse.getHits().getAt(0).field("field1").getValue().toString()); - assertNull(searchResponse.getHits().getAt(0).field("field2")); + assertResponse( + prepareSearch("bx").setQuery(termQuery("field1", "value1")).addStoredField("field1").addStoredField("field2"), + response -> { + assertHitCount(response, 1); + assertEquals("value1", response.getHits().getAt(0).field("field1").getValue().toString()); + assertNull(response.getHits().getAt(0).field("field2")); + } + ); } public void testPartitionedTemplate() throws Exception { @@ -995,7 +986,7 @@ public void testIndexTemplatesWithSameSubfield() { """, XContentType.JSON) .get(); - client().prepareIndex("test").setSource().get(); + prepareIndex("test").setSource().get(); FieldCapabilitiesResponse fieldCapabilitiesResponse = client().prepareFieldCaps("test").setFields("*location").get(); { Map field = fieldCapabilitiesResponse.getField("kwm.source.geo.location"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java index 04194238bd9ff..afc39cd6b4d7e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java @@ -230,15 +230,13 @@ public void test() throws Exception { assertThat(getResponse.pipelines().size(), equalTo(1)); assertThat(getResponse.pipelines().get(0).getId(), equalTo("_id")); - client().prepareIndex("test").setId("1").setPipeline("_id").setSource("field", "value", "fail", false).get(); + prepareIndex("test").setId("1").setPipeline("_id").setSource("field", "value", "fail", false).get(); Map doc = client().prepareGet("test", "1").get().getSourceAsMap(); assertThat(doc.get("field"), equalTo("value")); assertThat(doc.get("processed"), equalTo(true)); - client().prepareBulk() - .add(client().prepareIndex("test").setId("2").setSource("field", "value2", "fail", false).setPipeline("_id")) - .get(); + client().prepareBulk().add(prepareIndex("test").setId("2").setSource("field", "value2", "fail", 
false).setPipeline("_id")).get(); doc = client().prepareGet("test", "2").get().getSourceAsMap(); assertThat(doc.get("field"), equalTo("value2")); assertThat(doc.get("processed"), equalTo(true)); @@ -290,7 +288,7 @@ public void testWithDedicatedMaster() throws Exception { clusterAdmin().putPipeline(putPipelineRequest).get(); BulkItemResponse item = client(masterOnlyNode).prepareBulk() - .add(client().prepareIndex("test").setSource("field", "value2", "drop", true).setPipeline("_id")) + .add(prepareIndex("test").setSource("field", "value2", "drop", true).setPipeline("_id")) .get() .getItems()[0]; assertFalse(item.isFailed()); @@ -422,7 +420,7 @@ public void testPipelineProcessorOnFailure() throws Exception { clusterAdmin().putPipeline(putPipelineRequest).get(); } - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).setPipeline("1").get(); + prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).setPipeline("1").get(); Map inserted = client().prepareGet("test", "1").get().getSourceAsMap(); assertThat(inserted.get("readme"), equalTo("pipeline with id [3] is a bad pipeline")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/mget/SimpleMgetIT.java b/server/src/internalClusterTest/java/org/elasticsearch/mget/SimpleMgetIT.java index 6c6d59844d484..03e8edbf1173d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/mget/SimpleMgetIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/mget/SimpleMgetIT.java @@ -37,8 +37,7 @@ public class SimpleMgetIT extends ESIntegTestCase { public void testThatMgetShouldWorkWithOneIndexMissing() throws IOException { createIndex("test"); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("foo", "bar").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -75,8 +74,7 @@ public void testThatMgetShouldWorkWithMultiIndexAlias() throws IOException { assertAcked(prepareCreate("test").addAlias(new Alias("multiIndexAlias"))); assertAcked(prepareCreate("test2").addAlias(new Alias("multiIndexAlias"))); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("foo", "bar").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -115,8 +113,7 @@ public void testThatMgetShouldWorkWithAliasRouting() throws IOException { ) ); - client().prepareIndex("alias1") - .setId("1") + prepareIndex("alias1").setId("1") .setSource(jsonBuilder().startObject().field("foo", "bar").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -142,7 +139,7 @@ public void testThatSourceFilteringIsSupported() throws Exception { .endObject() ); for (int i = 0; i < 100; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource(sourceBytesRef, XContentType.JSON).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource(sourceBytesRef, XContentType.JSON).get(); } MultiGetRequestBuilder request = client().prepareMultiGet(); @@ -173,7 +170,7 @@ public void testThatSourceFilteringIsSupported() throws Exception { assertThat(((Map) source.get("included")).size(), equalTo(1)); assertThat(((Map) source.get("included")), hasKey("field")); } else { - assertThat(responseItem.getResponse().getSourceAsBytes(), nullValue()); + assertThat(responseItem.getResponse().getSourceAsBytesRef(), nullValue()); } } } @@ -189,8 +186,7 @@ public void testThatRoutingPerDocumentIsSupported() throws Exception { final String id = routingKeyForShard("test", 0); final String routingOtherShard = 
routingKeyForShard("test", 1); - client().prepareIndex("test") - .setId(id) + prepareIndex("test").setId(id) .setRefreshPolicy(IMMEDIATE) .setRouting(routingOtherShard) .setSource(jsonBuilder().startObject().field("foo", "bar").endObject()) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java b/server/src/internalClusterTest/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java index 15225edc47a60..cafc0e9426eea 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java @@ -41,7 +41,7 @@ public void testNodesInfos() throws Exception { String server2NodeId = internalCluster().getInstance(ClusterService.class, node_2).state().nodes().getLocalNodeId(); logger.info("--> started nodes: {} and {}", server1NodeId, server2NodeId); - NodesInfoResponse response = clusterAdmin().prepareNodesInfo().execute().actionGet(); + NodesInfoResponse response = clusterAdmin().prepareNodesInfo().get(); assertThat(response.getNodes(), hasSize(2)); assertThat(response.getNodesMap().get(server1NodeId), notNullValue()); assertThat(response.getNodesMap().get(server2NodeId), notNullValue()); @@ -80,7 +80,7 @@ public void testNodesInfosTotalIndexingBuffer() throws Exception { String server2NodeId = internalCluster().getInstance(ClusterService.class, node_2).state().nodes().getLocalNodeId(); logger.info("--> started nodes: {} and {}", server1NodeId, server2NodeId); - NodesInfoResponse response = clusterAdmin().prepareNodesInfo().execute().actionGet(); + NodesInfoResponse response = clusterAdmin().prepareNodesInfo().get(); assertThat(response.getNodes(), hasSize(2)); assertThat(response.getNodesMap().get(server1NodeId), notNullValue()); assertNotNull(response.getNodesMap().get(server1NodeId).getTotalIndexingBuffer()); @@ -91,7 +91,7 @@ public void testNodesInfosTotalIndexingBuffer() throws Exception { assertThat(response.getNodesMap().get(server2NodeId).getTotalIndexingBuffer().getBytes(), greaterThan(0L)); // again, using only the indices flag - response = clusterAdmin().prepareNodesInfo().clear().setIndices(true).execute().actionGet(); + response = clusterAdmin().prepareNodesInfo().clear().setIndices(true).get(); assertThat(response.getNodes(), hasSize(2)); assertThat(response.getNodesMap().get(server1NodeId), notNullValue()); assertNotNull(response.getNodesMap().get(server1NodeId).getTotalIndexingBuffer()); @@ -118,7 +118,7 @@ public void testAllocatedProcessors() throws Exception { String server2NodeId = internalCluster().getInstance(ClusterService.class, node_2).state().nodes().getLocalNodeId(); logger.info("--> started nodes: {} and {}", server1NodeId, server2NodeId); - NodesInfoResponse response = clusterAdmin().prepareNodesInfo().execute().actionGet(); + NodesInfoResponse response = clusterAdmin().prepareNodesInfo().get(); assertThat(response.getNodes(), hasSize(2)); assertThat(response.getNodesMap().get(server1NodeId), notNullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java index ab24bf923b9db..450b27eb0db8b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java @@ -99,16 +99,13 @@ protected Collection> getMockPlugins() { } private void 
assertMasterNode(Client client, String node) { - assertThat( - client.admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), - equalTo(node) - ); + assertThat(client.admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), equalTo(node)); } private void expectMasterNotFound() { expectThrows( MasterNotDiscoveredException.class, - () -> clusterAdmin().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().getMasterNodeId() + () -> clusterAdmin().prepareState().setMasterNodeTimeout("100ms").get().getState().nodes().getMasterNodeId() ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/FullRollingRestartIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/FullRollingRestartIT.java index f4aa261b09625..1e67a38c76017 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/FullRollingRestartIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/FullRollingRestartIT.java @@ -49,19 +49,11 @@ public void testFullRollingRestart() throws Exception { final String healthTimeout = "1m"; for (int i = 0; i < 1000; i++) { - client().prepareIndex("test") - .setId(Long.toString(i)) - .setSource(Map.of("test", "value" + i)) - .execute() - .actionGet(); + prepareIndex("test").setId(Long.toString(i)).setSource(Map.of("test", "value" + i)).get(); } flush(); for (int i = 1000; i < 2000; i++) { - client().prepareIndex("test") - .setId(Long.toString(i)) - .setSource(Map.of("test", "value" + i)) - .execute() - .actionGet(); + prepareIndex("test").setId(Long.toString(i)).setSource(Map.of("test", "value" + i)).get(); } logger.info("--> now start adding nodes"); @@ -173,11 +165,7 @@ public void testNoRebalanceOnRollingRestart() throws Exception { ).get(); for (int i = 0; i < 100; i++) { - client().prepareIndex("test") - .setId(Long.toString(i)) - .setSource(Map.of("test", "value" + i)) - .execute() - .actionGet(); + prepareIndex("test").setId(Long.toString(i)).setSource(Map.of("test", "value" + i)).get(); } ensureGreen(); ClusterState state = clusterAdmin().prepareState().get().getState(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java index fceeb2013b7c5..d47c68690bab8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java @@ -41,8 +41,8 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; public class RecoveryWhileUnderLoadIT extends ESIntegTestCase { @@ -85,7 +85,7 @@ public void testRecoverWhileUnderLoadAllocateReplicasTest() throws Exception { indexer.continueIndexing(extraDocs); logger.info("--> flushing the index ...."); // now flush, just to make sure we have some data in the index, not just translog - indicesAdmin().prepareFlush().execute().actionGet(); + 
indicesAdmin().prepareFlush().get(); logger.info("--> waiting for {} docs to be indexed ...", waitFor); waitForDocs(waitFor, indexer); @@ -144,7 +144,7 @@ public void testRecoverWhileUnderLoadAllocateReplicasRelocatePrimariesTest() thr indexer.continueIndexing(extraDocs); logger.info("--> flushing the index ...."); // now flush, just to make sure we have some data in the index, not just translog - indicesAdmin().prepareFlush().execute().actionGet(); + indicesAdmin().prepareFlush().get(); logger.info("--> waiting for {} docs to be indexed ...", waitFor); waitForDocs(waitFor, indexer); @@ -200,7 +200,7 @@ public void testRecoverWhileUnderLoadWithReducedAllowedNodes() throws Exception indexer.continueIndexing(extraDocs); logger.info("--> flushing the index ...."); // now flush, just to make sure we have some data in the index, not just translog - indicesAdmin().prepareFlush().execute().actionGet(); + indicesAdmin().prepareFlush().get(); logger.info("--> waiting for {} docs to be indexed ...", waitFor); waitForDocs(waitFor, indexer); @@ -313,22 +313,23 @@ public void testRecoverWhileRelocating() throws Exception { private void iterateAssertCount(final int numberOfShards, final int iterations, final Set ids) throws Exception { final long numberOfDocs = ids.size(); - SearchResponse[] iterationResults = new SearchResponse[iterations]; - boolean error = false; + long[] iterationHitCount = new long[iterations]; + boolean[] error = new boolean[1]; for (int i = 0; i < iterations; i++) { - SearchResponse searchResponse = prepareSearch().setSize((int) numberOfDocs) - .setQuery(matchAllQuery()) - .setTrackTotalHits(true) - .addSort("id", SortOrder.ASC) - .get(); - logSearchResponse(numberOfShards, numberOfDocs, i, searchResponse); - iterationResults[i] = searchResponse; - if (searchResponse.getHits().getTotalHits().value != numberOfDocs) { - error = true; - } + final int finalI = i; + assertResponse( + prepareSearch().setSize((int) numberOfDocs).setQuery(matchAllQuery()).setTrackTotalHits(true).addSort("id", SortOrder.ASC), + response -> { + logSearchResponse(numberOfShards, numberOfDocs, finalI, response); + iterationHitCount[finalI] = response.getHits().getTotalHits().value; + if (iterationHitCount[finalI] != numberOfDocs) { + error[0] = true; + } + } + ); } - if (error) { + if (error[0]) { // Printing out shards and their doc count IndicesStatsResponse indicesStatsResponse = indicesAdmin().prepareStats().get(); for (ShardStats shardStats : indicesStatsResponse.getShards()) { @@ -364,21 +365,22 @@ private void iterateAssertCount(final int numberOfShards, final int iterations, // if there was an error we try to wait and see if at some point it'll get fixed logger.info("--> trying to wait"); assertBusy(() -> { - boolean errorOccurred = false; + boolean[] errorOccurred = new boolean[1]; for (int i = 0; i < iterations; i++) { - SearchResponse searchResponse = prepareSearch().setTrackTotalHits(true).setSize(0).setQuery(matchAllQuery()).get(); - if (searchResponse.getHits().getTotalHits().value != numberOfDocs) { - errorOccurred = true; - } + assertResponse(prepareSearch().setTrackTotalHits(true).setSize(0).setQuery(matchAllQuery()), response -> { + if (response.getHits().getTotalHits().value != numberOfDocs) { + errorOccurred[0] = true; + } + }); } - assertFalse("An error occurred while waiting", errorOccurred); + assertFalse("An error occurred while waiting", errorOccurred[0]); }, 5, TimeUnit.MINUTES); assertEquals(numberOfDocs, ids.size()); } // lets now make the test fail if it was supposed to fail for 
(int i = 0; i < iterations; i++) { - assertHitCount(iterationResults[i], numberOfDocs); + assertEquals(iterationHitCount[i], numberOfDocs); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RelocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RelocationIT.java index 9e04413bfb014..e53bcb0480d7b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RelocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RelocationIT.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; @@ -80,6 +79,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; @@ -122,42 +122,40 @@ public void testSimpleRelocationNoIndexing() { logger.info("--> index 10 docs"); for (int i = 0; i < 10; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); } logger.info("--> flush so we have an actual index"); - indicesAdmin().prepareFlush().execute().actionGet(); + indicesAdmin().prepareFlush().get(); logger.info("--> index more docs so we have something in the translog"); for (int i = 10; i < 20; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); } logger.info("--> verifying count"); - indicesAdmin().prepareRefresh().execute().actionGet(); - assertThat(prepareSearch("test").setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(20L)); + indicesAdmin().prepareRefresh().get(); + assertThat(prepareSearch("test").setSize(0).get().getHits().getTotalHits().value, equalTo(20L)); logger.info("--> start another node"); final String node_2 = internalCluster().startNode(); ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2") - .execute() - .actionGet(); + .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> relocate the shard from node1 to node2"); - clusterAdmin().prepareReroute().add(new MoveAllocationCommand("test", 0, node_1, node_2)).execute().actionGet(); + clusterAdmin().prepareReroute().add(new MoveAllocationCommand("test", 0, node_1, node_2)).get(); clusterHealthResponse = clusterAdmin().prepareHealth() .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) .setTimeout(ACCEPTABLE_RELOCATION_TIME) - .execute() - .actionGet(); + .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); 
logger.info("--> verifying count again..."); - indicesAdmin().prepareRefresh().execute().actionGet(); - assertThat(prepareSearch("test").setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(20L)); + indicesAdmin().prepareRefresh().get(); + assertThat(prepareSearch("test").setSize(0).get().getHits().getTotalHits().value, equalTo(20L)); } public void testRelocationWhileIndexingRandom() throws Exception { @@ -187,8 +185,7 @@ public void testRelocationWhileIndexingRandom() throws Exception { .setWaitForEvents(Priority.LANGUID) .setWaitForNodes(Integer.toString(i)) .setWaitForGreenStatus() - .execute() - .actionGet(); + .get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); } } @@ -219,8 +216,7 @@ public void testRelocationWhileIndexingRandom() throws Exception { .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) .setTimeout(ACCEPTABLE_RELOCATION_TIME) - .execute() - .actionGet(); + .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); indexer.pauseIndexing(); logger.info("--> DONE relocate the shard from {} to {}", fromNode, toNode); @@ -231,7 +227,7 @@ public void testRelocationWhileIndexingRandom() throws Exception { logger.info("--> indexing threads stopped"); logger.info("--> refreshing the index"); - indicesAdmin().prepareRefresh("test").execute().actionGet(); + indicesAdmin().prepareRefresh("test").get(); logger.info("--> searching the index"); boolean ranOnce = false; for (int i = 0; i < 10; i++) { @@ -239,8 +235,7 @@ public void testRelocationWhileIndexingRandom() throws Exception { SearchHits hits = prepareSearch("test").setQuery(matchAllQuery()) .setSize((int) indexer.totalIndexedDocs()) .storedFields() - .execute() - .actionGet() + .get() .getHits(); ranOnce = true; if (hits.getTotalHits().value != indexer.totalIndexedDocs()) { @@ -298,8 +293,7 @@ public void testRelocationWhileRefreshing() throws Exception { .setWaitForEvents(Priority.LANGUID) .setWaitForNodes(Integer.toString(i + 1)) .setWaitForGreenStatus() - .execute() - .actionGet(); + .get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); } } @@ -334,12 +328,12 @@ public void indexShardStateChanged( List builders1 = new ArrayList<>(); for (int numDocs = randomIntBetween(10, 30); numDocs > 0; numDocs--) { - builders1.add(client().prepareIndex("test").setSource("{}", XContentType.JSON)); + builders1.add(prepareIndex("test").setSource("{}", XContentType.JSON)); } List builders2 = new ArrayList<>(); for (int numDocs = randomIntBetween(10, 30); numDocs > 0; numDocs--) { - builders2.add(client().prepareIndex("test").setSource("{}", XContentType.JSON)); + builders2.add(prepareIndex("test").setSource("{}", XContentType.JSON)); } logger.info("--> START relocate the shard from {} to {}", nodes[fromNode], nodes[toNode]); @@ -366,15 +360,15 @@ public void indexShardStateChanged( logger.info("--> DONE relocate the shard from {} to {}", fromNode, toNode); logger.debug("--> verifying all searches return the same number of docs"); - long expectedCount = -1; + long[] expectedCount = new long[] { -1 }; for (Client client : clients()) { - SearchResponse response = client.prepareSearch("test").setPreference("_local").setSize(0).get(); - assertNoFailures(response); - if (expectedCount < 0) { - expectedCount = response.getHits().getTotalHits().value; - } else { - assertEquals(expectedCount, response.getHits().getTotalHits().value); - } + assertNoFailuresAndResponse(client.prepareSearch("test").setPreference("_local").setSize(0), response -> { + if (expectedCount[0] < 0) 
{ + expectedCount[0] = response.getHits().getTotalHits().value; + } else { + assertEquals(expectedCount[0], response.getHits().getTotalHits().value); + } + }); } } @@ -394,7 +388,7 @@ public void testCancellationCleansTempFiles() throws Exception { List requests = new ArrayList<>(); int numDocs = scaledRandomIntBetween(25, 250); for (int i = 0; i < numDocs; i++) { - requests.add(client().prepareIndex(indexName).setSource("{}", XContentType.JSON)); + requests.add(prepareIndex(indexName).setSource("{}", XContentType.JSON)); } indexRandom(true, requests); assertFalse(clusterAdmin().prepareHealth().setWaitForNodes("3").setWaitForGreenStatus().get().isTimedOut()); @@ -497,7 +491,7 @@ public void testIndexSearchAndRelocateConcurrently() throws Exception { for (int i = 0; i < numDocs; i++) { String id = randomRealisticUnicodeOfLength(10) + String.valueOf(i); ids.add(id); - docs[i] = client().prepareIndex("test").setId(id).setSource("field1", English.intToEnglish(i)); + docs[i] = prepareIndex("test").setId(id).setSource("field1", English.intToEnglish(i)); } indexRandom(true, docs); assertHitCount(prepareSearch("test"), numDocs); @@ -512,7 +506,7 @@ public void testIndexSearchAndRelocateConcurrently() throws Exception { for (int i = 0; i < numDocs; i++) { String id = randomRealisticUnicodeOfLength(10) + String.valueOf(numDocs + i); ids.add(id); - docs[i] = client().prepareIndex("test").setId(id).setSource("field1", English.intToEnglish(numDocs + i)); + docs[i] = prepareIndex("test").setId(id).setSource("field1", English.intToEnglish(numDocs + i)); } indexRandom(true, docs); @@ -544,14 +538,13 @@ public void testRelocateWhileWaitingForRefresh() { logger.info("--> index 10 docs"); for (int i = 0; i < 10; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); } logger.info("--> flush so we have an actual index"); - indicesAdmin().prepareFlush().execute().actionGet(); + indicesAdmin().prepareFlush().get(); logger.info("--> index more docs so we have something in the translog"); for (int i = 10; i < 20; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL) .setSource("field", "value" + i) .execute(); @@ -562,24 +555,22 @@ public void testRelocateWhileWaitingForRefresh() { ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2") - .execute() - .actionGet(); + .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> relocate the shard from node1 to node2"); - clusterAdmin().prepareReroute().add(new MoveAllocationCommand("test", 0, node1, node2)).execute().actionGet(); + clusterAdmin().prepareReroute().add(new MoveAllocationCommand("test", 0, node1, node2)).get(); clusterHealthResponse = clusterAdmin().prepareHealth() .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) .setTimeout(ACCEPTABLE_RELOCATION_TIME) - .execute() - .actionGet(); + .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> verifying count"); - indicesAdmin().prepareRefresh().execute().actionGet(); - assertThat(prepareSearch("test").setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(20L)); + indicesAdmin().prepareRefresh().get(); + 
assertThat(prepareSearch("test").setSize(0).get().getHits().getTotalHits().value, equalTo(20L)); } public void testRelocateWhileContinuouslyIndexingAndWaitingForRefresh() throws Exception { @@ -595,16 +586,15 @@ public void testRelocateWhileContinuouslyIndexingAndWaitingForRefresh() throws E logger.info("--> index 10 docs"); for (int i = 0; i < 10; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); } logger.info("--> flush so we have an actual index"); - indicesAdmin().prepareFlush().execute().actionGet(); + indicesAdmin().prepareFlush().get(); logger.info("--> index more docs so we have something in the translog"); final List> pendingIndexResponses = new ArrayList<>(); for (int i = 10; i < 20; i++) { pendingIndexResponses.add( - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL) .setSource("field", "value" + i) .execute() @@ -616,8 +606,7 @@ public void testRelocateWhileContinuouslyIndexingAndWaitingForRefresh() throws E ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2") - .execute() - .actionGet(); + .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> relocate the shard from node1 to node2"); @@ -627,8 +616,7 @@ public void testRelocateWhileContinuouslyIndexingAndWaitingForRefresh() throws E logger.info("--> index 100 docs while relocating"); for (int i = 20; i < 120; i++) { pendingIndexResponses.add( - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL) .setSource("field", "value" + i) .execute() @@ -639,17 +627,16 @@ public void testRelocateWhileContinuouslyIndexingAndWaitingForRefresh() throws E .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) .setTimeout(ACCEPTABLE_RELOCATION_TIME) - .execute() - .actionGet(); + .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> verifying count"); assertBusy(() -> { - indicesAdmin().prepareRefresh().execute().actionGet(); + indicesAdmin().prepareRefresh().get(); assertTrue(pendingIndexResponses.stream().allMatch(ActionFuture::isDone)); }, 1, TimeUnit.MINUTES); - assertThat(prepareSearch("test").setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(120L)); + assertThat(prepareSearch("test").setSize(0).get().getHits().getTotalHits().value, equalTo(120L)); } public void testRelocationEstablishedPeerRecoveryRetentionLeases() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/SimpleRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/SimpleRecoveryIT.java index e11f443f6c5b3..bd69aebcd415e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/SimpleRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/SimpleRecoveryIT.java @@ -38,7 +38,7 @@ protected int maximumNumberOfReplicas() { } public void testSimpleRecovery() throws Exception { - assertAcked(prepareCreate("test", 1).execute().actionGet()); + assertAcked(prepareCreate("test", 1).get()); NumShards numShards = getNumShards("test"); diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java index 6281df7fc6646..28c56e0cdc916 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java @@ -88,7 +88,7 @@ public void testCancelRecoveryAndResume() throws Exception { List builder = new ArrayList<>(); for (int i = 0; i < numDocs; i++) { String id = Integer.toString(i); - builder.add(client().prepareIndex("test").setId(id).setSource("field1", English.intToEnglish(i), "the_id", id)); + builder.add(prepareIndex("test").setId(id).setSource("field1", English.intToEnglish(i), "the_id", id)); } indexRandom(true, builder); for (int i = 0; i < numDocs; i++) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java index be8053a1d6866..f77cc9ce20020 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java @@ -137,7 +137,7 @@ public void testGetShardSnapshotReturnsTheLatestSuccessfulSnapshot() throws Exce final SnapshotInfo snapshotInfo = createSnapshot(repoName, Strings.format("snap-%03d", i), snapshotIndices); if (snapshotInfo.indices().contains(indexName)) { lastSnapshot = snapshotInfo; - ClusterStateResponse clusterStateResponse = admin().cluster().prepareState().execute().actionGet(); + ClusterStateResponse clusterStateResponse = admin().cluster().prepareState().get(); IndexMetadata indexMetadata = clusterStateResponse.getState().metadata().index(indexName); expectedIndexMetadataId = IndexMetaDataGenerations.buildUniqueIdentifier(indexMetadata); } @@ -338,7 +338,7 @@ private PlainActionFuture getLatestSnapshotForShardFut boolean useAllRepositoriesRequest ) { ShardId shardId = new ShardId(new Index(indexName, "__na__"), shard); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); final GetShardSnapshotRequest request; if (useAllRepositoriesRequest && randomBoolean()) { request = GetShardSnapshotRequest.latestSnapshotInAllRepositories(shardId); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java index 7d444eef787c0..7886e628b26ad 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.RepositoryCleanupInProgress; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; @@ -24,6 +23,7 @@ import java.io.IOException; import java.util.concurrent.ExecutionException; +import static 
org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFutureThrows; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; @@ -90,7 +90,7 @@ private ActionFuture startBlockedCleanup(String repoN final BlobStoreRepository repository = getRepositoryOnMaster(repoName); logger.info("--> creating a garbage data blob"); - final PlainActionFuture garbageFuture = PlainActionFuture.newFuture(); + final PlainActionFuture garbageFuture = new PlainActionFuture<>(); repository.threadPool() .generic() .execute( @@ -98,7 +98,7 @@ private ActionFuture startBlockedCleanup(String repoN garbageFuture, () -> repository.blobStore() .blobContainer(repository.basePath()) - .writeBlob(OperationPurpose.SNAPSHOT, "snap-foo.dat", new BytesArray(new byte[1]), true) + .writeBlob(randomPurpose(), "snap-foo.dat", new BytesArray(new byte[1]), true) ) ); garbageFuture.get(); @@ -137,7 +137,7 @@ public void testCleanupOldIndexN() throws ExecutionException, InterruptedExcepti final BlobStoreRepository repository = getRepositoryOnMaster(repoName); logger.info("--> write two outdated index-N blobs"); for (int i = 0; i < 2; ++i) { - final PlainActionFuture createOldIndexNFuture = PlainActionFuture.newFuture(); + final PlainActionFuture createOldIndexNFuture = new PlainActionFuture<>(); final int generation = i; repository.threadPool() .generic() @@ -147,7 +147,7 @@ public void testCleanupOldIndexN() throws ExecutionException, InterruptedExcepti () -> repository.blobStore() .blobContainer(repository.basePath()) .writeBlob( - OperationPurpose.SNAPSHOT, + randomPurpose(), BlobStoreRepository.INDEX_FILE_PREFIX + generation, new BytesArray(new byte[1]), true diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java index cf47fbe95da24..58dcfdaec5147 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java @@ -100,10 +100,7 @@ public class FileSettingsServiceIT extends ESIntegTestCase { }"""; private void assertMasterNode(Client client, String node) { - assertThat( - client.admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), - equalTo(node) - ); + assertThat(client.admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), equalTo(node)); } private void writeJSONFile(String node, String json) throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java index 478cae8746f86..53001e30763a0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java @@ -37,9 +37,9 @@ public void testSearchClosedWildcardIndex() throws ExecutionException, Interrupt indicesAdmin().prepareClose("test-1").get(); indexRandom( true, - client().prepareIndex("test-0").setId("1").setSource("field1", "the quick brown fox jumps"), - client().prepareIndex("test-0").setId("2").setSource("field1", "quick brown"), - 
client().prepareIndex("test-0").setId("3").setSource("field1", "quick") + prepareIndex("test-0").setId("1").setSource("field1", "the quick brown fox jumps"), + prepareIndex("test-0").setId("2").setSource("field1", "quick brown"), + prepareIndex("test-0").setId("3").setSource("field1", "quick") ); refresh("test-*"); assertHitCount( @@ -51,7 +51,7 @@ public void testSearchClosedWildcardIndex() throws ExecutionException, Interrupt public void testResolveIndexRouting() { createIndex("test1"); createIndex("test2"); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); + clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); indicesAdmin().prepareAliases() .addAliasAction(AliasActions.add().index("test1").alias("alias")) @@ -93,7 +93,7 @@ public void testResolveSearchRouting() { createIndex("test1"); createIndex("test2"); createIndex("test3"); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); + clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); indicesAdmin().prepareAliases() .addAliasAction(AliasActions.add().index("test1").alias("alias")) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasRoutingIT.java index 7ee081ffd433e..442a2dc99bda3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasRoutingIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.routing; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.client.internal.Requests; @@ -20,6 +19,7 @@ import org.elasticsearch.xcontent.XContentFactory; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; /** @@ -38,61 +38,57 @@ public void testAliasCrudRouting() throws Exception { assertAcked(indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index("test").alias("alias0").routing("0"))); logger.info("--> indexing with id [1], and routing [0] using alias"); - client().prepareIndex("alias0").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + prepareIndex("alias0").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").get().isExists(), equalTo(false)); } logger.info("--> verifying get with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "1").setRouting("0").get().isExists(), equalTo(true)); } logger.info("--> verifying get with routing alias, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("alias0", 
"1").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("alias0", "1").get().isExists(), equalTo(true)); } logger.info("--> updating with id [1] and routing through alias"); client().prepareUpdate("alias0", "1") .setUpsert(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject()) .setDoc(Requests.INDEX_CONTENT_TYPE, "field", "value2") - .execute() - .actionGet(); + .get(); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("alias0", "1").execute().actionGet().isExists(), equalTo(true)); - assertThat( - client().prepareGet("alias0", "1").execute().actionGet().getSourceAsMap().get("field").toString(), - equalTo("value2") - ); + assertThat(client().prepareGet("alias0", "1").get().isExists(), equalTo(true)); + assertThat(client().prepareGet("alias0", "1").get().getSourceAsMap().get("field").toString(), equalTo("value2")); } logger.info("--> deleting with no routing, should not delete anything"); client().prepareDelete("test", "1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); - assertThat(client().prepareGet("test", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true)); - assertThat(client().prepareGet("alias0", "1").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "1").get().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").setRouting("0").get().isExists(), equalTo(true)); + assertThat(client().prepareGet("alias0", "1").get().isExists(), equalTo(true)); } logger.info("--> deleting with routing alias, should delete"); client().prepareDelete("alias0", "1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); - assertThat(client().prepareGet("test", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false)); - assertThat(client().prepareGet("alias0", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").get().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").setRouting("0").get().isExists(), equalTo(false)); + assertThat(client().prepareGet("alias0", "1").get().isExists(), equalTo(false)); } logger.info("--> indexing with id [1], and routing [0] using alias"); - client().prepareIndex("alias0").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + prepareIndex("alias0").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").get().isExists(), equalTo(false)); } logger.info("--> verifying get with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true)); - assertThat(client().prepareGet("alias0", "1").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "1").setRouting("0").get().isExists(), equalTo(true)); + assertThat(client().prepareGet("alias0", "1").get().isExists(), equalTo(true)); } } @@ -108,59 +104,37 @@ public void testAliasSearchRouting() throws Exception { 
); logger.info("--> indexing with id [1], and routing [0] using alias"); - client().prepareIndex("alias0").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + prepareIndex("alias0").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").get().isExists(), equalTo(false)); } logger.info("--> verifying get with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("alias0", "1").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("alias0", "1").get().isExists(), equalTo(true)); } logger.info("--> search with no routing, should find one"); for (int i = 0; i < 5; i++) { - assertThat( - prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, - equalTo(1L) - ); + assertThat(prepareSearch().setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L)); } logger.info("--> search with wrong routing, should not find"); for (int i = 0; i < 5; i++) { assertThat( - prepareSearch().setRouting("1") - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(0L) ); assertThat( - prepareSearch().setSize(0) - .setRouting("1") - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch().setSize(0).setRouting("1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(0L) ); - assertThat( - prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, - equalTo(0L) - ); + assertThat(prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(0L)); assertThat( - prepareSearch("alias1").setSize(0) - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch("alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(0L) ); } @@ -169,50 +143,28 @@ public void testAliasSearchRouting() throws Exception { for (int i = 0; i < 5; i++) { assertThat( - prepareSearch().setRouting("0") - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L) ); assertThat( - prepareSearch().setSize(0) - .setRouting("0") - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch().setSize(0).setRouting("0").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L) ); + assertThat(prepareSearch("alias0").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L)); assertThat( - prepareSearch("alias0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, - equalTo(1L) - ); - assertThat( - prepareSearch("alias0").setSize(0) - .setQuery(QueryBuilders.matchAllQuery()) -
.execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch("alias0").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L) ); } logger.info("--> indexing with id [2], and routing [1] using alias"); - client().prepareIndex("alias1").setId("2").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + prepareIndex("alias1").setId("2").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> search with no routing, should find two"); for (int i = 0; i < 5; i++) { + assertThat(prepareSearch().setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L)); assertThat( - prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, - equalTo(2L) - ); - assertThat( - prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, + prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L) ); } @@ -220,35 +172,16 @@ public void testAliasSearchRouting() throws Exception { logger.info("--> search with 0 routing, should find one"); for (int i = 0; i < 5; i++) { assertThat( - prepareSearch().setRouting("0") - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, - equalTo(1L) - ); - assertThat( - prepareSearch().setSize(0) - .setRouting("0") - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L) ); assertThat( - prepareSearch("alias0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, + prepareSearch().setSize(0).setRouting("0").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L) ); + assertThat(prepareSearch("alias0").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L)); assertThat( - prepareSearch("alias0").setSize(0) - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch("alias0").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L) ); } @@ -256,35 +189,16 @@ public void testAliasSearchRouting() throws Exception { logger.info("--> search with 1 routing, should find one"); for (int i = 0; i < 5; i++) { assertThat( - prepareSearch().setRouting("1") - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L) ); assertThat( - prepareSearch().setSize(0) - .setRouting("1") - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch().setSize(0).setRouting("1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L) ); + assertThat(prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L)); assertThat( - prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, - equalTo(1L) - ); - assertThat( - prepareSearch("alias1").setSize(0) -
.setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch("alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L) ); } @@ -292,35 +206,21 @@ public void testAliasSearchRouting() throws Exception { logger.info("--> search with 0,1 indexRoutings , should find two"); for (int i = 0; i < 5; i++) { assertThat( - prepareSearch().setRouting("0", "1") - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L) ); assertThat( prepareSearch().setSize(0) .setRouting("0", "1") .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() + .get() .getHits() .getTotalHits().value, equalTo(2L) ); + assertThat(prepareSearch("alias01").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L)); assertThat( - prepareSearch("alias01").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, - equalTo(2L) - ); - assertThat( - prepareSearch("alias01").setSize(0) - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch("alias01").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L) ); } @@ -328,20 +228,11 @@ public void testAliasSearchRouting() throws Exception { logger.info("--> search with two routing aliases , should find two"); for (int i = 0; i < 5; i++) { assertThat( - prepareSearch("alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch("alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L) ); assertThat( - prepareSearch("alias0", "alias1").setSize(0) - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch("alias0", "alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L) ); } @@ -349,18 +240,13 @@ public void testAliasSearchRouting() throws Exception { logger.info("--> search with alias0, alias1 and alias01, should find two"); for (int i = 0; i < 5; i++) { assertThat( - prepareSearch("alias0", "alias1", "alias01").setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch("alias0", "alias1", "alias01").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L) ); assertThat( prepareSearch("alias0", "alias1", "alias01").setSize(0) .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() + .get() .getHits() .getTotalHits().value, equalTo(2L) @@ -370,18 +256,13 @@ public void testAliasSearchRouting() throws Exception { logger.info("--> search with test, alias0 and alias1, should find two"); for (int i = 0; i < 5; i++) { assertThat( - prepareSearch("test", "alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch("test", "alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L) ); assertThat( prepareSearch("test", "alias0", "alias1").setSize(0) .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() + .get() 
.getHits() .getTotalHits().value, equalTo(2L) @@ -412,42 +293,37 @@ public void testAliasSearchRoutingWithTwoIndices() throws Exception { ); ensureGreen(); // wait for events again to make sure we got the aliases on all nodes logger.info("--> indexing with id [1], and routing [0] using alias to test-a"); - client().prepareIndex("alias-a0").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + prepareIndex("alias-a0").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test-a", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test-a", "1").get().isExists(), equalTo(false)); } logger.info("--> verifying get with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("alias-a0", "1").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("alias-a0", "1").get().isExists(), equalTo(true)); } logger.info("--> indexing with id [0], and routing [1] using alias to test-b"); - client().prepareIndex("alias-b1").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + prepareIndex("alias-b1").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test-a", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test-a", "1").get().isExists(), equalTo(false)); } logger.info("--> verifying get with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("alias-b1", "1").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("alias-b1", "1").get().isExists(), equalTo(true)); } logger.info("--> search with alias-a1,alias-b0, should not find"); for (int i = 0; i < 5; i++) { assertThat( - prepareSearch("alias-a1", "alias-b0").setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch("alias-a1", "alias-b0").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(0L) ); assertThat( prepareSearch("alias-a1", "alias-b0").setSize(0) .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() + .get() .getHits() .getTotalHits().value, equalTo(0L) @@ -456,17 +332,9 @@ public void testAliasSearchRoutingWithTwoIndices() throws Exception { logger.info("--> search with alias-ab, should find two"); for (int i = 0; i < 5; i++) { + assertThat(prepareSearch("alias-ab").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L)); assertThat( - prepareSearch("alias-ab").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, - equalTo(2L) - ); - assertThat( - prepareSearch("alias-ab").setSize(0) - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch("alias-ab").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L) ); } @@ -474,18 +342,13 @@ public void testAliasSearchRoutingWithTwoIndices() throws Exception { logger.info("--> search with alias-a0,alias-b1 should find two"); for (int i = 0; i < 5; i++) { assertThat( - 
prepareSearch("alias-a0", "alias-b1").setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch("alias-a0", "alias-b1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L) ); assertThat( prepareSearch("alias-a0", "alias-b1").setSize(0) .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() + .get() .getHits() .getTotalHits().value, equalTo(2L) @@ -505,16 +368,13 @@ public void testAliasSearchRoutingWithConcreteAndAliasedIndices_issue2682() thro assertAcked(indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index("index").alias("index_1").routing("1"))); logger.info("--> indexing on index_1 which is an alias for index with routing [1]"); - client().prepareIndex("index_1").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + prepareIndex("index_1").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> indexing on index_2 which is a concrete index"); - client().prepareIndex("index_2").setId("2").setSource("field", "value2").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + prepareIndex("index_2").setId("2").setSource("field", "value2").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> search all on index_* should find two"); for (int i = 0; i < 5; i++) { - assertThat( - prepareSearch("index_*").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, - equalTo(2L) - ); + assertThat(prepareSearch("index_*").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L)); } } @@ -531,21 +391,20 @@ public void testAliasSearchRoutingWithConcreteAndAliasedIndices_issue3268() thro assertAcked(indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index("index").alias("index_1").routing("1"))); logger.info("--> indexing on index_1 which is an alias for index with routing [1]"); - client().prepareIndex("index_1").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + prepareIndex("index_1").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> indexing on index_2 which is a concrete index"); - client().prepareIndex("index_2").setId("2").setSource("field", "value2").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); - - SearchResponse searchResponse = prepareSearch("index_*").setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(1) - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet(); - - logger.info("--> search all on index_* should find two"); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - // Let's make sure that, even though 2 docs are available, only one is returned according to the size we set in the request - // Therefore the reduce phase has taken place, which proves that the QUERY_AND_FETCH search type wasn't erroneously forced. 
- assertThat(searchResponse.getHits().getHits().length, equalTo(1)); + prepareIndex("index_2").setId("2").setSource("field", "value2").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + + assertResponse( + prepareSearch("index_*").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(1).setQuery(QueryBuilders.matchAllQuery()), + response -> { + logger.info("--> search all on index_* should find two"); + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + // Let's make sure that, even though 2 docs are available, only one is returned according to the size we set in the request + // Therefore the reduce phase has taken place, which proves that the QUERY_AND_FETCH search type wasn't erroneously forced. + assertThat(response.getHits().getHits().length, equalTo(1)); + } + ); } public void testIndexingAliasesOverTime() throws Exception { @@ -555,23 +414,15 @@ public void testIndexingAliasesOverTime() throws Exception { assertAcked(indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index("test").alias("alias").routing("3"))); logger.info("--> indexing with id [0], and routing [3]"); - client().prepareIndex("alias").setId("0").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + prepareIndex("alias").setId("0").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); logger.info("--> verifying get and search with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "0").setRouting("3").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "0").setRouting("3").get().isExists(), equalTo(true)); + assertThat(prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L)); assertThat( - prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, - equalTo(1L) - ); - assertThat( - prepareSearch("alias").setSize(0) - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch("alias").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L) ); } @@ -581,17 +432,9 @@ public void testIndexingAliasesOverTime() throws Exception { logger.info("--> verifying search with wrong routing should not find"); for (int i = 0; i < 5; i++) { + assertThat(prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(0L)); assertThat( - prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, - equalTo(0L) - ); - assertThat( - prepareSearch("alias").setSize(0) - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch("alias").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(0L) ); } @@ -603,24 +446,16 @@ public void testIndexingAliasesOverTime() throws Exception { ); logger.info("--> indexing with id [1], and routing [4]"); - client().prepareIndex("alias").setId("1").setSource("field", "value2").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + prepareIndex("alias").setId("1").setSource("field", "value2").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); logger.info("--> verifying get and 
search with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "0").setRouting("3").execute().actionGet().isExists(), equalTo(true)); - assertThat(client().prepareGet("test", "1").setRouting("4").execute().actionGet().isExists(), equalTo(true)); - assertThat( - prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, - equalTo(2L) - ); + assertThat(client().prepareGet("test", "0").setRouting("3").get().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "1").setRouting("4").get().isExists(), equalTo(true)); + assertThat(prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L)); assertThat( - prepareSearch("alias").setSize(0) - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch("alias").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/PartitionedRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/PartitionedRoutingIT.java index 4b685ca2699be..e25da54d7b214 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/PartitionedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/PartitionedRoutingIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.routing; import org.apache.lucene.util.Constants; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.IndexScopedSettings; @@ -23,6 +22,7 @@ import java.util.Map; import java.util.Set; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.CoreMatchers.containsString; public class PartitionedRoutingIT extends ESIntegTestCase { @@ -40,8 +40,7 @@ public void testVariousPartitionSizes() throws Exception { .put("index.routing_partition_size", partitionSize) ) .setMapping("{\"_routing\":{\"required\":true}}") - .execute() - .actionGet(); + .get(); ensureGreen(); Map> routingToDocumentIds = generateRoutedDocumentIds(index); @@ -69,8 +68,7 @@ public void testShrinking() throws Exception { .put("index.routing_partition_size", partitionSize) ) .setMapping("{\"_routing\":{\"required\":true}}") - .execute() - .actionGet(); + .get(); ensureGreen(); Map> routingToDocumentIds = generateRoutedDocumentIds(index); @@ -145,35 +143,33 @@ private void verifyRoutedSearches(String index, Map> routing String routing = routingEntry.getKey(); int expectedDocuments = routingEntry.getValue().size(); - SearchResponse response = prepareSearch().setQuery(QueryBuilders.termQuery("_routing", routing)) - .setRouting(routing) - .setIndices(index) - .setSize(100) - .execute() - .actionGet(); - - logger.info( - "--> routed search on index [" - + index - + "] visited [" - + response.getTotalShards() - + "] shards for routing [" - + routing - + "] and got hits [" - + response.getHits().getTotalHits().value - + "]" + assertResponse( + prepareSearch().setQuery(QueryBuilders.termQuery("_routing", routing)).setRouting(routing).setIndices(index).setSize(100), + response -> { + logger.info( + "--> routed search on index [" + + index + + "] visited [" + + response.getTotalShards() + + "] shards for routing [" + + routing + + "] and got hits 
[" + + response.getHits().getTotalHits().value + + "]" + ); + + assertTrue( + response.getTotalShards() + " was not in " + expectedShards + " for " + index, + expectedShards.contains(response.getTotalShards()) + ); + assertEquals(expectedDocuments, response.getHits().getTotalHits().value); + + Set found = new HashSet<>(); + response.getHits().forEach(h -> found.add(h.getId())); + + assertEquals(routingEntry.getValue(), found); + } ); - - assertTrue( - response.getTotalShards() + " was not in " + expectedShards + " for " + index, - expectedShards.contains(response.getTotalShards()) - ); - assertEquals(expectedDocuments, response.getHits().getTotalHits().value); - - Set found = new HashSet<>(); - response.getHits().forEach(h -> found.add(h.getId())); - - assertEquals(routingEntry.getValue(), found); } } @@ -182,19 +178,18 @@ private void verifyBroadSearches(String index, Map> routingT String routing = routingEntry.getKey(); int expectedDocuments = routingEntry.getValue().size(); - SearchResponse response = prepareSearch().setQuery(QueryBuilders.termQuery("_routing", routing)) - .setIndices(index) - .setSize(100) - .execute() - .actionGet(); - - assertEquals(expectedShards, response.getTotalShards()); - assertEquals(expectedDocuments, response.getHits().getTotalHits().value); + assertResponse( + prepareSearch().setQuery(QueryBuilders.termQuery("_routing", routing)).setIndices(index).setSize(100), + response -> { + assertEquals(expectedShards, response.getTotalShards()); + assertEquals(expectedDocuments, response.getHits().getTotalHits().value); - Set found = new HashSet<>(); - response.getHits().forEach(h -> found.add(h.getId())); + Set found = new HashSet<>(); + response.getHits().forEach(h -> found.add(h.getId())); - assertEquals(routingEntry.getValue(), found); + assertEquals(routingEntry.getValue(), found); + } + ); } } @@ -203,7 +198,7 @@ private void verifyGets(String index, Map> routingToDocument String routing = routingEntry.getKey(); for (String id : routingEntry.getValue()) { - assertTrue(client().prepareGet(index, id).setRouting(routing).execute().actionGet().isExists()); + assertTrue(client().prepareGet(index, id).setRouting(routing).get().isExists()); } } } @@ -221,7 +216,7 @@ private Map> generateRoutedDocumentIds(String index) { String id = routingValue + "_" + String.valueOf(k); routingToDocumentIds.get(routingValue).add(id); - client().prepareIndex(index).setId(id).setRouting(routingValue).setSource("foo", "bar").get(); + prepareIndex(index).setId(id).setRouting(routingValue).setSource("foo", "bar").get(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java index 93b1ac68be6a5..772d8767b7dd0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java @@ -68,49 +68,47 @@ public void testSimpleCrudRouting() throws Exception { ensureGreen(); String routingValue = findNonMatchingRoutingValue("test", "1"); logger.info("--> indexing with id [1], and routing [{}]", routingValue); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setRouting(routingValue) .setSource("field", "value1") .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", 
"1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").get().isExists(), equalTo(false)); } logger.info("--> verifying get with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "1").setRouting(routingValue).get().isExists(), equalTo(true)); } logger.info("--> deleting with no routing, should not delete anything"); client().prepareDelete("test", "1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); - assertThat(client().prepareGet("test", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "1").get().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").setRouting(routingValue).get().isExists(), equalTo(true)); } logger.info("--> deleting with routing, should delete"); client().prepareDelete("test", "1").setRouting(routingValue).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); - assertThat(client().prepareGet("test", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").get().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").setRouting(routingValue).get().isExists(), equalTo(false)); } logger.info("--> indexing with id [1], and routing [0]"); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setRouting(routingValue) .setSource("field", "value1") .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").get().isExists(), equalTo(false)); } logger.info("--> verifying get with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "1").setRouting(routingValue).get().isExists(), equalTo(true)); } } @@ -120,48 +118,33 @@ public void testSimpleSearchRouting() { String routingValue = findNonMatchingRoutingValue("test", "1"); logger.info("--> indexing with id [1], and routing [{}]", routingValue); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setRouting(routingValue) .setSource("field", "value1") .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").get().isExists(), equalTo(false)); } logger.info("--> verifying get with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "1").setRouting(routingValue).get().isExists(), equalTo(true)); } logger.info("--> search with no routing, should fine 
one"); for (int i = 0; i < 5; i++) { - assertThat( - prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, - equalTo(1L) - ); + assertThat(prepareSearch().setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L)); } logger.info("--> search with wrong routing, should not find"); for (int i = 0; i < 5; i++) { assertThat( - prepareSearch().setRouting("1") - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(0L) ); assertThat( - prepareSearch().setSize(0) - .setRouting("1") - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch().setSize(0).setRouting("1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(0L) ); } @@ -169,20 +152,14 @@ public void testSimpleSearchRouting() { logger.info("--> search with correct routing, should find"); for (int i = 0; i < 5; i++) { assertThat( - prepareSearch().setRouting(routingValue) - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch().setRouting(routingValue).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L) ); assertThat( prepareSearch().setSize(0) .setRouting(routingValue) .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() + .get() .getHits() .getTotalHits().value, equalTo(1L) @@ -191,8 +168,7 @@ public void testSimpleSearchRouting() { String secondRoutingValue = "1"; logger.info("--> indexing with id [{}], and routing [{}]", routingValue, secondRoutingValue); - client().prepareIndex("test") - .setId(routingValue) + prepareIndex("test").setId(routingValue) .setRouting(secondRoutingValue) .setSource("field", "value1") .setRefreshPolicy(RefreshPolicy.IMMEDIATE) @@ -200,12 +176,9 @@ public void testSimpleSearchRouting() { logger.info("--> search with no routing, should fine two"); for (int i = 0; i < 5; i++) { + assertThat(prepareSearch().setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L)); assertThat( - prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, - equalTo(2L) - ); - assertThat( - prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, + prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L) ); } @@ -213,20 +186,14 @@ public void testSimpleSearchRouting() { logger.info("--> search with {} routing, should find one", routingValue); for (int i = 0; i < 5; i++) { assertThat( - prepareSearch().setRouting(routingValue) - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch().setRouting(routingValue).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L) ); assertThat( prepareSearch().setSize(0) .setRouting(routingValue) .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() + .get() .getHits() .getTotalHits().value, equalTo(1L) @@ -236,20 +203,14 @@ public void testSimpleSearchRouting() { logger.info("--> search with {} routing, should find one", secondRoutingValue); for (int i = 0; i < 5; i++) { assertThat( - 
prepareSearch().setRouting("1") - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L) ); assertThat( prepareSearch().setSize(0) .setRouting(secondRoutingValue) .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() + .get() .getHits() .getTotalHits().value, equalTo(1L) @@ -261,8 +222,7 @@ public void testSimpleSearchRouting() { assertThat( prepareSearch().setRouting(routingValue, secondRoutingValue) .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() + .get() .getHits() .getTotalHits().value, equalTo(2L) @@ -271,8 +231,7 @@ public void testSimpleSearchRouting() { prepareSearch().setSize(0) .setRouting(routingValue, secondRoutingValue) .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() + .get() .getHits() .getTotalHits().value, equalTo(2L) @@ -284,8 +243,7 @@ public void testSimpleSearchRouting() { assertThat( prepareSearch().setRouting(routingValue, secondRoutingValue, routingValue) .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() + .get() .getHits() .getTotalHits().value, equalTo(2L) @@ -294,8 +252,7 @@ public void testSimpleSearchRouting() { prepareSearch().setSize(0) .setRouting(routingValue, secondRoutingValue, routingValue) .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() + .get() .getHits() .getTotalHits().value, equalTo(2L) @@ -316,14 +273,12 @@ public void testRequiredRoutingCrudApis() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); ensureGreen(); String routingValue = findNonMatchingRoutingValue("test", "1"); logger.info("--> indexing with id [1], and routing [{}]", routingValue); - client().prepareIndex(indexOrAlias()) - .setId("1") + prepareIndex(indexOrAlias()).setId("1") .setRouting(routingValue) .setSource("field", "value1") .setRefreshPolicy(RefreshPolicy.IMMEDIATE) @@ -332,7 +287,7 @@ public void testRequiredRoutingCrudApis() throws Exception { logger.info("--> indexing with id [1], with no routing, should fail"); try { - client().prepareIndex(indexOrAlias()).setId("1").setSource("field", "value1").get(); + prepareIndex(indexOrAlias()).setId("1").setSource("field", "value1").get(); fail("index with missing routing when routing is required should fail"); } catch (ElasticsearchException e) { assertThat(e.unwrapCause(), instanceOf(RoutingMissingException.class)); @@ -340,7 +295,7 @@ public void testRequiredRoutingCrudApis() throws Exception { logger.info("--> verifying get with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).get().isExists(), equalTo(true)); } logger.info("--> deleting with no routing, should fail"); @@ -353,34 +308,34 @@ public void testRequiredRoutingCrudApis() throws Exception { for (int i = 0; i < 5; i++) { try { - client().prepareGet(indexOrAlias(), "1").execute().actionGet().isExists(); + client().prepareGet(indexOrAlias(), "1").get().isExists(); fail("get with missing routing when routing is required should fail"); } catch (RoutingMissingException e) { assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); assertThat(e.getMessage(), equalTo("routing is required for [test]/[1]")); } - assertThat(client().prepareGet(indexOrAlias(), 
"1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).get().isExists(), equalTo(true)); } try { - client().prepareUpdate(indexOrAlias(), "1").setDoc(Requests.INDEX_CONTENT_TYPE, "field", "value2").execute().actionGet(); + client().prepareUpdate(indexOrAlias(), "1").setDoc(Requests.INDEX_CONTENT_TYPE, "field", "value2").get(); fail("update with missing routing when routing is required should fail"); } catch (ElasticsearchException e) { assertThat(e.unwrapCause(), instanceOf(RoutingMissingException.class)); } client().prepareUpdate(indexOrAlias(), "1").setRouting(routingValue).setDoc(Requests.INDEX_CONTENT_TYPE, "field", "value2").get(); - indicesAdmin().prepareRefresh().execute().actionGet(); + indicesAdmin().prepareRefresh().get(); for (int i = 0; i < 5; i++) { try { - client().prepareGet(indexOrAlias(), "1").execute().actionGet().isExists(); + client().prepareGet(indexOrAlias(), "1").get().isExists(); fail(); } catch (RoutingMissingException e) { assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); assertThat(e.getMessage(), equalTo("routing is required for [test]/[1]")); } - GetResponse getResponse = client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).execute().actionGet(); + GetResponse getResponse = client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).get(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getSourceAsMap().get("field"), equalTo("value2")); } @@ -389,13 +344,13 @@ public void testRequiredRoutingCrudApis() throws Exception { for (int i = 0; i < 5; i++) { try { - client().prepareGet(indexOrAlias(), "1").execute().actionGet().isExists(); + client().prepareGet(indexOrAlias(), "1").get().isExists(); fail(); } catch (RoutingMissingException e) { assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); assertThat(e.getMessage(), equalTo("routing is required for [test]/[1]")); } - assertThat(client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).get().isExists(), equalTo(false)); } } @@ -412,15 +367,13 @@ public void testRequiredRoutingBulk() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); ensureGreen(); { String index = indexOrAlias(); BulkResponse bulkResponse = client().prepareBulk() .add(new IndexRequest(index).id("1").source(Requests.INDEX_CONTENT_TYPE, "field", "value")) - .execute() - .actionGet(); + .get(); assertThat(bulkResponse.getItems().length, equalTo(1)); assertThat(bulkResponse.hasFailures(), equalTo(true)); @@ -437,16 +390,14 @@ public void testRequiredRoutingBulk() throws Exception { String index = indexOrAlias(); BulkResponse bulkResponse = client().prepareBulk() .add(new IndexRequest(index).id("1").routing("0").source(Requests.INDEX_CONTENT_TYPE, "field", "value")) - .execute() - .actionGet(); + .get(); assertThat(bulkResponse.hasFailures(), equalTo(false)); } { BulkResponse bulkResponse = client().prepareBulk() .add(new UpdateRequest(indexOrAlias(), "1").doc(Requests.INDEX_CONTENT_TYPE, "field", "value2")) - .execute() - .actionGet(); + .get(); assertThat(bulkResponse.getItems().length, equalTo(1)); assertThat(bulkResponse.hasFailures(), equalTo(true)); @@ -462,14 +413,13 @@ public void testRequiredRoutingBulk() throws Exception { { BulkResponse bulkResponse = client().prepareBulk() .add(new UpdateRequest(indexOrAlias(), 
"1").doc(Requests.INDEX_CONTENT_TYPE, "field", "value2").routing("0")) - .execute() - .actionGet(); + .get(); assertThat(bulkResponse.hasFailures(), equalTo(false)); } { String index = indexOrAlias(); - BulkResponse bulkResponse = client().prepareBulk().add(new DeleteRequest(index).id("1")).execute().actionGet(); + BulkResponse bulkResponse = client().prepareBulk().add(new DeleteRequest(index).id("1")).get(); assertThat(bulkResponse.getItems().length, equalTo(1)); assertThat(bulkResponse.hasFailures(), equalTo(true)); @@ -484,7 +434,7 @@ public void testRequiredRoutingBulk() throws Exception { { String index = indexOrAlias(); - BulkResponse bulkResponse = client().prepareBulk().add(new DeleteRequest(index).id("1").routing("0")).execute().actionGet(); + BulkResponse bulkResponse = client().prepareBulk().add(new DeleteRequest(index).id("1").routing("0")).get(); assertThat(bulkResponse.getItems().length, equalTo(1)); assertThat(bulkResponse.hasFailures(), equalTo(false)); } @@ -504,22 +454,20 @@ public void testRequiredRoutingMappingVariousAPIs() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); ensureGreen(); String routingValue = findNonMatchingRoutingValue("test", "1"); logger.info("--> indexing with id [1], and routing [{}]", routingValue); - client().prepareIndex(indexOrAlias()).setId("1").setRouting(routingValue).setSource("field", "value1").get(); + prepareIndex(indexOrAlias()).setId("1").setRouting(routingValue).setSource("field", "value1").get(); logger.info("--> indexing with id [2], and routing [{}]", routingValue); - client().prepareIndex(indexOrAlias()) - .setId("2") + prepareIndex(indexOrAlias()).setId("2") .setRouting(routingValue) .setSource("field", "value2") .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .get(); logger.info("--> verifying get with id [1] with routing [0], should succeed"); - assertThat(client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).get().isExists(), equalTo(true)); logger.info("--> verifying get with id [1], with no routing, should fail"); try { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java index ec01e34976058..ad610954e86b6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java @@ -8,19 +8,20 @@ package org.elasticsearch.search; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionFuture; -import org.elasticsearch.action.search.MultiSearchAction; import org.elasticsearch.action.search.MultiSearchResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchScrollAction; import org.elasticsearch.action.search.SearchShardTask; import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.action.search.SearchTransportService; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.search.TransportMultiSearchAction; +import 
org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchScrollAction; import org.elasticsearch.common.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.script.Script; @@ -50,6 +51,7 @@ import static org.hamcrest.Matchers.notNullValue; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE) +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102257") public class SearchCancellationIT extends AbstractSearchCancellationTestCase { @Override @@ -69,7 +71,7 @@ public void testCancellationDuringQueryPhase() throws Exception { ).execute(); awaitForBlock(plugins); - cancelSearch(SearchAction.NAME); + cancelSearch(TransportSearchAction.TYPE.name()); disableBlocks(plugins); logger.info("Segments {}", Strings.toString(indicesAdmin().prepareSegments("test").get())); ensureSearchWasCancelled(searchResponse); @@ -87,7 +89,7 @@ public void testCancellationDuringFetchPhase() throws Exception { ).execute(); awaitForBlock(plugins); - cancelSearch(SearchAction.NAME); + cancelSearch(TransportSearchAction.TYPE.name()); disableBlocks(plugins); logger.info("Segments {}", Strings.toString(indicesAdmin().prepareSegments("test").get())); ensureSearchWasCancelled(searchResponse); @@ -132,7 +134,7 @@ public void testCancellationDuringAggregation() throws Exception { ) .execute(); awaitForBlock(plugins); - cancelSearch(SearchAction.NAME); + cancelSearch(TransportSearchAction.TYPE.name()); disableBlocks(plugins); ensureSearchWasCancelled(searchResponse); } @@ -149,7 +151,7 @@ public void testCancellationOfScrollSearches() throws Exception { .execute(); awaitForBlock(plugins); - cancelSearch(SearchAction.NAME); + cancelSearch(TransportSearchAction.TYPE.name()); disableBlocks(plugins); SearchResponse response = ensureSearchWasCancelled(searchResponse); if (response != null) { @@ -189,7 +191,7 @@ public void testCancellationOfScrollSearchesOnFollowupRequests() throws Exceptio .execute(); awaitForBlock(plugins); - cancelSearch(SearchScrollAction.NAME); + cancelSearch(TransportSearchScrollAction.TYPE.name()); disableBlocks(plugins); SearchResponse response = ensureSearchWasCancelled(scrollResponse); @@ -213,7 +215,7 @@ public void testCancelMultiSearch() throws Exception { ) .execute(); awaitForBlock(plugins); - cancelSearch(MultiSearchAction.NAME); + cancelSearch(TransportMultiSearchAction.TYPE.name()); disableBlocks(plugins); for (MultiSearchResponse.Item item : msearchResponse.actionGet()) { if (item.getFailure() != null) { @@ -300,7 +302,7 @@ List getCoordinatorSearchTasks() { for (String nodeName : internalCluster().getNodeNames()) { TransportService transportService = internalCluster().getInstance(TransportService.class, nodeName); for (Task task : transportService.getTaskManager().getCancellableTasks().values()) { - if (task.getAction().equals(SearchAction.NAME)) { + if (task.getAction().equals(TransportSearchAction.TYPE.name())) { tasks.add((SearchTask) task); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchTimeoutIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchTimeoutIT.java index 1bcf2d8fb327f..ecf839bff5e4c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchTimeoutIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchTimeoutIT.java @@ -47,7 +47,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { private void indexDocs() { for (int i = 0; i < 32; 
i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get(); } refresh("test"); } @@ -90,7 +90,7 @@ public void testAggsTimeout() { } public void testPartialResultsIntolerantTimeout() throws Exception { - client().prepareIndex("test").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); ElasticsearchException ex = expectThrows( ElasticsearchException.class, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchWithRejectionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchWithRejectionsIT.java index 3202037c8486f..51c1269b87675 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchWithRejectionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchWithRejectionsIT.java @@ -36,7 +36,7 @@ public void testOpenContextsAfterRejections() throws Exception { ensureGreen("test"); final int docs = scaledRandomIntBetween(20, 50); for (int i = 0; i < docs; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get(); } IndicesStatsResponse indicesStats = indicesAdmin().prepareStats().get(); assertThat(indicesStats.getTotal().getSearch().getOpenContexts(), equalTo(0L)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/StressSearchServiceReaperIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/StressSearchServiceReaperIT.java index eec815d6957aa..c7aa4b3179288 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/StressSearchServiceReaperIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/StressSearchServiceReaperIT.java @@ -37,7 +37,7 @@ public void testStressReaper() throws ExecutionException, InterruptedException { int num = randomIntBetween(100, 150); IndexRequestBuilder[] builders = new IndexRequestBuilder[num]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test").setId("" + i).setSource("f", English.intToEnglish(i)); + builders[i] = prepareIndex("test").setId("" + i).setSource("f", English.intToEnglish(i)); } createIndex("test"); indexRandom(true, builders); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java index cc74dcc3d0d28..df8f3825a5ea6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.test.ESIntegTestCase; @@ -19,7 +18,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; @ESIntegTestCase.SuiteScopeTestCase public class AggregationsIntegrationIT extends ESIntegTestCase { @@ -32,32 +31,39 @@ public void setupSuiteScopeCluster() throws Exception { numDocs = randomIntBetween(1, 20); List docs = new ArrayList<>(); for (int i = 0; i < numDocs; ++i) { - docs.add(client().prepareIndex("index").setSource("f", Integer.toString(i / 3))); + docs.add(prepareIndex("index").setSource("f", Integer.toString(i / 3))); } indexRandom(true, docs); } public void testScroll() { final int size = randomIntBetween(1, 4); - SearchResponse response = prepareSearch("index").setSize(size) - .setScroll(TimeValue.timeValueMinutes(1)) - .addAggregation(terms("f").field("f")) - .get(); - assertNoFailures(response); - Aggregations aggregations = response.getAggregations(); - assertNotNull(aggregations); - Terms terms = aggregations.get("f"); - assertEquals(Math.min(numDocs, 3L), terms.getBucketByKey("0").getDocCount()); - - int total = response.getHits().getHits().length; - while (response.getHits().getHits().length > 0) { - response = client().prepareSearchScroll(response.getScrollId()).setScroll(TimeValue.timeValueMinutes(1)).get(); - assertNoFailures(response); - assertNull(response.getAggregations()); - total += response.getHits().getHits().length; + final String[] scroll = new String[1]; + final int[] total = new int[1]; + assertNoFailuresAndResponse( + prepareSearch("index").setSize(size).setScroll(TimeValue.timeValueMinutes(1)).addAggregation(terms("f").field("f")), + response -> { + Aggregations aggregations = response.getAggregations(); + assertNotNull(aggregations); + Terms terms = aggregations.get("f"); + assertEquals(Math.min(numDocs, 3L), terms.getBucketByKey("0").getDocCount()); + scroll[0] = response.getScrollId(); + total[0] = response.getHits().getHits().length; + } + ); + int currentTotal = 0; + while (total[0] - currentTotal > 0) { + currentTotal = total[0]; + assertNoFailuresAndResponse( + client().prepareSearchScroll(scroll[0]).setScroll(TimeValue.timeValueMinutes(1)), + scrollResponse -> { + assertNull(scrollResponse.getAggregations()); + total[0] += scrollResponse.getHits().getHits().length; + scroll[0] = scrollResponse.getScrollId(); + } + ); } - clearScroll(response.getScrollId()); - assertEquals(numDocs, total); + clearScroll(scroll[0]); + assertEquals(numDocs, total[0]); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/CombiIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/CombiIT.java index a0144d30a4728..fc0a93ad3d290 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/CombiIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/CombiIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.missing.Missing; @@ -24,7 +23,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.missing; import static 
org.elasticsearch.search.aggregations.AggregationBuilders.terms; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; @@ -49,38 +48,40 @@ public void testMultipleAggsOnSameField_WithDifferentRequiredValueSourceType() t String name = "name_" + randomIntBetween(1, 10); if (rarely()) { missingValues++; - builders[i] = client().prepareIndex("idx").setSource(jsonBuilder().startObject().field("name", name).endObject()); + builders[i] = prepareIndex("idx").setSource(jsonBuilder().startObject().field("name", name).endObject()); } else { int value = randomIntBetween(1, 10); values.put(value, values.getOrDefault(value, 0) + 1); - builders[i] = client().prepareIndex("idx") - .setSource(jsonBuilder().startObject().field("name", name).field("value", value).endObject()); + builders[i] = prepareIndex("idx").setSource( + jsonBuilder().startObject().field("name", name).field("value", value).endObject() + ); } } indexRandom(true, builders); ensureSearchable(); + final long finalMissingValues = missingValues; SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values()); - SearchResponse response = prepareSearch("idx").addAggregation(missing("missing_values").field("value")) - .addAggregation(terms("values").field("value").collectMode(aggCollectionMode)) - .get(); - - assertNoFailures(response); - - Aggregations aggs = response.getAggregations(); - - Missing missing = aggs.get("missing_values"); - assertNotNull(missing); - assertThat(missing.getDocCount(), equalTo(missingValues)); - - Terms terms = aggs.get("values"); - assertNotNull(terms); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(values.size())); - for (Terms.Bucket bucket : buckets) { - values.remove(((Number) bucket.getKey()).intValue()); - } - assertTrue(values.isEmpty()); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(missing("missing_values").field("value")) + .addAggregation(terms("values").field("value").collectMode(aggCollectionMode)), + response -> { + Aggregations aggs = response.getAggregations(); + + Missing missing = aggs.get("missing_values"); + assertNotNull(missing); + assertThat(missing.getDocCount(), equalTo(finalMissingValues)); + + Terms terms = aggs.get("values"); + assertNotNull(terms); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(values.size())); + for (Terms.Bucket bucket : buckets) { + values.remove(((Number) bucket.getKey()).intValue()); + } + assertTrue(values.isEmpty()); + } + ); } /** @@ -108,13 +109,16 @@ public void testSubAggregationForTopAggregationOnUnmappedField() throws Exceptio ensureSearchable("idx"); SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values()); - SearchResponse searchResponse = prepareSearch("idx").addAggregation( - histogram("values").field("value1").interval(1).subAggregation(terms("names").field("name").collectMode(aggCollectionMode)) - ).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, Matchers.equalTo(0L)); - Histogram values = searchResponse.getAggregations().get("values"); - assertThat(values, notNullValue()); - assertThat(values.getBuckets().isEmpty(), is(true)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + 
histogram("values").field("value1").interval(1).subAggregation(terms("names").field("name").collectMode(aggCollectionMode)) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, Matchers.equalTo(0L)); + Histogram values = response.getAggregations().get("values"); + assertThat(values, notNullValue()); + assertThat(values.getBuckets().isEmpty(), is(true)); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/EquivalenceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/EquivalenceIT.java index ea896c73f8882..f2aa79d115c4c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/EquivalenceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/EquivalenceIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; @@ -57,6 +56,8 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.core.IsNull.notNullValue; @@ -125,7 +126,7 @@ public void testRandomRanges() throws Exception { source = source.value(docs[i][j]); } source = source.endArray().endObject(); - client().prepareIndex("idx").setSource(source).get(); + prepareIndex("idx").setSource(source).get(); } assertNoFailures(indicesAdmin().prepareRefresh("idx").setIndicesOptions(IndicesOptions.lenientExpandOpen()).get()); @@ -164,34 +165,35 @@ public void testRandomRanges() throws Exception { reqBuilder = reqBuilder.addAggregation(filter("filter" + i, filter)); } - SearchResponse resp = reqBuilder.get(); - Range range = resp.getAggregations().get("range"); - List buckets = range.getBuckets(); + assertResponse(reqBuilder, response -> { + Range range = response.getAggregations().get("range"); + List buckets = range.getBuckets(); - Map bucketMap = Maps.newMapWithExpectedSize(buckets.size()); - for (Bucket bucket : buckets) { - bucketMap.put(bucket.getKeyAsString(), bucket); - } + Map bucketMap = Maps.newMapWithExpectedSize(buckets.size()); + for (Bucket bucket : buckets) { + bucketMap.put(bucket.getKeyAsString(), bucket); + } - for (int i = 0; i < ranges.length; ++i) { + for (int i = 0; i < ranges.length; ++i) { - long count = 0; - for (double[] values : docs) { - for (double value : values) { - if (value >= ranges[i][0] && value < ranges[i][1]) { - ++count; - break; + long count = 0; + for (double[] values : docs) { + for (double value : values) { + if (value >= ranges[i][0] && value < ranges[i][1]) { + ++count; + break; + } } } - } - final Range.Bucket bucket = bucketMap.get(Integer.toString(i)); - assertEquals(bucket.getKeyAsString(), Integer.toString(i), bucket.getKeyAsString()); - assertEquals(bucket.getKeyAsString(), count, bucket.getDocCount()); + final Range.Bucket 
bucket = bucketMap.get(Integer.toString(i)); + assertEquals(bucket.getKeyAsString(), Integer.toString(i), bucket.getKeyAsString()); + assertEquals(bucket.getKeyAsString(), count, bucket.getDocCount()); - final Filter filter = resp.getAggregations().get("filter" + i); - assertThat(filter.getDocCount(), equalTo(count)); - } + final Filter filter = response.getAggregations().get("filter" + i); + assertThat(filter.getDocCount(), equalTo(count)); + } + }); } // test long/double/string terms aggs with high number of buckets that require array growth @@ -248,74 +250,77 @@ public void testDuelTerms() throws Exception { source = source.value(Integer.toString(values[j])); } source = source.endArray().endObject(); - indexingRequests.add(client().prepareIndex("idx").setSource(source)); + indexingRequests.add(prepareIndex("idx").setSource(source)); } indexRandom(true, indexingRequests); assertNoFailures(indicesAdmin().prepareRefresh("idx").setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute().get()); - SearchResponse resp = prepareSearch("idx").addAggregation( - terms("long").field("long_values") - .size(maxNumTerms) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(min("min").field("num")) - ) - .addAggregation( - terms("double").field("double_values") - .size(maxNumTerms) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(max("max").field("num")) - ) - .addAggregation( - terms("string_map").field("string_values") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(TermsAggregatorFactory.ExecutionMode.MAP.toString()) - .size(maxNumTerms) - .subAggregation(stats("stats").field("num")) - ) - .addAggregation( - terms("string_global_ordinals").field("string_values") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS.toString()) + assertResponse( + prepareSearch("idx").addAggregation( + terms("long").field("long_values") .size(maxNumTerms) - .subAggregation(extendedStats("stats").field("num")) - ) - .addAggregation( - terms("string_global_ordinals_doc_values").field("string_values.doc_values") .collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS.toString()) - .size(maxNumTerms) - .subAggregation(extendedStats("stats").field("num")) + .subAggregation(min("min").field("num")) ) - .get(); - assertAllSuccessful(resp); - assertEquals(numDocs, resp.getHits().getTotalHits().value); - - final Terms longTerms = resp.getAggregations().get("long"); - final Terms doubleTerms = resp.getAggregations().get("double"); - final Terms stringMapTerms = resp.getAggregations().get("string_map"); - final Terms stringGlobalOrdinalsTerms = resp.getAggregations().get("string_global_ordinals"); - final Terms stringGlobalOrdinalsDVTerms = resp.getAggregations().get("string_global_ordinals_doc_values"); - - assertEquals(valuesSet.size(), longTerms.getBuckets().size()); - assertEquals(valuesSet.size(), doubleTerms.getBuckets().size()); - assertEquals(valuesSet.size(), stringMapTerms.getBuckets().size()); - assertEquals(valuesSet.size(), stringGlobalOrdinalsTerms.getBuckets().size()); - assertEquals(valuesSet.size(), stringGlobalOrdinalsDVTerms.getBuckets().size()); - for (Terms.Bucket bucket : longTerms.getBuckets()) { - final Terms.Bucket doubleBucket = doubleTerms.getBucketByKey(Double.toString(Long.parseLong(bucket.getKeyAsString()))); - final Terms.Bucket stringMapBucket = 
stringMapTerms.getBucketByKey(bucket.getKeyAsString()); - final Terms.Bucket stringGlobalOrdinalsBucket = stringGlobalOrdinalsTerms.getBucketByKey(bucket.getKeyAsString()); - final Terms.Bucket stringGlobalOrdinalsDVBucket = stringGlobalOrdinalsDVTerms.getBucketByKey(bucket.getKeyAsString()); - assertNotNull(doubleBucket); - assertNotNull(stringMapBucket); - assertNotNull(stringGlobalOrdinalsBucket); - assertNotNull(stringGlobalOrdinalsDVBucket); - assertEquals(bucket.getDocCount(), doubleBucket.getDocCount()); - assertEquals(bucket.getDocCount(), stringMapBucket.getDocCount()); - assertEquals(bucket.getDocCount(), stringGlobalOrdinalsBucket.getDocCount()); - assertEquals(bucket.getDocCount(), stringGlobalOrdinalsDVBucket.getDocCount()); - } + .addAggregation( + terms("double").field("double_values") + .size(maxNumTerms) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(max("max").field("num")) + ) + .addAggregation( + terms("string_map").field("string_values") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .executionHint(TermsAggregatorFactory.ExecutionMode.MAP.toString()) + .size(maxNumTerms) + .subAggregation(stats("stats").field("num")) + ) + .addAggregation( + terms("string_global_ordinals").field("string_values") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .executionHint(TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS.toString()) + .size(maxNumTerms) + .subAggregation(extendedStats("stats").field("num")) + ) + .addAggregation( + terms("string_global_ordinals_doc_values").field("string_values.doc_values") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .executionHint(TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS.toString()) + .size(maxNumTerms) + .subAggregation(extendedStats("stats").field("num")) + ), + response -> { + assertAllSuccessful(response); + assertEquals(numDocs, response.getHits().getTotalHits().value); + + final Terms longTerms = response.getAggregations().get("long"); + final Terms doubleTerms = response.getAggregations().get("double"); + final Terms stringMapTerms = response.getAggregations().get("string_map"); + final Terms stringGlobalOrdinalsTerms = response.getAggregations().get("string_global_ordinals"); + final Terms stringGlobalOrdinalsDVTerms = response.getAggregations().get("string_global_ordinals_doc_values"); + + assertEquals(valuesSet.size(), longTerms.getBuckets().size()); + assertEquals(valuesSet.size(), doubleTerms.getBuckets().size()); + assertEquals(valuesSet.size(), stringMapTerms.getBuckets().size()); + assertEquals(valuesSet.size(), stringGlobalOrdinalsTerms.getBuckets().size()); + assertEquals(valuesSet.size(), stringGlobalOrdinalsDVTerms.getBuckets().size()); + for (Terms.Bucket bucket : longTerms.getBuckets()) { + final Terms.Bucket doubleBucket = doubleTerms.getBucketByKey(Double.toString(Long.parseLong(bucket.getKeyAsString()))); + final Terms.Bucket stringMapBucket = stringMapTerms.getBucketByKey(bucket.getKeyAsString()); + final Terms.Bucket stringGlobalOrdinalsBucket = stringGlobalOrdinalsTerms.getBucketByKey(bucket.getKeyAsString()); + final Terms.Bucket stringGlobalOrdinalsDVBucket = stringGlobalOrdinalsDVTerms.getBucketByKey(bucket.getKeyAsString()); + assertNotNull(doubleBucket); + assertNotNull(stringMapBucket); + assertNotNull(stringGlobalOrdinalsBucket); + assertNotNull(stringGlobalOrdinalsDVBucket); + assertEquals(bucket.getDocCount(), doubleBucket.getDocCount()); + assertEquals(bucket.getDocCount(), stringMapBucket.getDocCount()); + 
assertEquals(bucket.getDocCount(), stringGlobalOrdinalsBucket.getDocCount()); + assertEquals(bucket.getDocCount(), stringGlobalOrdinalsDVBucket.getDocCount()); + } + } + ); } // Duel between histograms and scripted terms @@ -348,32 +353,33 @@ public void testDuelTermsHistogram() throws Exception { source = source.value(randomFrom(values)); } source = source.endArray().endObject(); - client().prepareIndex("idx").setSource(source).get(); + prepareIndex("idx").setSource(source).get(); } assertNoFailures(indicesAdmin().prepareRefresh("idx").setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute().get()); Map params = new HashMap<>(); params.put("interval", interval); - SearchResponse resp = prepareSearch("idx").addAggregation( - terms("terms").field("values") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "floor(_value / interval)", params)) - .size(maxNumTerms) - ).addAggregation(histogram("histo").field("values").interval(interval).minDocCount(1)).get(); - - assertNoFailures(resp); - - Terms terms = resp.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - Histogram histo = resp.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(terms.getBuckets().size(), equalTo(histo.getBuckets().size())); - for (Histogram.Bucket bucket : histo.getBuckets()) { - final double key = ((Number) bucket.getKey()).doubleValue() / interval; - final Terms.Bucket termsBucket = terms.getBucketByKey(String.valueOf(key)); - assertEquals(bucket.getDocCount(), termsBucket.getDocCount()); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").field("values") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "floor(_value / interval)", params)) + .size(maxNumTerms) + ).addAggregation(histogram("histo").field("values").interval(interval).minDocCount(1)), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(terms.getBuckets().size(), equalTo(histo.getBuckets().size())); + for (Histogram.Bucket bucket : histo.getBuckets()) { + final double key = ((Number) bucket.getKey()).doubleValue() / interval; + final Terms.Bucket termsBucket = terms.getBucketByKey(String.valueOf(key)); + assertEquals(bucket.getDocCount(), termsBucket.getDocCount()); + } + } + ); } public void testLargeNumbersOfPercentileBuckets() throws Exception { @@ -394,59 +400,64 @@ public void testLargeNumbersOfPercentileBuckets() throws Exception { logger.info("Indexing [{}] docs", numDocs); List indexingRequests = new ArrayList<>(); for (int i = 0; i < numDocs; ++i) { - indexingRequests.add(client().prepareIndex("idx").setId(Integer.toString(i)).setSource("double_value", randomDouble())); + indexingRequests.add(prepareIndex("idx").setId(Integer.toString(i)).setSource("double_value", randomDouble())); } indexRandom(true, indexingRequests); - SearchResponse response = prepareSearch("idx").addAggregation( - terms("terms").field("double_value") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(percentiles("pcts").field("double_value")) - ).get(); - assertAllSuccessful(response); - assertEquals(numDocs, response.getHits().getTotalHits().value); + assertResponse( + prepareSearch("idx").addAggregation( + terms("terms").field("double_value") + 
.collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(percentiles("pcts").field("double_value")) + ), + response -> { + assertAllSuccessful(response); + assertEquals(numDocs, response.getHits().getTotalHits().value); + } + ); } // https://github.com/elastic/elasticsearch/issues/6435 public void testReduce() throws Exception { createIndex("idx"); final int value = randomIntBetween(0, 10); - indexRandom(true, client().prepareIndex("idx").setSource("f", value)); - SearchResponse response = prepareSearch("idx").addAggregation( - filter("filter", QueryBuilders.matchAllQuery()).subAggregation( - range("range").field("f").addUnboundedTo(6).addUnboundedFrom(6).subAggregation(sum("sum").field("f")) - ) - ).get(); - - assertNoFailures(response); - - Filter filter = response.getAggregations().get("filter"); - assertNotNull(filter); - assertEquals(1, filter.getDocCount()); - - Range range = filter.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(buckets.size(), equalTo(2)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-6.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); - assertThat(bucket.getDocCount(), equalTo(value < 6 ? 1L : 0L)); - Sum sum = bucket.getAggregations().get("sum"); - assertEquals(value < 6 ? value : 0, sum.value(), 0d); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("6.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getDocCount(), equalTo(value >= 6 ? 1L : 0L)); - sum = bucket.getAggregations().get("sum"); - assertEquals(value >= 6 ? value : 0, sum.value(), 0d); + indexRandom(true, prepareIndex("idx").setSource("f", value)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + filter("filter", QueryBuilders.matchAllQuery()).subAggregation( + range("range").field("f").addUnboundedTo(6).addUnboundedFrom(6).subAggregation(sum("sum").field("f")) + ) + ), + response -> { + Filter filter = response.getAggregations().get("filter"); + assertNotNull(filter); + assertEquals(1, filter.getDocCount()); + + Range range = filter.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(buckets.size(), equalTo(2)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("*-6.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); + assertThat(bucket.getDocCount(), equalTo(value < 6 ? 1L : 0L)); + Sum sum = bucket.getAggregations().get("sum"); + assertEquals(value < 6 ? value : 0, sum.value(), 0d); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("6.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getDocCount(), equalTo(value >= 6 ? 
1L : 0L)); + sum = bucket.getAggregations().get("sum"); + assertEquals(value >= 6 ? value : 0, sum.value(), 0d); + } + ); } private void assertEquals(Terms t1, Terms t2) { @@ -469,46 +480,49 @@ public void testDuelDepthBreadthFirst() throws Exception { final int v1 = randomInt(1 << randomInt(7)); final int v2 = randomInt(1 << randomInt(7)); final int v3 = randomInt(1 << randomInt(7)); - reqs.add(client().prepareIndex("idx").setSource("f1", v1, "f2", v2, "f3", v3)); + reqs.add(prepareIndex("idx").setSource("f1", v1, "f2", v2, "f3", v3)); } indexRandom(true, reqs); - final SearchResponse r1 = prepareSearch("idx").addAggregation( - terms("f1").field("f1") - .collectMode(SubAggCollectionMode.DEPTH_FIRST) - .subAggregation( - terms("f2").field("f2") - .collectMode(SubAggCollectionMode.DEPTH_FIRST) - .subAggregation(terms("f3").field("f3").collectMode(SubAggCollectionMode.DEPTH_FIRST)) - ) - ).get(); - assertNoFailures(r1); - final SearchResponse r2 = prepareSearch("idx").addAggregation( - terms("f1").field("f1") - .collectMode(SubAggCollectionMode.BREADTH_FIRST) - .subAggregation( - terms("f2").field("f2") + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("f1").field("f1") + .collectMode(SubAggCollectionMode.DEPTH_FIRST) + .subAggregation( + terms("f2").field("f2") + .collectMode(SubAggCollectionMode.DEPTH_FIRST) + .subAggregation(terms("f3").field("f3").collectMode(SubAggCollectionMode.DEPTH_FIRST)) + ) + ), + response1 -> assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("f1").field("f1") .collectMode(SubAggCollectionMode.BREADTH_FIRST) - .subAggregation(terms("f3").field("f3").collectMode(SubAggCollectionMode.BREADTH_FIRST)) - ) - ).get(); - assertNoFailures(r2); - - final Terms t1 = r1.getAggregations().get("f1"); - final Terms t2 = r2.getAggregations().get("f1"); - assertEquals(t1, t2); - for (Terms.Bucket b1 : t1.getBuckets()) { - final Terms.Bucket b2 = t2.getBucketByKey(b1.getKeyAsString()); - final Terms sub1 = b1.getAggregations().get("f2"); - final Terms sub2 = b2.getAggregations().get("f2"); - assertEquals(sub1, sub2); - for (Terms.Bucket subB1 : sub1.getBuckets()) { - final Terms.Bucket subB2 = sub2.getBucketByKey(subB1.getKeyAsString()); - final Terms subSub1 = subB1.getAggregations().get("f3"); - final Terms subSub2 = subB2.getAggregations().get("f3"); - assertEquals(subSub1, subSub2); - } - } + .subAggregation( + terms("f2").field("f2") + .collectMode(SubAggCollectionMode.BREADTH_FIRST) + .subAggregation(terms("f3").field("f3").collectMode(SubAggCollectionMode.BREADTH_FIRST)) + ) + ), + response2 -> { + final Terms t1 = response1.getAggregations().get("f1"); + final Terms t2 = response2.getAggregations().get("f1"); + assertEquals(t1, t2); + for (Terms.Bucket b1 : t1.getBuckets()) { + final Terms.Bucket b2 = t2.getBucketByKey(b1.getKeyAsString()); + final Terms sub1 = b1.getAggregations().get("f2"); + final Terms sub2 = b2.getAggregations().get("f2"); + assertEquals(sub1, sub2); + for (Terms.Bucket subB1 : sub1.getBuckets()) { + final Terms.Bucket subB2 = sub2.getBucketByKey(subB1.getKeyAsString()); + final Terms subSub1 = subB1.getAggregations().get("f3"); + final Terms subSub2 = subB2.getAggregations().get("f3"); + assertEquals(subSub1, subSub2); + } + } + } + ) + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java index fe51f4a1e2fb4..3568391279a7a 100644 
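A knock-on effect of moving the assertions into a consumer, visible in the scroll and missing-values hunks above, is that the lambda may only reference effectively final locals. The rewritten tests therefore carry mutable state across the lambda boundary in final one-element arrays (scroll id, running hit total) or snapshot a counter into a final copy such as finalMissingValues before the call. A condensed, hypothetical illustration of that workaround, assuming an ESIntegTestCase context and an illustrative index "index":

    import org.elasticsearch.core.TimeValue;
    import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;

    // Locals referenced from the consumer must be effectively final, so state
    // that has to outlive the lambda rides in one-element arrays.
    final String[] scrollId = new String[1];
    final int[] seenHits = new int[1];
    assertNoFailuresAndResponse(
        prepareSearch("index").setSize(10).setScroll(TimeValue.timeValueMinutes(1)),
        response -> {
            scrollId[0] = response.getScrollId();              // feeds the next scroll request
            seenHits[0] = response.getHits().getHits().length; // running total kept outside the lambda
        }
    );
    // scrollId[0] and seenHits[0] now drive the follow-up client().prepareSearchScroll(...) calls.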
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.WrapperQueryBuilder; @@ -24,13 +23,15 @@ import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; + public class FiltersAggsRewriteIT extends ESSingleNodeTestCase { public void testWrapperQueryIsRewritten() throws IOException { createIndex("test", Settings.EMPTY, "test", "title", "type=text"); - client().prepareIndex("test").setId("1").setSource("title", "foo bar baz").get(); - client().prepareIndex("test").setId("2").setSource("title", "foo foo foo").get(); - client().prepareIndex("test").setId("3").setSource("title", "bar baz bax").get(); + prepareIndex("test").setId("1").setSource("title", "foo bar baz").get(); + prepareIndex("test").setId("2").setSource("title", "foo foo foo").get(); + prepareIndex("test").setId("3").setSource("title", "bar baz bax").get(); client().admin().indices().prepareRefresh("test").get(); XContentType xContentType = randomFrom(XContentType.values()); @@ -54,11 +55,12 @@ public void testWrapperQueryIsRewritten() throws IOException { Map metadata = new HashMap<>(); metadata.put(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); builder.setMetadata(metadata); - SearchResponse searchResponse = client().prepareSearch("test").setSize(0).addAggregation(builder).get(); - assertEquals(3, searchResponse.getHits().getTotalHits().value); - InternalFilters filters = searchResponse.getAggregations().get("titles"); - assertEquals(1, filters.getBuckets().size()); - assertEquals(2, filters.getBuckets().get(0).getDocCount()); - assertEquals(metadata, filters.getMetadata()); + assertResponse(client().prepareSearch("test").setSize(0).addAggregation(builder), response -> { + assertEquals(3, response.getHits().getTotalHits().value); + InternalFilters filters = response.getAggregations().get("titles"); + assertEquals(1, filters.getBuckets().size()); + assertEquals(2, filters.getBuckets().get(0).getDocCount()); + assertEquals(metadata, filters.getMetadata()); + }); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java index b255a7b5f9bb6..f22e0a2931634 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.pipeline.InternalBucketMetricValue; @@ -22,7 +21,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.maxBucket; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; public class MetadataIT extends ESIntegTestCase { @@ -31,7 +30,7 @@ public void testMetadataSetOnAggregationResult() throws Exception { IndexRequestBuilder[] builders = new IndexRequestBuilder[randomInt(30)]; for (int i = 0; i < builders.length; i++) { String name = "name_" + randomIntBetween(1, 10); - builders[i] = client().prepareIndex("idx").setSource("name", name, "value", randomInt()); + builders[i] = prepareIndex("idx").setSource("name", name, "value", randomInt()); } indexRandom(true, builders); ensureSearchable(); @@ -39,32 +38,33 @@ public void testMetadataSetOnAggregationResult() throws Exception { final var nestedMetadata = Map.of("nested", "value"); var metadata = Map.of("key", "value", "numeric", 1.2, "bool", true, "complex", nestedMetadata); - SearchResponse response = prepareSearch("idx").addAggregation( - terms("the_terms").setMetadata(metadata).field("name").subAggregation(sum("the_sum").setMetadata(metadata).field("value")) - ).addAggregation(maxBucket("the_max_bucket", "the_terms>the_sum").setMetadata(metadata)).get(); - - assertNoFailures(response); - - Aggregations aggs = response.getAggregations(); - assertNotNull(aggs); - - Terms terms = aggs.get("the_terms"); - assertNotNull(terms); - assertMetadata(terms.getMetadata()); - - List buckets = terms.getBuckets(); - for (Terms.Bucket bucket : buckets) { - Aggregations subAggs = bucket.getAggregations(); - assertNotNull(subAggs); - - Sum sum = subAggs.get("the_sum"); - assertNotNull(sum); - assertMetadata(sum.getMetadata()); - } - - InternalBucketMetricValue maxBucket = aggs.get("the_max_bucket"); - assertNotNull(maxBucket); - assertMetadata(maxBucket.getMetadata()); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("the_terms").setMetadata(metadata).field("name").subAggregation(sum("the_sum").setMetadata(metadata).field("value")) + ).addAggregation(maxBucket("the_max_bucket", "the_terms>the_sum").setMetadata(metadata)), + response -> { + Aggregations aggs = response.getAggregations(); + assertNotNull(aggs); + + Terms terms = aggs.get("the_terms"); + assertNotNull(terms); + assertMetadata(terms.getMetadata()); + + List buckets = terms.getBuckets(); + for (Terms.Bucket bucket : buckets) { + Aggregations subAggs = bucket.getAggregations(); + assertNotNull(subAggs); + + Sum sum = subAggs.get("the_sum"); + assertNotNull(sum); + assertMetadata(sum.getMetadata()); + } + + InternalBucketMetricValue maxBucket = aggs.get("the_max_bucket"); + assertNotNull(maxBucket); + assertMetadata(maxBucket.getMetadata()); + } + ); } private void assertMetadata(Map returnedMetadata) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MissingValueIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MissingValueIT.java index ba20e86237530..8110bc124132a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MissingValueIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MissingValueIT.java @@ -46,10 +46,8 @@ protected void setupSuiteScopeCluster() throws Exception { assertAcked(prepareCreate("idx").setMapping("date", "type=date", "location", "type=geo_point", "str", "type=keyword").get()); indexRandom( true, - 
client().prepareIndex("idx").setId("1").setSource(), - client().prepareIndex("idx") - .setId("2") - .setSource("str", "foo", "long", 3L, "double", 5.5, "date", "2015-05-07", "location", "1,2") + prepareIndex("idx").setId("1").setSource(), + prepareIndex("idx").setId("2").setSource("str", "foo", "long", 3L, "double", 5.5, "date", "2015-05-07", "location", "1,2") ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java index ad1d4086c690c..4a6859620563c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java @@ -60,13 +60,12 @@ public void setupSuiteScopeCluster() throws Exception { } default -> throw new AssertionError(); } - builders[i] = client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, singleValue) - .array(MULTI_VALUED_FIELD_NAME, multiValue) - .endObject() - ); + builders[i] = prepareIndex("idx").setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, singleValue) + .array(MULTI_VALUED_FIELD_NAME, multiValue) + .endObject() + ); } indexRandom(true, builders); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java index e3242a561c2ad..a9ff9f15a7e92 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java @@ -85,32 +85,30 @@ private static String format(ZonedDateTime date, String pattern) { } private IndexRequestBuilder indexDoc(String idx, ZonedDateTime date, int value) throws Exception { - return client().prepareIndex(idx) - .setSource( - jsonBuilder().startObject() - .timeField("date", date) - .field("value", value) - .startArray("dates") - .timeValue(date) - .timeValue(date.plusMonths(1).plusDays(1)) - .endArray() - .endObject() - ); + return prepareIndex(idx).setSource( + jsonBuilder().startObject() + .timeField("date", date) + .field("value", value) + .startArray("dates") + .timeValue(date) + .timeValue(date.plusMonths(1).plusDays(1)) + .endArray() + .endObject() + ); } private IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception { - return client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field("value", value) - .field("constant", 1) - .timeField("date", date(month, day)) - .startArray("dates") - .timeValue(date(month, day)) - .timeValue(date(month + 1, day + 1)) - .endArray() - .endObject() - ); + return prepareIndex("idx").setSource( + jsonBuilder().startObject() + .field("value", value) + .field("constant", 1) + .timeField("date", date(month, day)) + .startArray("dates") + .timeValue(date(month, day)) + .timeValue(date(month + 1, day + 1)) + .endArray() + .endObject() + ); } @Override @@ -121,9 +119,7 @@ public void setupSuiteScopeCluster() throws Exception { List builders = new ArrayList<>(); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) - .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) + 
prepareIndex("empty_bucket_idx").setId("" + i).setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) ); } @@ -164,45 +160,55 @@ private void getMultiSortDocs(List builders) throws IOExcep assertAcked(indicesAdmin().prepareCreate("sort_idx").setMapping("date", "type=date").get()); for (int i = 1; i <= 3; i++) { builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().timeField("date", date(1, 1)).field("l", 1).field("d", i).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().timeField("date", date(1, 1)).field("l", 1).field("d", i).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().timeField("date", date(1, 2)).field("l", 2).field("d", i).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().timeField("date", date(1, 2)).field("l", 2).field("d", i).endObject() + ) ); } builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().timeField("date", date(1, 3)).field("l", 3).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().timeField("date", date(1, 3)).field("l", 3).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().timeField("date", date(1, 3).plusHours(1)).field("l", 3).field("d", 2).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().timeField("date", date(1, 3).plusHours(1)).field("l", 3).field("d", 2).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().timeField("date", date(1, 4)).field("l", 3).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().timeField("date", date(1, 4)).field("l", 3).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().timeField("date", date(1, 4).plusHours(2)).field("l", 3).field("d", 3).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().timeField("date", date(1, 4).plusHours(2)).field("l", 3).field("d", 3).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().timeField("date", date(1, 5)).field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().timeField("date", date(1, 5)).field("l", 5).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().timeField("date", date(1, 5).plusHours(12)).field("l", 5).field("d", 2).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().timeField("date", date(1, 5).plusHours(12)).field("l", 5).field("d", 2).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().timeField("date", date(1, 6)).field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().timeField("date", date(1, 6)).field("l", 5).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().timeField("date", date(1, 7)).field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().timeField("date", date(1, 7)).field("l", 5).field("d", 1).endObject() + ) ); } @@ -990,9 +996,7 @@ public void testSingleValueWithTimeZone() throws 
Exception { IndexRequestBuilder[] reqs = new IndexRequestBuilder[5]; ZonedDateTime date = date("2014-03-11T00:00:00+00:00"); for (int i = 0; i < reqs.length; i++) { - reqs[i] = client().prepareIndex("idx2") - .setId("" + i) - .setSource(jsonBuilder().startObject().timeField("date", date).endObject()); + reqs[i] = prepareIndex("idx2").setId("" + i).setSource(jsonBuilder().startObject().timeField("date", date).endObject()); date = date.plusHours(1); } indexRandom(true, reqs); @@ -1267,9 +1271,7 @@ public void testSingleValueWithMultipleDateFormatsFromMapping() throws Exception prepareCreate("idx2").setMapping(mappingJson).get(); IndexRequestBuilder[] reqs = new IndexRequestBuilder[5]; for (int i = 0; i < reqs.length; i++) { - reqs[i] = client().prepareIndex("idx2") - .setId("" + i) - .setSource(jsonBuilder().startObject().field("date", "10-03-2014").endObject()); + reqs[i] = prepareIndex("idx2").setId("" + i).setSource(jsonBuilder().startObject().field("date", "10-03-2014").endObject()); } indexRandom(true, reqs); @@ -1340,8 +1342,8 @@ public void testDSTBoundaryIssue9491() throws InterruptedException, ExecutionExc assertAcked(indicesAdmin().prepareCreate("test9491").setMapping("d", "type=date").get()); indexRandom( true, - client().prepareIndex("test9491").setSource("d", "2014-10-08T13:00:00Z"), - client().prepareIndex("test9491").setSource("d", "2014-11-08T13:00:00Z") + prepareIndex("test9491").setSource("d", "2014-10-08T13:00:00Z"), + prepareIndex("test9491").setSource("d", "2014-11-08T13:00:00Z") ); ensureSearchable("test9491"); assertNoFailuresAndResponse( @@ -1364,9 +1366,9 @@ public void testIssue8209() throws InterruptedException, ExecutionException { assertAcked(indicesAdmin().prepareCreate("test8209").setMapping("d", "type=date").get()); indexRandom( true, - client().prepareIndex("test8209").setSource("d", "2014-01-01T00:00:00Z"), - client().prepareIndex("test8209").setSource("d", "2014-04-01T00:00:00Z"), - client().prepareIndex("test8209").setSource("d", "2014-04-30T00:00:00Z") + prepareIndex("test8209").setSource("d", "2014-01-01T00:00:00Z"), + prepareIndex("test8209").setSource("d", "2014-04-01T00:00:00Z"), + prepareIndex("test8209").setSource("d", "2014-04-30T00:00:00Z") ); ensureSearchable("test8209"); assertNoFailuresAndResponse( @@ -1404,7 +1406,7 @@ public void testIssue8209() throws InterruptedException, ExecutionException { */ public void testFormatIndexUnmapped() throws InterruptedException, ExecutionException { String indexDateUnmapped = "test31760"; - indexRandom(true, client().prepareIndex(indexDateUnmapped).setSource("foo", "bar")); + indexRandom(true, prepareIndex(indexDateUnmapped).setSource("foo", "bar")); ensureSearchable(indexDateUnmapped); assertNoFailuresAndResponse( @@ -1433,7 +1435,7 @@ public void testFormatIndexUnmapped() throws InterruptedException, ExecutionExce public void testRewriteTimeZone_EpochMillisFormat() throws InterruptedException, ExecutionException { String index = "test31392"; assertAcked(indicesAdmin().prepareCreate(index).setMapping("d", "type=date,format=epoch_millis").get()); - indexRandom(true, client().prepareIndex(index).setSource("d", "1477954800000")); + indexRandom(true, prepareIndex(index).setSource("d", "1477954800000")); ensureSearchable(index); assertNoFailuresAndResponse( prepareSearch(index).addAggregation( @@ -1554,8 +1556,8 @@ public void testScriptCaching() throws Exception { String date2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(2, 1)); indexRandom( true, - 
client().prepareIndex("cache_test_idx").setId("1").setSource("d", date), - client().prepareIndex("cache_test_idx").setId("2").setSource("d", date2) + prepareIndex("cache_test_idx").setId("1").setSource("d", date), + prepareIndex("cache_test_idx").setId("2").setSource("d", date2) ); // Make sure we are starting with a clear cache @@ -1707,8 +1709,8 @@ private ZonedDateTime key(Histogram.Bucket bucket) { */ public void testDateNanosHistogram() throws Exception { assertAcked(prepareCreate("nanos").setMapping("date", "type=date_nanos").get()); - indexRandom(true, client().prepareIndex("nanos").setId("1").setSource("date", "2000-01-01")); - indexRandom(true, client().prepareIndex("nanos").setId("2").setSource("date", "2000-01-02")); + indexRandom(true, prepareIndex("nanos").setId("1").setSource("date", "2000-01-01")); + indexRandom(true, prepareIndex("nanos").setId("2").setSource("date", "2000-01-02")); // Search interval 24 hours assertNoFailuresAndResponse( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java index c3a1209c7d3bf..5abf52cf37f88 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java @@ -61,8 +61,7 @@ private void prepareIndex(ZonedDateTime date, int numHours, int stepSizeHours, i IndexRequestBuilder[] reqs = new IndexRequestBuilder[numHours]; for (int i = idxIdStart; i < idxIdStart + reqs.length; i++) { - reqs[i - idxIdStart] = client().prepareIndex("idx2") - .setId("" + i) + reqs[i - idxIdStart] = prepareIndex("idx2").setId("" + i) .setSource(jsonBuilder().startObject().timeField("date", date).endObject()); date = date.plusHours(stepSizeHours); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java index e7acc10e98f58..0a726fcec5a88 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java @@ -55,17 +55,16 @@ public class DateRangeIT extends ESIntegTestCase { private static IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception { - return client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field("value", value) - .timeField("date", date(month, day)) - .startArray("dates") - .timeValue(date(month, day)) - .timeValue(date(month + 1, day + 1)) - .endArray() - .endObject() - ); + return prepareIndex("idx").setSource( + jsonBuilder().startObject() + .field("value", value) + .timeField("date", date(month, day)) + .startArray("dates") + .timeValue(date(month, day)) + .timeValue(date(month + 1, day + 1)) + .endArray() + .endObject() + ); } private static ZonedDateTime date(int month, int day) { @@ -104,9 +103,7 @@ public void setupSuiteScopeCluster() throws Exception { assertAcked(prepareCreate("empty_bucket_idx").setMapping("value", "type=integer")); for (int i = 0; i < 2; i++) { docs.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) - .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) + prepareIndex("empty_bucket_idx").setId("" + 
i).setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) ); } indexRandom(true, docs); @@ -623,12 +620,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx") - .setId("1") - .setSource(jsonBuilder().startObject().timeField("date", date(1, 1)).endObject()), - client().prepareIndex("cache_test_idx") - .setId("2") - .setSource(jsonBuilder().startObject().timeField("date", date(2, 1)).endObject()) + prepareIndex("cache_test_idx").setId("1").setSource(jsonBuilder().startObject().timeField("date", date(1, 1)).endObject()), + prepareIndex("cache_test_idx").setId("2").setSource(jsonBuilder().startObject().timeField("date", date(2, 1)).endObject()) ); // Make sure we are starting with a clear cache @@ -718,9 +711,9 @@ public void testRangeWithFormatStringValue() throws Exception { assertAcked(prepareCreate(indexName).setMapping("date", "type=date,format=strict_hour_minute_second")); indexRandom( true, - client().prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("date", "00:16:40").endObject()), - client().prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("date", "00:33:20").endObject()), - client().prepareIndex(indexName).setId("3").setSource(jsonBuilder().startObject().field("date", "00:50:00").endObject()) + prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("date", "00:16:40").endObject()), + prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("date", "00:33:20").endObject()), + prepareIndex(indexName).setId("3").setSource(jsonBuilder().startObject().field("date", "00:50:00").endObject()) ); // using no format should work when to/from is compatible with format in @@ -785,9 +778,9 @@ public void testRangeWithFormatNumericValue() throws Exception { assertAcked(prepareCreate(indexName).setMapping("date", "type=date,format=epoch_second")); indexRandom( true, - client().prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("date", 1002).endObject()), - client().prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("date", 2000).endObject()), - client().prepareIndex(indexName).setId("3").setSource(jsonBuilder().startObject().field("date", 3008).endObject()) + prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("date", 1002).endObject()), + prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("date", 2000).endObject()), + prepareIndex(indexName).setId("3").setSource(jsonBuilder().startObject().field("date", 3008).endObject()) ); // using no format should work when to/from is compatible with format in diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java index a4ee100fa7541..3a313cec29402 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java @@ -77,12 +77,10 @@ public void setupSuiteScopeCluster() throws Exception { for (int i = 0; i < data.length; i++) { String[] parts = data[i].split(","); - client().prepareIndex("test") - .setId("" + i) + prepareIndex("test").setId("" + i) .setSource("author", parts[5], "name", parts[2], "genre", parts[8], "price", 
Float.parseFloat(parts[3])) .get(); - client().prepareIndex("idx_unmapped_author") - .setId("" + i) + prepareIndex("idx_unmapped_author").setId("" + i) .setSource("name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])) .get(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java index 2477d61b9e608..1500c203ea4db 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java @@ -115,33 +115,31 @@ public void setupSuiteScopeCluster() throws Exception { List builders = new ArrayList<>(); for (int i = 0; i < NUM_DOCS; i++) { builders.add( - client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, (double) i) - .field("num_tag", i < NUM_DOCS / 2 + 1 ? 1.0 : 0.0) // used to test order by single-bucket sub agg - .field("constant", 1) - .startArray(MULTI_VALUED_FIELD_NAME) - .value((double) i) - .value(i + 1d) - .endArray() - .endObject() - ) + prepareIndex("idx").setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, (double) i) + .field("num_tag", i < NUM_DOCS / 2 + 1 ? 1.0 : 0.0) // used to test order by single-bucket sub agg + .field("constant", 1) + .startArray(MULTI_VALUED_FIELD_NAME) + .value((double) i) + .value(i + 1d) + .endArray() + .endObject() + ) ); } for (int i = 0; i < 100; i++) { builders.add( - client().prepareIndex("high_card_idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, (double) i) - .startArray(MULTI_VALUED_FIELD_NAME) - .value((double) i) - .value(i + 1d) - .endArray() - .endObject() - ) + prepareIndex("high_card_idx").setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, (double) i) + .startArray(MULTI_VALUED_FIELD_NAME) + .value((double) i) + .value(i + 1d) + .endArray() + .endObject() + ) ); } @@ -149,8 +147,7 @@ public void setupSuiteScopeCluster() throws Exception { assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer")); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) + prepareIndex("empty_bucket_idx").setId("" + i) .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) ); } @@ -209,45 +206,55 @@ private void getMultiSortDocs(List builders) throws IOExcep assertAcked(prepareCreate("sort_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=double")); for (int i = 1; i <= 3; i++) { builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 1).field("l", 1).field("d", i).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 1).field("l", 1).field("d", i).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 2).field("l", 2).field("d", i).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 2).field("l", 2).field("d", i).endObject() + ) ); } builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 1).endObject()) + 
prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 2).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 2).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 3).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 3).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 2).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 2).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 6).field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 6).field("l", 5).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 7).field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 7).field("l", 5).field("d", 1).endObject() + ) ); } @@ -944,8 +951,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1.5), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2.5) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1.5), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2.5) ); // Make sure we are starting with a clear cache diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java index b2098aee48b10..b5dea9cbbba49 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java @@ -48,8 +48,7 @@ public void setupSuiteScopeCluster() throws Exception { List builders = new ArrayList<>(); for (int i = 0; i < numTag1Docs; i++) { builders.add( - client().prepareIndex("idx") - .setId("" + i) + prepareIndex("idx").setId("" + i) .setSource(jsonBuilder().startObject().field("value", i + 
1).field("tag", "tag1").endObject()) ); } @@ -59,18 +58,16 @@ public void setupSuiteScopeCluster() throws Exception { .field("tag", "tag2") .field("name", "name" + i) .endObject(); - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); if (randomBoolean()) { // randomly index the document twice so that we have deleted docs that match the filter - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); } } prepareCreate("empty_bucket_idx").setMapping("value", "type=integer").get(); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) - .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) + prepareIndex("empty_bucket_idx").setId("" + i).setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) ); } indexRandom(true, builders); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java index 664644a3a2632..b04cb5325a82d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java @@ -55,10 +55,10 @@ public void setupSuiteScopeCluster() throws Exception { List builders = new ArrayList<>(); for (int i = 0; i < numTag1Docs; i++) { XContentBuilder source = jsonBuilder().startObject().field("value", i + 1).field("tag", "tag1").endObject(); - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); if (randomBoolean()) { // randomly index the document twice so that we have deleted docs that match the filter - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); } } for (int i = numTag1Docs; i < (numTag1Docs + numTag2Docs); i++) { @@ -67,9 +67,9 @@ public void setupSuiteScopeCluster() throws Exception { .field("tag", "tag2") .field("name", "name" + i) .endObject(); - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); if (randomBoolean()) { - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); } } for (int i = numTag1Docs + numTag2Docs; i < numDocs; i++) { @@ -79,17 +79,15 @@ public void setupSuiteScopeCluster() throws Exception { .field("tag", "tag3") .field("name", "name" + i) .endObject(); - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); if (randomBoolean()) { - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); } } prepareCreate("empty_bucket_idx").setMapping("value", "type=integer").get(); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) - .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) + prepareIndex("empty_bucket_idx").setId("" + i).setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) ); } 
indexRandom(true, builders); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java index bb895c2564d39..0ed83f73e418d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java @@ -65,7 +65,7 @@ private IndexRequestBuilder indexCity(String idx, String name, String... latLons } source.endArray(); source = source.endObject(); - return client().prepareIndex(idx).setSource(source); + return prepareIndex(idx).setSource(source); } @Override @@ -120,8 +120,7 @@ public void setupSuiteScopeCluster() throws Exception { List builders = new ArrayList<>(); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) + prepareIndex("empty_bucket_idx").setId("" + i) .setSource(jsonBuilder().startObject().field("value", i * 2).field("location", "52.0945, 5.116").endObject()) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java index dcb56eeb10385..57b11df3b7d31 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java @@ -62,7 +62,7 @@ private static IndexRequestBuilder indexCity(String index, String name, List builders) throws IOExcep assertAcked(indicesAdmin().prepareCreate("sort_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=double").get()); for (int i = 1; i <= 3; i++) { builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 1).field("l", 1).field("d", i).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 1).field("l", 1).field("d", i).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 2).field("l", 2).field("d", i).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 2).field("l", 2).field("d", i).endObject() + ) ); } builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3.8).field("l", 3).field("d", 2).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3.8).field("l", 3).field("d", 2).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - 
.setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4.4).field("l", 3).field("d", 3).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4.4).field("l", 3).field("d", 3).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5.1).field("l", 5).field("d", 2).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5.1).field("l", 5).field("d", 2).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 6).field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 6).field("l", 5).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 7).field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 7).field("l", 5).field("d", 1).endObject() + ) ); } @@ -1082,8 +1090,8 @@ public void testDecimalIntervalAndOffset() throws Exception { assertAcked(prepareCreate("decimal_values").setMapping("d", "type=float").get()); indexRandom( true, - client().prepareIndex("decimal_values").setId("1").setSource("d", -0.6), - client().prepareIndex("decimal_values").setId("2").setSource("d", 0.1) + prepareIndex("decimal_values").setId("1").setSource("d", -0.6), + prepareIndex("decimal_values").setId("2").setSource("d", 0.1) ); assertNoFailuresAndResponse( @@ -1111,8 +1119,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("d", -0.6), - client().prepareIndex("cache_test_idx").setId("2").setSource("d", 0.1) + prepareIndex("cache_test_idx").setId("1").setSource("d", -0.6), + prepareIndex("cache_test_idx").setId("2").setSource("d", 0.1) ); // Make sure we are starting with a clear cache @@ -1242,9 +1250,9 @@ public void testHardBounds() throws Exception { assertAcked(prepareCreate("test").setMapping("d", "type=double").get()); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("d", -0.6), - client().prepareIndex("test").setId("2").setSource("d", 0.5), - client().prepareIndex("test").setId("3").setSource("d", 0.1) + prepareIndex("test").setId("1").setSource("d", -0.6), + prepareIndex("test").setId("2").setSource("d", 0.5), + prepareIndex("test").setId("3").setSource("d", 0.1) ); assertNoFailuresAndResponse( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java index 449d0626ade3a..887afdb578fdb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java @@ -50,10 +50,9 @@ public void setupSuiteScopeCluster() throws Exception { indexRandom( 
true, - client().prepareIndex("idx").setId("1").setSource("ip", "192.168.1.7", "ips", Arrays.asList("192.168.0.13", "192.168.1.2")), - client().prepareIndex("idx").setId("2").setSource("ip", "192.168.1.10", "ips", Arrays.asList("192.168.1.25", "192.168.1.28")), - client().prepareIndex("idx") - .setId("3") + prepareIndex("idx").setId("1").setSource("ip", "192.168.1.7", "ips", Arrays.asList("192.168.0.13", "192.168.1.2")), + prepareIndex("idx").setId("2").setSource("ip", "192.168.1.10", "ips", Arrays.asList("192.168.1.25", "192.168.1.28")), + prepareIndex("idx").setId("3") .setSource("ip", "2001:db8::ff00:42:8329", "ips", Arrays.asList("2001:db8::ff00:42:8329", "2001:db8::ff00:42:8380")) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpTermsIT.java index 91702bd6d8159..7f9cb01599a00 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpTermsIT.java @@ -54,9 +54,9 @@ public void testScriptValue() throws Exception { assertAcked(prepareCreate("index").setMapping("ip", "type=ip")); indexRandom( true, - client().prepareIndex("index").setId("1").setSource("ip", "192.168.1.7"), - client().prepareIndex("index").setId("2").setSource("ip", "192.168.1.7"), - client().prepareIndex("index").setId("3").setSource("ip", "2001:db8::2:1") + prepareIndex("index").setId("1").setSource("ip", "192.168.1.7"), + prepareIndex("index").setId("2").setSource("ip", "192.168.1.7"), + prepareIndex("index").setId("3").setSource("ip", "2001:db8::2:1") ); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['ip'].value", Collections.emptyMap()); @@ -85,9 +85,9 @@ public void testScriptValues() throws Exception { assertAcked(prepareCreate("index").setMapping("ip", "type=ip")); indexRandom( true, - client().prepareIndex("index").setId("1").setSource("ip", "192.168.1.7"), - client().prepareIndex("index").setId("2").setSource("ip", "192.168.1.7"), - client().prepareIndex("index").setId("3").setSource("ip", "2001:db8::2:1") + prepareIndex("index").setId("1").setSource("ip", "192.168.1.7"), + prepareIndex("index").setId("2").setSource("ip", "192.168.1.7"), + prepareIndex("index").setId("3").setSource("ip", "2001:db8::2:1") ); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['ip']", Collections.emptyMap()); @@ -116,10 +116,10 @@ public void testMissingValue() throws Exception { assertAcked(prepareCreate("index").setMapping("ip", "type=ip")); indexRandom( true, - client().prepareIndex("index").setId("1").setSource("ip", "192.168.1.7"), - client().prepareIndex("index").setId("2").setSource("ip", "192.168.1.7"), - client().prepareIndex("index").setId("3").setSource("ip", "127.0.0.1"), - client().prepareIndex("index").setId("4").setSource("not_ip", "something") + prepareIndex("index").setId("1").setSource("ip", "192.168.1.7"), + prepareIndex("index").setId("2").setSource("ip", "192.168.1.7"), + prepareIndex("index").setId("3").setSource("ip", "127.0.0.1"), + prepareIndex("index").setId("4").setSource("not_ip", "something") ); assertNoFailuresAndResponse( prepareSearch("index").addAggregation( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java 
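The second mechanical change running through every file here is client().prepareIndex("idx") becoming prepareIndex("idx"). The helper's definition is not part of this diff; judging by the call sites and by the existing prepareSearch(...) shorthand, it can only be a thin wrapper over the client call in the shared test base classes, so the following shape is an assumption, not the actual source:

// Hypothetical definition, by analogy with prepareSearch(); the real one is not shown in this diff.
protected IndexRequestBuilder prepareIndex(String index) {
    return client().prepareIndex(index); // same builder, one fewer call at every site
}

// Call sites then shorten from
//   client().prepareIndex("idx").setId("1").setSource("f", 1)
// to
//   prepareIndex("idx").setId("1").setSource("f", 1)

Note that DateHistogramOffsetIT above keeps a private prepareIndex(ZonedDateTime, ...) of its own; that overloads the shorthand rather than clashing with it, since the parameter lists differ.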
index e734047172305..f0c5cbf9c76bb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java @@ -102,32 +102,30 @@ public void setupSuiteScopeCluster() throws Exception { createIndex("idx", "high_card_idx"); IndexRequestBuilder[] lowCardBuilders = new IndexRequestBuilder[NUM_DOCS]; for (int i = 0; i < lowCardBuilders.length; i++) { - lowCardBuilders[i] = client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, i) - .startArray(MULTI_VALUED_FIELD_NAME) - .value(i) - .value(i + 1) - .endArray() - .field("num_tag", i < lowCardBuilders.length / 2 + 1 ? 1 : 0) // used to test order by single-bucket sub agg - .field("constant", 1) - .endObject() - ); + lowCardBuilders[i] = prepareIndex("idx").setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, i) + .startArray(MULTI_VALUED_FIELD_NAME) + .value(i) + .value(i + 1) + .endArray() + .field("num_tag", i < lowCardBuilders.length / 2 + 1 ? 1 : 0) // used to test order by single-bucket sub agg + .field("constant", 1) + .endObject() + ); } indexRandom(true, lowCardBuilders); IndexRequestBuilder[] highCardBuilders = new IndexRequestBuilder[100]; // TODO randomize the size? for (int i = 0; i < highCardBuilders.length; i++) { - highCardBuilders[i] = client().prepareIndex("high_card_idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, i) - .startArray(MULTI_VALUED_FIELD_NAME) - .value(i) - .value(i + 1) - .endArray() - .endObject() - ); + highCardBuilders[i] = prepareIndex("high_card_idx").setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, i) + .startArray(MULTI_VALUED_FIELD_NAME) + .value(i) + .value(i + 1) + .endArray() + .endObject() + ); } indexRandom(true, highCardBuilders); @@ -137,8 +135,7 @@ public void setupSuiteScopeCluster() throws Exception { List builders = new ArrayList<>(); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) + prepareIndex("empty_bucket_idx").setId("" + i) .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) ); } @@ -197,45 +194,55 @@ private void getMultiSortDocs(List builders) throws IOExcep createIndex("sort_idx"); for (int i = 1; i <= 3; i++) { builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 1).field("l", 1).field("d", i).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 1).field("l", 1).field("d", i).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 2).field("l", 2).field("d", i).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 2).field("l", 2).field("d", i).endObject() + ) ); } builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 2).endObject()) + 
prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 2).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 3).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 3).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 2).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 2).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 6).field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 6).field("l", 5).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 7).field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 7).field("l", 5).field("d", 1).endObject() + ) ); } @@ -897,8 +904,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java index 6404b06365967..4e93bf578cc87 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java @@ -113,16 +113,15 @@ public void setupSuiteScopeCluster() throws Exception { final int frequency = randomBoolean() ? 
                 1
                 : randomIntBetween(2, 20);
             for (int j = 0; j < frequency; ++j) {
                 indexRequests.add(
-                    client().prepareIndex("idx")
-                        .setSource(
-                            jsonBuilder().startObject()
-                                .field("s", stringTerm)
-                                .field("l", longTerm)
-                                .field("d", doubleTerm)
-                                .field("date", dateTerm)
-                                .field("match", randomBoolean())
-                                .endObject()
-                        )
+                    prepareIndex("idx").setSource(
+                        jsonBuilder().startObject()
+                            .field("s", stringTerm)
+                            .field("l", longTerm)
+                            .field("d", doubleTerm)
+                            .field("date", dateTerm)
+                            .field("match", randomBoolean())
+                            .endObject()
+                    )
                 );
             }
         }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java
index 23908daf32607..e6d0b6a1f9f1c 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java
@@ -113,7 +113,7 @@ public void setupSuiteScopeCluster() throws Exception {
             if (randomBoolean()) {
                 source.field("numeric_value", randomDouble());
             }
-            client().prepareIndex("idx").setSource(source.endObject()).get();
+            prepareIndex("idx").setSource(source.endObject()).get();
         }
         refresh();
         ensureSearchable();
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java
index d832d7060ffda..9a27b0d8f75a3 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java
@@ -91,15 +91,14 @@ public void setupSuiteScopeCluster() throws Exception {
                 source = source.startObject().field("value", i + 1 + j).endObject();
             }
             source = source.endArray().endObject();
-            builders.add(client().prepareIndex("idx").setId("" + i + 1).setSource(source));
+            builders.add(prepareIndex("idx").setId("" + i + 1).setSource(source));
         }
 
         prepareCreate("empty_bucket_idx").setMapping("value", "type=integer", "nested", "type=nested").get();
         ensureGreen("empty_bucket_idx");
         for (int i = 0; i < 2; i++) {
             builders.add(
-                client().prepareIndex("empty_bucket_idx")
-                    .setId("" + i)
+                prepareIndex("empty_bucket_idx").setId("" + i)
                     .setSource(
                         jsonBuilder().startObject()
                             .field("value", i * 2)
@@ -146,8 +145,7 @@ public void setupSuiteScopeCluster() throws Exception {
         ensureGreen("idx_nested_nested_aggs");
 
         builders.add(
-            client().prepareIndex("idx_nested_nested_aggs")
-                .setId("1")
+            prepareIndex("idx_nested_nested_aggs").setId("1")
                 .setSource(
                     jsonBuilder().startObject()
                         .startArray("nested1")
@@ -431,7 +429,7 @@ public void testParentFilterResolvedCorrectly() throws Exception {
         ensureGreen("idx2");
 
         List indexRequests = new ArrayList<>(2);
-        indexRequests.add(client().prepareIndex("idx2").setId("1").setSource("""
+        indexRequests.add(prepareIndex("idx2").setId("1").setSource("""
             {
               "dates": {
                 "month": {
@@ -453,7 +451,7 @@ public void testParentFilterResolvedCorrectly() throws Exception {
                   }
                 ]
             }""", XContentType.JSON));
-        indexRequests.add(client().prepareIndex("idx2").setId("2").setSource("""
+        indexRequests.add(prepareIndex("idx2").setId("2").setSource("""
             {
               "dates": {
                 "month": {
@@ -545,8 +543,7 @@ public void testNestedSameDocIdProcessedMultipleTime() throws Exception {
         );
         ensureGreen("idx4");
 
-        client().prepareIndex("idx4")
-            .setId("1")
+        prepareIndex("idx4").setId("1")
             .setSource(
                 jsonBuilder().startObject()
                     .field("name", "product1")
@@ -565,8 +562,7 @@ public void testNestedSameDocIdProcessedMultipleTime() throws Exception {
                     .endObject()
             )
             .get();
-        client().prepareIndex("idx4")
-            .setId("2")
+        prepareIndex("idx4").setId("2")
            .setSource(
                 jsonBuilder().startObject()
                     .field("name", "product2")
@@ -682,8 +678,7 @@ public void testFilterAggInsideNestedAgg() throws Exception {
             )
         );
 
-        client().prepareIndex("classes")
-            .setId("1")
+        prepareIndex("classes").setId("1")
             .setSource(
                 jsonBuilder().startObject()
                     .field("name", "QueryBuilder")
@@ -722,8 +717,7 @@ public void testFilterAggInsideNestedAgg() throws Exception {
                     .endObject()
             )
             .get();
-        client().prepareIndex("classes")
-            .setId("2")
+        prepareIndex("classes").setId("2")
             .setSource(
                 jsonBuilder().startObject()
                     .field("name", "Document")
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java
index 32496434d32d3..28c186c559dff 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java
@@ -60,14 +60,13 @@ public void setupSuiteScopeCluster() throws Exception {
                 numericValue = randomDoubleBetween(5.0, 9.0, false);
             }
             builders.add(
-                client().prepareIndex("idx")
-                    .setSource(
-                        jsonBuilder().startObject()
-                            .field(KEYWORD_VALUE, keywordValue)
-                            .field(MONOTONIC_VALUE, monotonicValue)
-                            .field(NUMERIC_VALUE, numericValue)
-                            .endObject()
-                    )
+                prepareIndex("idx").setSource(
+                    jsonBuilder().startObject()
+                        .field(KEYWORD_VALUE, keywordValue)
+                        .field(MONOTONIC_VALUE, monotonicValue)
+                        .field(NUMERIC_VALUE, numericValue)
+                        .endObject()
+                )
             );
             final double oldAvgMonotonic = avgMonotonic;
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java
index 441187916cb7f..10e3649e9f161 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java
@@ -91,24 +91,22 @@ public void setupSuiteScopeCluster() throws Exception {
         List builders = new ArrayList<>();
         for (int i = 0; i < numDocs; i++) {
             builders.add(
-                client().prepareIndex("idx")
-                    .setSource(
-                        jsonBuilder().startObject()
-                            .field(SINGLE_VALUED_FIELD_NAME, i + 1)
-                            .startArray(MULTI_VALUED_FIELD_NAME)
-                            .value(i + 1)
-                            .value(i + 2)
-                            .endArray()
-                            .endObject()
-                    )
+                prepareIndex("idx").setSource(
+                    jsonBuilder().startObject()
+                        .field(SINGLE_VALUED_FIELD_NAME, i + 1)
+                        .startArray(MULTI_VALUED_FIELD_NAME)
+                        .value(i + 1)
+                        .value(i + 2)
+                        .endArray()
+                        .endObject()
+                )
             );
         }
         createIndex("idx_unmapped");
 
         prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer").get();
         for (int i = 0; i < 2; i++) {
             builders.add(
-                client().prepareIndex("empty_bucket_idx")
-                    .setId("" + i)
+                prepareIndex("empty_bucket_idx").setId("" + i)
                     .setSource(
                         jsonBuilder().startObject()
                             // shift sequence by 1, to ensure we have negative values, and value 3 on the edge of the tested ranges
@@ -123,10 +121,10 @@ public void setupSuiteScopeCluster() throws Exception {
         prepareCreate("old_index").setMapping("distance", "type=double", "route_length_miles", "type=alias,path=distance").get();
         prepareCreate("new_index").setMapping("route_length_miles", "type=double").get();
 
-        builders.add(client().prepareIndex("old_index").setSource("distance", 42.0));
-        builders.add(client().prepareIndex("old_index").setSource("distance", 50.5));
-        builders.add(client().prepareIndex("new_index").setSource("route_length_miles", 100.2));
-        builders.add(client().prepareIndex("new_index").setSource(Collections.emptyMap()));
+        builders.add(prepareIndex("old_index").setSource("distance", 42.0));
+        builders.add(prepareIndex("old_index").setSource("distance", 50.5));
+        builders.add(prepareIndex("new_index").setSource("route_length_miles", 100.2));
+        builders.add(prepareIndex("new_index").setSource(Collections.emptyMap()));
 
         indexRandom(true, builders);
         ensureSearchable();
@@ -902,8 +900,8 @@ public void testScriptCaching() throws Exception {
         );
         indexRandom(
             true,
-            client().prepareIndex("cache_test_idx").setId("1").setSource(jsonBuilder().startObject().field("i", 1).endObject()),
-            client().prepareIndex("cache_test_idx").setId("2").setSource(jsonBuilder().startObject().field("i", 2).endObject())
+            prepareIndex("cache_test_idx").setId("1").setSource(jsonBuilder().startObject().field("i", 1).endObject()),
+            prepareIndex("cache_test_idx").setId("2").setSource(jsonBuilder().startObject().field("i", 2).endObject())
         );
 
         // Make sure we are starting with a clear cache
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java
index 5f42eb3b2ab19..7585ed42da830 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java
@@ -125,7 +125,7 @@ private void insertIdx1(List values1, List values2) throws Excep
             source.startObject().field("field2", value1).endObject();
         }
         source.endArray().endObject();
-        indexRandom(false, client().prepareIndex("idx1").setRouting("1").setSource(source));
+        indexRandom(false, prepareIndex("idx1").setRouting("1").setSource(source));
     }
 
     private void insertIdx2(String[][] values) throws Exception {
@@ -138,7 +138,7 @@ private void insertIdx2(String[][] values) throws Exception {
             source.endArray().endObject();
         }
         source.endArray().endObject();
-        indexRandom(false, client().prepareIndex("idx2").setRouting("1").setSource(source));
+        indexRandom(false, prepareIndex("idx2").setRouting("1").setSource(source));
     }
 
     public void testSimpleReverseNestedToRoot() throws Exception {
@@ -535,8 +535,7 @@ public void testSameParentDocHavingMultipleBuckets() throws Exception {
             .endObject();
         assertAcked(prepareCreate("idx3").setSettings(indexSettings(1, 0)).setMapping(mapping));
 
-        client().prepareIndex("idx3")
-            .setId("1")
+        prepareIndex("idx3").setId("1")
             .setRefreshPolicy(IMMEDIATE)
             .setSource(
                 jsonBuilder().startObject()
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java
index c367752cc0460..7f46856cdd594 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java
@@ -75,12 +75,10 @@ public void setupSuiteScopeCluster() throws Exception {
         for (int i = 0; i < data.length; i++) {
             String[] parts = data[i].split(",");
-            client().prepareIndex("test")
-                .setId("" + i)
+            prepareIndex("test").setId("" + i)
                 .setSource("author", parts[5], "name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3]))
                 .get();
-            client().prepareIndex("idx_unmapped_author")
-                .setId("" + i)
+            prepareIndex("idx_unmapped_author").setId("" + i)
                 .setSource("name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3]))
                 .get();
         }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java
index 5b9aead6b9e05..94db8926f59e7 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java
@@ -49,21 +49,20 @@ public class ShardReduceIT extends ESIntegTestCase {
 
     private IndexRequestBuilder indexDoc(String date, int value) throws Exception {
-        return client().prepareIndex("idx")
-            .setSource(
-                jsonBuilder().startObject()
-                    .field("value", value)
-                    .field("ip", "10.0.0." + value)
-                    .field("location", Geohash.stringEncode(5, 52, Geohash.PRECISION))
-                    .field("date", date)
-                    .field("term-l", 1)
-                    .field("term-d", 1.5)
-                    .field("term-s", "term")
-                    .startObject("nested")
-                    .field("date", date)
-                    .endObject()
-                    .endObject()
-            );
+        return prepareIndex("idx").setSource(
+            jsonBuilder().startObject()
+                .field("value", value)
+                .field("ip", "10.0.0." + value)
+                .field("location", Geohash.stringEncode(5, 52, Geohash.PRECISION))
+                .field("date", date)
+                .field("term-l", 1)
+                .field("term-d", 1.5)
+                .field("term-s", "term")
+                .startObject("nested")
+                .field("date", date)
+                .endObject()
+                .endObject()
+        );
     }
 
     @Override
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java
index 4d94173f8d978..b672325891b50 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java
@@ -9,7 +9,6 @@
 
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchRequestBuilder;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentHelper;
@@ -57,7 +56,9 @@
 import static org.elasticsearch.search.aggregations.AggregationBuilders.significantText;
 import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertCheckedResponse;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.hamcrest.Matchers.closeTo;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
@@ -130,71 +131,70 @@ public void testXContentResponse() throws Exception {
); } - SearchResponse response = request.get(); - - assertNoFailures(response); - StringTerms classes = response.getAggregations().get("class"); - assertThat(classes.getBuckets().size(), equalTo(2)); - for (Terms.Bucket classBucket : classes.getBuckets()) { - Map aggs = classBucket.getAggregations().asMap(); - assertTrue(aggs.containsKey("sig_terms")); - SignificantTerms agg = (SignificantTerms) aggs.get("sig_terms"); - assertThat(agg.getBuckets().size(), equalTo(1)); - String term = agg.iterator().next().getKeyAsString(); - String classTerm = classBucket.getKeyAsString(); - assertTrue(term.equals(classTerm)); - } + assertCheckedResponse(request, response -> { + assertNoFailures(response); + StringTerms classes = response.getAggregations().get("class"); + assertThat(classes.getBuckets().size(), equalTo(2)); + for (Terms.Bucket classBucket : classes.getBuckets()) { + Map aggs = classBucket.getAggregations().asMap(); + assertTrue(aggs.containsKey("sig_terms")); + SignificantTerms agg = (SignificantTerms) aggs.get("sig_terms"); + assertThat(agg.getBuckets().size(), equalTo(1)); + String term = agg.iterator().next().getKeyAsString(); + String classTerm = classBucket.getKeyAsString(); + assertTrue(term.equals(classTerm)); + } - XContentBuilder responseBuilder = XContentFactory.jsonBuilder(); - responseBuilder.startObject(); - classes.toXContent(responseBuilder, ToXContent.EMPTY_PARAMS); - responseBuilder.endObject(); - - Object[] args = new Object[] { type.equals("long") ? "0" : "\"0\"", type.equals("long") ? "1" : "\"1\"" }; - String result = Strings.format(""" - { - "class": { - "doc_count_error_upper_bound": 0, - "sum_other_doc_count": 0, - "buckets": [ - { - "key": "0", - "doc_count": 4, - "sig_terms": { - "doc_count": 4, - "bg_count": 7, - "buckets": [ - { - "key": %s, + XContentBuilder responseBuilder = XContentFactory.jsonBuilder(); + responseBuilder.startObject(); + classes.toXContent(responseBuilder, ToXContent.EMPTY_PARAMS); + responseBuilder.endObject(); + + Object[] args = new Object[] { type.equals("long") ? "0" : "\"0\"", type.equals("long") ? 
"1" : "\"1\"" }; + String result = Strings.format(""" + { + "class": { + "doc_count_error_upper_bound": 0, + "sum_other_doc_count": 0, + "buckets": [ + { + "key": "0", + "doc_count": 4, + "sig_terms": { "doc_count": 4, - "score": 0.39999999999999997, - "bg_count": 5 + "bg_count": 7, + "buckets": [ + { + "key": %s, + "doc_count": 4, + "score": 0.39999999999999997, + "bg_count": 5 + } + ] } - ] - } - }, - { - "key": "1", - "doc_count": 3, - "sig_terms": { - "doc_count": 3, - "bg_count": 7, - "buckets": [ - { - "key":%s, + }, + { + "key": "1", + "doc_count": 3, + "sig_terms": { "doc_count": 3, - "score": 0.75, - "bg_count": 4 + "bg_count": 7, + "buckets": [ + { + "key":%s, + "doc_count": 3, + "score": 0.75, + "bg_count": 4 + } + ] } - ] - } + } + ] } - ] - } - } - """, args); - assertThat(Strings.toString(responseBuilder), equalTo(XContentHelper.stripWhitespace(result))); - + } + """, args); + assertThat(Strings.toString(responseBuilder), equalTo(XContentHelper.stripWhitespace(result))); + }); } public void testPopularTermManyDeletedDocs() throws Exception { @@ -208,10 +208,10 @@ public void testPopularTermManyDeletedDocs() throws Exception { String[] cat2v1 = { "constant", "two" }; String[] cat2v2 = { "constant", "duo" }; List indexRequestBuilderList = new ArrayList<>(); - indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME).setId("1").setSource(TEXT_FIELD, cat1v1, CLASS_FIELD, "1")); - indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME).setId("2").setSource(TEXT_FIELD, cat1v2, CLASS_FIELD, "1")); - indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME).setId("3").setSource(TEXT_FIELD, cat2v1, CLASS_FIELD, "2")); - indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME).setId("4").setSource(TEXT_FIELD, cat2v2, CLASS_FIELD, "2")); + indexRequestBuilderList.add(prepareIndex(INDEX_NAME).setId("1").setSource(TEXT_FIELD, cat1v1, CLASS_FIELD, "1")); + indexRequestBuilderList.add(prepareIndex(INDEX_NAME).setId("2").setSource(TEXT_FIELD, cat1v2, CLASS_FIELD, "1")); + indexRequestBuilderList.add(prepareIndex(INDEX_NAME).setId("3").setSource(TEXT_FIELD, cat2v1, CLASS_FIELD, "2")); + indexRequestBuilderList.add(prepareIndex(INDEX_NAME).setId("4").setSource(TEXT_FIELD, cat2v2, CLASS_FIELD, "2")); indexRandom(true, false, indexRequestBuilderList); // Now create some holes in the index with selective deletes caused by updates. @@ -222,7 +222,7 @@ public void testPopularTermManyDeletedDocs() throws Exception { indexRequestBuilderList.clear(); for (int i = 0; i < 50; i++) { text = text == cat1v2 ? 
cat1v1 : cat1v2; - indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME).setId("1").setSource(TEXT_FIELD, text, CLASS_FIELD, "1")); + indexRequestBuilderList.add(prepareIndex(INDEX_NAME).setId("1").setSource(TEXT_FIELD, text, CLASS_FIELD, "1")); } indexRandom(true, false, indexRequestBuilderList); @@ -286,9 +286,6 @@ public void testBackgroundVsSeparateSet( ); } - SearchResponse response1 = request1.get(); - assertNoFailures(response1); - SearchRequestBuilder request2; if (useSigText) { request2 = prepareSearch(INDEX_NAME).addAggregation( @@ -324,32 +321,32 @@ public void testBackgroundVsSeparateSet( ); } - SearchResponse response2 = request2.get(); - - StringTerms classes = response1.getAggregations().get("class"); + assertNoFailuresAndResponse(request1, response1 -> assertNoFailuresAndResponse(request2, response2 -> { + StringTerms classes = response1.getAggregations().get("class"); - SignificantTerms sigTerms0 = ((SignificantTerms) (classes.getBucketByKey("0").getAggregations().asMap().get("sig_terms"))); - assertThat(sigTerms0.getBuckets().size(), equalTo(2)); - double score00Background = sigTerms0.getBucketByKey("0").getSignificanceScore(); - double score01Background = sigTerms0.getBucketByKey("1").getSignificanceScore(); - SignificantTerms sigTerms1 = ((SignificantTerms) (classes.getBucketByKey("1").getAggregations().asMap().get("sig_terms"))); - double score10Background = sigTerms1.getBucketByKey("0").getSignificanceScore(); - double score11Background = sigTerms1.getBucketByKey("1").getSignificanceScore(); + SignificantTerms sigTerms0 = ((SignificantTerms) (classes.getBucketByKey("0").getAggregations().asMap().get("sig_terms"))); + assertThat(sigTerms0.getBuckets().size(), equalTo(2)); + double score00Background = sigTerms0.getBucketByKey("0").getSignificanceScore(); + double score01Background = sigTerms0.getBucketByKey("1").getSignificanceScore(); + SignificantTerms sigTerms1 = ((SignificantTerms) (classes.getBucketByKey("1").getAggregations().asMap().get("sig_terms"))); + double score10Background = sigTerms1.getBucketByKey("0").getSignificanceScore(); + double score11Background = sigTerms1.getBucketByKey("1").getSignificanceScore(); - Aggregations aggs = response2.getAggregations(); + Aggregations aggs = response2.getAggregations(); - sigTerms0 = (SignificantTerms) ((InternalFilter) aggs.get("0")).getAggregations().getAsMap().get("sig_terms"); - double score00SeparateSets = sigTerms0.getBucketByKey("0").getSignificanceScore(); - double score01SeparateSets = sigTerms0.getBucketByKey("1").getSignificanceScore(); + sigTerms0 = (SignificantTerms) ((InternalFilter) aggs.get("0")).getAggregations().getAsMap().get("sig_terms"); + double score00SeparateSets = sigTerms0.getBucketByKey("0").getSignificanceScore(); + double score01SeparateSets = sigTerms0.getBucketByKey("1").getSignificanceScore(); - sigTerms1 = (SignificantTerms) ((InternalFilter) aggs.get("1")).getAggregations().getAsMap().get("sig_terms"); - double score10SeparateSets = sigTerms1.getBucketByKey("0").getSignificanceScore(); - double score11SeparateSets = sigTerms1.getBucketByKey("1").getSignificanceScore(); + sigTerms1 = (SignificantTerms) ((InternalFilter) aggs.get("1")).getAggregations().getAsMap().get("sig_terms"); + double score10SeparateSets = sigTerms1.getBucketByKey("0").getSignificanceScore(); + double score11SeparateSets = sigTerms1.getBucketByKey("1").getSignificanceScore(); - assertThat(score00Background, equalTo(score00SeparateSets)); - assertThat(score01Background, equalTo(score01SeparateSets)); - 
assertThat(score10Background, equalTo(score10SeparateSets)); - assertThat(score11Background, equalTo(score11SeparateSets)); + assertThat(score00Background, equalTo(score00SeparateSets)); + assertThat(score01Background, equalTo(score01SeparateSets)); + assertThat(score10Background, equalTo(score10SeparateSets)); + assertThat(score11Background, equalTo(score11SeparateSets)); + })); } public void testScoresEqualForPositiveAndNegative() throws Exception { @@ -385,25 +382,23 @@ public void testScoresEqualForPositiveAndNegative(SignificanceHeuristic heuristi ) ); } - SearchResponse response = request.get(); - assertNoFailures(response); - - assertNoFailures(response); - StringTerms classes = response.getAggregations().get("class"); - assertThat(classes.getBuckets().size(), equalTo(2)); - Iterator classBuckets = classes.getBuckets().iterator(); - - Aggregations aggregations = classBuckets.next().getAggregations(); - SignificantTerms sigTerms = aggregations.get("mySignificantTerms"); - - List classA = sigTerms.getBuckets(); - Iterator classBBucketIterator = sigTerms.iterator(); - assertThat(classA.size(), greaterThan(0)); - for (SignificantTerms.Bucket classABucket : classA) { - SignificantTerms.Bucket classBBucket = classBBucketIterator.next(); - assertThat(classABucket.getKey(), equalTo(classBBucket.getKey())); - assertThat(classABucket.getSignificanceScore(), closeTo(classBBucket.getSignificanceScore(), 1.e-5)); - } + assertNoFailuresAndResponse(request, response -> { + StringTerms classes = response.getAggregations().get("class"); + assertThat(classes.getBuckets().size(), equalTo(2)); + Iterator classBuckets = classes.getBuckets().iterator(); + + Aggregations aggregations = classBuckets.next().getAggregations(); + SignificantTerms sigTerms = aggregations.get("mySignificantTerms"); + + List classA = sigTerms.getBuckets(); + Iterator classBBucketIterator = sigTerms.iterator(); + assertThat(classA.size(), greaterThan(0)); + for (SignificantTerms.Bucket classABucket : classA) { + SignificantTerms.Bucket classBBucket = classBBucketIterator.next(); + assertThat(classABucket.getKey(), equalTo(classBBucket.getKey())); + assertThat(classABucket.getSignificanceScore(), closeTo(classBBucket.getSignificanceScore(), 1.e-5)); + } + }); } /** @@ -423,16 +418,15 @@ public void testSubAggregations() throws Exception { .size(1000) .subAggregation(subAgg); - SearchResponse response = prepareSearch("test").setQuery(query).addAggregation(agg).get(); - assertNoFailures(response); - - SignificantTerms sigTerms = response.getAggregations().get("significant_terms"); - assertThat(sigTerms.getBuckets().size(), equalTo(2)); + assertNoFailuresAndResponse(prepareSearch("test").setQuery(query).addAggregation(agg), response -> { + SignificantTerms sigTerms = response.getAggregations().get("significant_terms"); + assertThat(sigTerms.getBuckets().size(), equalTo(2)); - for (SignificantTerms.Bucket bucket : sigTerms) { - StringTerms terms = bucket.getAggregations().get("class"); - assertThat(terms.getBuckets().size(), equalTo(2)); - } + for (SignificantTerms.Bucket bucket : sigTerms) { + StringTerms terms = bucket.getAggregations().get("class"); + assertThat(terms.getBuckets().size(), equalTo(2)); + } + }); } private void indexEqualTestData() throws ExecutionException, InterruptedException { @@ -463,7 +457,7 @@ private void indexEqualTestData() throws ExecutionException, InterruptedExceptio List indexRequestBuilders = new ArrayList<>(); for (int i = 0; i < data.length; i++) { String[] parts = data[i].split("\t"); - 
indexRequestBuilders.add(client().prepareIndex("test").setId("" + i).setSource("class", parts[0], "text", parts[1])); + indexRequestBuilders.add(prepareIndex("test").setId("" + i).setSource("class", parts[0], "text", parts[1])); } indexRandom(true, false, indexRequestBuilders); } @@ -497,17 +491,17 @@ public void testScriptScore() throws ExecutionException, InterruptedException, I ) ); } - SearchResponse response = request.get(); - assertNoFailures(response); - for (Terms.Bucket classBucket : ((Terms) response.getAggregations().get("class")).getBuckets()) { - SignificantTerms sigTerms = classBucket.getAggregations().get("mySignificantTerms"); - for (SignificantTerms.Bucket bucket : sigTerms.getBuckets()) { - assertThat( - bucket.getSignificanceScore(), - is((double) bucket.getSubsetDf() + bucket.getSubsetSize() + bucket.getSupersetDf() + bucket.getSupersetSize()) - ); + assertNoFailuresAndResponse(request, response -> { + for (Terms.Bucket classBucket : ((Terms) response.getAggregations().get("class")).getBuckets()) { + SignificantTerms sigTerms = classBucket.getAggregations().get("mySignificantTerms"); + for (SignificantTerms.Bucket bucket : sigTerms.getBuckets()) { + assertThat( + bucket.getSignificanceScore(), + is((double) bucket.getSubsetDf() + bucket.getSubsetSize() + bucket.getSupersetDf() + bucket.getSupersetSize()) + ); + } } - } + }); } private ScriptHeuristic getScriptSignificanceHeuristic() throws IOException { @@ -539,7 +533,7 @@ private void indexRandomFrequencies01(String type) throws ExecutionException, In text[0] = gb[randNum]; } indexRequestBuilderList.add( - client().prepareIndex(INDEX_NAME).setSource(TEXT_FIELD, text, CLASS_FIELD, randomBoolean() ? "one" : "zero") + prepareIndex(INDEX_NAME).setSource(TEXT_FIELD, text, CLASS_FIELD, randomBoolean() ? 
"one" : "zero") ); } indexRandom(true, indexRequestBuilderList); @@ -560,8 +554,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1, "t", "foo"), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2, "t", "bar") + prepareIndex("cache_test_idx").setId("1").setSource("s", 1, "t", "foo"), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2, "t", "bar") ); // Make sure we are starting with a clear cache @@ -579,17 +573,15 @@ public void testScriptCaching() throws Exception { new Script(ScriptType.INLINE, "mockscript", "Math.random()", Collections.emptyMap()) ); boolean useSigText = randomBoolean(); - SearchResponse r; + SearchRequestBuilder request; if (useSigText) { - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(significantText("foo", "t").significanceHeuristic(scriptHeuristic)) - .get(); + request = prepareSearch("cache_test_idx").setSize(0) + .addAggregation(significantText("foo", "t").significanceHeuristic(scriptHeuristic)); } else { - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(significantTerms("foo").field("s").significanceHeuristic(scriptHeuristic)) - .get(); + request = prepareSearch("cache_test_idx").setSize(0) + .addAggregation(significantTerms("foo").field("s").significanceHeuristic(scriptHeuristic)); } - assertNoFailures(r); + assertNoFailures(request); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -604,15 +596,13 @@ public void testScriptCaching() throws Exception { scriptHeuristic = getScriptSignificanceHeuristic(); useSigText = randomBoolean(); if (useSigText) { - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(significantText("foo", "t").significanceHeuristic(scriptHeuristic)) - .get(); + request = prepareSearch("cache_test_idx").setSize(0) + .addAggregation(significantText("foo", "t").significanceHeuristic(scriptHeuristic)); } else { - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(significantTerms("foo").field("s").significanceHeuristic(scriptHeuristic)) - .get(); + request = prepareSearch("cache_test_idx").setSize(0) + .addAggregation(significantTerms("foo").field("s").significanceHeuristic(scriptHeuristic)); } - assertNoFailures(r); + assertNoFailures(request); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -625,11 +615,11 @@ public void testScriptCaching() throws Exception { // Ensure that non-scripted requests are cached as normal if (useSigText) { - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(significantText("foo", "t")).get(); + request = prepareSearch("cache_test_idx").setSize(0).addAggregation(significantText("foo", "t")); } else { - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(significantTerms("foo").field("s")).get(); + request = prepareSearch("cache_test_idx").setSize(0).addAggregation(significantTerms("foo").field("s")); } - assertNoFailures(r); + assertNoFailures(request); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java index 58609df7ae8fe..25d6dfb850bbc 
100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java @@ -28,7 +28,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; @@ -57,8 +57,7 @@ public void setupSuiteScopeCluster() throws Exception { int numUniqueTerms = between(2, numDocs / 2); for (int i = 0; i < numDocs; i++) { builders.add( - client().prepareIndex("idx") - .setId("" + i) + prepareIndex("idx").setId("" + i) .setSource( jsonBuilder().startObject() .field(STRING_FIELD_NAME, "val" + randomInt(numUniqueTerms)) @@ -74,8 +73,7 @@ public void setupSuiteScopeCluster() throws Exception { ); for (int i = 0; i < numDocs; i++) { builders.add( - client().prepareIndex("idx_single_shard") - .setId("" + i) + prepareIndex("idx_single_shard").setId("" + i) .setSource( jsonBuilder().startObject() .field(STRING_FIELD_NAME, "val" + randomInt(numUniqueTerms)) @@ -89,8 +87,7 @@ public void setupSuiteScopeCluster() throws Exception { assertAcked(prepareCreate("idx_with_routing").setMapping("{ \"_routing\" : { \"required\" : true } }")); for (int i = 0; i < numDocs; i++) { builders.add( - client().prepareIndex("idx_single_shard") - .setId("" + i) + prepareIndex("idx_single_shard").setId("" + i) .setRouting(String.valueOf(randomInt(numRoutingValues))) .setSource( jsonBuilder().startObject() @@ -162,8 +159,7 @@ private void buildIndex(Map docsPerTerm, String index, int shar for (int i = 0; i < entry.getValue(); i++) { String term = entry.getKey(); builders.add( - client().prepareIndex(index) - .setId(term + "-" + i) + prepareIndex(index).setId(term + "-" + i) .setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, term).field("shard", shard).endObject()) ); } @@ -267,691 +263,643 @@ private void assertUnboundedDocCountError(int size, SearchResponse accurateRespo public void testStringValueField() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertDocCountErrorWithinBounds(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + 
.collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertDocCountErrorWithinBounds(size, accurateResponse, testResponse) + ) + ); } public void testStringValueFieldSingleShard() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); - } - - public void testStringValueFieldWithRouting() throws Exception { - int size = randomIntBetween(1, 20); - int shardSize = randomIntBetween(size, size * 2); - - SearchResponse testResponse = prepareSearch("idx_with_routing").setRouting(String.valueOf(between(1, numRoutingValues))) - .addAggregation( + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) + .size(10000) + .shardSize(10000) .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) ) - .get(); + ); + } - assertNoFailures(testResponse); + public void testStringValueFieldWithRouting() throws Exception { + int size = randomIntBetween(1, 20); + int shardSize = randomIntBetween(size, size * 2); - assertNoDocCountErrorSingleResponse(size, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_with_routing").setRouting(String.valueOf(between(1, numRoutingValues))) + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountErrorSingleResponse(size, testResponse) + ); } public void testStringValueFieldDocCountAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.count(true)) - 
.collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.count(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testStringValueFieldTermSortAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testStringValueFieldTermSortDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - 
assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testStringValueFieldSubAggAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testStringValueFieldSubAggDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - 
terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testLongValueField() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertDocCountErrorWithinBounds(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertDocCountErrorWithinBounds(size, accurateResponse, testResponse) + ) + ); } public void testLongValueFieldSingleShard() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = 
prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); - } - - public void testLongValueFieldWithRouting() throws Exception { - int size = randomIntBetween(1, 20); - int shardSize = randomIntBetween(size, size * 2); - - SearchResponse testResponse = prepareSearch("idx_with_routing").setRouting(String.valueOf(between(1, numRoutingValues))) - .addAggregation( + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) + .size(10000) + .shardSize(10000) .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) ) - .get(); + ); + } - assertNoFailures(testResponse); + public void testLongValueFieldWithRouting() throws Exception { + int size = randomIntBetween(1, 20); + int shardSize = randomIntBetween(size, size * 2); - assertNoDocCountErrorSingleResponse(size, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_with_routing").setRouting(String.valueOf(between(1, numRoutingValues))) + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountErrorSingleResponse(size, testResponse) + ); } public void testLongValueFieldDocCountAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.count(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.count(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + 
.field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testLongValueFieldTermSortAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testLongValueFieldTermSortDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + 
.order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testLongValueFieldSubAggAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testLongValueFieldSubAggDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(DOUBLE_FIELD_NAME)) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - 
.subAggregation(sum("sortAgg").field(DOUBLE_FIELD_NAME)) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(DOUBLE_FIELD_NAME)) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(DOUBLE_FIELD_NAME)) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testDoubleValueField() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertDocCountErrorWithinBounds(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertDocCountErrorWithinBounds(size, accurateResponse, testResponse) + ) + ); } public void testDoubleValueFieldSingleShard() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, 
testResponse); - } - - public void testDoubleValueFieldWithRouting() throws Exception { - int size = randomIntBetween(1, 20); - int shardSize = randomIntBetween(size, size * 2); - - SearchResponse testResponse = prepareSearch("idx_with_routing").setRouting(String.valueOf(between(1, numRoutingValues))) - .addAggregation( + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) + .size(10000) + .shardSize(10000) .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) ) - .get(); + ); + } - assertNoFailures(testResponse); + public void testDoubleValueFieldWithRouting() throws Exception { + int size = randomIntBetween(1, 20); + int shardSize = randomIntBetween(size, size * 2); - assertNoDocCountErrorSingleResponse(size, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_with_routing").setRouting(String.valueOf(between(1, numRoutingValues))) + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountErrorSingleResponse(size, testResponse) + ); } public void testDoubleValueFieldDocCountAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.count(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.count(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testDoubleValueFieldTermSortAsc() throws 
Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testDoubleValueFieldTermSortDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testDoubleValueFieldSubAggAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); 
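The conversions above all follow one shape: the old code materialized each SearchResponse with .get(), asserted on it, and left it unreleased, while the new form routes both the exhaustive "accurate" request (size/shardSize 10000) and the bounded "test" request through assertNoFailuresAndResponse callbacks, nesting the second inside the first so both live responses stay in scope for the doc-count-error comparison. A minimal sketch of what such a helper can look like, assuming a ref-counted SearchResponse; the decRef() bookkeeping and the exact signature are assumptions here, not the real ElasticsearchAssertions code:

    import java.util.function.Consumer;

    import org.elasticsearch.action.search.SearchRequestBuilder;
    import org.elasticsearch.action.search.SearchResponse;

    import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;

    public final class ResponseAssertionSketch {
        private ResponseAssertionSketch() {}

        // Run the request, fail fast on shard-level failures, hand the response
        // to the caller's assertions, and always release it afterwards.
        public static void assertNoFailuresAndResponse(SearchRequestBuilder request, Consumer<SearchResponse> assertions) {
            SearchResponse response = request.get();
            try {
                assertNoFailures(response);
                assertions.accept(response);
            } finally {
                response.decRef(); // assumed ref-counting: the response cannot leak past the callback
            }
        }
    }

Sequential requests would let accurateResponse escape its release scope, which is why the test request moves inside the accurate request's callback rather than running after it.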
- SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testDoubleValueFieldSubAggDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + 
terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } /** @@ -960,52 +908,54 @@ public void testDoubleValueFieldSubAggDesc() throws Exception { * 3 one-shard indices. */ public void testFixedDocs() throws Exception { - SearchResponse response = prepareSearch("idx_fixed_docs_0", "idx_fixed_docs_1", "idx_fixed_docs_2").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(5) - .shardSize(5) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - assertNoFailures(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getDocCountError(), equalTo(46L)); - List buckets = terms.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets.size(), equalTo(5)); - - Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("A")); - assertThat(bucket.getDocCount(), equalTo(100L)); - assertThat(bucket.getDocCountError(), equalTo(0L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("Z")); - assertThat(bucket.getDocCount(), equalTo(52L)); - assertThat(bucket.getDocCountError(), equalTo(2L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("C")); - assertThat(bucket.getDocCount(), equalTo(50L)); - assertThat(bucket.getDocCountError(), equalTo(15L)); - - bucket = buckets.get(3); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("G")); - assertThat(bucket.getDocCount(), equalTo(45L)); - assertThat(bucket.getDocCountError(), equalTo(2L)); - - bucket = buckets.get(4); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("B")); - assertThat(bucket.getDocCount(), equalTo(43L)); - assertThat(bucket.getDocCountError(), equalTo(29L)); + assertNoFailuresAndResponse( + prepareSearch("idx_fixed_docs_0", "idx_fixed_docs_1", "idx_fixed_docs_2").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(5) + .shardSize(5) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getDocCountError(), equalTo(46L)); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(5)); + + Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("A")); + assertThat(bucket.getDocCount(), equalTo(100L)); + assertThat(bucket.getDocCountError(), equalTo(0L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("Z")); + assertThat(bucket.getDocCount(), equalTo(52L)); + assertThat(bucket.getDocCountError(), equalTo(2L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("C")); + assertThat(bucket.getDocCount(), equalTo(50L)); + assertThat(bucket.getDocCountError(), 
equalTo(15L)); + + bucket = buckets.get(3); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("G")); + assertThat(bucket.getDocCount(), equalTo(45L)); + assertThat(bucket.getDocCountError(), equalTo(2L)); + + bucket = buckets.get(4); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("B")); + assertThat(bucket.getDocCount(), equalTo(43L)); + assertThat(bucket.getDocCountError(), equalTo(29L)); + } + ); } /** @@ -1013,16 +963,19 @@ public void testFixedDocs() throws Exception { * See https://github.com/elastic/elasticsearch/issues/40005 for more details */ public void testIncrementalReduction() { - SearchResponse response = prepareSearch("idx_fixed_docs_3", "idx_fixed_docs_4", "idx_fixed_docs_5").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(5) - .shardSize(5) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - assertNoFailures(response); - Terms terms = response.getAggregations().get("terms"); - assertThat(terms.getDocCountError(), equalTo(0L)); + assertNoFailuresAndResponse( + prepareSearch("idx_fixed_docs_3", "idx_fixed_docs_4", "idx_fixed_docs_5").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(5) + .shardSize(5) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms.getDocCountError(), equalTo(0L)); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java index 2a09e5f90f19c..c8e23d65b4e37 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java @@ -102,10 +102,10 @@ private void addTermsDocs(String term, int numInClass, int numNotInClass, List builders) { String sourceClass = "{\"text\": \"" + term + "\"}"; for (int i = 0; i < numDocs; i++) { - builders.add(client().prepareIndex(index).setSource(sourceClass, XContentType.JSON)); + builders.add(prepareIndex(index).setSource(sourceClass, XContentType.JSON)); } } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java index 527753df7fc3e..2277f4415d4db 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java @@ -126,19 +126,18 @@ public void setupSuiteScopeCluster() throws Exception { List builders = new ArrayList<>(); for (int i = 0; i < 5; i++) { builders.add( - client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, "val" + i) - .field("i", i) - .field("constant", 1) - .field("tag", i < 5 / 2 + 1 ? 
"more" : "less") - .startArray(MULTI_VALUED_FIELD_NAME) - .value("val" + i) - .value("val" + (i + 1)) - .endArray() - .endObject() - ) + prepareIndex("idx").setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, "val" + i) + .field("i", i) + .field("constant", 1) + .field("tag", i < 5 / 2 + 1 ? "more" : "less") + .startArray(MULTI_VALUED_FIELD_NAME) + .value("val" + i) + .value("val" + (i + 1)) + .endArray() + .endObject() + ) ); } @@ -150,24 +149,22 @@ public void setupSuiteScopeCluster() throws Exception { ); for (int i = 0; i < 100; i++) { builders.add( - client().prepareIndex("high_card_idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, "val" + Strings.padStart(i + "", 3, '0')) - .startArray(MULTI_VALUED_FIELD_NAME) - .value("val" + Strings.padStart(i + "", 3, '0')) - .value("val" + Strings.padStart((i + 1) + "", 3, '0')) - .endArray() - .endObject() - ) + prepareIndex("high_card_idx").setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, "val" + Strings.padStart(i + "", 3, '0')) + .startArray(MULTI_VALUED_FIELD_NAME) + .value("val" + Strings.padStart(i + "", 3, '0')) + .value("val" + Strings.padStart((i + 1) + "", 3, '0')) + .endArray() + .endObject() + ) ); } prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer").get(); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) + prepareIndex("empty_bucket_idx").setId("" + i) .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) ); } @@ -227,45 +224,55 @@ private void getMultiSortDocs(List builders) throws IOExcep ); for (int i = 1; i <= 3; i++) { builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val1").field("l", 1).field("d", i).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val1").field("l", 1).field("d", i).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val2").field("l", 2).field("d", i).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val2").field("l", 2).field("d", i).endObject() + ) ); } builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val3").field("l", 3).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val3").field("l", 3).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val3").field("l", 3).field("d", 2).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val3").field("l", 3).field("d", 2).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val4").field("l", 3).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val4").field("l", 3).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val4").field("l", 3).field("d", 3).endObject()) + 
prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val4").field("l", 3).field("d", 3).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val5").field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val5").field("l", 5).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val5").field("l", 5).field("d", 2).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val5").field("l", 5).field("d", 2).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val6").field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val6").field("l", 5).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val7").field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val7").field("l", 5).field("d", 1).endObject() + ) ); } @@ -922,7 +929,7 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithoutMetric .collectMode(randomFrom(SubAggCollectionMode.values())) .order(BucketOrder.aggregation("stats", true)) .subAggregation(stats("stats").field("i")) - ).execute().actionGet(); + ).get(); fail( "Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " @@ -1185,8 +1192,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", "foo"), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", "bar") + prepareIndex("cache_test_idx").setId("1").setSource("s", "foo"), + prepareIndex("cache_test_idx").setId("2").setSource("s", "bar") ); // Make sure we are starting with a clear cache diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java index 44361587dd09e..8b1fa4abe09a5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java @@ -37,8 +37,7 @@ public void testRequestBreaker() throws Exception { true, IntStream.range(0, randomIntBetween(10, 1000)) .mapToObj( - i -> client().prepareIndex("test") - .setId("id_" + i) + i -> prepareIndex("test").setId("id_" + i) .setSource(Map.of("field0", randomAlphaOfLength(5), "field1", randomAlphaOfLength(5))) ) .toArray(IndexRequestBuilder[]::new) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java index 295486fba2e56..e15ad15bb4e3a 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java @@ -910,8 +910,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java index d4b5be3045cdf..d263c14fe4710 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java @@ -575,8 +575,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java index 9eac8d4a06a43..0dbc811a7debc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java @@ -546,8 +546,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java index f494a339a7a71..06f43416eb03a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java @@ -89,8 +89,7 @@ public void setupSuiteScopeCluster() throws Exception { multiValueSample[i * 2] = firstMultiValueDatapoint; multiValueSample[(i * 2) + 1] = secondMultiValueDatapoint; - IndexRequestBuilder builder = client().prepareIndex("idx") - .setId(String.valueOf(i)) + IndexRequestBuilder builder = prepareIndex("idx").setId(String.valueOf(i)) .setSource( jsonBuilder().startObject() .field("value", singleValueDatapoint) @@ -114,8 +113,7 @@ public void setupSuiteScopeCluster() throws Exception { builders = new ArrayList<>(); for (int i = 0; i < 2; i++) { builders.add( - 
client().prepareIndex("empty_bucket_idx") - .setId(String.valueOf(i)) + prepareIndex("empty_bucket_idx").setId(String.valueOf(i)) .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) ); } @@ -502,8 +500,8 @@ public void testScriptCaching() throws Exception { indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java index a6876f606ffee..0ab26e1d9a049 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -274,8 +274,7 @@ public void setupSuiteScopeCluster() throws Exception { numDocs = randomIntBetween(10, 100); for (int i = 0; i < numDocs; i++) { builders.add( - client().prepareIndex("idx") - .setId("" + i) + prepareIndex("idx").setId("" + i) .setSource( jsonBuilder().startObject().field("value", randomAlphaOfLengthBetween(5, 15)).field("l_value", i).endObject() ) @@ -295,9 +294,7 @@ public void setupSuiteScopeCluster() throws Exception { builders = new ArrayList<>(); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) - .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) + prepareIndex("empty_bucket_idx").setId("" + i).setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) ); } @@ -373,7 +370,7 @@ public void testMap() { assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); + assertThat(aggregationList.size(), greaterThanOrEqualTo(getNumShards("idx").numPrimaries)); int numShardsRun = 0; for (Object object : aggregationList) { assertThat(object, notNullValue()); @@ -422,7 +419,7 @@ public void testMapWithParams() { assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); + assertThat(aggregationList.size(), greaterThanOrEqualTo(getNumShards("idx").numPrimaries)); int numShardsRun = 0; for (Object object : aggregationList) { assertThat(object, notNullValue()); @@ -482,7 +479,7 @@ public void testInitMutatesParams() { assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); + assertThat(aggregationList.size(), greaterThanOrEqualTo(getNumShards("idx").numPrimaries)); long totalCount = 0; for (Object object : aggregationList) { assertThat(object, notNullValue()); @@ -537,7 +534,7 @@ public 
void testMapCombineWithParams() { assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); + assertThat(aggregationList.size(), greaterThanOrEqualTo(getNumShards("idx").numPrimaries)); long totalCount = 0; for (Object object : aggregationList) { assertThat(object, notNullValue()); @@ -601,7 +598,7 @@ public void testInitMapCombineWithParams() { assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); + assertThat(aggregationList.size(), greaterThanOrEqualTo(getNumShards("idx").numPrimaries)); long totalCount = 0; for (Object object : aggregationList) { assertThat(object, notNullValue()); @@ -1148,8 +1145,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java index c27751d5c52b8..f97d886ae8df6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java @@ -239,8 +239,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java index e60f0308412cb..37524dabe7f09 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java @@ -67,9 +67,9 @@ public void setupSuiteScopeCluster() throws Exception { prepareCreate("new_index").setMapping("transit_mode", "type=keyword", "route_length_miles", "type=double").get(); List builders = new ArrayList<>(); - builders.add(client().prepareIndex("old_index").setSource("transit_mode", "train", "distance", 42.0)); - builders.add(client().prepareIndex("old_index").setSource("transit_mode", "bus", "distance", 50.5)); - builders.add(client().prepareIndex("new_index").setSource("transit_mode", "train", "route_length_miles", 100.2)); + builders.add(prepareIndex("old_index").setSource("transit_mode", "train", "distance", 42.0)); + builders.add(prepareIndex("old_index").setSource("transit_mode", "bus", "distance", 50.5)); + 
builders.add(prepareIndex("new_index").setSource("transit_mode", "train", "route_length_miles", 100.2)); indexRandom(true, builders); ensureSearchable(); @@ -213,8 +213,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java index 3156b934fdd06..f1a4c9e5bd7a5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java @@ -492,8 +492,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java index 6d2c11a5868a6..98086451c3456 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java @@ -462,8 +462,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index 432da3a05f860..d878dc981b17f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -156,8 +156,7 @@ public void setupSuiteScopeCluster() throws Exception { List builders = new ArrayList<>(); for (int i = 0; i < 50; i++) { builders.add( - client().prepareIndex("idx") - .setId(Integer.toString(i)) + prepareIndex("idx").setId(Integer.toString(i)) .setSource( jsonBuilder().startObject() .field(TERMS_AGGS_FIELD, "val" + (i / 10)) @@ -171,48 +170,39 @@ public void setupSuiteScopeCluster() throws Exception { } builders.add( - client().prepareIndex("field-collapsing") - .setId("1") + prepareIndex("field-collapsing").setId("1") .setSource(jsonBuilder().startObject().field("group", "a").field("text", "term x y z b").endObject()) ); builders.add( - 
client().prepareIndex("field-collapsing") - .setId("2") + prepareIndex("field-collapsing").setId("2") .setSource(jsonBuilder().startObject().field("group", "a").field("text", "term x y z n rare").field("value", 1).endObject()) ); builders.add( - client().prepareIndex("field-collapsing") - .setId("3") + prepareIndex("field-collapsing").setId("3") .setSource(jsonBuilder().startObject().field("group", "b").field("text", "x y z term").endObject()) ); builders.add( - client().prepareIndex("field-collapsing") - .setId("4") + prepareIndex("field-collapsing").setId("4") .setSource(jsonBuilder().startObject().field("group", "b").field("text", "x y term").endObject()) ); builders.add( - client().prepareIndex("field-collapsing") - .setId("5") + prepareIndex("field-collapsing").setId("5") .setSource(jsonBuilder().startObject().field("group", "b").field("text", "x term").endObject()) ); builders.add( - client().prepareIndex("field-collapsing") - .setId("6") + prepareIndex("field-collapsing").setId("6") .setSource(jsonBuilder().startObject().field("group", "b").field("text", "term rare").field("value", 3).endObject()) ); builders.add( - client().prepareIndex("field-collapsing") - .setId("7") + prepareIndex("field-collapsing").setId("7") .setSource(jsonBuilder().startObject().field("group", "c").field("text", "x y z term").endObject()) ); builders.add( - client().prepareIndex("field-collapsing") - .setId("8") + prepareIndex("field-collapsing").setId("8") .setSource(jsonBuilder().startObject().field("group", "c").field("text", "x y term b").endObject()) ); builders.add( - client().prepareIndex("field-collapsing") - .setId("9") + prepareIndex("field-collapsing").setId("9") .setSource(jsonBuilder().startObject().field("group", "c").field("text", "rare x term").field("value", 2).endObject()) ); @@ -227,12 +217,11 @@ public void setupSuiteScopeCluster() throws Exception { } builder.endArray().endObject(); - builders.add(client().prepareIndex("articles").setSource(builder)); + builders.add(prepareIndex("articles").setSource(builder)); } builders.add( - client().prepareIndex("articles") - .setId("1") + prepareIndex("articles").setId("1") .setSource( jsonBuilder().startObject() .field("title", "title 1") @@ -275,8 +264,7 @@ public void setupSuiteScopeCluster() throws Exception { ) ); builders.add( - client().prepareIndex("articles") - .setId("2") + prepareIndex("articles").setId("2") .setSource( jsonBuilder().startObject() .field("title", "title 2") @@ -1097,8 +1085,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java index 2b60456e2b2ba..7c5ab6600e365 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java @@ -49,8 +49,7 @@ public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); createIndex("idx_unmapped"); for (int i = 0; i < 10; i++) { - client().prepareIndex("idx") - .setId("" + 
i) + prepareIndex("idx").setId("" + i) .setSource( jsonBuilder().startObject().field("value", i + 1).startArray("values").value(i + 2).value(i + 3).endArray().endObject() ) @@ -221,8 +220,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipeLineAggregationTestCase.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipeLineAggregationTestCase.java index 01b2d92de7d89..7509cf3815085 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipeLineAggregationTestCase.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipeLineAggregationTestCase.java @@ -94,13 +94,9 @@ public void setupSuiteScopeCluster() throws Exception { for (int i = 0; i < numDocs; i++) { int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); builders.add( - client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, fieldValue) - .field("tag", "tag" + (i % interval)) - .endObject() - ) + prepareIndex("idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, fieldValue).field("tag", "tag" + (i % interval)).endObject() + ) ); final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? -1 : 0) - (minRandomValue / interval - 1); valueCounts[bucket]++; @@ -109,8 +105,7 @@ public void setupSuiteScopeCluster() throws Exception { assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer")); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) + prepareIndex("empty_bucket_idx").setId("" + i) .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) ); } @@ -475,7 +470,7 @@ public void testFieldIsntWrittenOutTwice() throws Exception { .field("@timestamp", "2018-07-08T08:07:00.599Z") .endObject(); // end::noformat - client().prepareIndex("foo_2").setSource(docBuilder).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + prepareIndex("foo_2").setSource(docBuilder).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); indicesAdmin().prepareRefresh(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java index ba6522be755e9..16a570b6cd2fd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java @@ -134,7 +134,7 @@ public void setupSuiteScopeCluster() throws Exception { List builders = new ArrayList<>(); for (int docs = 0; docs < numDocs; docs++) { - builders.add(client().prepareIndex("idx").setSource(newDocBuilder())); + builders.add(prepareIndex("idx").setSource(newDocBuilder())); } indexRandom(true, builders); diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java index 0351734358968..bc518eb6c1294 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java @@ -85,8 +85,7 @@ public void setupSuiteScopeCluster() throws Exception { // creates 6 documents where the value of the field is 0, 1, 2, 3, // 3, 5 builders.add( - client().prepareIndex("idx_gappy") - .setId("" + i) + prepareIndex("idx_gappy").setId("" + i) .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i == 4 ? 3 : i).endObject()) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java index 74acaf95bd24a..e4bb11247d230 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; @@ -27,6 +26,7 @@ import java.util.List; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.emptyArray; @@ -43,15 +43,16 @@ public void testAllowPartialsWithRedState() throws Exception { final int numShards = cluster().numDataNodes() + 2; buildRedIndex(numShards); - SearchResponse searchResponse = prepareSearch().setSize(0).setAllowPartialSearchResults(true).get(); - assertThat(RestStatus.OK, equalTo(searchResponse.status())); - assertThat("Expect some shards failed", searchResponse.getFailedShards(), allOf(greaterThan(0), lessThanOrEqualTo(numShards))); - assertThat("Expect no shards skipped", searchResponse.getSkippedShards(), equalTo(0)); - assertThat("Expect subset of shards successful", searchResponse.getSuccessfulShards(), lessThan(numShards)); - assertThat("Expected total shards", searchResponse.getTotalShards(), equalTo(numShards)); - for (ShardSearchFailure failure : searchResponse.getShardFailures()) { - assertThat(failure.getCause(), instanceOf(NoShardAvailableActionException.class)); - } + assertResponse(prepareSearch().setSize(0).setAllowPartialSearchResults(true), response -> { + assertThat(RestStatus.OK, equalTo(response.status())); + assertThat("Expect some shards failed", response.getFailedShards(), allOf(greaterThan(0), lessThanOrEqualTo(numShards))); + assertThat("Expect no shards skipped", response.getSkippedShards(), equalTo(0)); + assertThat("Expect subset of shards successful", response.getSuccessfulShards(), lessThan(numShards)); + assertThat("Expected total shards", 
response.getTotalShards(), equalTo(numShards)); + for (ShardSearchFailure failure : response.getShardFailures()) { + assertThat(failure.getCause(), instanceOf(NoShardAvailableActionException.class)); + } + }); } public void testClusterAllowPartialsWithRedState() throws Exception { @@ -60,18 +61,19 @@ public void testClusterAllowPartialsWithRedState() throws Exception { setClusterDefaultAllowPartialResults(true); - SearchResponse searchResponse = prepareSearch().setSize(0).get(); - assertThat(RestStatus.OK, equalTo(searchResponse.status())); - assertThat("Expect some shards failed", searchResponse.getFailedShards(), allOf(greaterThan(0), lessThanOrEqualTo(numShards))); - assertThat("Expect no shards skipped", searchResponse.getSkippedShards(), equalTo(0)); - assertThat("Expect subset of shards successful", searchResponse.getSuccessfulShards(), lessThan(numShards)); - assertThat("Expected total shards", searchResponse.getTotalShards(), equalTo(numShards)); - for (ShardSearchFailure failure : searchResponse.getShardFailures()) { - assertThat(failure.getCause(), instanceOf(NoShardAvailableActionException.class)); - assertThat(failure.getCause().getStackTrace(), emptyArray()); - // We don't write out the entire, repetitive stacktrace in the reason - assertThat(failure.reason(), equalTo("org.elasticsearch.action.NoShardAvailableActionException" + System.lineSeparator())); - } + assertResponse(prepareSearch().setSize(0), response -> { + assertThat(RestStatus.OK, equalTo(response.status())); + assertThat("Expect some shards failed", response.getFailedShards(), allOf(greaterThan(0), lessThanOrEqualTo(numShards))); + assertThat("Expect no shards skipped", response.getSkippedShards(), equalTo(0)); + assertThat("Expect subset of shards successful", response.getSuccessfulShards(), lessThan(numShards)); + assertThat("Expected total shards", response.getTotalShards(), equalTo(numShards)); + for (ShardSearchFailure failure : response.getShardFailures()) { + assertThat(failure.getCause(), instanceOf(NoShardAvailableActionException.class)); + assertThat(failure.getCause().getStackTrace(), emptyArray()); + // We don't write out the entire, repetitive stacktrace in the reason + assertThat(failure.reason(), equalTo("org.elasticsearch.action.NoShardAvailableActionException" + System.lineSeparator())); + } + }); } public void testDisallowPartialsWithRedState() throws Exception { @@ -107,7 +109,7 @@ private void buildRedIndex(int numShards) throws Exception { assertAcked(prepareCreate("test").setSettings(indexSettings(numShards, 0))); ensureGreen(); for (int i = 0; i < 10; i++) { - client().prepareIndex("test").setId("" + i).setSource("field1", "value1").get(); + prepareIndex("test").setId("" + i).setSource("field1", "value1").get(); } refresh(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java index eb6dd2f0767f1..97a400709cde7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java @@ -9,13 +9,13 @@ package org.elasticsearch.search.basic; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.health.ClusterHealthStatus; import 
org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.ESIntegTestCase; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.greaterThanOrEqualTo; /** @@ -54,7 +54,7 @@ private void searchWhileCreatingIndex(boolean createIndex, int numberOfReplicas) if (createIndex) { createIndex("test"); } - client().prepareIndex("test").setId(id).setSource("field", "test").get(); + prepareIndex("test").setId(id).setSource("field", "test").get(); RefreshResponse refreshResponse = indicesAdmin().prepareRefresh("test").get(); // at least one shard should be successful when refreshing assertThat(refreshResponse.getSuccessfulShards(), greaterThanOrEqualTo(1)); @@ -66,32 +66,37 @@ private void searchWhileCreatingIndex(boolean createIndex, int numberOfReplicas) // first, verify that search normal search works assertHitCount(prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "test")), 1); Client client = client(); - SearchResponse searchResponse = client.prepareSearch("test") - .setPreference(preference + Integer.toString(counter++)) - .setQuery(QueryBuilders.termQuery("field", "test")) - .get(); - if (searchResponse.getHits().getTotalHits().value != 1) { - refresh(); - SearchResponse searchResponseAfterRefresh = client.prepareSearch("test") - .setPreference(preference) - .setQuery(QueryBuilders.termQuery("field", "test")) - .get(); - logger.info( - "hits count mismatch on any shard search failed, post explicit refresh hits are {}", - searchResponseAfterRefresh.getHits().getTotalHits().value - ); - ensureGreen(); - SearchResponse searchResponseAfterGreen = client.prepareSearch("test") - .setPreference(preference) - .setQuery(QueryBuilders.termQuery("field", "test")) - .get(); - logger.info( - "hits count mismatch on any shard search failed, post explicit wait for green hits are {}", - searchResponseAfterGreen.getHits().getTotalHits().value - ); - assertHitCount(searchResponse, 1); - } - assertHitCount(searchResponse, 1); + assertResponse( + client.prepareSearch("test") + .setPreference(preference + Integer.toString(counter++)) + .setQuery(QueryBuilders.termQuery("field", "test")), + searchResponse -> { + if (searchResponse.getHits().getTotalHits().value != 1) { + refresh(); + assertResponse( + client.prepareSearch("test").setPreference(preference).setQuery(QueryBuilders.termQuery("field", "test")), + searchResponseAfterRefresh -> { + logger.info( + "hits count mismatch on any shard search failed, post explicit refresh hits are {}", + searchResponseAfterRefresh.getHits().getTotalHits().value + ); + ensureGreen(); + assertResponse( + client.prepareSearch("test") + .setPreference(preference) + .setQuery(QueryBuilders.termQuery("field", "test")), + searchResponseAfterGreen -> logger.info( + "hits count mismatch on any shard search failed, post explicit wait for green hits are {}", + searchResponseAfterGreen.getHits().getTotalHits().value + ) + ); + } + ); + assertHitCount(searchResponse, 1); + } + assertHitCount(searchResponse, 1); + } + ); status = clusterAdmin().prepareHealth("test").get().getStatus(); internalCluster().ensureAtLeastNumDataNodes(numberOfReplicas + 1); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java index 24df07217a5a2..26d81f672d650 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Priority; import org.elasticsearch.search.SearchHits; import org.elasticsearch.test.ESIntegTestCase; @@ -25,6 +24,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.formatShardStatus; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; @@ -47,8 +47,7 @@ private void testSearchAndRelocateConcurrently(final int numberOfReplicas) throw final int numDocs = between(10, 20); for (int i = 0; i < numDocs; i++) { indexBuilders.add( - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setSource( jsonBuilder().startObject() .field("test", "value") @@ -74,33 +73,34 @@ private void testSearchAndRelocateConcurrently(final int numberOfReplicas) throw public void run() { try { while (stop.get() == false) { - SearchResponse sr = prepareSearch().setSize(numDocs).get(); - if (sr.getHits().getTotalHits().value != numDocs) { - // if we did not search all shards but had no serious failures that is potentially fine - // if only the hit-count is wrong. this can happen if the cluster-state is behind when the - // request comes in. It's a small window but a known limitation. - if (sr.getTotalShards() != sr.getSuccessfulShards() - && Stream.of(sr.getShardFailures()) - .allMatch(ssf -> ssf.getCause() instanceof NoShardAvailableActionException)) { - nonCriticalExceptions.add( - "Count is " - + sr.getHits().getTotalHits().value - + " but " - + numDocs - + " was expected. " - + formatShardStatus(sr) - ); - } else { - assertHitCount(sr, numDocs); + assertResponse(prepareSearch().setSize(numDocs), response -> { + if (response.getHits().getTotalHits().value != numDocs) { + // if we did not search all shards but had no serious failures that is potentially fine + // if only the hit-count is wrong. this can happen if the cluster-state is behind when the + // request comes in. It's a small window but a known limitation. + if (response.getTotalShards() != response.getSuccessfulShards() + && Stream.of(response.getShardFailures()) + .allMatch(ssf -> ssf.getCause() instanceof NoShardAvailableActionException)) { + nonCriticalExceptions.add( + "Count is " + + response.getHits().getTotalHits().value + + " but " + + numDocs + + " was expected. 
" + + formatShardStatus(response) + ); + } else { + assertHitCount(response, numDocs); + } } - } - final SearchHits sh = sr.getHits(); - assertThat( - "Expected hits to be the same size the actual hits array", - sh.getTotalHits().value, - equalTo((long) (sh.getHits().length)) - ); + final SearchHits sh = response.getHits(); + assertThat( + "Expected hits to be the same size the actual hits array", + sh.getTotalHits().value, + equalTo((long) (sh.getHits().length)) + ); + }); // this is the more critical but that we hit the actual hit array has a different size than the // actual number of hits. } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java index 6f701e956788b..6ebfc61830269 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -41,6 +40,7 @@ import java.util.concurrent.ExecutionException; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; public class SearchWithRandomExceptionsIT extends ESIntegTestCase { @@ -99,8 +99,7 @@ public void testRandomExceptions() throws IOException, InterruptedException, Exe boolean[] added = new boolean[numDocs]; for (int i = 0; i < numDocs; i++) { try { - DocWriteResponse indexResponse = client().prepareIndex("test") - .setId("" + i) + DocWriteResponse indexResponse = prepareIndex("test").setId("" + i) .setTimeout(TimeValue.timeValueSeconds(1)) .setSource("test", English.intToEnglish(i)) .get(); @@ -125,28 +124,36 @@ public void testRandomExceptions() throws IOException, InterruptedException, Exe NumShards test = getNumShards("test"); final int numSearches = scaledRandomIntBetween(100, 200); + final int finalNumCreated = numCreated; // we don't check anything here really just making sure we don't leave any open files or a broken index behind. for (int i = 0; i < numSearches; i++) { try { int docToQuery = between(0, numDocs - 1); int expectedResults = added[docToQuery] ? 
1 : 0; logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery)); - SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery))) - .setSize(expectedResults) - .get(); - logger.info("Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), test.numPrimaries); - if (searchResponse.getSuccessfulShards() == test.numPrimaries && refreshFailed == false) { - assertResultsAndLogOnFailure(expectedResults, searchResponse); - } + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery))).setSize(expectedResults), + response -> { + logger.info("Successful shards: [{}] numShards: [{}]", response.getSuccessfulShards(), test.numPrimaries); + if (response.getSuccessfulShards() == test.numPrimaries && refreshFailed == false) { + assertResultsAndLogOnFailure(expectedResults, response); + } + } + ); // check match all - searchResponse = prepareSearch().setQuery(QueryBuilders.matchAllQuery()) - .setSize(numCreated) - .addSort("_id", SortOrder.ASC) - .get(); - logger.info("Match all Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), test.numPrimaries); - if (searchResponse.getSuccessfulShards() == test.numPrimaries && refreshFailed == false) { - assertResultsAndLogOnFailure(numCreated, searchResponse); - } + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchAllQuery()).setSize(numCreated).addSort("_id", SortOrder.ASC), + response -> { + logger.info( + "Match all Successful shards: [{}] numShards: [{}]", + response.getSuccessfulShards(), + test.numPrimaries + ); + if (response.getSuccessfulShards() == test.numPrimaries && refreshFailed == false) { + assertResultsAndLogOnFailure(finalNumCreated, response); + } + } + ); } catch (SearchPhaseExecutionException ex) { logger.info("expected SearchPhaseException: [{}]", ex.getMessage()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java index 0ccde7a62a09e..33ef75b317e33 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; @@ -33,6 +32,7 @@ import java.util.concurrent.ExecutionException; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; public class SearchWithRandomIOExceptionsIT extends ESIntegTestCase { @@ -82,7 +82,7 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc numInitialDocs = between(10, 100); ensureGreen(); for (int i = 0; i < numInitialDocs; i++) { - client().prepareIndex("test").setId("init" + i).setSource("test", "init").get(); + prepareIndex("test").setId("init" + i).setSource("test", "init").get(); } 
indicesAdmin().prepareRefresh("test").execute().get(); indicesAdmin().prepareFlush("test").execute().get(); @@ -121,8 +121,7 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc for (int i = 0; i < numDocs; i++) { added[i] = false; try { - DocWriteResponse indexResponse = client().prepareIndex("test") - .setId(Integer.toString(i)) + DocWriteResponse indexResponse = prepareIndex("test").setId(Integer.toString(i)) .setTimeout(TimeValue.timeValueSeconds(1)) .setSource("test", English.intToEnglish(i)) .get(); @@ -147,32 +146,39 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc refreshResponse.getTotalShards() ); final int numSearches = scaledRandomIntBetween(10, 20); + final int finalNumCreated = numCreated; + final int finalNumInitialDocs = numInitialDocs; // we don't check anything here really just making sure we don't leave any open files or a broken index behind. for (int i = 0; i < numSearches; i++) { try { int docToQuery = between(0, numDocs - 1); int expectedResults = added[docToQuery] ? 1 : 0; logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery)); - SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery))) - .setSize(expectedResults) - .get(); - logger.info("Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards.numPrimaries); - if (searchResponse.getSuccessfulShards() == numShards.numPrimaries && refreshFailed == false) { - assertResultsAndLogOnFailure(expectedResults, searchResponse); - } + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery))).setSize(expectedResults), + response -> { + logger.info("Successful shards: [{}] numShards: [{}]", response.getSuccessfulShards(), numShards.numPrimaries); + if (response.getSuccessfulShards() == numShards.numPrimaries && refreshFailed == false) { + assertResultsAndLogOnFailure(expectedResults, response); + } + } + ); // check match all - searchResponse = prepareSearch().setQuery(QueryBuilders.matchAllQuery()) - .setSize(numCreated + numInitialDocs) - .addSort("_uid", SortOrder.ASC) - .get(); - logger.info( - "Match all Successful shards: [{}] numShards: [{}]", - searchResponse.getSuccessfulShards(), - numShards.numPrimaries + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchAllQuery()) + .setSize(numCreated + numInitialDocs) + .addSort("_uid", SortOrder.ASC), + response -> { + logger.info( + "Match all Successful shards: [{}] numShards: [{}]", + response.getSuccessfulShards(), + numShards.numPrimaries + ); + if (response.getSuccessfulShards() == numShards.numPrimaries && refreshFailed == false) { + assertResultsAndLogOnFailure(finalNumCreated + finalNumInitialDocs, response); + } + } ); - if (searchResponse.getSuccessfulShards() == numShards.numPrimaries && refreshFailed == false) { - assertResultsAndLogOnFailure(numCreated + numInitialDocs, searchResponse); - } } catch (SearchPhaseExecutionException ex) { logger.info("SearchPhaseException: [{}]", ex.getMessage()); // if a scheduled refresh or flush fails all shards we see all shards failed here diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java index 54abecb5a1905..c4b0346170949 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.Priority; @@ -26,8 +25,10 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.concurrent.ExecutionException; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; @@ -55,15 +56,18 @@ public void testFailedSearchWithWrongQuery() throws Exception { assertThat(refreshResponse.getFailedShards(), equalTo(0)); for (int i = 0; i < 5; i++) { try { - SearchResponse searchResponse = client().search( - new SearchRequest("test").source(new SearchSourceBuilder().query(new MatchQueryBuilder("foo", "biz"))) - ).actionGet(); - assertThat(searchResponse.getTotalShards(), equalTo(test.numPrimaries)); - assertThat(searchResponse.getSuccessfulShards(), equalTo(0)); - assertThat(searchResponse.getFailedShards(), equalTo(test.numPrimaries)); - fail("search should fail"); - } catch (ElasticsearchException e) { - assertThat(e.unwrapCause(), instanceOf(SearchPhaseExecutionException.class)); + assertResponse( + client().search(new SearchRequest("test").source(new SearchSourceBuilder().query(new MatchQueryBuilder("foo", "biz")))), + response -> { + assertThat(response.getTotalShards(), equalTo(test.numPrimaries)); + assertThat(response.getSuccessfulShards(), equalTo(0)); + assertThat(response.getFailedShards(), equalTo(test.numPrimaries)); + fail("search should fail"); + } + ); + } catch (ExecutionException e) { + assertThat(e.getCause(), instanceOf(ElasticsearchException.class)); + assertThat(((ElasticsearchException) e.getCause()).unwrapCause(), instanceOf(SearchPhaseExecutionException.class)); // all is well } } @@ -93,15 +97,18 @@ public void testFailedSearchWithWrongQuery() throws Exception { for (int i = 0; i < 5; i++) { try { - SearchResponse searchResponse = client().search( - new SearchRequest("test").source(new SearchSourceBuilder().query(new MatchQueryBuilder("foo", "biz"))) - ).actionGet(); - assertThat(searchResponse.getTotalShards(), equalTo(test.numPrimaries)); - assertThat(searchResponse.getSuccessfulShards(), equalTo(0)); - assertThat(searchResponse.getFailedShards(), equalTo(test.numPrimaries)); - fail("search should fail"); - } catch (ElasticsearchException e) { - assertThat(e.unwrapCause(), instanceOf(SearchPhaseExecutionException.class)); + assertResponse( + client().search(new SearchRequest("test").source(new SearchSourceBuilder().query(new MatchQueryBuilder("foo", "biz")))), + response -> { + assertThat(response.getTotalShards(), equalTo(test.numPrimaries)); + assertThat(response.getSuccessfulShards(), equalTo(0)); + assertThat(response.getFailedShards(), equalTo(test.numPrimaries)); + fail("search should fail"); + } + ); + } catch (ExecutionException e) { + assertThat(e.getCause(), 
instanceOf(ElasticsearchException.class)); + assertThat(((ElasticsearchException) e.getCause()).unwrapCause(), instanceOf(SearchPhaseExecutionException.class)); // all is well } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java index 54cff6efe3d17..e18c37aff783b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; @@ -43,6 +42,8 @@ import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -287,15 +288,15 @@ public void testSimpleFacets() throws Exception { .aggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.filter("all", termQuery("multi", "test")))) .aggregation(AggregationBuilders.filter("test1", termQuery("name", "test1"))); - SearchResponse searchResponse = client().search(new SearchRequest("test").source(sourceBuilder)).actionGet(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(100L)); + assertNoFailuresAndResponse(client().search(new SearchRequest("test").source(sourceBuilder)), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(100L)); - Global global = searchResponse.getAggregations().get("global"); - Filter all = global.getAggregations().get("all"); - Filter test1 = searchResponse.getAggregations().get("test1"); - assertThat(test1.getDocCount(), equalTo(1L)); - assertThat(all.getDocCount(), equalTo(100L)); + Global global = response.getAggregations().get("global"); + Filter all = global.getAggregations().get("all"); + Filter test1 = response.getAggregations().get("test1"); + assertThat(test1.getDocCount(), equalTo(1L)); + assertThat(all.getDocCount(), equalTo(100L)); + }); } public void testFailedSearchWithWrongQuery() throws Exception { @@ -352,20 +353,22 @@ public void testFailedMultiSearchWithWrongQuery() throws Exception { logger.info("Start Testing failed multi search with a wrong query"); - MultiSearchResponse response = client().prepareMultiSearch() - .add(prepareSearch("test").setQuery(new MatchQueryBuilder("foo", "biz"))) - .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("nid", 2))) - .add(prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())) - .get(); - assertThat(response.getResponses().length, equalTo(3)); - assertThat(response.getResponses()[0].getFailureMessage(), 
notNullValue()); - - assertThat(response.getResponses()[1].getFailureMessage(), nullValue()); - assertThat(response.getResponses()[1].getResponse().getHits().getHits().length, equalTo(1)); + assertResponse( + client().prepareMultiSearch() + .add(prepareSearch("test").setQuery(new MatchQueryBuilder("foo", "biz"))) + .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("nid", 2))) + .add(prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())), + response -> { + assertThat(response.getResponses().length, equalTo(3)); + assertThat(response.getResponses()[0].getFailureMessage(), notNullValue()); - assertThat(response.getResponses()[2].getFailureMessage(), nullValue()); - assertThat(response.getResponses()[2].getResponse().getHits().getHits().length, equalTo(10)); + assertThat(response.getResponses()[1].getFailureMessage(), nullValue()); + assertThat(response.getResponses()[1].getResponse().getHits().getHits().length, equalTo(1)); + assertThat(response.getResponses()[2].getFailureMessage(), nullValue()); + assertThat(response.getResponses()[2].getResponse().getHits().getHits().length, equalTo(10)); + } + ); logger.info("Done Testing failed search"); } @@ -374,28 +377,30 @@ public void testFailedMultiSearchWithWrongQueryWithFunctionScore() throws Except logger.info("Start Testing failed multi search with a wrong query"); - MultiSearchResponse response = client().prepareMultiSearch() - // Add custom score query with bogus script - .add( - prepareSearch("test").setQuery( - QueryBuilders.functionScoreQuery( - QueryBuilders.termQuery("nid", 1), - new ScriptScoreFunctionBuilder(new Script(ScriptType.INLINE, "bar", "foo", Collections.emptyMap())) + assertResponse( + client().prepareMultiSearch() + // Add custom score query with bogus script + .add( + prepareSearch("test").setQuery( + QueryBuilders.functionScoreQuery( + QueryBuilders.termQuery("nid", 1), + new ScriptScoreFunctionBuilder(new Script(ScriptType.INLINE, "bar", "foo", Collections.emptyMap())) + ) ) ) - ) - .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("nid", 2))) - .add(prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())) - .get(); - assertThat(response.getResponses().length, equalTo(3)); - assertThat(response.getResponses()[0].getFailureMessage(), notNullValue()); + .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("nid", 2))) + .add(prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())), + response -> { + assertThat(response.getResponses().length, equalTo(3)); + assertThat(response.getResponses()[0].getFailureMessage(), notNullValue()); - assertThat(response.getResponses()[1].getFailureMessage(), nullValue()); - assertThat(response.getResponses()[1].getResponse().getHits().getHits().length, equalTo(1)); - - assertThat(response.getResponses()[2].getFailureMessage(), nullValue()); - assertThat(response.getResponses()[2].getResponse().getHits().getHits().length, equalTo(10)); + assertThat(response.getResponses()[1].getFailureMessage(), nullValue()); + assertThat(response.getResponses()[1].getResponse().getHits().getHits().length, equalTo(1)); + assertThat(response.getResponses()[2].getFailureMessage(), nullValue()); + assertThat(response.getResponses()[2].getResponse().getHits().getHits().length, equalTo(10)); + } + ); logger.info("Done Testing failed search"); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSCanMatchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSCanMatchIT.java index 2ddbbec5bc1c8..582df3a5bb396 100644 
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSCanMatchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSCanMatchIT.java @@ -13,7 +13,6 @@ import org.apache.lucene.index.PointValues; import org.elasticsearch.action.search.CanMatchNodeRequest; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchTransportService; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -34,7 +33,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.AbstractMultiClustersTestCase; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.TransportService; import org.hamcrest.Matchers; @@ -46,6 +44,9 @@ import java.util.List; import java.util.Optional; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.in; @@ -103,7 +104,7 @@ protected Collection> nodePlugins(String clusterAlias) { int createIndexAndIndexDocs(String cluster, String index, int numberOfShards, long timestamp, boolean exposeTimestamp) throws Exception { Client client = client(cluster); - ElasticsearchAssertions.assertAcked( + assertAcked( client.admin() .indices() .prepareCreate(index) @@ -175,11 +176,12 @@ public void testCanMatchOnTimeRange() throws Exception { SearchSourceBuilder source = new SearchSourceBuilder().query(new RangeQueryBuilder("@timestamp").from(timestamp)); SearchRequest request = new SearchRequest("local_*", "*:remote_*"); request.source(source).setCcsMinimizeRoundtrips(minimizeRoundTrips); - SearchResponse searchResp = client().search(request).actionGet(); - ElasticsearchAssertions.assertHitCount(searchResp, localDocs + remoteDocs); - int totalShards = oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards; - assertThat(searchResp.getTotalShards(), equalTo(totalShards)); - assertThat(searchResp.getSkippedShards(), equalTo(oldLocalNumShards + oldRemoteNumShards)); + assertResponse(client().search(request), response -> { + assertHitCount(response, localDocs + remoteDocs); + int totalShards = oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards; + assertThat(response.getTotalShards(), equalTo(totalShards)); + assertThat(response.getSkippedShards(), equalTo(oldLocalNumShards + oldRemoteNumShards)); + }); } } finally { for (String cluster : List.of(LOCAL_CLUSTER, REMOTE_CLUSTER)) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java index 1596a9a7e28a8..cf8d81f406f91 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java @@ -13,13 +13,13 @@ import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import 
org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchShardsAction; import org.elasticsearch.action.search.SearchShardsGroup; import org.elasticsearch.action.search.SearchShardsRequest; import org.elasticsearch.action.search.SearchShardsResponse; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchShardsAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; @@ -67,6 +67,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -130,13 +131,14 @@ public void testRemoteClusterClientRole() throws Exception { .toList() ); - final SearchResponse resp = localCluster.client(nodeWithRemoteClusterClientRole) - .prepareSearch("demo", "cluster_a:prod") - .setQuery(new MatchAllQueryBuilder()) - .setAllowPartialSearchResults(false) - .setSize(1000) - .get(); - assertHitCount(resp, demoDocs + prodDocs); + assertHitCount( + localCluster.client(nodeWithRemoteClusterClientRole) + .prepareSearch("demo", "cluster_a:prod") + .setQuery(new MatchAllQueryBuilder()) + .setAllowPartialSearchResults(false) + .setSize(1000), + demoDocs + prodDocs + ); } public void testProxyConnectionDisconnect() throws Exception { @@ -238,7 +240,7 @@ public void testCancel() throws Exception { final TaskInfo rootTask = client().admin() .cluster() .prepareListTasks() - .setActions(SearchAction.INSTANCE.name()) + .setActions(TransportSearchAction.TYPE.name()) .get() .getTasks() .stream() @@ -272,7 +274,7 @@ public void testCancel() throws Exception { for (TransportService transportService : transportServices) { Collection cancellableTasks = transportService.getTaskManager().getCancellableTasks().values(); for (CancellableTask cancellableTask : cancellableTasks) { - if (cancellableTask.getAction().contains(SearchAction.INSTANCE.name())) { + if (cancellableTask.getAction().contains(TransportSearchAction.TYPE.name())) { assertTrue(cancellableTask.getDescription(), cancellableTask.isCancelled()); } } @@ -398,17 +400,21 @@ public void testLookupFields() throws Exception { .fetchField("to"); SearchRequest request = new SearchRequest("cluster_a:remote_calls").source(searchSourceBuilder); request.setCcsMinimizeRoundtrips(randomBoolean()); - SearchResponse searchResponse = client().search(request).actionGet(); - ElasticsearchAssertions.assertHitCount(searchResponse, 2); - SearchHit hit0 = searchResponse.getHits().getHits()[0]; - assertThat(hit0.getIndex(), equalTo("remote_calls")); - assertThat(hit0.field("from"), nullValue()); - assertThat(hit0.field("to").getValues(), contains(Map.of("name", List.of("Remote C")))); - - SearchHit hit1 = searchResponse.getHits().getHits()[1]; - assertThat(hit1.getIndex(), equalTo("remote_calls")); - assertThat(hit1.field("from").getValues(), contains(Map.of("name", List.of("Remote A")), Map.of("name", List.of("Remote B")))); - assertThat(hit1.field("to").getValues(), 
contains(Map.of("name", List.of("Remote C")))); + assertResponse(client().search(request), response -> { + ElasticsearchAssertions.assertHitCount(response, 2); + SearchHit hit0 = response.getHits().getHits()[0]; + assertThat(hit0.getIndex(), equalTo("remote_calls")); + assertThat(hit0.field("from"), nullValue()); + assertThat(hit0.field("to").getValues(), contains(Map.of("name", List.of("Remote C")))); + + SearchHit hit1 = response.getHits().getHits()[1]; + assertThat(hit1.getIndex(), equalTo("remote_calls")); + assertThat( + hit1.field("from").getValues(), + contains(Map.of("name", List.of("Remote A")), Map.of("name", List.of("Remote B"))) + ); + assertThat(hit1.field("to").getValues(), contains(Map.of("name", List.of("Remote C")))); + }); } // Search on both clusters { @@ -419,22 +425,26 @@ public void testLookupFields() throws Exception { .fetchField("to"); SearchRequest request = new SearchRequest("local_calls", "cluster_a:remote_calls").source(searchSourceBuilder); request.setCcsMinimizeRoundtrips(randomBoolean()); - SearchResponse searchResponse = client().search(request).actionGet(); - ElasticsearchAssertions.assertHitCount(searchResponse, 3); - SearchHit hit0 = searchResponse.getHits().getHits()[0]; - assertThat(hit0.getIndex(), equalTo("remote_calls")); - assertThat(hit0.field("from"), nullValue()); - assertThat(hit0.field("to").getValues(), contains(Map.of("name", List.of("Remote C")))); - - SearchHit hit1 = searchResponse.getHits().getHits()[1]; - assertThat(hit1.getIndex(), equalTo("remote_calls")); - assertThat(hit1.field("from").getValues(), contains(Map.of("name", List.of("Remote A")), Map.of("name", List.of("Remote B")))); - assertThat(hit1.field("to").getValues(), contains(Map.of("name", List.of("Remote C")))); - - SearchHit hit2 = searchResponse.getHits().getHits()[2]; - assertThat(hit2.getIndex(), equalTo("local_calls")); - assertThat(hit2.field("from").getValues(), contains(Map.of("name", List.of("Local A")))); - assertThat(hit2.field("to").getValues(), contains(Map.of("name", List.of("Local B")), Map.of("name", List.of("Local C")))); + assertResponse(client().search(request), response -> { + assertHitCount(response, 3); + SearchHit hit0 = response.getHits().getHits()[0]; + assertThat(hit0.getIndex(), equalTo("remote_calls")); + assertThat(hit0.field("from"), nullValue()); + assertThat(hit0.field("to").getValues(), contains(Map.of("name", List.of("Remote C")))); + + SearchHit hit1 = response.getHits().getHits()[1]; + assertThat(hit1.getIndex(), equalTo("remote_calls")); + assertThat( + hit1.field("from").getValues(), + contains(Map.of("name", List.of("Remote A")), Map.of("name", List.of("Remote B"))) + ); + assertThat(hit1.field("to").getValues(), contains(Map.of("name", List.of("Remote C")))); + + SearchHit hit2 = response.getHits().getHits()[2]; + assertThat(hit2.getIndex(), equalTo("local_calls")); + assertThat(hit2.field("from").getValues(), contains(Map.of("name", List.of("Local A")))); + assertThat(hit2.field("to").getValues(), contains(Map.of("name", List.of("Local B")), Map.of("name", List.of("Local C")))); + }); } } @@ -518,7 +528,7 @@ public void testSearchShardsWithIndexNameQuery() { { QueryBuilder query = new TermQueryBuilder("_index", "cluster_a:my_index"); SearchShardsRequest request = new SearchShardsRequest(indices, indicesOptions, query, null, null, randomBoolean(), "cluster_a"); - SearchShardsResponse resp = remoteClient.execute(SearchShardsAction.INSTANCE, request).actionGet(); + SearchShardsResponse resp = 
remoteClient.execute(TransportSearchShardsAction.TYPE, request).actionGet(); assertThat(resp.getGroups(), hasSize(numShards)); for (SearchShardsGroup group : resp.getGroups()) { assertFalse(group.skipped()); @@ -535,7 +545,7 @@ public void testSearchShardsWithIndexNameQuery() { randomBoolean(), randomFrom("cluster_b", null) ); - SearchShardsResponse resp = remoteClient.execute(SearchShardsAction.INSTANCE, request).actionGet(); + SearchShardsResponse resp = remoteClient.execute(TransportSearchShardsAction.TYPE, request).actionGet(); assertThat(resp.getGroups(), hasSize(numShards)); for (SearchShardsGroup group : resp.getGroups()) { assertTrue(group.skipped()); @@ -552,7 +562,7 @@ public void testSearchShardsWithIndexNameQuery() { randomBoolean(), randomFrom("cluster_a", "cluster_b", null) ); - SearchShardsResponse resp = remoteClient.execute(SearchShardsAction.INSTANCE, request).actionGet(); + SearchShardsResponse resp = remoteClient.execute(TransportSearchShardsAction.TYPE, request).actionGet(); assertThat(resp.getGroups(), hasSize(numShards)); for (SearchShardsGroup group : resp.getGroups()) { assertTrue(group.skipped()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java index 0be427a5fd09d..379cdfc990207 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java @@ -11,6 +11,8 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchResponse.Cluster; +import org.elasticsearch.action.search.SearchResponse.Clusters; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.search.TransportSearchAction; @@ -41,6 +43,7 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -119,39 +122,40 @@ public void testClusterDetailsAfterSuccessfulCCS() throws Exception { } searchRequest.source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(10)); - SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); - assertNotNull(searchResponse); - - SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); - assertThat(clusters.getTotal(), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); - - SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - 
assertNotNull(localClusterSearchInfo); - assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertThat(localClusterSearchInfo.getIndexExpression(), equalTo(localIndex)); - assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); - - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteClusterSearchInfo); - assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo(remoteIndex)); - assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { + assertNotNull(response); + + Clusters clusters = response.getClusters(); + assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); + + Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + assertThat(localClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(localClusterSearchInfo.getIndexExpression(), equalTo(localIndex)); + assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); + + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo(remoteIndex)); + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), 
equalTo(0)); + assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); + }); } // CCS with a search where the timestamp of the query cannot match so should be SUCCESSFUL with all shards skipped @@ -183,47 +187,49 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except searchRequest.source(new SearchSourceBuilder().query(rangeQueryBuilder).size(10)); - SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); - assertNotNull(searchResponse); - - SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); - assertThat(clusters.getTotal(), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); - - SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - assertNotNull(localClusterSearchInfo); - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteClusterSearchInfo); - - assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); - if (dfs) { - // with DFS_QUERY_THEN_FETCH, the local shards are never skipped - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); - } else { - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1)); - } - assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(localClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { + assertNotNull(response); + + Clusters clusters = response.getClusters(); + assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); + + Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + + assertThat(localClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); + if (dfs) { + // with DFS_QUERY_THEN_FETCH, the local shards are never skipped + 
assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + } else { + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1)); + } + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); + if (clusters.isCcsMinimizeRoundtrips()) { + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1)); + } else { + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); + } + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + }); - assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); - if (clusters.isCcsMinimizeRoundtrips()) { - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1)); - } else { - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); - } - assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); } public void testClusterDetailsAfterCCSWithFailuresOnOneShardOnly() throws Exception { @@ -251,24 +257,25 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneShardOnly() throws Except ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder(randomLong(), new IllegalStateException("index corrupted"), 0); searchRequest.source(new SearchSourceBuilder().query(queryBuilder).size(10)); - SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); - assertNotNull(searchResponse); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { + assertNotNull(response); - SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertThat(clusters.getTotal(), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); + Clusters clusters = response.getClusters(); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); - 
SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - assertNotNull(localClusterSearchInfo); - assertOneFailedShard(localClusterSearchInfo, localNumShards); + Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + assertOneFailedShard(localClusterSearchInfo, localNumShards); - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteClusterSearchInfo); - assertOneFailedShard(remoteClusterSearchInfo, remoteNumShards); + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + assertOneFailedShard(remoteClusterSearchInfo, remoteNumShards); + }); } // tests bug fix https://github.com/elastic/elasticsearch/issues/100350 @@ -296,39 +303,40 @@ public void testClusterDetailsAfterCCSWhereRemoteClusterHasNoShardsToSearch() th } searchRequest.source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(10)); - SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); - assertNotNull(searchResponse); - - SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); - assertThat(clusters.getTotal(), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); - - SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - assertNotNull(localClusterSearchInfo); - assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertThat(localClusterSearchInfo.getIndexExpression(), equalTo(localIndex)); - assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); - - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteClusterSearchInfo); - assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo("no_such_index*")); - assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(0)); // no shards since index does not exist - assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); - assertNotNull(remoteClusterSearchInfo.getTook()); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { + assertNotNull(response); + + 
Clusters clusters = response.getClusters(); + assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); + + Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + assertThat(localClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(localClusterSearchInfo.getIndexExpression(), equalTo(localIndex)); + assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); + + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo("no_such_index*")); + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(0)); // no shards since index does not exist + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); + assertNotNull(remoteClusterSearchInfo.getTook()); + }); } public void testClusterDetailsAfterCCSWithFailuresOnRemoteClusterOnly() throws Exception { @@ -375,59 +383,58 @@ public void testClusterDetailsAfterCCSWithFailuresOnRemoteClusterOnly() throws E Throwable rootCause = ExceptionsHelper.unwrap(ee.getCause(), IllegalStateException.class); assertThat(rootCause.getMessage(), containsString("index corrupted")); } else { - SearchResponse searchResponse = queryFuture.get(); - assertNotNull(searchResponse); - - SearchResponse.Clusters clusters = searchResponse.getClusters(); - if (dfs == false) { - assertThat(clusters.isCcsMinimizeRoundtrips(), equalTo(minimizeRoundtrips)); - } - assertThat(clusters.getTotal(), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(0)); - if (skipUnavailable) { - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); - } else { - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(1)); - } + assertResponse(queryFuture, response -> { + 
assertNotNull(response); - SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - assertNotNull(localClusterSearchInfo); - assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); - - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + Clusters clusters = response.getClusters(); + if (dfs == false) { + assertThat(clusters.isCcsMinimizeRoundtrips(), equalTo(minimizeRoundtrips)); + } + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(0)); + if (skipUnavailable) { + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); + } else { + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(1)); + } - assertNotNull(remoteClusterSearchInfo); - SearchResponse.Cluster.Status expectedStatus = skipUnavailable - ? SearchResponse.Cluster.Status.SKIPPED - : SearchResponse.Cluster.Status.FAILED; - assertThat(remoteClusterSearchInfo.getStatus(), equalTo(expectedStatus)); - if (clusters.isCcsMinimizeRoundtrips()) { - assertNull(remoteClusterSearchInfo.getTotalShards()); - assertNull(remoteClusterSearchInfo.getSuccessfulShards()); - assertNull(remoteClusterSearchInfo.getSkippedShards()); - assertNull(remoteClusterSearchInfo.getFailedShards()); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); - } else { - assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(remoteNumShards)); - } - assertNull(remoteClusterSearchInfo.getTook()); - assertFalse(remoteClusterSearchInfo.isTimedOut()); - ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + assertThat(localClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), 
equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); + + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + + assertNotNull(remoteClusterSearchInfo); + Cluster.Status expectedStatus = skipUnavailable ? Cluster.Status.SKIPPED : Cluster.Status.FAILED; + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(expectedStatus)); + if (clusters.isCcsMinimizeRoundtrips()) { + assertNull(remoteClusterSearchInfo.getTotalShards()); + assertNull(remoteClusterSearchInfo.getSuccessfulShards()); + assertNull(remoteClusterSearchInfo.getSkippedShards()); + assertNull(remoteClusterSearchInfo.getFailedShards()); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); + } else { + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(remoteNumShards)); + } + assertNull(remoteClusterSearchInfo.getTook()); + assertFalse(remoteClusterSearchInfo.isTimedOut()); + ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); + assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + }); } } @@ -458,40 +465,41 @@ public void testCCSWithSearchTimeoutOnRemoteCluster() throws Exception { SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(slowRunningQueryBuilder).timeout(searchTimeout); searchRequest.source(sourceBuilder); - SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); - assertNotNull(searchResponse); - - SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertThat(clusters.getTotal(), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); - - SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - assertNotNull(localClusterSearchInfo); - assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.PARTIAL)); - assertTrue(localClusterSearchInfo.isTimedOut()); - assertThat(localClusterSearchInfo.getIndexExpression(), equalTo(localIndex)); - assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(localClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); - - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteClusterSearchInfo); - assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.PARTIAL)); - 
assertTrue(remoteClusterSearchInfo.isTimedOut()); - assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo(remoteIndex)); - assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { + assertNotNull(response); + + Clusters clusters = response.getClusters(); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); + + Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + assertThat(localClusterSearchInfo.getStatus(), equalTo(Cluster.Status.PARTIAL)); + assertTrue(localClusterSearchInfo.isTimedOut()); + assertThat(localClusterSearchInfo.getIndexExpression(), equalTo(localIndex)); + assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(Cluster.Status.PARTIAL)); + assertTrue(remoteClusterSearchInfo.isTimedOut()); + assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo(remoteIndex)); + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + }); } public void testRemoteClusterOnlyCCSSuccessfulResult() throws Exception { @@ -513,29 +521,30 @@ public void testRemoteClusterOnlyCCSSuccessfulResult() throws Exception { } searchRequest.source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(10)); - SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); - assertNotNull(searchResponse); - - SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); - assertThat(clusters.getTotal(), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(1)); - 
assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); - - assertNull(clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)); - - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteClusterSearchInfo); - assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { + assertNotNull(response); + + Clusters clusters = response.getClusters(); + assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); + assertThat(clusters.getTotal(), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); + + assertNull(clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)); + + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); + }); } public void testRemoteClusterOnlyCCSWithFailuresOnOneShardOnly() throws Exception { @@ -560,22 +569,22 @@ public void testRemoteClusterOnlyCCSWithFailuresOnOneShardOnly() throws Exceptio ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder(randomLong(), new IllegalStateException("index corrupted"), 0); searchRequest.source(new SearchSourceBuilder().query(queryBuilder).size(10)); - SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); - assertNotNull(searchResponse); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { - SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertThat(clusters.getTotal(), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - 
assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); + Clusters clusters = response.getClusters(); + assertThat(clusters.getTotal(), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); - assertNull(clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)); + assertNull(clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)); - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteClusterSearchInfo); - assertOneFailedShard(remoteClusterSearchInfo, remoteNumShards); + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + assertOneFailedShard(remoteClusterSearchInfo, remoteNumShards); + }); } public void testRemoteClusterOnlyCCSWithFailuresOnAllShards() throws Exception { @@ -612,44 +621,43 @@ public void testRemoteClusterOnlyCCSWithFailuresOnAllShards() throws Exception { Throwable rootCause = ExceptionsHelper.unwrap(ee, IllegalStateException.class); assertThat(rootCause.getMessage(), containsString("index corrupted")); } else { - SearchResponse searchResponse = queryFuture.get(); - assertNotNull(searchResponse); - SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertThat(clusters.getTotal(), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(0)); - if (skipUnavailable) { - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); - } else { - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(1)); - } + assertResponse(queryFuture, response -> { + assertNotNull(response); + Clusters clusters = response.getClusters(); + assertThat(clusters.getTotal(), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(0)); + if (skipUnavailable) { + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); + } else { + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(1)); + } - assertNull(clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)); + assertNull(clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)); - SearchResponse.Cluster 
remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER);
- assertNotNull(remoteClusterSearchInfo);
- SearchResponse.Cluster.Status expectedStatus = skipUnavailable
- ? SearchResponse.Cluster.Status.SKIPPED
- : SearchResponse.Cluster.Status.FAILED;
- assertThat(remoteClusterSearchInfo.getStatus(), equalTo(expectedStatus));
- assertNull(remoteClusterSearchInfo.getTotalShards());
- assertNull(remoteClusterSearchInfo.getSuccessfulShards());
- assertNull(remoteClusterSearchInfo.getSkippedShards());
- assertNull(remoteClusterSearchInfo.getFailedShards());
- assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1));
- assertNull(remoteClusterSearchInfo.getTook());
- assertFalse(remoteClusterSearchInfo.isTimedOut());
- ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0);
- assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted"));
+ Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER);
+ assertNotNull(remoteClusterSearchInfo);
+ Cluster.Status expectedStatus = skipUnavailable ? Cluster.Status.SKIPPED : Cluster.Status.FAILED;
+ assertThat(remoteClusterSearchInfo.getStatus(), equalTo(expectedStatus));
+ assertNull(remoteClusterSearchInfo.getTotalShards());
+ assertNull(remoteClusterSearchInfo.getSuccessfulShards());
+ assertNull(remoteClusterSearchInfo.getSkippedShards());
+ assertNull(remoteClusterSearchInfo.getFailedShards());
+ assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1));
+ assertNull(remoteClusterSearchInfo.getTook());
+ assertFalse(remoteClusterSearchInfo.isTimedOut());
+ ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0);
+ assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted"));
+ });
 }
 }
- private static void assertOneFailedShard(SearchResponse.Cluster cluster, int totalShards) {
+ private static void assertOneFailedShard(Cluster cluster, int totalShards) {
 assertNotNull(cluster);
- assertThat(cluster.getStatus(), equalTo(SearchResponse.Cluster.Status.PARTIAL));
+ assertThat(cluster.getStatus(), equalTo(Cluster.Status.PARTIAL));
 assertThat(cluster.getTotalShards(), equalTo(totalShards));
 assertThat(cluster.getSuccessfulShards(), equalTo(totalShards - 1));
 assertThat(cluster.getSkippedShards(), equalTo(0));
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java
index fa84353b7c9cb..8b6f4112cfc17 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java
@@ -31,6 +31,7 @@
 import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.hamcrest.Matchers.equalTo;
 public class CrossClusterSearchLeakIT extends AbstractMultiClustersTestCase {
@@ -136,18 +137,23 @@ public void testSearch() throws Exception {
 }
 for (ActionFuture<SearchResponse> future : futures) {
- SearchResponse searchResponse = future.get();
- if (searchResponse.getScrollId() != null) {
- ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
- clearScrollRequest.scrollIds(List.of(searchResponse.getScrollId()));
- client(LOCAL_CLUSTER).clearScroll(clearScrollRequest).get();
- }
+ assertResponse(future, response -> {
+ if (response.getScrollId() != null) {
+ ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
+ clearScrollRequest.scrollIds(List.of(response.getScrollId()));
+ try {
+ client(LOCAL_CLUSTER).clearScroll(clearScrollRequest).get();
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
- Terms terms = searchResponse.getAggregations().get("f");
- assertThat(terms.getBuckets().size(), equalTo(docs));
- for (Terms.Bucket bucket : terms.getBuckets()) {
- assertThat(bucket.getDocCount(), equalTo(1L));
- }
+ Terms terms = response.getAggregations().get("f");
+ assertThat(terms.getBuckets().size(), equalTo(docs));
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket.getDocCount(), equalTo(1L));
+ }
+ });
 }
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java
index b600098d82b33..15afd6897a40e 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java
@@ -14,7 +14,6 @@
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.document.DocumentField;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -39,7 +38,7 @@
 import java.util.Objects;
 import static java.util.Collections.singletonList;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.CoreMatchers.equalTo;
@@ -72,21 +71,22 @@ public void testPlugin() throws Exception {
 indicesAdmin().prepareRefresh().get();
- SearchResponse response = prepareSearch().setSource(
- new SearchSourceBuilder().ext(Collections.singletonList(new TermVectorsFetchBuilder("test")))
- ).get();
- assertNoFailures(response);
- assertThat(
- ((Map<String, Integer>) response.getHits().getAt(0).field("term_vectors_fetch").getValues().get(0)).get("i"),
- equalTo(2)
- );
- assertThat(
- ((Map<String, Integer>) response.getHits().getAt(0).field("term_vectors_fetch").getValues().get(0)).get("am"),
- equalTo(2)
- );
- assertThat(
- ((Map<String, Integer>) response.getHits().getAt(0).field("term_vectors_fetch").getValues().get(0)).get("sam"),
- equalTo(1)
+ assertNoFailuresAndResponse(
+ prepareSearch().setSource(new SearchSourceBuilder().ext(Collections.singletonList(new TermVectorsFetchBuilder("test")))),
+ response -> {
+ assertThat(
+ ((Map<String, Integer>) response.getHits().getAt(0).field("term_vectors_fetch").getValues().get(0)).get("i"),
+ equalTo(2)
+ );
+ assertThat(
+ ((Map<String, Integer>) response.getHits().getAt(0).field("term_vectors_fetch").getValues().get(0)).get("am"),
+ equalTo(2)
+ );
+ assertThat(
+ ((Map<String, Integer>) response.getHits().getAt(0).field("term_vectors_fetch").getValues().get(0)).get("sam"),
+ equalTo(1)
+ );
+ }
 );
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java
index dcfee8994b56b..607c6596d15c9 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java
@@ -12,7 +12,6 @@
 import org.apache.lucene.util.ArrayUtil;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexSettings;
@@ -53,6 +52,7 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHit;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId;
@@ -102,8 +102,7 @@ public void testSimpleNested() throws Exception {
 List<IndexRequestBuilder> requests = new ArrayList<>();
 requests.add(
- client().prepareIndex("articles")
- .setId("1")
+ prepareIndex("articles").setId("1")
 .setSource(
 jsonBuilder().startObject()
 .field("title", "quick brown fox")
@@ -122,8 +121,7 @@ public void testSimpleNested() throws Exception {
 )
 );
 requests.add(
- client().prepareIndex("articles")
- .setId("2")
+ prepareIndex("articles").setId("2")
 .setSource(
 jsonBuilder().startObject()
 .field("title", "big gray elephant")
@@ -143,75 +141,84 @@ public void testSimpleNested() throws Exception {
 );
 indexRandom(true, requests);
- SearchResponse response = prepareSearch("articles").setQuery(
- nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit(new InnerHitBuilder("comment"))
- ).get();
- assertNoFailures(response);
- assertHitCount(response, 1);
- assertSearchHit(response, 1, hasId("1"));
- assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
- SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
- assertThat(innerHits.getTotalHits().value, equalTo(2L));
- assertThat(innerHits.getHits().length, equalTo(2));
- assertThat(innerHits.getAt(0).getId(), equalTo("1"));
- assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
- assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0));
- assertThat(innerHits.getAt(1).getId(), equalTo("1"));
- assertThat(innerHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments"));
- assertThat(innerHits.getAt(1).getNestedIdentity().getOffset(), equalTo(1));
-
- response = prepareSearch("articles").setQuery(
- nestedQuery("comments", matchQuery("comments.message", "elephant"), ScoreMode.Avg).innerHit(new InnerHitBuilder("comment"))
- ).get();
- assertNoFailures(response);
- assertHitCount(response, 1);
- assertSearchHit(response, 1, hasId("2"));
- assertThat(response.getHits().getAt(0).getShard(), notNullValue());
- assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
- innerHits = 
response.getHits().getAt(0).getInnerHits().get("comment"); - assertThat(innerHits.getTotalHits().value, equalTo(3L)); - assertThat(innerHits.getHits().length, equalTo(3)); - assertThat(innerHits.getAt(0).getId(), equalTo("2")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(innerHits.getAt(1).getId(), equalTo("2")); - assertThat(innerHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(1).getNestedIdentity().getOffset(), equalTo(1)); - assertThat(innerHits.getAt(2).getId(), equalTo("2")); - assertThat(innerHits.getAt(2).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(2).getNestedIdentity().getOffset(), equalTo(2)); - - response = prepareSearch("articles").setQuery( - nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit( - new InnerHitBuilder().setHighlightBuilder(new HighlightBuilder().field("comments.message")) - .setExplain(true) - .addFetchField("comments.mes*") - .addScriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) - .setSize(1) - ) - ).get(); - assertNoFailures(response); - innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); - assertThat(innerHits.getTotalHits().value, equalTo(2L)); - assertThat(innerHits.getHits().length, equalTo(1)); - HighlightField highlightField = innerHits.getAt(0).getHighlightFields().get("comments.message"); - assertThat(highlightField.fragments()[0].string(), equalTo("fox eat quick")); - assertThat(innerHits.getAt(0).getExplanation().toString(), containsString("weight(comments.message:fox in")); - assertThat( - innerHits.getAt(0).getFields().get("comments").getValue(), - equalTo(Collections.singletonMap("message", Collections.singletonList("fox eat quick"))) + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit(new InnerHitBuilder("comment")) + ), + response -> { + assertHitCount(response, 1); + assertSearchHit(response, 1, hasId("1")); + assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment"); + assertThat(innerHits.getTotalHits().value, equalTo(2L)); + assertThat(innerHits.getHits().length, equalTo(2)); + assertThat(innerHits.getAt(0).getId(), equalTo("1")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(innerHits.getAt(1).getId(), equalTo("1")); + assertThat(innerHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(1).getNestedIdentity().getOffset(), equalTo(1)); + } + ); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments", matchQuery("comments.message", "elephant"), ScoreMode.Avg).innerHit(new InnerHitBuilder("comment")) + ), + response -> { + assertHitCount(response, 1); + assertSearchHit(response, 1, hasId("2")); + assertThat(response.getHits().getAt(0).getShard(), notNullValue()); + assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment"); + 
assertThat(innerHits.getTotalHits().value, equalTo(3L)); + assertThat(innerHits.getHits().length, equalTo(3)); + assertThat(innerHits.getAt(0).getId(), equalTo("2")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(innerHits.getAt(1).getId(), equalTo("2")); + assertThat(innerHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(1).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(innerHits.getAt(2).getId(), equalTo("2")); + assertThat(innerHits.getAt(2).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(2).getNestedIdentity().getOffset(), equalTo(2)); + } + ); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit( + new InnerHitBuilder().setHighlightBuilder(new HighlightBuilder().field("comments.message")) + .setExplain(true) + .addFetchField("comments.mes*") + .addScriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) + .setSize(1) + ) + ), + response -> { + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); + assertThat(innerHits.getTotalHits().value, equalTo(2L)); + assertThat(innerHits.getHits().length, equalTo(1)); + HighlightField highlightField = innerHits.getAt(0).getHighlightFields().get("comments.message"); + assertThat(highlightField.fragments()[0].string(), equalTo("fox eat quick")); + assertThat(innerHits.getAt(0).getExplanation().toString(), containsString("weight(comments.message:fox in")); + assertThat( + innerHits.getAt(0).getFields().get("comments").getValue(), + equalTo(Collections.singletonMap("message", Collections.singletonList("fox eat quick"))) + ); + assertThat(innerHits.getAt(0).getFields().get("script").getValue().toString(), equalTo("5")); + } + ); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit( + new InnerHitBuilder().addDocValueField("comments.mes*").setSize(1) + ) + ), + response -> { + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getFields().get("comments.message").getValue().toString(), equalTo("eat")); + } ); - assertThat(innerHits.getAt(0).getFields().get("script").getValue().toString(), equalTo("5")); - - response = prepareSearch("articles").setQuery( - nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit( - new InnerHitBuilder().addDocValueField("comments.mes*").setSize(1) - ) - ).get(); - assertNoFailures(response); - innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getFields().get("comments.message").getValue().toString(), equalTo("eat")); } public void testRandomNested() throws Exception { @@ -233,7 +240,7 @@ public void testRandomNested() throws Exception { source.startObject().field("x", "y").endObject(); } source.endArray().endObject(); - requestBuilders.add(client().prepareIndex("idx").setId(Integer.toString(i)).setSource(source)); + requestBuilders.add(prepareIndex("idx").setId(Integer.toString(i)).setSource(source)); } indexRandom(true, requestBuilders); 
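[Annotation, not part of the patch: every hunk in this file repeats the same mechanical rewrite seen above. A `SearchResponse response = request.get()` call followed by assertions becomes a callback passed to assertResponse or assertNoFailuresAndResponse, so the response is released even when an assertion throws. A minimal sketch of what such a helper has to do, using hypothetical names (ResponseAssertSketch, assertResponseSketch) rather than the actual ElasticsearchAssertions source:

    import java.util.concurrent.ExecutionException;
    import java.util.function.Consumer;

    import org.elasticsearch.action.ActionFuture;
    import org.elasticsearch.core.RefCounted;

    public final class ResponseAssertSketch {
        private ResponseAssertSketch() {}

        // Sketch of the callback-style helper these tests migrate to. It assumes the
        // response is ref-counted; the shipped helper may differ in detail.
        public static <R extends RefCounted> void assertResponseSketch(ActionFuture<R> future, Consumer<R> consumer)
            throws ExecutionException, InterruptedException {
            R response = future.get();     // wait for the request to complete
            try {
                consumer.accept(response); // run the caller's assertions
            } finally {
                response.decRef();         // release the response even if an assertion failed
            }
        }
    }

The Consumer cannot throw checked exceptions, which is also why the CrossClusterSearchLeakIT hunk earlier wraps clearScroll(...).get() in a try/catch and rethrows as RuntimeException.]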
@@ -249,32 +256,31 @@ public void testRandomNested() throws Exception {
 new InnerHitBuilder("b").addSort(new FieldSortBuilder("_doc").order(SortOrder.ASC)).setSize(size)
 )
 );
- SearchResponse searchResponse = prepareSearch("idx").setQuery(boolQuery).setSize(numDocs).addSort("foo", SortOrder.ASC).get();
-
- assertNoFailures(searchResponse);
- assertHitCount(searchResponse, numDocs);
- assertThat(searchResponse.getHits().getHits().length, equalTo(numDocs));
- for (int i = 0; i < numDocs; i++) {
- SearchHit searchHit = searchResponse.getHits().getAt(i);
- assertThat(searchHit.getShard(), notNullValue());
- SearchHits inner = searchHit.getInnerHits().get("a");
- assertThat(inner.getTotalHits().value, equalTo((long) field1InnerObjects[i]));
- for (int j = 0; j < field1InnerObjects[i] && j < size; j++) {
- SearchHit innerHit = inner.getAt(j);
- assertThat(innerHit.getNestedIdentity().getField().string(), equalTo("field1"));
- assertThat(innerHit.getNestedIdentity().getOffset(), equalTo(j));
- assertThat(innerHit.getNestedIdentity().getChild(), nullValue());
- }
-
- inner = searchHit.getInnerHits().get("b");
- assertThat(inner.getTotalHits().value, equalTo((long) field2InnerObjects[i]));
- for (int j = 0; j < field2InnerObjects[i] && j < size; j++) {
- SearchHit innerHit = inner.getAt(j);
- assertThat(innerHit.getNestedIdentity().getField().string(), equalTo("field2"));
- assertThat(innerHit.getNestedIdentity().getOffset(), equalTo(j));
- assertThat(innerHit.getNestedIdentity().getChild(), nullValue());
+ assertNoFailuresAndResponse(prepareSearch("idx").setQuery(boolQuery).setSize(numDocs).addSort("foo", SortOrder.ASC), response -> {
+ assertHitCount(response, numDocs);
+ assertThat(response.getHits().getHits().length, equalTo(numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ SearchHit searchHit = response.getHits().getAt(i);
+ assertThat(searchHit.getShard(), notNullValue());
+ SearchHits inner = searchHit.getInnerHits().get("a");
+ assertThat(inner.getTotalHits().value, equalTo((long) field1InnerObjects[i]));
+ for (int j = 0; j < field1InnerObjects[i] && j < size; j++) {
+ SearchHit innerHit = inner.getAt(j);
+ assertThat(innerHit.getNestedIdentity().getField().string(), equalTo("field1"));
+ assertThat(innerHit.getNestedIdentity().getOffset(), equalTo(j));
+ assertThat(innerHit.getNestedIdentity().getChild(), nullValue());
+ }
+
+ inner = searchHit.getInnerHits().get("b");
+ assertThat(inner.getTotalHits().value, equalTo((long) field2InnerObjects[i]));
+ for (int j = 0; j < field2InnerObjects[i] && j < size; j++) {
+ SearchHit innerHit = inner.getAt(j);
+ assertThat(innerHit.getNestedIdentity().getField().string(), equalTo("field2"));
+ assertThat(innerHit.getNestedIdentity().getOffset(), equalTo(j));
+ assertThat(innerHit.getNestedIdentity().getChild(), nullValue());
+ }
 }
- }
+ });
 }
 public void testNestedMultipleLayers() throws Exception {
@@ -310,8 +316,7 @@ public void testNestedMultipleLayers() throws Exception {
 List<IndexRequestBuilder> requests = new ArrayList<>();
 requests.add(
- client().prepareIndex("articles")
- .setId("1")
+ prepareIndex("articles").setId("1")
 .setSource(
 jsonBuilder().startObject()
 .field("title", "quick brown fox")
@@ -337,8 +342,7 @@ public void testNestedMultipleLayers() throws Exception {
 )
 );
 requests.add(
- client().prepareIndex("articles")
- .setId("2")
+ prepareIndex("articles").setId("2")
 .setSource(
 jsonBuilder().startObject()
 .field("title", "big gray elephant")
@@ -358,140 +362,154 @@ public void testNestedMultipleLayers() throws Exception {
 indexRandom(true, 
requests); // Check we can load the first doubly-nested document. - SearchResponse response = prepareSearch("articles").setQuery( - nestedQuery( - "comments", - nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good"), ScoreMode.Avg).innerHit( - new InnerHitBuilder("remark") - ), - ScoreMode.Avg - ).innerHit(new InnerHitBuilder()) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertSearchHit(response, 1, hasId("1")); - assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); - SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); - assertThat(innerHits.getTotalHits().value, equalTo(1L)); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getId(), equalTo("1")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - innerHits = innerHits.getAt(0).getInnerHits().get("remark"); - assertThat(innerHits.getTotalHits().value, equalTo(1L)); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getId(), equalTo("1")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); - + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery( + "comments", + nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good"), ScoreMode.Avg).innerHit( + new InnerHitBuilder("remark") + ), + ScoreMode.Avg + ).innerHit(new InnerHitBuilder()) + ), + response -> { + assertHitCount(response, 1); + assertSearchHit(response, 1, hasId("1")); + assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); + assertThat(innerHits.getTotalHits().value, equalTo(1L)); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getId(), equalTo("1")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + innerHits = innerHits.getAt(0).getInnerHits().get("remark"); + assertThat(innerHits.getTotalHits().value, equalTo(1L)); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getId(), equalTo("1")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); + } + ); // Check we can load the second doubly-nested document. 
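[Annotation, not part of the patch: the block above and the block after this note run the same two-level composition, differing only in the match term ("good" above, "neutral" below). Stripped of assertions, the query both variants build looks like this:

    // (static imports as in the test file: QueryBuilders.nestedQuery, QueryBuilders.matchQuery)
    // The inner nestedQuery matches on comments.remarks and names its hits "remark";
    // the outer nestedQuery wraps it and attaches a default InnerHitBuilder for the
    // "comments" level, so each top-level hit exposes two levels of inner hits.
    QueryBuilder query = nestedQuery(
        "comments",
        nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "neutral"), ScoreMode.Avg)
            .innerHit(new InnerHitBuilder("remark")),
        ScoreMode.Avg
    ).innerHit(new InnerHitBuilder());

The assertions then navigate getInnerHits().get("comments") on the top-level hit and getInnerHits().get("remark") inside it, checking the NestedIdentity field, offset, and child at each level.]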
- response = prepareSearch("articles").setQuery( - nestedQuery( - "comments", - nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "neutral"), ScoreMode.Avg).innerHit( - new InnerHitBuilder("remark") - ), - ScoreMode.Avg - ).innerHit(new InnerHitBuilder()) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertSearchHit(response, 1, hasId("1")); - assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); - innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); - assertThat(innerHits.getTotalHits().value, equalTo(1L)); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getId(), equalTo("1")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); - innerHits = innerHits.getAt(0).getInnerHits().get("remark"); - assertThat(innerHits.getTotalHits().value, equalTo(1L)); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getId(), equalTo("1")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); - + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery( + "comments", + nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "neutral"), ScoreMode.Avg).innerHit( + new InnerHitBuilder("remark") + ), + ScoreMode.Avg + ).innerHit(new InnerHitBuilder()) + ), + response -> { + assertHitCount(response, 1); + assertSearchHit(response, 1, hasId("1")); + assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); + assertThat(innerHits.getTotalHits().value, equalTo(1L)); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getId(), equalTo("1")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); + innerHits = innerHits.getAt(0).getInnerHits().get("remark"); + assertThat(innerHits.getTotalHits().value, equalTo(1L)); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getId(), equalTo("1")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); + } + ); // Directly refer to the second level: - response = prepareSearch("articles").setQuery( - nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "bad"), ScoreMode.Avg).innerHit(new InnerHitBuilder()) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertSearchHit(response, 1, hasId("2")); - assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); - innerHits = response.getHits().getAt(0).getInnerHits().get("comments.remarks"); - assertThat(innerHits.getTotalHits().value, equalTo(1L)); - 
assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getId(), equalTo("2")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); - - response = prepareSearch("articles").setQuery( - nestedQuery( - "comments", + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "bad"), ScoreMode.Avg).innerHit( - new InnerHitBuilder("remark") - ), - ScoreMode.Avg - ).innerHit(new InnerHitBuilder()) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertSearchHit(response, 1, hasId("2")); - assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); - innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); - assertThat(innerHits.getTotalHits().value, equalTo(1L)); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getId(), equalTo("2")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - innerHits = innerHits.getAt(0).getInnerHits().get("remark"); - assertThat(innerHits.getTotalHits().value, equalTo(1L)); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getId(), equalTo("2")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); - - // Check that inner hits contain _source even when it's disabled on the parent request. 
- response = prepareSearch("articles").setFetchSource(false) - .setQuery( + new InnerHitBuilder() + ) + ), + response -> { + assertHitCount(response, 1); + assertSearchHit(response, 1, hasId("2")); + assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments.remarks"); + assertThat(innerHits.getTotalHits().value, equalTo(1L)); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getId(), equalTo("2")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); + } + ); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( nestedQuery( "comments", - nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good"), ScoreMode.Avg).innerHit( + nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "bad"), ScoreMode.Avg).innerHit( new InnerHitBuilder("remark") ), ScoreMode.Avg ).innerHit(new InnerHitBuilder()) - ) - .get(); - assertNoFailures(response); - innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); - innerHits = innerHits.getAt(0).getInnerHits().get("remark"); - assertNotNull(innerHits.getAt(0).getSourceAsMap()); - assertFalse(innerHits.getAt(0).getSourceAsMap().isEmpty()); - - response = prepareSearch("articles").setQuery( - nestedQuery( - "comments", - nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good"), ScoreMode.Avg).innerHit( - new InnerHitBuilder("remark") + ), + response -> { + assertHitCount(response, 1); + assertSearchHit(response, 1, hasId("2")); + assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); + assertThat(innerHits.getTotalHits().value, equalTo(1L)); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getId(), equalTo("2")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + innerHits = innerHits.getAt(0).getInnerHits().get("remark"); + assertThat(innerHits.getTotalHits().value, equalTo(1L)); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getId(), equalTo("2")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); + } + ); + // Check that inner hits contain _source even when it's disabled on the parent request. 
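[Annotation, not part of the patch: the two added blocks that follow differ only in where source fetching is disabled. For reference, the three FetchSourceContext configurations this file exercises, all taken from the hunks themselves:

    // (import org.elasticsearch.search.fetch.subphase.FetchSourceContext, as in the test file)
    FetchSourceContext whole    = FetchSourceContext.FETCH_SOURCE;          // fetch the full _source
    FetchSourceContext none     = FetchSourceContext.DO_NOT_FETCH_SOURCE;   // fetch no _source
    FetchSourceContext filtered = FetchSourceContext.of(true, new String[] { "comments.message" }, null); // include filter, no excludes

The first block disables _source on the parent request via setFetchSource(false); the second passes DO_NOT_FETCH_SOURCE to the outer InnerHitBuilder. In both cases the assertions expect the "remark" inner hits to still carry a non-empty source map.]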
+ assertNoFailuresAndResponse(
+ prepareSearch("articles").setFetchSource(false)
+ .setQuery(
+ nestedQuery(
+ "comments",
+ nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good"), ScoreMode.Avg).innerHit(
+ new InnerHitBuilder("remark")
+ ),
+ ScoreMode.Avg
+ ).innerHit(new InnerHitBuilder())
+ ),
+ response -> {
+ SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments");
+ innerHits = innerHits.getAt(0).getInnerHits().get("remark");
+ assertNotNull(innerHits.getAt(0).getSourceAsMap());
+ assertFalse(innerHits.getAt(0).getSourceAsMap().isEmpty());
+ }
+ );
+ assertNoFailuresAndResponse(
+ prepareSearch("articles").setQuery(
+ nestedQuery(
+ "comments",
+ nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good"), ScoreMode.Avg).innerHit(
+ new InnerHitBuilder("remark")
+ ),
+ ScoreMode.Avg
+ ).innerHit(new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE))
+ ),
+ response -> {
+ SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments");
+ innerHits = innerHits.getAt(0).getInnerHits().get("remark");
+ assertNotNull(innerHits.getAt(0).getSourceAsMap());
+ assertFalse(innerHits.getAt(0).getSourceAsMap().isEmpty());
+ }
+ );
 }
 // Issue #9723
@@ -500,8 +518,7 @@ public void testNestedDefinedAsObject() throws Exception {
 List<IndexRequestBuilder> requests = new ArrayList<>();
 requests.add(
- client().prepareIndex("articles")
- .setId("1")
+ prepareIndex("articles").setId("1")
 .setSource(
 jsonBuilder().startObject()
 .field("title", "quick brown fox")
@@ -513,20 +530,23 @@
 );
 indexRandom(true, requests);
- SearchResponse response = prepareSearch("articles").setQuery(
- nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit(new InnerHitBuilder())
- ).get();
- assertNoFailures(response);
- assertHitCount(response, 1);
- assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
- assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(1L));
- assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getId(), equalTo("1"));
- assertThat(
- response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getField().string(),
- equalTo("comments")
+ assertNoFailuresAndResponse(
+ prepareSearch("articles").setQuery(
+ nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit(new InnerHitBuilder())
+ ),
+ response -> {
+ assertHitCount(response, 1);
+ assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(1L));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getId(), equalTo("1"));
+ assertThat(
+ response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getField().string(),
+ equalTo("comments")
+ );
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getChild(), nullValue());
+ }
 );
- assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getOffset(), equalTo(0));
- assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getChild(), nullValue());
 }
 public void testInnerHitsWithObjectFieldThatHasANestedField() throws Exception {
@@ -553,8 +573,7 @@
 List<IndexRequestBuilder> requests = new ArrayList<>();
 requests.add(
- client().prepareIndex("articles")
- .setId("1")
+ prepareIndex("articles").setId("1")
 .setSource(
 jsonBuilder().startObject()
 .field("title", "quick brown fox")
@@ -582,61 +601,66 @@
 );
 indexRandom(true, requests);
- SearchResponse resp1 = prepareSearch("articles").setQuery(
- nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit(
- new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.FETCH_SOURCE)
- )
- ).get();
- assertNoFailures(resp1);
- assertHitCount(resp1, 1);
- SearchHit parent = resp1.getHits().getAt(0);
- assertThat(parent.getId(), equalTo("1"));
- SearchHits inner = parent.getInnerHits().get("comments.messages");
- assertThat(inner.getTotalHits().value, equalTo(2L));
- assertThat(inner.getAt(0).getSourceAsString(), equalTo("{\"message\":\"no fox\"}"));
- assertThat(inner.getAt(1).getSourceAsString(), equalTo("{\"message\":\"fox eat quick\"}"));
-
- SearchResponse response = prepareSearch("articles").setQuery(
- nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit(
- new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE)
- )
- ).get();
- assertNoFailures(response);
- assertHitCount(response, 1);
- SearchHit hit = response.getHits().getAt(0);
- assertThat(hit.getId(), equalTo("1"));
- SearchHits messages = hit.getInnerHits().get("comments.messages");
- assertThat(messages.getTotalHits().value, equalTo(2L));
- assertThat(messages.getAt(0).getId(), equalTo("1"));
- assertThat(messages.getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages"));
- assertThat(messages.getAt(0).getNestedIdentity().getOffset(), equalTo(2));
- assertThat(messages.getAt(0).getNestedIdentity().getChild(), nullValue());
- assertThat(messages.getAt(1).getId(), equalTo("1"));
- assertThat(messages.getAt(1).getNestedIdentity().getField().string(), equalTo("comments.messages"));
- assertThat(messages.getAt(1).getNestedIdentity().getOffset(), equalTo(0));
- assertThat(messages.getAt(1).getNestedIdentity().getChild(), nullValue());
-
- response = prepareSearch("articles").setQuery(
- nestedQuery("comments.messages", matchQuery("comments.messages.message", "bear"), ScoreMode.Avg).innerHit(
- new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE)
- )
- ).get();
- assertNoFailures(response);
- assertHitCount(response, 1);
- hit = response.getHits().getAt(0);
- assertThat(hit.getId(), equalTo("1"));
- messages = hit.getInnerHits().get("comments.messages");
- assertThat(messages.getTotalHits().value, equalTo(1L));
- assertThat(messages.getAt(0).getId(), equalTo("1"));
- assertThat(messages.getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages"));
- assertThat(messages.getAt(0).getNestedIdentity().getOffset(), 
equalTo(1)); - assertThat(messages.getAt(0).getNestedIdentity().getChild(), nullValue()); - + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit( + new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.FETCH_SOURCE) + ) + ), + response -> { + assertHitCount(response, 1); + SearchHit parent = response.getHits().getAt(0); + assertThat(parent.getId(), equalTo("1")); + SearchHits inner = parent.getInnerHits().get("comments.messages"); + assertThat(inner.getTotalHits().value, equalTo(2L)); + assertThat(inner.getAt(0).getSourceAsString(), equalTo("{\"message\":\"no fox\"}")); + assertThat(inner.getAt(1).getSourceAsString(), equalTo("{\"message\":\"fox eat quick\"}")); + } + ); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit( + new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE) + ) + ), + response -> { + assertHitCount(response, 1); + SearchHit hit = response.getHits().getAt(0); + assertThat(hit.getId(), equalTo("1")); + SearchHits messages = hit.getInnerHits().get("comments.messages"); + assertThat(messages.getTotalHits().value, equalTo(2L)); + assertThat(messages.getAt(0).getId(), equalTo("1")); + assertThat(messages.getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages")); + assertThat(messages.getAt(0).getNestedIdentity().getOffset(), equalTo(2)); + assertThat(messages.getAt(0).getNestedIdentity().getChild(), nullValue()); + assertThat(messages.getAt(1).getId(), equalTo("1")); + assertThat(messages.getAt(1).getNestedIdentity().getField().string(), equalTo("comments.messages")); + assertThat(messages.getAt(1).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(messages.getAt(1).getNestedIdentity().getChild(), nullValue()); + } + ); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments.messages", matchQuery("comments.messages.message", "bear"), ScoreMode.Avg).innerHit( + new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE) + ) + ), + response -> { + assertHitCount(response, 1); + SearchHit hit = response.getHits().getAt(0); + assertThat(hit.getId(), equalTo("1")); + SearchHits messages = hit.getInnerHits().get("comments.messages"); + assertThat(messages.getTotalHits().value, equalTo(1L)); + assertThat(messages.getAt(0).getId(), equalTo("1")); + assertThat(messages.getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages")); + assertThat(messages.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(messages.getAt(0).getNestedIdentity().getChild(), nullValue()); + } + ); // index the message in an object form instead of an array requests = new ArrayList<>(); requests.add( - client().prepareIndex("articles") - .setId("1") + prepareIndex("articles").setId("1") .setSource( jsonBuilder().startObject() .field("title", "quick brown fox") @@ -649,21 +673,24 @@ public void testInnerHitsWithObjectFieldThatHasANestedField() throws Exception { ) ); indexRandom(true, requests); - response = prepareSearch("articles").setQuery( - nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit( - new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE) - ) - ).get(); - assertNoFailures(response); - assertHitCount(response, 
1);
- hit = response.getHits().getAt(0);
- assertThat(hit.getId(), equalTo("1"));
- messages = hit.getInnerHits().get("comments.messages");
- assertThat(messages.getTotalHits().value, equalTo(1L));
- assertThat(messages.getAt(0).getId(), equalTo("1"));
- assertThat(messages.getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages"));
- assertThat(messages.getAt(0).getNestedIdentity().getOffset(), equalTo(0));
- assertThat(messages.getAt(0).getNestedIdentity().getChild(), nullValue());
+ assertNoFailuresAndResponse(
+ prepareSearch("articles").setQuery(
+ nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit(
+ new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE)
+ )
+ ),
+ response -> {
+ assertHitCount(response, 1);
+ SearchHit hit = response.getHits().getAt(0);
+ assertThat(hit.getId(), equalTo("1"));
+ SearchHits messages = hit.getInnerHits().get("comments.messages");
+ assertThat(messages.getTotalHits().value, equalTo(1L));
+ assertThat(messages.getAt(0).getId(), equalTo("1"));
+ assertThat(messages.getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages"));
+ assertThat(messages.getAt(0).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat(messages.getAt(0).getNestedIdentity().getChild(), nullValue());
+ }
+ );
 }
 public void testMatchesQueriesNestedInnerHits() throws Exception {
@@ -690,8 +717,7 @@ public void testMatchesQueriesNestedInnerHits() throws Exception {
 List<IndexRequestBuilder> requests = new ArrayList<>();
 int numDocs = randomIntBetween(2, 35);
 requests.add(
- client().prepareIndex("test")
- .setId("0")
+ prepareIndex("test").setId("0")
 .setSource(
 jsonBuilder().startObject()
 .field("field1", 0)
@@ -709,8 +735,7 @@
 )
 );
 requests.add(
- client().prepareIndex("test")
- .setId("1")
+ prepareIndex("test").setId("1")
 .setSource(
 jsonBuilder().startObject()
 .field("field1", 1)
@@ -730,8 +755,7 @@
 for (int i = 2; i < numDocs; i++) {
 requests.add(
- client().prepareIndex("test")
- .setId(String.valueOf(i))
+ prepareIndex("test").setId(String.valueOf(i))
 .setSource(
 jsonBuilder().startObject()
 .field("field1", i)
@@ -759,34 +783,33 @@
 query = nestedQuery("nested1", query, ScoreMode.Avg).innerHit(
 new InnerHitBuilder().addSort(new FieldSortBuilder("nested1.n_field1").order(SortOrder.ASC))
 );
- SearchResponse searchResponse = prepareSearch("test").setQuery(query).setSize(numDocs).addSort("field1", SortOrder.ASC).get();
-
- assertNoFailures(searchResponse);
- assertAllSuccessful(searchResponse);
- assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) numDocs));
- assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("0"));
- assertThat(searchResponse.getHits().getAt(0).getInnerHits().get("nested1").getTotalHits().value, equalTo(2L));
- assertThat(searchResponse.getHits().getAt(0).getInnerHits().get("nested1").getAt(0).getMatchedQueries().length, equalTo(1));
- assertThat(searchResponse.getHits().getAt(0).getInnerHits().get("nested1").getAt(0).getMatchedQueries()[0], equalTo("test1"));
- assertThat(searchResponse.getHits().getAt(0).getInnerHits().get("nested1").getAt(1).getMatchedQueries().length, equalTo(1));
- assertThat(searchResponse.getHits().getAt(0).getInnerHits().get("nested1").getAt(1).getMatchedQueries()[0], equalTo("test3"));
-
- 
assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1"));
-        assertThat(searchResponse.getHits().getAt(1).getInnerHits().get("nested1").getTotalHits().value, equalTo(1L));
-        assertThat(searchResponse.getHits().getAt(1).getInnerHits().get("nested1").getAt(0).getMatchedQueries().length, equalTo(1));
-        assertThat(searchResponse.getHits().getAt(1).getInnerHits().get("nested1").getAt(0).getMatchedQueries()[0], equalTo("test2"));
-
-        for (int i = 2; i < numDocs; i++) {
-            assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(String.valueOf(i)));
-            assertThat(searchResponse.getHits().getAt(i).getInnerHits().get("nested1").getTotalHits().value, equalTo(1L));
-            assertThat(searchResponse.getHits().getAt(i).getInnerHits().get("nested1").getAt(0).getMatchedQueries().length, equalTo(1));
-            assertThat(searchResponse.getHits().getAt(i).getInnerHits().get("nested1").getAt(0).getMatchedQueries()[0], equalTo("test3"));
-        }
+        assertNoFailuresAndResponse(prepareSearch("test").setQuery(query).setSize(numDocs).addSort("field1", SortOrder.ASC), response -> {
+            assertAllSuccessful(response);
+            assertThat(response.getHits().getTotalHits().value, equalTo((long) numDocs));
+            assertThat(response.getHits().getAt(0).getId(), equalTo("0"));
+            assertThat(response.getHits().getAt(0).getInnerHits().get("nested1").getTotalHits().value, equalTo(2L));
+            assertThat(response.getHits().getAt(0).getInnerHits().get("nested1").getAt(0).getMatchedQueries().length, equalTo(1));
+            assertThat(response.getHits().getAt(0).getInnerHits().get("nested1").getAt(0).getMatchedQueries()[0], equalTo("test1"));
+            assertThat(response.getHits().getAt(0).getInnerHits().get("nested1").getAt(1).getMatchedQueries().length, equalTo(1));
+            assertThat(response.getHits().getAt(0).getInnerHits().get("nested1").getAt(1).getMatchedQueries()[0], equalTo("test3"));
+
+            assertThat(response.getHits().getAt(1).getId(), equalTo("1"));
+            assertThat(response.getHits().getAt(1).getInnerHits().get("nested1").getTotalHits().value, equalTo(1L));
+            assertThat(response.getHits().getAt(1).getInnerHits().get("nested1").getAt(0).getMatchedQueries().length, equalTo(1));
+            assertThat(response.getHits().getAt(1).getInnerHits().get("nested1").getAt(0).getMatchedQueries()[0], equalTo("test2"));
+
+            for (int i = 2; i < numDocs; i++) {
+                assertThat(response.getHits().getAt(i).getId(), equalTo(String.valueOf(i)));
+                assertThat(response.getHits().getAt(i).getInnerHits().get("nested1").getTotalHits().value, equalTo(1L));
+                assertThat(response.getHits().getAt(i).getInnerHits().get("nested1").getAt(0).getMatchedQueries().length, equalTo(1));
+                assertThat(response.getHits().getAt(i).getInnerHits().get("nested1").getAt(0).getMatchedQueries()[0], equalTo("test3"));
+            }
+        });
     }
 
     public void testNestedSource() throws Exception {
         assertAcked(prepareCreate("index1").setMapping("comments", "type=nested"));
-        client().prepareIndex("index1")
-            .setId("1")
+        prepareIndex("index1").setId("1")
             .setSource(
                 jsonBuilder().startObject()
                     .field("message", "quick brown fox")
@@ -811,71 +834,83 @@ public void testNestedSource() throws Exception {
         // the field name (comments.message) used for source filtering should be the same as when using that field for
         // other features (like in the query dsl or aggs) in order for consistency:
-        SearchResponse response = prepareSearch().setQuery(
-            nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.None).innerHit(
-                new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.of(true, new String[] { "comments.message" }, null))
-            )
-        ).get();
-        assertNoFailures(response);
-        assertHitCount(response, 1);
-
-        assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(2L));
-        assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(1));
-        assertThat(
-            response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().get("message"),
-            equalTo("fox eat quick")
-        );
-        assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().size(), equalTo(1));
-        assertThat(
-            response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().get("message"),
-            equalTo("fox ate rabbit x y z")
+        assertNoFailuresAndResponse(
+            prepareSearch().setQuery(
+                nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.None).innerHit(
+                    new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.of(true, new String[] { "comments.message" }, null))
+                )
+            ),
+            response -> {
+                assertHitCount(response, 1);
+
+                assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(2L));
+                assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(1));
+                assertThat(
+                    response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().get("message"),
+                    equalTo("fox eat quick")
+                );
+                assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().size(), equalTo(1));
+                assertThat(
+                    response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().get("message"),
+                    equalTo("fox ate rabbit x y z")
+                );
+            }
         );
 
-        response = prepareSearch().setQuery(
-            nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.None).innerHit(new InnerHitBuilder())
-        ).get();
-        assertNoFailures(response);
-        assertHitCount(response, 1);
-
-        assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(2L));
-        assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(2));
-        assertThat(
-            response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().get("message"),
-            equalTo("fox eat quick")
-        );
-        assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(2));
-        assertThat(
-            response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().get("message"),
-            equalTo("fox ate rabbit x y z")
+        assertNoFailuresAndResponse(
+            prepareSearch().setQuery(
+                nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.None).innerHit(new InnerHitBuilder())
+            ),
+            response -> {
+                assertHitCount(response, 1);
+
+                assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(2L));
+                assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(2));
+                assertThat(
+                    response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().get("message"),
+                    equalTo("fox eat quick")
+                );
+                assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(2));
+                assertThat(
+                    response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().get("message"),
+                    equalTo("fox ate rabbit x y z")
+                );
+            }
         );
 
         // Source filter on a field that does not exist inside the nested document and just check that we do not fail and
         // return an empty _source:
-        response = prepareSearch().setQuery(
-            nestedQuery("comments", matchQuery("comments.message", "away"), ScoreMode.None).innerHit(
-                new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.of(true, new String[] { "comments.missing_field" }, null))
-            )
-        ).get();
-        assertNoFailures(response);
-        assertHitCount(response, 1);
-        assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(1L));
-        assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(0));
-
+        assertNoFailuresAndResponse(
+            prepareSearch().setQuery(
+                nestedQuery("comments", matchQuery("comments.message", "away"), ScoreMode.None).innerHit(
+                    new InnerHitBuilder().setFetchSourceContext(
+                        FetchSourceContext.of(true, new String[] { "comments.missing_field" }, null)
+                    )
+                )
+            ),
+            response -> {
+                assertHitCount(response, 1);
+                assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(1L));
+                assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(0));
+            }
        );
         // Check that inner hits contain _source even when it's disabled on the root request.
-        response = prepareSearch().setFetchSource(false)
-            .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.None).innerHit(new InnerHitBuilder()))
-            .get();
-        assertNoFailures(response);
-        assertHitCount(response, 1);
-        assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(2L));
-        assertFalse(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().isEmpty());
+        assertNoFailuresAndResponse(
+            prepareSearch().setFetchSource(false)
+                .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.None).innerHit(new InnerHitBuilder())),
+            response -> {
+                assertHitCount(response, 1);
+                assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(2L));
+                assertFalse(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().isEmpty());
+            }
+        );
     }
 
     public void testInnerHitsWithIgnoreUnmapped() throws Exception {
         assertAcked(prepareCreate("index1").setMapping("nested_type", "type=nested"));
         createIndex("index2");
-        client().prepareIndex("index1").setId("1").setSource("nested_type", Collections.singletonMap("key", "value")).get();
-        client().prepareIndex("index2").setId("3").setSource("key", "value").get();
+        prepareIndex("index1").setId("1").setSource("nested_type", Collections.singletonMap("key", "value")).get();
+        prepareIndex("index2").setId("3").setSource("key", "value").get();
         refresh();
 
         assertSearchHitsWithoutFailures(
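Note on the recurring rewrite in this file and the files below: every `SearchResponse searchResponse = ...get()` assignment is replaced with the consumer-style `assertResponse` / `assertNoFailuresAndResponse` helpers from ElasticsearchAssertions, so the response is released even when an assertion throws. A minimal sketch of the idea behind such a helper; the ref-counted interface and class names here are illustrative assumptions, not the actual Elasticsearch API:

    import java.util.concurrent.Callable;
    import java.util.function.Consumer;

    final class ResponseAssertionsSketch {
        // Hypothetical stand-in for a ref-counted search response; decRef() releases it.
        interface RefCountedResponse {
            void decRef();
        }

        static <R extends RefCountedResponse> void assertResponse(Callable<R> request, Consumer<R> assertions) throws Exception {
            R response = request.call();     // execute the search
            try {
                assertions.accept(response); // run the caller's assertions against the live response
            } finally {
                response.decRef();           // always release, even if an assertion fails
            }
        }
    }

The try/finally is the whole point of the pattern: with the old `...get()` style, a failing assertion would skip the release of the response.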
jsonBuilder().startObject().startArray("nested").startObject().field("field", "value1").endObject().endArray().endObject() ) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java index d7347ef21328f..c996725e6285e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.fetch.subphase; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.query.MatchAllQueryBuilder; @@ -32,6 +31,7 @@ import static org.elasticsearch.index.query.QueryBuilders.termsQuery; import static org.elasticsearch.index.query.QueryBuilders.wrapperQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasKey; @@ -41,253 +41,280 @@ public void testSimpleMatchedQueryFromFilteredQuery() throws Exception { createIndex("test"); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("name", "test1", "number", 1).get(); - client().prepareIndex("test").setId("2").setSource("name", "test2", "number", 2).get(); - client().prepareIndex("test").setId("3").setSource("name", "test3", "number", 3).get(); + prepareIndex("test").setId("1").setSource("name", "test1", "number", 1).get(); + prepareIndex("test").setId("2").setSource("name", "test2", "number", 2).get(); + prepareIndex("test").setId("3").setSource("name", "test3", "number", 3).get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery( - boolQuery().must(matchAllQuery()) - .filter( - boolQuery().should(rangeQuery("number").lt(2).queryName("test1")).should(rangeQuery("number").gte(2).queryName("test2")) - ) - ).get(); - assertHitCount(searchResponse, 3L); - for (SearchHit hit : searchResponse.getHits()) { - if (hit.getId().equals("3") || hit.getId().equals("2")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2")); - assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f)); - } else if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("test1")); - assertThat(hit.getMatchedQueryScore("test1"), equalTo(1f)); - } else { - fail("Unexpected document returned with id " + hit.getId()); + assertResponse( + prepareSearch().setQuery( + boolQuery().must(matchAllQuery()) + .filter( + boolQuery().should(rangeQuery("number").lt(2).queryName("test1")) + .should(rangeQuery("number").gte(2).queryName("test2")) + ) + ), + response -> { + assertHitCount(response, 3L); + for (SearchHit hit : response.getHits()) { + if (hit.getId().equals("3") || hit.getId().equals("2")) { + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2")); + assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f)); + } else if (hit.getId().equals("1")) { + 
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java
index d7347ef21328f..c996725e6285e 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java
@@ -8,7 +8,6 @@
 package org.elasticsearch.search.fetch.subphase;
 
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.index.query.MatchAllQueryBuilder;
@@ -32,6 +31,7 @@
 import static org.elasticsearch.index.query.QueryBuilders.termsQuery;
 import static org.elasticsearch.index.query.QueryBuilders.wrapperQuery;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.hasKey;
@@ -41,253 +41,280 @@ public void testSimpleMatchedQueryFromFilteredQuery() throws Exception {
         createIndex("test");
         ensureGreen();
 
-        client().prepareIndex("test").setId("1").setSource("name", "test1", "number", 1).get();
-        client().prepareIndex("test").setId("2").setSource("name", "test2", "number", 2).get();
-        client().prepareIndex("test").setId("3").setSource("name", "test3", "number", 3).get();
+        prepareIndex("test").setId("1").setSource("name", "test1", "number", 1).get();
+        prepareIndex("test").setId("2").setSource("name", "test2", "number", 2).get();
+        prepareIndex("test").setId("3").setSource("name", "test3", "number", 3).get();
         refresh();
 
-        SearchResponse searchResponse = prepareSearch().setQuery(
-            boolQuery().must(matchAllQuery())
-                .filter(
-                    boolQuery().should(rangeQuery("number").lt(2).queryName("test1")).should(rangeQuery("number").gte(2).queryName("test2"))
-                )
-        ).get();
-        assertHitCount(searchResponse, 3L);
-        for (SearchHit hit : searchResponse.getHits()) {
-            if (hit.getId().equals("3") || hit.getId().equals("2")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2"));
-                assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f));
-            } else if (hit.getId().equals("1")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("test1"));
-                assertThat(hit.getMatchedQueryScore("test1"), equalTo(1f));
-            } else {
-                fail("Unexpected document returned with id " + hit.getId());
+        assertResponse(
+            prepareSearch().setQuery(
+                boolQuery().must(matchAllQuery())
+                    .filter(
+                        boolQuery().should(rangeQuery("number").lt(2).queryName("test1"))
+                            .should(rangeQuery("number").gte(2).queryName("test2"))
+                    )
+            ),
+            response -> {
+                assertHitCount(response, 3L);
+                for (SearchHit hit : response.getHits()) {
+                    if (hit.getId().equals("3") || hit.getId().equals("2")) {
+                        assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2"));
+                        assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f));
+                    } else if (hit.getId().equals("1")) {
+                        assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("test1"));
+                        assertThat(hit.getMatchedQueryScore("test1"), equalTo(1f));
+                    } else {
+                        fail("Unexpected document returned with id " + hit.getId());
+                    }
+                }
             }
-        }
-
-        searchResponse = prepareSearch().setQuery(
-            boolQuery().should(rangeQuery("number").lte(2).queryName("test1")).should(rangeQuery("number").gt(2).queryName("test2"))
-        ).get();
-        assertHitCount(searchResponse, 3L);
-        for (SearchHit hit : searchResponse.getHits()) {
-            if (hit.getId().equals("1") || hit.getId().equals("2")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("test1"));
-                assertThat(hit.getMatchedQueryScore("test1"), equalTo(1f));
-            } else if (hit.getId().equals("3")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2"));
-                assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f));
-            } else {
-                fail("Unexpected document returned with id " + hit.getId());
+        );
+
+        assertResponse(
+            prepareSearch().setQuery(
+                boolQuery().should(rangeQuery("number").lte(2).queryName("test1")).should(rangeQuery("number").gt(2).queryName("test2"))
+            ),
+            response -> {
+                assertHitCount(response, 3L);
+                for (SearchHit hit : response.getHits()) {
+                    if (hit.getId().equals("1") || hit.getId().equals("2")) {
+                        assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("test1"));
+                        assertThat(hit.getMatchedQueryScore("test1"), equalTo(1f));
+                    } else if (hit.getId().equals("3")) {
+                        assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2"));
+                        assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f));
+                    } else {
+                        fail("Unexpected document returned with id " + hit.getId());
+                    }
+                }
             }
-        }
+        );
     }
 
     public void testSimpleMatchedQueryFromTopLevelFilter() throws Exception {
         createIndex("test");
         ensureGreen();
 
-        client().prepareIndex("test").setId("1").setSource("name", "test", "title", "title1").get();
-        client().prepareIndex("test").setId("2").setSource("name", "test").get();
-        client().prepareIndex("test").setId("3").setSource("name", "test").get();
+        prepareIndex("test").setId("1").setSource("name", "test", "title", "title1").get();
+        prepareIndex("test").setId("2").setSource("name", "test").get();
+        prepareIndex("test").setId("3").setSource("name", "test").get();
         refresh();
 
-        SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery())
-            .setPostFilter(
-                boolQuery().should(termQuery("name", "test").queryName("name")).should(termQuery("title", "title1").queryName("title"))
-            )
-            .get();
-        assertHitCount(searchResponse, 3L);
-        for (SearchHit hit : searchResponse.getHits()) {
-            if (hit.getId().equals("1")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
-                assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("title"));
-                assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f));
-            } else if (hit.getId().equals("2") || hit.getId().equals("3")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
-                assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
-            } else {
-                fail("Unexpected document returned with id " + hit.getId());
+        assertResponse(
+            prepareSearch().setQuery(matchAllQuery())
+                .setPostFilter(
+                    boolQuery().should(termQuery("name", "test").queryName("name")).should(termQuery("title", "title1").queryName("title"))
+                ),
+            response -> {
+                assertHitCount(response, 3L);
+                for (SearchHit hit : response.getHits()) {
+                    if (hit.getId().equals("1")) {
+                        assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
+                        assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("title"));
+                        assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f));
+                    } else if (hit.getId().equals("2") || hit.getId().equals("3")) {
+                        assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
+                        assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
+                    } else {
+                        fail("Unexpected document returned with id " + hit.getId());
+                    }
+                }
             }
-        }
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery())
-            .setPostFilter(
-                boolQuery().should(termQuery("name", "test").queryName("name")).should(termQuery("title", "title1").queryName("title"))
-            )
-            .get();
-
-        assertHitCount(searchResponse, 3L);
-        for (SearchHit hit : searchResponse.getHits()) {
-            if (hit.getId().equals("1")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
-                assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("title"));
-                assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f));
-            } else if (hit.getId().equals("2") || hit.getId().equals("3")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
-                assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
-            } else {
-                fail("Unexpected document returned with id " + hit.getId());
+        );
+
+        assertResponse(
+            prepareSearch().setQuery(matchAllQuery())
+                .setPostFilter(
+                    boolQuery().should(termQuery("name", "test").queryName("name")).should(termQuery("title", "title1").queryName("title"))
+                ),
+            response -> {
+                assertHitCount(response, 3L);
+                for (SearchHit hit : response.getHits()) {
+                    if (hit.getId().equals("1")) {
+                        assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
+                        assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("title"));
+                        assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f));
+                    } else if (hit.getId().equals("2") || hit.getId().equals("3")) {
+                        assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
+                        assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
+                    } else {
+                        fail("Unexpected document returned with id " + hit.getId());
+                    }
+                }
             }
-        }
+        );
     }
 
     public void testSimpleMatchedQueryFromTopLevelFilterAndFilteredQuery() throws Exception {
         createIndex("test");
         ensureGreen();
 
-        client().prepareIndex("test").setId("1").setSource("name", "test", "title", "title1").get();
-        client().prepareIndex("test").setId("2").setSource("name", "test", "title", "title2").get();
-        client().prepareIndex("test").setId("3").setSource("name", "test", "title", "title3").get();
+        prepareIndex("test").setId("1").setSource("name", "test", "title", "title1").get();
+        prepareIndex("test").setId("2").setSource("name", "test", "title", "title2").get();
+        prepareIndex("test").setId("3").setSource("name", "test", "title", "title3").get();
         refresh();
 
-        SearchResponse searchResponse = prepareSearch().setQuery(
-            boolQuery().must(matchAllQuery()).filter(termsQuery("title", "title1", "title2", "title3").queryName("title"))
-        ).setPostFilter(termQuery("name", "test").queryName("name")).get();
-        assertHitCount(searchResponse, 3L);
-        for (SearchHit hit : searchResponse.getHits()) {
-            if (hit.getId().equals("1") || hit.getId().equals("2") || hit.getId().equals("3")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
-                assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("title"));
-                assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f));
-            } else {
-                fail("Unexpected document returned with id " + hit.getId());
+        assertResponse(
+            prepareSearch().setQuery(
+                boolQuery().must(matchAllQuery()).filter(termsQuery("title", "title1", "title2", "title3").queryName("title"))
+            ).setPostFilter(termQuery("name", "test").queryName("name")),
+            response -> {
+                assertHitCount(response, 3L);
+                for (SearchHit hit : response.getHits()) {
+                    if (hit.getId().equals("1") || hit.getId().equals("2") || hit.getId().equals("3")) {
+                        assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
+                        assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("title"));
+                        assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f));
+                    } else {
+                        fail("Unexpected document returned with id " + hit.getId());
+                    }
+                }
             }
-        }
-
-        searchResponse = prepareSearch().setQuery(termsQuery("title", "title1", "title2", "title3").queryName("title"))
-            .setPostFilter(matchQuery("name", "test").queryName("name"))
-            .get();
-        assertHitCount(searchResponse, 3L);
-        for (SearchHit hit : searchResponse.getHits()) {
-            if (hit.getId().equals("1") || hit.getId().equals("2") || hit.getId().equals("3")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
-                assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("title"));
-                assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f));
-            } else {
-                fail("Unexpected document returned with id " + hit.getId());
+        );
+
+        assertResponse(
+            prepareSearch().setQuery(termsQuery("title", "title1", "title2", "title3").queryName("title"))
+                .setPostFilter(matchQuery("name", "test").queryName("name")),
+            response -> {
+                assertHitCount(response, 3L);
+                for (SearchHit hit : response.getHits()) {
+                    if (hit.getId().equals("1") || hit.getId().equals("2") || hit.getId().equals("3")) {
+                        assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
+                        assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("title"));
+                        assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f));
+                    } else {
+                        fail("Unexpected document returned with id " + hit.getId());
+                    }
+                }
             }
-        }
+        );
     }
 
     public void testRegExpQuerySupportsName() {
         createIndex("test1");
         ensureGreen();
 
-        client().prepareIndex("test1").setId("1").setSource("title", "title1").get();
+        prepareIndex("test1").setId("1").setSource("title", "title1").get();
         refresh();
 
-        SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.regexpQuery("title", "title1").queryName("regex")).get();
-        assertHitCount(searchResponse, 1L);
+        assertResponse(prepareSearch().setQuery(QueryBuilders.regexpQuery("title", "title1").queryName("regex")), response -> {
+            assertHitCount(response, 1L);
 
-        for (SearchHit hit : searchResponse.getHits()) {
-            if (hit.getId().equals("1")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("regex"));
-                assertThat(hit.getMatchedQueryScore("regex"), equalTo(1f));
-            } else {
-                fail("Unexpected document returned with id " + hit.getId());
+            for (SearchHit hit : response.getHits()) {
+                if (hit.getId().equals("1")) {
+                    assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                    assertThat(hit.getMatchedQueriesAndScores(), hasKey("regex"));
+                    assertThat(hit.getMatchedQueryScore("regex"), equalTo(1f));
+                } else {
+                    fail("Unexpected document returned with id " + hit.getId());
+                }
             }
-        }
+        });
     }
 
     public void testPrefixQuerySupportsName() {
         createIndex("test1");
         ensureGreen();
 
-        client().prepareIndex("test1").setId("1").setSource("title", "title1").get();
+        prepareIndex("test1").setId("1").setSource("title", "title1").get();
         refresh();
 
-        SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.prefixQuery("title", "title").queryName("prefix")).get();
-        assertHitCount(searchResponse, 1L);
+        assertResponse(prepareSearch().setQuery(QueryBuilders.prefixQuery("title", "title").queryName("prefix")), response -> {
+            assertHitCount(response, 1L);
 
-        for (SearchHit hit : searchResponse.getHits()) {
-            if (hit.getId().equals("1")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("prefix"));
-                assertThat(hit.getMatchedQueryScore("prefix"), equalTo(1f));
-            } else {
-                fail("Unexpected document returned with id " + hit.getId());
+            for (SearchHit hit : response.getHits()) {
+                if (hit.getId().equals("1")) {
+                    assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                    assertThat(hit.getMatchedQueriesAndScores(), hasKey("prefix"));
+                    assertThat(hit.getMatchedQueryScore("prefix"), equalTo(1f));
+                } else {
+                    fail("Unexpected document returned with id " + hit.getId());
+                }
             }
-        }
+        });
     }
 
     public void testFuzzyQuerySupportsName() {
         createIndex("test1");
         ensureGreen();
 
-        client().prepareIndex("test1").setId("1").setSource("title", "title1").get();
+        prepareIndex("test1").setId("1").setSource("title", "title1").get();
         refresh();
 
-        SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.fuzzyQuery("title", "titel1").queryName("fuzzy")).get();
-        assertHitCount(searchResponse, 1L);
+        assertResponse(prepareSearch().setQuery(QueryBuilders.fuzzyQuery("title", "titel1").queryName("fuzzy")), response -> {
+            assertHitCount(response, 1L);
 
-        for (SearchHit hit : searchResponse.getHits()) {
-            if (hit.getId().equals("1")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("fuzzy"));
-                assertThat(hit.getMatchedQueryScore("fuzzy"), greaterThan(0f));
-            } else {
-                fail("Unexpected document returned with id " + hit.getId());
+            for (SearchHit hit : response.getHits()) {
+                if (hit.getId().equals("1")) {
+                    assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                    assertThat(hit.getMatchedQueriesAndScores(), hasKey("fuzzy"));
+                    assertThat(hit.getMatchedQueryScore("fuzzy"), greaterThan(0f));
+                } else {
+                    fail("Unexpected document returned with id " + hit.getId());
+                }
             }
-        }
+        });
     }
 
     public void testWildcardQuerySupportsName() {
         createIndex("test1");
         ensureGreen();
 
-        client().prepareIndex("test1").setId("1").setSource("title", "title1").get();
+        prepareIndex("test1").setId("1").setSource("title", "title1").get();
         refresh();
 
-        SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.wildcardQuery("title", "titl*").queryName("wildcard")).get();
-        assertHitCount(searchResponse, 1L);
+        assertResponse(prepareSearch().setQuery(QueryBuilders.wildcardQuery("title", "titl*").queryName("wildcard")), response -> {
+            assertHitCount(response, 1L);
 
-        for (SearchHit hit : searchResponse.getHits()) {
-            if (hit.getId().equals("1")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("wildcard"));
-                assertThat(hit.getMatchedQueryScore("wildcard"), equalTo(1f));
-            } else {
-                fail("Unexpected document returned with id " + hit.getId());
+            for (SearchHit hit : response.getHits()) {
+                if (hit.getId().equals("1")) {
+                    assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                    assertThat(hit.getMatchedQueriesAndScores(), hasKey("wildcard"));
+                    assertThat(hit.getMatchedQueryScore("wildcard"), equalTo(1f));
+                } else {
+                    fail("Unexpected document returned with id " + hit.getId());
+                }
             }
-        }
+        });
     }
 
     public void testSpanFirstQuerySupportsName() {
         createIndex("test1");
         ensureGreen();
 
-        client().prepareIndex("test1").setId("1").setSource("title", "title1 title2").get();
+        prepareIndex("test1").setId("1").setSource("title", "title1 title2").get();
         refresh();
 
-        SearchResponse searchResponse = prepareSearch().setQuery(
-            QueryBuilders.spanFirstQuery(QueryBuilders.spanTermQuery("title", "title1"), 10).queryName("span")
-        ).get();
-        assertHitCount(searchResponse, 1L);
-
-        for (SearchHit hit : searchResponse.getHits()) {
-            if (hit.getId().equals("1")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("span"));
-                assertThat(hit.getMatchedQueryScore("span"), greaterThan(0f));
-            } else {
-                fail("Unexpected document returned with id " + hit.getId());
+        assertResponse(
+            prepareSearch().setQuery(QueryBuilders.spanFirstQuery(QueryBuilders.spanTermQuery("title", "title1"), 10).queryName("span")),
+            response -> {
+                assertHitCount(response, 1L);
+
+                for (SearchHit hit : response.getHits()) {
+                    if (hit.getId().equals("1")) {
+                        assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("span"));
+                        assertThat(hit.getMatchedQueryScore("span"), greaterThan(0f));
+                    } else {
+                        fail("Unexpected document returned with id " + hit.getId());
+                    }
+                }
             }
-        }
+        );
     }
 
    /**
@@ -297,33 +324,36 @@ public void testMatchedWithShould() throws Exception {
         createIndex("test");
         ensureGreen();
 
-        client().prepareIndex("test").setId("1").setSource("content", "Lorem ipsum dolor sit amet").get();
-        client().prepareIndex("test").setId("2").setSource("content", "consectetur adipisicing elit").get();
+        prepareIndex("test").setId("1").setSource("content", "Lorem ipsum dolor sit amet").get();
+        prepareIndex("test").setId("2").setSource("content", "consectetur adipisicing elit").get();
         refresh();
 
         // Execute search at least two times to load it in cache
         int iter = scaledRandomIntBetween(2, 10);
         for (int i = 0; i < iter; i++) {
-            SearchResponse searchResponse = prepareSearch().setQuery(
-                boolQuery().minimumShouldMatch(1)
-                    .should(queryStringQuery("dolor").queryName("dolor"))
-                    .should(queryStringQuery("elit").queryName("elit"))
-            ).get();
-
-            assertHitCount(searchResponse, 2L);
-            for (SearchHit hit : searchResponse.getHits()) {
-                if (hit.getId().equals("1")) {
-                    assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                    assertThat(hit.getMatchedQueriesAndScores(), hasKey("dolor"));
-                    assertThat(hit.getMatchedQueryScore("dolor"), greaterThan(0f));
-                } else if (hit.getId().equals("2")) {
-                    assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                    assertThat(hit.getMatchedQueriesAndScores(), hasKey("elit"));
-                    assertThat(hit.getMatchedQueryScore("elit"), greaterThan(0f));
-                } else {
-                    fail("Unexpected document returned with id " + hit.getId());
+            assertResponse(
+                prepareSearch().setQuery(
+                    boolQuery().minimumShouldMatch(1)
+                        .should(queryStringQuery("dolor").queryName("dolor"))
+                        .should(queryStringQuery("elit").queryName("elit"))
+                ),
+                response -> {
+                    assertHitCount(response, 2L);
+                    for (SearchHit hit : response.getHits()) {
+                        if (hit.getId().equals("1")) {
+                            assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                            assertThat(hit.getMatchedQueriesAndScores(), hasKey("dolor"));
+                            assertThat(hit.getMatchedQueryScore("dolor"), greaterThan(0f));
+                        } else if (hit.getId().equals("2")) {
+                            assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                            assertThat(hit.getMatchedQueriesAndScores(), hasKey("elit"));
+                            assertThat(hit.getMatchedQueryScore("elit"), greaterThan(0f));
+                        } else {
+                            fail("Unexpected document returned with id " + hit.getId());
+                        }
+                    }
                 }
-            }
+            );
         }
     }
 
@@ -331,7 +361,7 @@ public void testMatchedWithWrapperQuery() throws Exception {
         createIndex("test");
         ensureGreen();
 
-        client().prepareIndex("test").setId("1").setSource("content", "Lorem ipsum dolor sit amet").get();
+        prepareIndex("test").setId("1").setSource("content", "Lorem ipsum dolor sit amet").get();
         refresh();
 
         MatchQueryBuilder matchQueryBuilder = matchQuery("content", "amet").queryName("abc");
@@ -340,12 +370,13 @@ public void testMatchedWithWrapperQuery() throws Exception {
         BytesReference termBytes = XContentHelper.toXContent(termQueryBuilder, XContentType.JSON, false);
         QueryBuilder[] queries = new QueryBuilder[] { wrapperQuery(matchBytes), constantScoreQuery(wrapperQuery(termBytes)) };
         for (QueryBuilder query : queries) {
-            SearchResponse searchResponse = prepareSearch().setQuery(query).get();
-            assertHitCount(searchResponse, 1L);
-            SearchHit hit = searchResponse.getHits().getAt(0);
-            assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-            assertThat(hit.getMatchedQueriesAndScores(), hasKey("abc"));
-            assertThat(hit.getMatchedQueryScore("abc"), greaterThan(0f));
+            assertResponse(prepareSearch().setQuery(query), response -> {
+                assertHitCount(response, 1L);
+                SearchHit hit = response.getHits().getAt(0);
+                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                assertThat(hit.getMatchedQueriesAndScores(), hasKey("abc"));
+                assertThat(hit.getMatchedQueryScore("abc"), greaterThan(0f));
+            });
         }
     }
 
@@ -353,20 +384,23 @@ public void testMatchedWithRescoreQuery() throws Exception {
         createIndex("test");
         ensureGreen();
 
-        client().prepareIndex("test").setId("1").setSource("content", "hello world").get();
-        client().prepareIndex("test").setId("2").setSource("content", "hello you").get();
+        prepareIndex("test").setId("1").setSource("content", "hello world").get();
+        prepareIndex("test").setId("2").setSource("content", "hello you").get();
         refresh();
 
-        SearchResponse searchResponse = prepareSearch().setQuery(new MatchAllQueryBuilder().queryName("all"))
-            .setRescorer(
-                new QueryRescorerBuilder(new MatchPhraseQueryBuilder("content", "hello you").boost(10).queryName("rescore_phrase"))
-            )
-            .get();
-        assertHitCount(searchResponse, 2L);
-        assertThat(searchResponse.getHits().getAt(0).getMatchedQueries().length, equalTo(2));
-        assertThat(searchResponse.getHits().getAt(0).getMatchedQueries(), equalTo(new String[] { "all", "rescore_phrase" }));
-
-        assertThat(searchResponse.getHits().getAt(1).getMatchedQueries().length, equalTo(1));
-        assertThat(searchResponse.getHits().getAt(1).getMatchedQueries(), equalTo(new String[] { "all" }));
+        assertResponse(
+            prepareSearch().setQuery(new MatchAllQueryBuilder().queryName("all"))
+                .setRescorer(
+                    new QueryRescorerBuilder(new MatchPhraseQueryBuilder("content", "hello you").boost(10).queryName("rescore_phrase"))
+                ),
+            response -> {
+                assertHitCount(response, 2L);
+                assertThat(response.getHits().getAt(0).getMatchedQueries().length, equalTo(2));
+                assertThat(response.getHits().getAt(0).getMatchedQueries(), equalTo(new String[] { "all", "rescore_phrase" }));
+
+                assertThat(response.getHits().getAt(1).getMatchedQueries().length, equalTo(1));
+                assertThat(response.getHits().getAt(1).getMatchedQueries(), equalTo(new String[] { "all" }));
+            }
+        );
     }
 }
HighlightBuilder().field("name").highlighterType("test-custom")), + response -> assertHighlight(response, 0, "name", 0, equalTo("standard response for name at position 1")) + ); } public void testThatCustomHighlighterCanBeConfiguredPerField() throws Exception { @@ -58,44 +59,49 @@ public void testThatCustomHighlighterCanBeConfiguredPerField() throws Exception options.put("myFieldOption", "someValue"); highlightConfig.options(options); - SearchResponse searchResponse = prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) - .highlighter(new HighlightBuilder().field(highlightConfig)) - .get(); - - assertHighlight(searchResponse, 0, "name", 0, equalTo("standard response for name at position 1")); - assertHighlight(searchResponse, 0, "name", 1, equalTo("field:myFieldOption:someValue")); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).highlighter(new HighlightBuilder().field(highlightConfig)), + response -> { + assertHighlight(response, 0, "name", 0, equalTo("standard response for name at position 1")); + assertHighlight(response, 0, "name", 1, equalTo("field:myFieldOption:someValue")); + } + ); } public void testThatCustomHighlighterCanBeConfiguredGlobally() throws Exception { Map options = new HashMap<>(); options.put("myGlobalOption", "someValue"); - SearchResponse searchResponse = prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) - .highlighter(new HighlightBuilder().field("name").highlighterType("test-custom").options(options)) - .get(); - - assertHighlight(searchResponse, 0, "name", 0, equalTo("standard response for name at position 1")); - assertHighlight(searchResponse, 0, "name", 1, equalTo("field:myGlobalOption:someValue")); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) + .highlighter(new HighlightBuilder().field("name").highlighterType("test-custom").options(options)), + response -> { + assertHighlight(response, 0, "name", 0, equalTo("standard response for name at position 1")); + assertHighlight(response, 0, "name", 1, equalTo("field:myGlobalOption:someValue")); + } + ); } public void testThatCustomHighlighterReceivesFieldsInOrder() throws Exception { - SearchResponse searchResponse = prepareSearch("test").setQuery( - QueryBuilders.boolQuery().must(QueryBuilders.matchAllQuery()).should(QueryBuilders.termQuery("name", "arbitrary")) - ) - .highlighter( - new HighlightBuilder().highlighterType("test-custom") - .field("name") - .field("other_name") - .field("other_other_name") - .useExplicitFieldOrder(true) + assertResponse( + prepareSearch("test").setQuery( + QueryBuilders.boolQuery().must(QueryBuilders.matchAllQuery()).should(QueryBuilders.termQuery("name", "arbitrary")) ) - .get(); - - assertHighlight(searchResponse, 0, "name", 0, equalTo("standard response for name at position 1")); - assertHighlight(searchResponse, 0, "other_name", 0, equalTo("standard response for other_name at position 2")); - assertHighlight(searchResponse, 0, "other_other_name", 0, equalTo("standard response for other_other_name at position 3")); - assertHighlight(searchResponse, 1, "name", 0, equalTo("standard response for name at position 1")); - assertHighlight(searchResponse, 1, "other_name", 0, equalTo("standard response for other_name at position 2")); - assertHighlight(searchResponse, 1, "other_other_name", 0, equalTo("standard response for other_other_name at position 3")); + .highlighter( + new HighlightBuilder().highlighterType("test-custom") + .field("name") + .field("other_name") + .field("other_other_name") + 
.useExplicitFieldOrder(true) + ), + response -> { + assertHighlight(response, 0, "name", 0, equalTo("standard response for name at position 1")); + assertHighlight(response, 0, "other_name", 0, equalTo("standard response for other_name at position 2")); + assertHighlight(response, 0, "other_other_name", 0, equalTo("standard response for other_other_name at position 3")); + assertHighlight(response, 1, "name", 0, equalTo("standard response for name at position 1")); + assertHighlight(response, 1, "other_name", 0, equalTo("standard response for other_name at position 2")); + assertHighlight(response, 1, "other_other_name", 0, equalTo("standard response for other_other_name at position 3")); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 6500b969ee273..5c189c0c6c96a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -19,7 +19,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.GeoPoint; @@ -94,7 +93,9 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNotHighlighted; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; @@ -128,24 +129,25 @@ public void testHighlightingWithKeywordIgnoreBoundaryScanner() throws IOExceptio .endObject(); mappings.endObject(); assertAcked(prepareCreate("test").setMapping(mappings)); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().array("tags", "foo bar", "foo bar", "foo bar", "foo baz").field("sort", 1).endObject()) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource(jsonBuilder().startObject().array("tags", "foo baz", "foo baz", "foo baz", "foo bar").field("sort", 2).endObject()) .get(); refresh(); for (BoundaryScannerType scanner : BoundaryScannerType.values()) { - SearchResponse search = prepareSearch().addSort(SortBuilders.fieldSort("sort")) - .setQuery(matchQuery("tags", "foo bar")) - .highlighter(new HighlightBuilder().field(new Field("tags")).numOfFragments(2).boundaryScannerType(scanner)) - .get(); - assertHighlight(search, 0, "tags", 0, 2, equalTo("foo bar")); - assertHighlight(search, 0, "tags", 1, 2, equalTo("foo bar")); - assertHighlight(search, 1, "tags", 0, 1, equalTo("foo bar")); + assertResponse( + 
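The three configuration tests above exercise highlighter options at two levels: set globally on the HighlightBuilder or per field, with the per-field map taking precedence. A toy merge showing the assumed precedence (the actual resolution happens inside the highlight phase, not in a helper like this):

    import java.util.HashMap;
    import java.util.Map;

    final class HighlightOptionsMergeSketch {
        static Map<String, Object> effectiveOptions(Map<String, Object> global, Map<String, Object> perField) {
            Map<String, Object> merged = new HashMap<>();
            if (global != null) {
                merged.putAll(global);   // start from the global options
            }
            if (perField != null) {
                merged.putAll(perField); // per-field options override global ones
            }
            return merged;
        }
    }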
prepareSearch().addSort(SortBuilders.fieldSort("sort")) + .setQuery(matchQuery("tags", "foo bar")) + .highlighter(new HighlightBuilder().field(new Field("tags")).numOfFragments(2).boundaryScannerType(scanner)), + response -> { + assertHighlight(response, 0, "tags", 0, 2, equalTo("foo bar")); + assertHighlight(response, 0, "tags", 1, 2, equalTo("foo bar")); + assertHighlight(response, 1, "tags", 0, 1, equalTo("foo bar")); + } + ); } } @@ -162,12 +164,12 @@ public void testHighlightingWithStoredKeyword() throws IOException { .endObject(); mappings.endObject(); assertAcked(prepareCreate("test").setMapping(mappings)); - client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "foo").endObject()).get(); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "foo").endObject()).get(); refresh(); - SearchResponse search = prepareSearch().setQuery(matchQuery("text", "foo")) - .highlighter(new HighlightBuilder().field(new Field("text"))) - .get(); - assertHighlight(search, 0, "text", 0, equalTo("foo")); + assertResponse( + prepareSearch().setQuery(matchQuery("text", "foo")).highlighter(new HighlightBuilder().field(new Field("text"))), + response -> assertHighlight(response, 0, "text", 0, equalTo("foo")) + ); } public void testHighlightingWithWildcardName() throws IOException { @@ -186,13 +188,14 @@ public void testHighlightingWithWildcardName() throws IOException { .endObject(); mappings.endObject(); assertAcked(prepareCreate("test").setMapping(mappings)); - client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "text").endObject()).get(); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "text").endObject()).get(); refresh(); for (String type : ALL_TYPES) { - SearchResponse search = prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text"))) - .highlighter(new HighlightBuilder().field(new Field("*").highlighterType(type))) - .get(); - assertHighlight(search, 0, "text", 0, equalTo("text")); + assertResponse( + prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text"))) + .highlighter(new HighlightBuilder().field(new Field("*").highlighterType(type))), + response -> assertHighlight(response, 0, "text", 0, equalTo("text")) + ); } } @@ -214,14 +217,16 @@ public void testFieldAlias() throws IOException { .endObject(); assertAcked(prepareCreate("test").setMapping(mappings)); - client().prepareIndex("test").setId("1").setSource("text", "foo").get(); + prepareIndex("test").setId("1").setSource("text", "foo").get(); refresh(); for (String type : ALL_TYPES) { HighlightBuilder builder = new HighlightBuilder().field(new Field("alias").highlighterType(type)) .requireFieldMatch(randomBoolean()); - SearchResponse search = prepareSearch().setQuery(matchQuery("alias", "foo")).highlighter(builder).get(); - assertHighlight(search, 0, "alias", 0, equalTo("foo")); + assertResponse( + prepareSearch().setQuery(matchQuery("alias", "foo")).highlighter(builder), + response -> assertHighlight(response, 0, "alias", 0, equalTo("foo")) + ); } } @@ -244,14 +249,16 @@ public void testFieldAliasWithSourceLookup() throws IOException { .endObject(); assertAcked(prepareCreate("test").setMapping(mappings)); - client().prepareIndex("test").setId("1").setSource("text", "foo bar").get(); + prepareIndex("test").setId("1").setSource("text", "foo bar").get(); refresh(); for (String type : ALL_TYPES) { HighlightBuilder builder = new HighlightBuilder().field(new 
Field("alias").highlighterType(type)) .requireFieldMatch(randomBoolean()); - SearchResponse search = prepareSearch().setQuery(matchQuery("alias", "bar")).highlighter(builder).get(); - assertHighlight(search, 0, "alias", 0, equalTo("foo bar")); + assertResponse( + prepareSearch().setQuery(matchQuery("alias", "bar")).highlighter(builder), + response -> assertHighlight(response, 0, "alias", 0, equalTo("foo bar")) + ); } } @@ -271,12 +278,14 @@ public void testFieldAliasWithWildcardField() throws IOException { .endObject(); assertAcked(prepareCreate("test").setMapping(mappings)); - client().prepareIndex("test").setId("1").setSource("keyword", "foo").get(); + prepareIndex("test").setId("1").setSource("keyword", "foo").get(); refresh(); HighlightBuilder builder = new HighlightBuilder().field(new Field("al*")).requireFieldMatch(false); - SearchResponse search = prepareSearch().setQuery(matchQuery("alias", "foo")).highlighter(builder).get(); - assertHighlight(search, 0, "alias", 0, equalTo("foo")); + assertResponse( + prepareSearch().setQuery(matchQuery("alias", "foo")).highlighter(builder), + response -> assertHighlight(response, 0, "alias", 0, equalTo("foo")) + ); } public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOException { @@ -303,21 +312,21 @@ public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOExc .endObject(); mappings.endObject(); assertAcked(prepareCreate("test").setMapping(mappings)); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("unstored_text", "text").field("text", "text").endObject()) .get(); refresh(); for (String type : ALL_TYPES) { - SearchResponse search = prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text"))) - .highlighter(new HighlightBuilder().field(new Field("*").highlighterType(type))) - .get(); - assertHighlight(search, 0, "text", 0, equalTo("text")); - search = prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text"))) - .highlighter(new HighlightBuilder().field(new Field("unstored_text"))) - .get(); - assertNoFailures(search); - assertThat(search.getHits().getAt(0).getHighlightFields().size(), equalTo(0)); + assertResponse( + prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text"))) + .highlighter(new HighlightBuilder().field(new Field("*").highlighterType(type))), + response -> assertHighlight(response, 0, "text", 0, equalTo("text")) + ); + assertNoFailuresAndResponse( + prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text"))) + .highlighter(new HighlightBuilder().field(new Field("unstored_text"))), + response -> assertThat(response.getHits().getAt(0).getHighlightFields().size(), equalTo(0)) + ); } } @@ -328,12 +337,14 @@ public void testHighTermFrequencyDoc() throws IOException { for (int i = 0; i < 6000; i++) { builder.append("abc").append(" "); } - client().prepareIndex("test").setId("1").setSource("name", builder.toString()).get(); + prepareIndex("test").setId("1").setSource("name", builder.toString()).get(); refresh(); - SearchResponse search = prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "abc"))) - .highlighter(new HighlightBuilder().field("name")) - .get(); - assertHighlight(search, 0, "name", 0, startsWith("abc abc abc abc")); + assertResponse( + prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "abc"))).highlighter(new HighlightBuilder().field("name")), + response -> { + assertHighlight(response, 0, "name", 0, startsWith("abc abc abc 
abc")); + } + ); } public void testEnsureNoNegativeOffsets() throws Exception { @@ -346,8 +357,7 @@ public void testEnsureNoNegativeOffsets() throws Exception { ) ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( "no_long_term", "This is a test where foo is highlighed and should be highlighted", @@ -414,8 +424,7 @@ public void testSourceLookupHighlightingUsingPlainHighlighter() throws Exception IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5]; for (int i = 0; i < indexRequestBuilders.length; i++) { - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource( XContentFactory.jsonBuilder() .startObject() @@ -433,22 +442,31 @@ public void testSourceLookupHighlightingUsingPlainHighlighter() throws Exception } indexRandom(true, indexRequestBuilders); - SearchResponse search = prepareSearch().setQuery(matchQuery("title", "bug")) - .highlighter(new HighlightBuilder().field("title", -1, 0)) - .get(); - - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting bug present in elasticsearch")); - } - - search = prepareSearch().setQuery(matchQuery("attachments.body", "attachment")) - .highlighter(new HighlightBuilder().field("attachments.body", -1, 0)) - .get(); + assertResponse( + prepareSearch().setQuery(matchQuery("title", "bug")).highlighter(new HighlightBuilder().field("title", -1, 0)), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight( + response, + i, + "title", + 0, + equalTo("This is a test on the highlighting bug present in elasticsearch") + ); + } + } + ); - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight(search, i, "attachments.body", 0, equalTo("attachment 1")); - assertHighlight(search, i, "attachments.body", 1, equalTo("attachment 2")); - } + assertResponse( + prepareSearch().setQuery(matchQuery("attachments.body", "attachment")) + .highlighter(new HighlightBuilder().field("attachments.body", -1, 0)), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight(response, i, "attachments.body", 0, equalTo("attachment 1")); + assertHighlight(response, i, "attachments.body", 1, equalTo("attachment 2")); + } + } + ); } @@ -481,8 +499,7 @@ public void testSourceLookupHighlightingUsingFastVectorHighlighter() throws Exce IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5]; for (int i = 0; i < indexRequestBuilders.length; i++) { - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource( XContentFactory.jsonBuilder() .startObject() @@ -500,23 +517,32 @@ public void testSourceLookupHighlightingUsingFastVectorHighlighter() throws Exce } indexRandom(true, indexRequestBuilders); - SearchResponse search = prepareSearch().setQuery(matchQuery("title", "bug")) - .highlighter(new HighlightBuilder().field("title", -1, 0)) - .get(); - - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting bug present in elasticsearch")); - } - - search = prepareSearch().setQuery(matchQuery("attachments.body", "attachment")) - .highlighter(new HighlightBuilder().field("attachments.body", -1, 2)) - .execute() - .get(); + assertResponse( + 
prepareSearch().setQuery(matchQuery("title", "bug")).highlighter(new HighlightBuilder().field("title", -1, 0)), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight( + response, + i, + "title", + 0, + equalTo("This is a test on the highlighting bug present in elasticsearch") + ); + } + } + ); - for (int i = 0; i < 5; i++) { - assertHighlight(search, i, "attachments.body", 0, equalTo("attachment 1")); - assertHighlight(search, i, "attachments.body", 1, equalTo("attachment 2")); - } + assertResponse( + prepareSearch().setQuery(matchQuery("attachments.body", "attachment")) + .highlighter(new HighlightBuilder().field("attachments.body", -1, 2)) + .execute(), + response -> { + for (int i = 0; i < 5; i++) { + assertHighlight(response, i, "attachments.body", 0, equalTo("attachment 1")); + assertHighlight(response, i, "attachments.body", 1, equalTo("attachment 2")); + } + } + ); } public void testSourceLookupHighlightingUsingPostingsHighlighter() throws Exception { @@ -548,8 +574,7 @@ public void testSourceLookupHighlightingUsingPostingsHighlighter() throws Except IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5]; for (int i = 0; i < indexRequestBuilders.length; i++) { - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource( XContentFactory.jsonBuilder() .startObject() @@ -571,46 +596,52 @@ public void testSourceLookupHighlightingUsingPostingsHighlighter() throws Except } indexRandom(true, indexRequestBuilders); - SearchResponse search = prepareSearch().setQuery(matchQuery("title", "bug")) - // asking for the whole field to be highlighted - .highlighter(new HighlightBuilder().field("title", -1, 0)) - .get(); - - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight( - search, - i, - "title", - 0, - equalTo("This is a test on the highlighting bug present in elasticsearch. Hopefully it works.") - ); - assertHighlight(search, i, "title", 1, 2, equalTo("This is the second bug to perform highlighting on.")); - } - - search = prepareSearch().setQuery(matchQuery("title", "bug")) - // sentences will be generated out of each value - .highlighter(new HighlightBuilder().field("title")) - .get(); - - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight( - search, - i, - "title", - 0, - equalTo("This is a test on the highlighting bug present in elasticsearch. Hopefully it works.") - ); - assertHighlight(search, i, "title", 1, 2, equalTo("This is the second bug to perform highlighting on.")); - } + assertResponse( + prepareSearch().setQuery(matchQuery("title", "bug")) + // asking for the whole field to be highlighted + .highlighter(new HighlightBuilder().field("title", -1, 0)), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight( + response, + i, + "title", + 0, + equalTo("This is a test on the highlighting bug present in elasticsearch. 
Hopefully it works.") + ); + assertHighlight(response, i, "title", 1, 2, equalTo("This is the second bug to perform highlighting on.")); + } + } + ); - search = prepareSearch().setQuery(matchQuery("attachments.body", "attachment")) - .highlighter(new HighlightBuilder().field("attachments.body", -1, 2)) - .get(); + assertResponse( + prepareSearch().setQuery(matchQuery("title", "bug")) + // sentences will be generated out of each value + .highlighter(new HighlightBuilder().field("title")), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight( + response, + i, + "title", + 0, + equalTo("This is a test on the highlighting bug present in elasticsearch. Hopefully it works.") + ); + assertHighlight(response, i, "title", 1, 2, equalTo("This is the second bug to perform highlighting on.")); + } + } + ); - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight(search, i, "attachments.body", 0, equalTo("attachment for this test")); - assertHighlight(search, i, "attachments.body", 1, 2, equalTo("attachment 2")); - } + assertResponse( + prepareSearch().setQuery(matchQuery("attachments.body", "attachment")) + .highlighter(new HighlightBuilder().field("attachments.body", -1, 2)), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight(response, i, "attachments.body", 0, equalTo("attachment for this test")); + assertHighlight(response, i, "attachments.body", 1, 2, equalTo("attachment 2")); + } + } + ); } public void testHighlightIssue1994() throws Exception { @@ -624,42 +655,54 @@ public void testHighlightIssue1994() throws Exception { ); String[] titles = new String[] { "This is a test on the highlighting bug present in elasticsearch", "The bug is bugging us" }; - indexRandom(false, client().prepareIndex("test").setId("1").setSource("title", titles, "titleTV", titles)); + indexRandom(false, prepareIndex("test").setId("1").setSource("title", titles, "titleTV", titles)); indexRandom( true, - client().prepareIndex("test").setId("2").setSource("titleTV", new String[] { "some text to highlight", "highlight other text" }) + prepareIndex("test").setId("2").setSource("titleTV", new String[] { "some text to highlight", "highlight other text" }) ); - SearchResponse search = prepareSearch().setQuery(matchQuery("title", "bug")) - .highlighter(new HighlightBuilder().field("title", -1, 2).field("titleTV", -1, 2).requireFieldMatch(false)) - .get(); - - assertHighlight(search, 0, "title", 0, equalTo("This is a test on the highlighting bug present in elasticsearch")); - assertHighlight(search, 0, "title", 1, 2, equalTo("The bug is bugging us")); - assertHighlight(search, 0, "titleTV", 0, equalTo("This is a test on the highlighting bug present in elasticsearch")); - assertHighlight(search, 0, "titleTV", 1, 2, equalTo("The bug is bugging us")); - - search = prepareSearch().setQuery(matchQuery("titleTV", "highlight")) - .highlighter(new HighlightBuilder().field("titleTV", -1, 2)) - .get(); - - assertHighlight(search, 0, "titleTV", 0, equalTo("some text to highlight")); - assertHighlight(search, 0, "titleTV", 1, 2, equalTo("highlight other text")); + assertResponse( + prepareSearch().setQuery(matchQuery("title", "bug")) + .highlighter(new HighlightBuilder().field("title", -1, 2).field("titleTV", -1, 2).requireFieldMatch(false)), + response -> { + assertHighlight( + response, + 0, + "title", + 0, + equalTo("This is a test on the highlighting bug present in elasticsearch") + ); + assertHighlight(response, 0, "title", 1, 2, 
equalTo("The bug is bugging us")); + assertHighlight( + response, + 0, + "titleTV", + 0, + equalTo("This is a test on the highlighting bug present in elasticsearch") + ); + assertHighlight(response, 0, "titleTV", 1, 2, equalTo("The bug is bugging us")); + } + ); + assertResponse( + prepareSearch().setQuery(matchQuery("titleTV", "highlight")).highlighter(new HighlightBuilder().field("titleTV", -1, 2)), + response -> { + assertHighlight(response, 0, "titleTV", 0, equalTo("some text to highlight")); + assertHighlight(response, 0, "titleTV", 1, 2, equalTo("highlight other text")); + } + ); } public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() { createIndex("test"); ensureGreen(); - client().prepareIndex("test") - .setSource( - "field1", - new String[] { "this is a test", "this is the second test" }, - "field2", - new String[] { "this is another test", "yet another test" } - ) - .get(); + prepareIndex("test").setSource( + "field1", + new String[] { "this is a test", "this is the second test" }, + "field2", + new String[] { "this is another test", "yet another test" } + ).get(); refresh(); logger.info("--> highlighting and searching on field1 and field2 produces different tags"); @@ -679,11 +722,11 @@ public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() { ) ); - SearchResponse searchResponse = prepareSearch("test").setSource(source).get(); - - assertHighlight(searchResponse, 0, "field1", 0, 2, equalTo("test")); - assertHighlight(searchResponse, 0, "field1", 1, 2, equalTo("test")); - assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("yet another test")); + assertResponse(prepareSearch("test").setSource(source), response -> { + assertHighlight(response, 0, "field1", 0, 2, equalTo("test")); + assertHighlight(response, 0, "field1", 1, 2, equalTo("test")); + assertHighlight(response, 0, "field2", 0, 1, equalTo("yet another test")); + }); } // Issue #5175 @@ -700,16 +743,14 @@ public void testHighlightingOnWildcardFields() throws Exception { ); ensureGreen(); - client().prepareIndex("test") - .setSource( - "field-postings", - "This is the first test sentence. Here is the second one.", - "field-fvh", - "This is the test with term_vectors", - "field-plain", - "This is the test for the plain highlighter" - ) - .get(); + prepareIndex("test").setSource( + "field-postings", + "This is the first test sentence. Here is the second one.", + "field-fvh", + "This is the test with term_vectors", + "field-plain", + "This is the test for the plain highlighter" + ).get(); refresh(); logger.info("--> highlighting and searching on field*"); @@ -718,24 +759,24 @@ public void testHighlightingOnWildcardFields() throws Exception { .query(termQuery("field-postings", "test")) .highlighter(highlight().field("field*").preTags("").postTags("").requireFieldMatch(false)); - SearchResponse searchResponse = client().search(new SearchRequest("test").source(source)).actionGet(); - - assertHighlight( - searchResponse, - 0, - "field-postings", - 0, - 1, - equalTo("This is the first test sentence. Here is the second one.") - ); - assertHighlight(searchResponse, 0, "field-fvh", 0, 1, equalTo("This is the test with term_vectors")); - assertHighlight(searchResponse, 0, "field-plain", 0, 1, equalTo("This is the test for the plain highlighter")); + assertResponse(client().search(new SearchRequest("test").source(source)), response -> { + assertHighlight( + response, + 0, + "field-postings", + 0, + 1, + equalTo("This is the first test sentence. 
Here is the second one.") + ); + assertHighlight(response, 0, "field-fvh", 0, 1, equalTo("This is the test with term_vectors")); + assertHighlight(response, 0, "field-plain", 0, 1, equalTo("This is the test for the plain highlighter")); + }); } public void testPlainHighlighter() throws Exception { ensureGreen(); - client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); + prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); SearchSourceBuilder source = searchSource().query(termQuery("field1", "test")) @@ -746,8 +787,7 @@ public void testPlainHighlighter() throws Exception { public void testPlainHighlighterOrder() throws Exception { ensureGreen(); - client().prepareIndex("test") - .setSource("field1", "The quick brown fox jumps over the lazy brown dog but to no suprise the dog doesn't care") + prepareIndex("test").setSource("field1", "The quick brown fox jumps over the lazy brown dog but to no suprise the dog doesn't care") .get(); refresh(); @@ -756,23 +796,23 @@ public void testPlainHighlighterOrder() throws Exception { SearchSourceBuilder source = searchSource().query(matchQuery("field1", "brown dog")) .highlighter(highlight().highlighterType("plain").field("field1").preTags("").postTags("").fragmentSize(25)); - SearchResponse searchResponse = prepareSearch("test").setSource(source).get(); - - assertHighlight(searchResponse, 0, "field1", 0, 3, equalTo("The quick brown fox")); - assertHighlight(searchResponse, 0, "field1", 1, 3, equalTo(" jumps over the lazy brown dog")); - assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo(" dog doesn't care")); + assertResponse(prepareSearch("test").setSource(source), response -> { + assertHighlight(response, 0, "field1", 0, 3, equalTo("The quick brown fox")); + assertHighlight(response, 0, "field1", 1, 3, equalTo(" jumps over the lazy brown dog")); + assertHighlight(response, 0, "field1", 2, 3, equalTo(" dog doesn't care")); + }); // lets be explicit about the order source = searchSource().query(matchQuery("field1", "brown dog")) .highlighter( highlight().highlighterType("plain").field("field1").order("none").preTags("").postTags("").fragmentSize(25) ); - searchResponse = prepareSearch("test").setSource(source).get(); - - assertHighlight(searchResponse, 0, "field1", 0, 3, equalTo("The quick brown fox")); - assertHighlight(searchResponse, 0, "field1", 1, 3, equalTo(" jumps over the lazy brown dog")); - assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo(" dog doesn't care")); + assertResponse(prepareSearch("test").setSource(source), response -> { + assertHighlight(response, 0, "field1", 0, 3, equalTo("The quick brown fox")); + assertHighlight(response, 0, "field1", 1, 3, equalTo(" jumps over the lazy brown dog")); + assertHighlight(response, 0, "field1", 2, 3, equalTo(" dog doesn't care")); + }); } { // order by score @@ -781,11 +821,11 @@ public void testPlainHighlighterOrder() throws Exception { highlight().highlighterType("plain").order("score").field("field1").preTags("").postTags("").fragmentSize(25) ); - SearchResponse searchResponse = prepareSearch("test").setSource(source).get(); - - assertHighlight(searchResponse, 0, "field1", 0, 3, equalTo(" jumps over the lazy brown dog")); - assertHighlight(searchResponse, 0, "field1", 1, 3, equalTo("The quick brown fox")); - assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo(" dog doesn't care")); + 
assertResponse(prepareSearch("test").setSource(source), response -> { + assertHighlight(response, 0, "field1", 0, 3, equalTo(" jumps over the lazy brown dog")); + assertHighlight(response, 0, "field1", 1, 3, equalTo("The quick brown fox")); + assertHighlight(response, 0, "field1", 2, 3, equalTo(" dog doesn't care")); + }); } } @@ -795,7 +835,7 @@ public void testFastVectorHighlighter() throws Exception { indexRandom( true, - client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog") + prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog") ); logger.info("--> highlighting and searching on field1"); @@ -821,10 +861,7 @@ public void testHighlighterWithSentenceBoundaryScanner() throws Exception { assertAcked(prepareCreate("test").setMapping(type1TermVectorMapping())); ensureGreen(); - indexRandom( - true, - client().prepareIndex("test").setSource("field1", "A sentence with few words. Another sentence with even more words.") - ); + indexRandom(true, prepareIndex("test").setSource("field1", "A sentence with few words. Another sentence with even more words.")); for (String type : new String[] { "unified", "fvh" }) { logger.info("--> highlighting and searching on 'field' with sentence boundary_scanner"); @@ -836,25 +873,25 @@ public void testHighlighterWithSentenceBoundaryScanner() throws Exception { .postTags("") .boundaryScannerType(BoundaryScannerType.SENTENCE) ); - SearchResponse searchResponse = prepareSearch("test").setSource(source).get(); - - assertHighlight( - searchResponse, - 0, - "field1", - 0, - 2, - anyOf(equalTo("A sentence with few words"), equalTo("A sentence with few words. ")) - ); + assertResponse(prepareSearch("test").setSource(source), response -> { + assertHighlight( + response, + 0, + "field1", + 0, + 2, + anyOf(equalTo("A sentence with few words"), equalTo("A sentence with few words. ")) + ); - assertHighlight( - searchResponse, - 0, - "field1", - 1, - 2, - anyOf(equalTo("Another sentence with"), equalTo("Another sentence with even more words. ")) - ); + assertHighlight( + response, + 0, + "field1", + 1, + 2, + anyOf(equalTo("Another sentence with"), equalTo("Another sentence with even more words. ")) + ); + }); } } @@ -862,10 +899,7 @@ public void testHighlighterWithSentenceBoundaryScannerAndLocale() throws Excepti assertAcked(prepareCreate("test").setMapping(type1TermVectorMapping())); ensureGreen(); - indexRandom( - true, - client().prepareIndex("test").setSource("field1", "A sentence with few words. Another sentence with even more words.") - ); + indexRandom(true, prepareIndex("test").setSource("field1", "A sentence with few words. Another sentence with even more words.")); for (String type : new String[] { "fvh", "unified" }) { logger.info("--> highlighting and searching on 'field' with sentence boundary_scanner"); @@ -879,25 +913,25 @@ public void testHighlighterWithSentenceBoundaryScannerAndLocale() throws Excepti .boundaryScannerLocale(Locale.ENGLISH.toLanguageTag()) ); - SearchResponse searchResponse = prepareSearch("test").setSource(source).get(); - - assertHighlight( - searchResponse, - 0, - "field1", - 0, - 2, - anyOf(equalTo("A sentence with few words"), equalTo("A sentence with few words. ")) - ); + assertResponse(prepareSearch("test").setSource(source), response -> { + assertHighlight( + response, + 0, + "field1", + 0, + 2, + anyOf(equalTo("A sentence with few words"), equalTo("A sentence with few words. 
")) + ); - assertHighlight( - searchResponse, - 0, - "field1", - 1, - 2, - anyOf(equalTo("Another sentence with"), equalTo("Another sentence with even more words. ")) - ); + assertHighlight( + response, + 0, + "field1", + 1, + 2, + anyOf(equalTo("Another sentence with"), equalTo("Another sentence with even more words. ")) + ); + }); } } @@ -905,7 +939,7 @@ public void testHighlighterWithWordBoundaryScanner() throws Exception { assertAcked(prepareCreate("test").setMapping(type1TermVectorMapping())); ensureGreen(); - indexRandom(true, client().prepareIndex("test").setSource("field1", "some quick and hairy brown:fox jumped over the lazy dog")); + indexRandom(true, prepareIndex("test").setSource("field1", "some quick and hairy brown:fox jumped over the lazy dog")); logger.info("--> highlighting and searching on 'field' with word boundary_scanner"); for (String type : new String[] { "unified", "fvh" }) { @@ -933,7 +967,7 @@ public void testHighlighterWithWordBoundaryScannerAndLocale() throws Exception { assertAcked(prepareCreate("test").setMapping(type1TermVectorMapping())); ensureGreen(); - indexRandom(true, client().prepareIndex("test").setSource("field1", "some quick and hairy brown:fox jumped over the lazy dog")); + indexRandom(true, prepareIndex("test").setSource("field1", "some quick and hairy brown:fox jumped over the lazy dog")); for (String type : new String[] { "unified", "fvh" }) { SearchSourceBuilder source = searchSource().query(termQuery("field1", "some")) @@ -968,15 +1002,19 @@ public void testFVHManyMatches() throws Exception { // Index one megabyte of "t " over and over and over again String pattern = "t "; String value = new String(new char[1024 * 256 / pattern.length()]).replace("\0", pattern); - client().prepareIndex("test").setSource("field1", value).get(); + prepareIndex("test").setSource("field1", value).get(); refresh(); + final long[] tookDefaultPhrase = new long[1]; + final long[] tookLargePhrase = new long[1]; + logger.info("--> highlighting and searching on field1 with default phrase limit"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "t")) .highlighter(highlight().highlighterType("fvh").field("field1", 20, 1).order("score").preTags("").postTags("")); - SearchResponse defaultPhraseLimit = client().search(new SearchRequest("test").source(source)).actionGet(); - assertHighlight(defaultPhraseLimit, 0, "field1", 0, 1, containsString("t")); - + assertResponse(client().search(new SearchRequest("test").source(source)), defaultPhraseLimit -> { + assertHighlight(defaultPhraseLimit, 0, "field1", 0, 1, containsString("t")); + tookDefaultPhrase[0] = defaultPhraseLimit.getTook().getMillis(); + }); logger.info("--> highlighting and searching on field1 with large phrase limit"); source = searchSource().query(termQuery("field1", "t")) .highlighter( @@ -987,15 +1025,16 @@ public void testFVHManyMatches() throws Exception { .postTags("") .phraseLimit(30000) ); - SearchResponse largePhraseLimit = client().search(new SearchRequest("test").source(source)).actionGet(); - assertHighlight(largePhraseLimit, 0, "field1", 0, 1, containsString("t")); - + assertResponse(client().search(new SearchRequest("test").source(source)), largePhraseLimit -> { + assertHighlight(largePhraseLimit, 0, "field1", 0, 1, containsString("t")); + tookLargePhrase[0] = largePhraseLimit.getTook().getMillis(); + }); /* * I hate comparing times because it can be inconsistent but default is * in the neighborhood of 300ms and the large phrase limit is in the * neighborhood of 8 seconds. 
*/ - assertThat(defaultPhraseLimit.getTook().getMillis(), lessThan(largePhraseLimit.getTook().getMillis())); + assertThat(tookDefaultPhrase[0], lessThan(tookLargePhrase[0])); } public void testMatchedFieldsFvhRequireFieldMatch() throws Exception { @@ -1071,12 +1110,16 @@ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception SearchRequestBuilder req = prepareSearch("test").highlighter(new HighlightBuilder().field(fooField)); // First check highlighting without any matched fields set - SearchResponse resp = req.setQuery(queryStringQuery("running scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("running scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // And that matching a subfield doesn't automatically highlight it - resp = req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // Add the subfield to the list of matched fields but don't match it. Everything should still work // like before we added it. @@ -1087,12 +1130,16 @@ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception .requireFieldMatch(requireFieldMatch); fooField.matchedFields("foo", "foo.plain"); req = prepareSearch("test").highlighter(new HighlightBuilder().field(fooField)); - resp = req.setQuery(queryStringQuery("running scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("running scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // Now make half the matches come from the stored field and half from just a matched field. - resp = req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // Now remove the stored field from the matched field list. That should work too. fooField = new Field("foo").numOfFragments(1) @@ -1102,8 +1149,10 @@ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception .requireFieldMatch(requireFieldMatch); fooField.matchedFields("foo.plain"); req = prepareSearch("test").highlighter(new HighlightBuilder().field(fooField)); - resp = req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // Now make sure boosted fields don't blow up when matched fields is both the subfield and stored field. 
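// The conversions here and throughout this diff share one shape: instead of
// materializing `SearchResponse resp = request.get()` and asserting afterwards, the
// assertions move into a consumer handed to assertResponse(request, resp -> ...).
// A minimal, self-contained sketch of the likely mechanics, under the assumption
// (not shown in this diff) that responses are ref-counted and must be released after
// use; all types and names below are illustrative stubs, not the real
// ElasticsearchAssertions API:

import java.util.function.Consumer;
import java.util.function.Supplier;

final class AssertResponseSketch {
    interface RefCounted {
        void decRef();
    }

    // Run the caller's assertions, then release the response even if they throw.
    static <R extends RefCounted> void assertResponse(Supplier<R> execute, Consumer<R> assertions) {
        R response = execute.get();
        try {
            assertions.accept(response);
        } finally {
            response.decRef(); // a bare `.get()` at every call site offers no such guarantee
        }
    }

    public static void main(String[] args) {
        RefCounted fakeResponse = () -> System.out.println("released");
        assertResponse(() -> fakeResponse, r -> { /* assertions would go here */ });
    }
}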
fooField = new Field("foo").numOfFragments(1) @@ -1113,28 +1162,40 @@ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception .requireFieldMatch(requireFieldMatch); fooField.matchedFields("foo", "foo.plain"); req = prepareSearch("test").highlighter(new HighlightBuilder().field(fooField)); - resp = req.setQuery(queryStringQuery("foo.plain:running^5 scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("foo.plain:running^5 scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // Now just all matches are against the matched field. This still returns highlighting. - resp = req.setQuery(queryStringQuery("foo.plain:running foo.plain:scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("foo.plain:running foo.plain:scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // And all matched field via the queryString's field parameter, just in case - resp = req.setQuery(queryStringQuery("running scissors").field("foo.plain")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("running scissors").field("foo.plain")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // Finding the same string two ways is ok too - resp = req.setQuery(queryStringQuery("run foo.plain:running^5 scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("run foo.plain:running^5 scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // But we use the best found score when sorting fragments - resp = req.setQuery(queryStringQuery("cats foo.plain:cats^5").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("junk junk cats junk junk")); + assertResponse( + req.setQuery(queryStringQuery("cats foo.plain:cats^5").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("junk junk cats junk junk")) + ); // which can also be written by searching on the subfield - resp = req.setQuery(queryStringQuery("cats").field("foo").field("foo.plain", 5)).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("junk junk cats junk junk")); + assertResponse( + req.setQuery(queryStringQuery("cats").field("foo").field("foo.plain", 5)), + response -> assertHighlight(response, 0, "foo", 0, equalTo("junk junk cats junk junk")) + ); // Speaking of two fields, you can have two fields, only one of which has matchedFields enabled QueryBuilder twoFieldsQuery = queryStringQuery("cats").field("foo").field("foo.plain", 5).field("bar").field("bar.plain", 5); @@ -1143,50 +1204,63 @@ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception .fragmentSize(25) .highlighterType("fvh") .requireFieldMatch(requireFieldMatch); - resp = req.setQuery(twoFieldsQuery).highlighter(new HighlightBuilder().field(fooField).field(barField)).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("junk junk cats junk junk")); - assertHighlight(resp, 0, "bar", 0, equalTo("cat cat junk junk junk junk")); + assertResponse(req.setQuery(twoFieldsQuery).highlighter(new HighlightBuilder().field(fooField).field(barField)), 
response -> { + assertHighlight(response, 0, "foo", 0, equalTo("junk junk cats junk junk")); + assertHighlight(response, 0, "bar", 0, equalTo("cat cat junk junk junk junk")); + }); // And you can enable matchedField highlighting on both barField.matchedFields("bar", "bar.plain"); - resp = req.setQuery(twoFieldsQuery).highlighter(new HighlightBuilder().field(fooField).field(barField)).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("junk junk cats junk junk")); - assertHighlight(resp, 0, "bar", 0, equalTo("junk junk cats junk junk")); + assertResponse(req.setQuery(twoFieldsQuery).highlighter(new HighlightBuilder().field(fooField).field(barField)), response -> { + assertHighlight(response, 0, "foo", 0, equalTo("junk junk cats junk junk")); + assertHighlight(response, 0, "bar", 0, equalTo("junk junk cats junk junk")); + }); // Setting a matchedField that isn't searched/doesn't exist is simply ignored. barField.matchedFields("bar", "candy"); - resp = req.setQuery(twoFieldsQuery).highlighter(new HighlightBuilder().field(fooField).field(barField)).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("junk junk cats junk junk")); - assertHighlight(resp, 0, "bar", 0, equalTo("cat cat junk junk junk junk")); + assertResponse(req.setQuery(twoFieldsQuery).highlighter(new HighlightBuilder().field(fooField).field(barField)), response -> { + assertHighlight(response, 0, "foo", 0, equalTo("junk junk cats junk junk")); + assertHighlight(response, 0, "bar", 0, equalTo("cat cat junk junk junk junk")); + }); // If the stored field doesn't have a value it doesn't matter what you match, you get nothing. barField.matchedFields("bar", "foo.plain"); - resp = req.setQuery(queryStringQuery("running scissors").field("foo.plain").field("bar")) - .highlighter(new HighlightBuilder().field(fooField).field(barField)) - .get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); - assertThat(resp.getHits().getAt(0).getHighlightFields(), not(hasKey("bar"))); + assertResponse( + req.setQuery(queryStringQuery("running scissors").field("foo.plain").field("bar")) + .highlighter(new HighlightBuilder().field(fooField).field(barField)), + response -> { + assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")); + assertThat(response.getHits().getAt(0).getHighlightFields(), not(hasKey("bar"))); + } + ); // If the stored field is found but the matched field isn't then you don't get a result either. 
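// Aside on testFVHManyMatches earlier in this diff: it compares timings taken from two
// separate assertResponse lambdas, and Java lambdas may only read effectively-final
// locals, hence the `final long[] took... = new long[1]` boxes. A self-contained
// illustration of that capture trick (the helper and the numbers are hypothetical):

import java.util.function.Consumer;

final class LambdaCaptureSketch {
    static void withTookMillis(long tookMillis, Consumer<Long> consumer) {
        consumer.accept(tookMillis);
    }

    public static void main(String[] args) {
        final long[] tookDefaultPhrase = new long[1]; // mutable box; the array reference itself stays final
        withTookMillis(300L, took -> tookDefaultPhrase[0] = took);
        final long[] tookLargePhrase = new long[1];
        withTookMillis(8000L, took -> tookLargePhrase[0] = took);
        // The comparison runs outside both lambdas, on values written inside them.
        if (tookDefaultPhrase[0] >= tookLargePhrase[0]) {
            throw new AssertionError("expected the default phrase limit to be faster");
        }
    }
}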
fooField.matchedFields("bar.plain"); - resp = req.setQuery(queryStringQuery("running scissors").field("foo").field("foo.plain").field("bar").field("bar.plain")) - .highlighter(new HighlightBuilder().field(fooField).field(barField)) - .get(); - assertThat(resp.getHits().getAt(0).getHighlightFields(), not(hasKey("foo"))); + assertResponse( + req.setQuery(queryStringQuery("running scissors").field("foo").field("foo.plain").field("bar").field("bar.plain")) + .highlighter(new HighlightBuilder().field(fooField).field(barField)), + response -> assertThat(response.getHits().getAt(0).getHighlightFields(), not(hasKey("foo"))) + ); // But if you add the stored field to the list of matched fields then you'll get a result again fooField.matchedFields("foo", "bar.plain"); - resp = req.setQuery(queryStringQuery("running scissors").field("foo").field("foo.plain").field("bar").field("bar.plain")) - .highlighter(new HighlightBuilder().field(fooField).field(barField)) - .get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); - assertThat(resp.getHits().getAt(0).getHighlightFields(), not(hasKey("bar"))); + assertResponse( + req.setQuery(queryStringQuery("running scissors").field("foo").field("foo.plain").field("bar").field("bar.plain")) + .highlighter(new HighlightBuilder().field(fooField).field(barField)), + response -> { + assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")); + assertThat(response.getHits().getAt(0).getHighlightFields(), not(hasKey("bar"))); + } + ); // You _can_ highlight fields that aren't subfields of one another. - resp = req.setQuery(queryStringQuery("weird").field("foo").field("foo.plain").field("bar").field("bar.plain")) - .highlighter(new HighlightBuilder().field(fooField).field(barField)) - .get(); - assertHighlight(resp, 0, "foo", 0, equalTo("weird")); - assertHighlight(resp, 0, "bar", 0, equalTo("result")); + assertResponse( + req.setQuery(queryStringQuery("weird").field("foo").field("foo.plain").field("bar").field("bar.plain")) + .highlighter(new HighlightBuilder().field(fooField).field(barField)), + response -> { + assertHighlight(response, 0, "foo", 0, equalTo("weird")); + assertHighlight(response, 0, "bar", 0, equalTo("result")); + } + ); assertFailures( req.setQuery(queryStringQuery("result").field("foo").field("foo.plain").field("bar").field("bar.plain")), @@ -1202,21 +1276,24 @@ public void testFastVectorHighlighterManyDocs() throws Exception { int COUNT = between(20, 100); IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[COUNT]; for (int i = 0; i < COUNT; i++) { - indexRequestBuilders[i] = client().prepareIndex("test").setId(Integer.toString(i)).setSource("field1", "test " + i); + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)).setSource("field1", "test " + i); } logger.info("--> indexing docs"); indexRandom(true, indexRequestBuilders); logger.info("--> searching explicitly on field1 and highlighting on it"); - SearchResponse searchResponse = prepareSearch().setSize(COUNT) - .setQuery(termQuery("field1", "test")) - .highlighter(new HighlightBuilder().field("field1", 100, 0)) - .get(); - for (int i = 0; i < COUNT; i++) { - SearchHit hit = searchResponse.getHits().getHits()[i]; - // LUCENE 3.1 UPGRADE: Caused adding the space at the end... 
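// Besides the assertResponse conversions, this diff consistently shortens
// client().prepareIndex("test") to prepareIndex("test"). That reads as a delegating
// convenience on the test base class; a sketch under that assumption (stub types and
// names, not the actual ESIntegTestCase code):

final class PrepareIndexShorthandSketch {
    static class IndexRequestBuilder {
        IndexRequestBuilder setId(String id) {
            return this;
        }

        IndexRequestBuilder setSource(Object... keyValuePairs) {
            return this;
        }
    }

    interface Client {
        IndexRequestBuilder prepareIndex(String index);
    }

    static class IntegTestBase {
        Client client() {
            return index -> new IndexRequestBuilder();
        }

        // One-line delegate so subclasses can drop the client() noise at every call site.
        IndexRequestBuilder prepareIndex(String index) {
            return client().prepareIndex(index);
        }
    }
}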
- assertHighlight(searchResponse, i, "field1", 0, 1, equalTo("test " + hit.getId())); - } + assertResponse( + prepareSearch().setSize(COUNT) + .setQuery(termQuery("field1", "test")) + .highlighter(new HighlightBuilder().field("field1", 100, 0)), + response -> { + for (int i = 0; i < COUNT; i++) { + SearchHit hit = response.getHits().getHits()[i]; + // LUCENE 3.1 UPGRADE: Caused adding the space at the end... + assertHighlight(response, i, "field1", 0, 1, equalTo("test " + hit.getId())); + } + } + ); } public XContentBuilder type1TermVectorMapping() throws IOException { @@ -1242,19 +1319,26 @@ public void testSameContent() throws Exception { IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5]; for (int i = 0; i < 5; i++) { - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource("title", "This is a test on the highlighting bug present in elasticsearch"); } indexRandom(true, indexRequestBuilders); - SearchResponse search = prepareSearch().setQuery(matchQuery("title", "bug")) - .highlighter(new HighlightBuilder().field("title", -1, 0)) - .get(); - - for (int i = 0; i < 5; i++) { - assertHighlight(search, i, "title", 0, 1, equalTo("This is a test on the highlighting bug present in elasticsearch")); - } + assertResponse( + prepareSearch().setQuery(matchQuery("title", "bug")).highlighter(new HighlightBuilder().field("title", -1, 0)), + response -> { + for (int i = 0; i < 5; i++) { + assertHighlight( + response, + i, + "title", + 0, + 1, + equalTo("This is a test on the highlighting bug present in elasticsearch") + ); + } + } + ); } public void testFastVectorHighlighterOffsetParameter() throws Exception { @@ -1262,20 +1346,21 @@ public void testFastVectorHighlighterOffsetParameter() throws Exception { IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5]; for (int i = 0; i < 5; i++) { - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource("title", "This is a test on the highlighting bug present in elasticsearch"); } indexRandom(true, indexRequestBuilders); - SearchResponse search = prepareSearch().setQuery(matchQuery("title", "bug")) - .highlighter(new HighlightBuilder().field("title", 30, 1, 10).highlighterType("fvh")) - .get(); - - for (int i = 0; i < 5; i++) { - // LUCENE 3.1 UPGRADE: Caused adding the space at the end... - assertHighlight(search, i, "title", 0, 1, equalTo("highlighting bug present in elasticsearch")); - } + assertResponse( + prepareSearch().setQuery(matchQuery("title", "bug")) + .highlighter(new HighlightBuilder().field("title", 30, 1, 10).highlighterType("fvh")), + response -> { + for (int i = 0; i < 5; i++) { + // LUCENE 3.1 UPGRADE: Caused adding the space at the end... + assertHighlight(response, i, "title", 0, 1, equalTo("highlighting bug present in elasticsearch")); + } + } + ); } public void testEscapeHtml() throws Exception { @@ -1283,19 +1368,27 @@ public void testEscapeHtml() throws Exception { IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5]; for (int i = 0; i < indexRequestBuilders.length; i++) { - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource("title", "This is a html escaping highlighting test for *&? 
elasticsearch"); } indexRandom(true, indexRequestBuilders); - SearchResponse search = prepareSearch().setQuery(matchQuery("title", "test")) - .highlighter(new HighlightBuilder().encoder("html").field("title", 50, 1, 10)) - .get(); - - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight(search, i, "title", 0, 1, startsWith("This is a html escaping highlighting test for *&?")); - } + assertResponse( + prepareSearch().setQuery(matchQuery("title", "test")) + .highlighter(new HighlightBuilder().encoder("html").field("title", 50, 1, 10)), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight( + response, + i, + "title", + 0, + 1, + startsWith("This is a html escaping highlighting test for *&?") + ); + } + } + ); } public void testEscapeHtmlVector() throws Exception { @@ -1303,19 +1396,20 @@ public void testEscapeHtmlVector() throws Exception { IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5]; for (int i = 0; i < 5; i++) { - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource("title", "This is a html escaping highlighting test for *&? elasticsearch"); } indexRandom(true, indexRequestBuilders); - SearchResponse search = prepareSearch().setQuery(matchQuery("title", "test")) - .highlighter(new HighlightBuilder().encoder("html").field("title", 30, 1, 10).highlighterType("plain")) - .get(); - - for (int i = 0; i < 5; i++) { - assertHighlight(search, i, "title", 0, 1, equalTo(" highlighting test for *&? elasticsearch")); - } + assertResponse( + prepareSearch().setQuery(matchQuery("title", "test")) + .highlighter(new HighlightBuilder().encoder("html").field("title", 30, 1, 10).highlighterType("plain")), + response -> { + for (int i = 0; i < 5; i++) { + assertHighlight(response, i, "title", 0, 1, equalTo(" highlighting test for *&? 
elasticsearch")); + } + } + ); } public void testMultiMapperVectorWithStore() throws Exception { @@ -1344,7 +1438,7 @@ public void testMultiMapperVectorWithStore() throws Exception { ) ); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); + prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); // simple search on body with standard analyzer with a simple field query @@ -1397,7 +1491,7 @@ public void testMultiMapperVectorFromSource() throws Exception { ); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); + prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); // simple search on body with standard analyzer with a simple field query @@ -1450,7 +1544,7 @@ public void testMultiMapperNoVectorWithStore() throws Exception { ); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); + prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); // simple search on body with standard analyzer with a simple field query @@ -1502,7 +1596,7 @@ public void testMultiMapperNoVectorFromSource() throws Exception { ) ); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); + prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); // simple search on body with standard analyzer with a simple field query @@ -1534,8 +1628,7 @@ public void testFastVectorHighlighterShouldFailIfNoTermVectors() throws Exceptio IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5]; for (int i = 0; i < 5; i++) { - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource("title", "This is a test for the enabling fast vector highlighter"); } indexRandom(true, indexRequestBuilders); @@ -1567,61 +1660,66 @@ public void testDisableFastVectorHighlighter() throws Exception { IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5]; for (int i = 0; i < indexRequestBuilders.length; i++) { - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource("title", "This is a test for the workaround for the fast vector highlighting SOLR-3724"); } indexRandom(true, indexRequestBuilders); - SearchResponse search = prepareSearch().setQuery(matchPhraseQuery("title", "test for the workaround")) - .highlighter(new HighlightBuilder().field("title", 50, 1, 10).highlighterType("fvh")) - .get(); + assertResponse( + prepareSearch().setQuery(matchPhraseQuery("title", "test for the workaround")) + .highlighter(new HighlightBuilder().field("title", 50, 1, 10).highlighterType("fvh")), + response -> { - for (int i = 0; i < indexRequestBuilders.length; i++) { - // Because of SOLR-3724 nothing is highlighted when FVH is used - assertNotHighlighted(search, i, "title"); - } + for (int i = 0; i < indexRequestBuilders.length; i++) { + // Because of SOLR-3724 nothing is highlighted when FVH is used + assertNotHighlighted(response, i, "title"); + } + } + ); // Using plain highlighter instead of FVH - search = prepareSearch().setQuery(matchPhraseQuery("title", "test for the workaround")) - .highlighter(new HighlightBuilder().field("title", 50, 1, 10).highlighterType("plain")) - .get(); - - for (int i = 
0; i < indexRequestBuilders.length; i++) { - assertHighlight( - search, - i, - "title", - 0, - 1, - equalTo("This is a test for the workaround for the fast vector highlighting SOLR-3724") - ); - } + assertResponse( + prepareSearch().setQuery(matchPhraseQuery("title", "test for the workaround")) + .highlighter(new HighlightBuilder().field("title", 50, 1, 10).highlighterType("plain")), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight( + response, + i, + "title", + 0, + 1, + equalTo("This is a test for the workaround for the fast vector highlighting SOLR-3724") + ); + } + } + ); // Using plain highlighter instead of FVH on the field level - search = prepareSearch().setQuery(matchPhraseQuery("title", "test for the workaround")) - .highlighter( - new HighlightBuilder().field(new HighlightBuilder.Field("title").highlighterType("plain")).highlighterType("plain") - ) - .get(); - - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight( - search, - i, - "title", - 0, - 1, - equalTo("This is a test for the workaround for the fast vector highlighting SOLR-3724") - ); - } + assertResponse( + prepareSearch().setQuery(matchPhraseQuery("title", "test for the workaround")) + .highlighter( + new HighlightBuilder().field(new HighlightBuilder.Field("title").highlighterType("plain")).highlighterType("plain") + ), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight( + response, + i, + "title", + 0, + 1, + equalTo("This is a test for the workaround for the fast vector highlighting SOLR-3724") + ); + } + } + ); } public void testFSHHighlightAllMvFragments() throws Exception { assertAcked(prepareCreate("test").setMapping("tags", "type=text,term_vector=with_positions_offsets")); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( "tags", new String[] { @@ -1631,25 +1729,27 @@ public void testFSHHighlightAllMvFragments() throws Exception { .get(); refresh(); - SearchResponse response = prepareSearch("test").setQuery(QueryBuilders.matchQuery("tags", "tag")) - .highlighter(new HighlightBuilder().field("tags", -1, 0).highlighterType("fvh")) - .get(); - - assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); - assertHighlight( - response, - 0, - "tags", - 1, - 2, - equalTo("here is another one that is very long and has the tag token near the end") + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchQuery("tags", "tag")) + .highlighter(new HighlightBuilder().field("tags", -1, 0).highlighterType("fvh")), + response -> { + assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); + assertHighlight( + response, + 0, + "tags", + 1, + 2, + equalTo("here is another one that is very long and has the tag token near the end") + ); + } ); } public void testBoostingQuery() { createIndex("test"); ensureGreen(); - client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); + prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); logger.info("--> highlighting and searching on field1"); @@ -1669,7 +1769,7 @@ public void testBoostingQuery() { public void testBoostingQueryTermVector() throws IOException { assertAcked(prepareCreate("test").setMapping(type1TermVectorMapping())); ensureGreen(); - 
client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); + prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); logger.info("--> highlighting and searching on field1"); @@ -1689,8 +1789,7 @@ public void testBoostingQueryTermVector() throws IOException { public void testPlainHighlightDifferentFragmenter() throws Exception { assertAcked(prepareCreate("test").setMapping("tags", "type=text")); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .array( @@ -1703,40 +1802,44 @@ public void testPlainHighlightDifferentFragmenter() throws Exception { .get(); refresh(); - SearchResponse response = prepareSearch("test").setQuery(QueryBuilders.matchPhraseQuery("tags", "long tag")) - .highlighter( - new HighlightBuilder().field( - new HighlightBuilder.Field("tags").highlighterType("plain").fragmentSize(-1).numOfFragments(2).fragmenter("simple") - ) - ) - .get(); - - assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); - assertHighlight( - response, - 0, - "tags", - 1, - 2, - equalTo("here is another one that is very long tag and has the tag token near the end") + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchPhraseQuery("tags", "long tag")) + .highlighter( + new HighlightBuilder().field( + new HighlightBuilder.Field("tags").highlighterType("plain").fragmentSize(-1).numOfFragments(2).fragmenter("simple") + ) + ), + response -> { + assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); + assertHighlight( + response, + 0, + "tags", + 1, + 2, + equalTo("here is another one that is very long tag and has the tag token near the end") + ); + } ); - response = prepareSearch("test").setQuery(QueryBuilders.matchPhraseQuery("tags", "long tag")) - .highlighter( - new HighlightBuilder().field( - new Field("tags").highlighterType("plain").fragmentSize(-1).numOfFragments(2).fragmenter("span") - ) - ) - .get(); - - assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); - assertHighlight( - response, - 0, - "tags", - 1, - 2, - equalTo("here is another one that is very long tag and has the tag token near the end") + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchPhraseQuery("tags", "long tag")) + .highlighter( + new HighlightBuilder().field( + new Field("tags").highlighterType("plain").fragmentSize(-1).numOfFragments(2).fragmenter("span") + ) + ), + response -> { + assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); + assertHighlight( + response, + 0, + "tags", + 1, + 2, + equalTo("here is another one that is very long tag and has the tag token near the end") + ); + } ); assertFailures( @@ -1758,14 +1861,18 @@ public void testPlainHighlighterMultipleFields() { indexDoc("test", "1", "field1", "The quick brown fox", "field2", "The slow brown fox"); refresh(); - SearchResponse response = prepareSearch("test").setQuery(QueryBuilders.matchQuery("field1", "fox")) - .highlighter( - new HighlightBuilder().field(new HighlightBuilder.Field("field1").preTags("<1>").postTags("").requireFieldMatch(true)) - .field(new HighlightBuilder.Field("field2").preTags("<2>").postTags("").requireFieldMatch(false)) - ) - .get(); - assertHighlight(response, 0, "field1", 
0, 1, equalTo("The quick brown <1>fox")); - assertHighlight(response, 0, "field2", 0, 1, equalTo("The slow brown <2>fox")); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchQuery("field1", "fox")) + .highlighter( + new HighlightBuilder().field( + new HighlightBuilder.Field("field1").preTags("<1>").postTags("").requireFieldMatch(true) + ).field(new HighlightBuilder.Field("field2").preTags("<2>").postTags("").requireFieldMatch(false)) + ), + response -> { + assertHighlight(response, 0, "field1", 0, 1, equalTo("The quick brown <1>fox")); + assertHighlight(response, 0, "field2", 0, 1, equalTo("The slow brown <2>fox")); + } + ); } public void testFastVectorHighlighterMultipleFields() { @@ -1782,31 +1889,36 @@ public void testFastVectorHighlighterMultipleFields() { indexDoc("test", "1", "field1", "The quick brown fox", "field2", "The slow brown fox"); refresh(); - SearchResponse response = prepareSearch("test").setQuery(QueryBuilders.matchQuery("field1", "fox")) - .highlighter( - new HighlightBuilder().field(new HighlightBuilder.Field("field1").preTags("<1>").postTags("").requireFieldMatch(true)) - .field(new HighlightBuilder.Field("field2").preTags("<2>").postTags("").requireFieldMatch(false)) - ) - .get(); - assertHighlight(response, 0, "field1", 0, 1, equalTo("The quick brown <1>fox")); - assertHighlight(response, 0, "field2", 0, 1, equalTo("The slow brown <2>fox")); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchQuery("field1", "fox")) + .highlighter( + new HighlightBuilder().field( + new HighlightBuilder.Field("field1").preTags("<1>").postTags("").requireFieldMatch(true) + ).field(new HighlightBuilder.Field("field2").preTags("<2>").postTags("").requireFieldMatch(false)) + ), + response -> { + assertHighlight(response, 0, "field1", 0, 1, equalTo("The quick brown <1>fox")); + assertHighlight(response, 0, "field2", 0, 1, equalTo("The slow brown <2>fox")); + } + ); } public void testMissingStoredField() throws Exception { assertAcked(prepareCreate("test").setMapping("highlight_field", "type=text,store=true")); ensureGreen(); - client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("field", "highlight").endObject()).get(); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("field", "highlight").endObject()).get(); refresh(); // This query used to fail when the field to highlight was absent - SearchResponse response = prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "highlight")) - .highlighter( - new HighlightBuilder().field( - new HighlightBuilder.Field("highlight_field").fragmentSize(-1).numOfFragments(1).fragmenter("simple") - ) - ) - .get(); - assertThat(response.getHits().getHits()[0].getHighlightFields().isEmpty(), equalTo(true)); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "highlight")) + .highlighter( + new HighlightBuilder().field( + new HighlightBuilder.Field("highlight_field").fragmentSize(-1).numOfFragments(1).fragmenter("simple") + ) + ), + response -> assertThat(response.getHits().getHits()[0].getHighlightFields().isEmpty(), equalTo(true)) + ); } // Issue #3211 @@ -1831,8 +1943,7 @@ public void testNumericHighlighting() throws Exception { ); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource("text", "elasticsearch test", "byte", 25, "short", 42, "int", 100, "long", -1, "float", 3.2f, "double", 42.42) .get(); refresh(); @@ -1862,7 +1973,7 @@ public void testResetTwice() 
throws Exception { ).setMapping("text", "type=text,analyzer=my_analyzer") ); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("text", "elasticsearch test").get(); + prepareIndex("test").setId("1").setSource("text", "elasticsearch test").get(); refresh(); // Mock tokenizer will throw an exception if it is resetted twice @@ -1891,22 +2002,19 @@ public void testHighlightUsesHighlightQuery() throws IOException { .highlighter(highlightBuilder); Matcher searchQueryMatcher = equalTo("Testing the highlight query feature"); - SearchResponse response = search.get(); - assertHighlight(response, 0, "text", 0, searchQueryMatcher); + assertResponse(search, response -> assertHighlight(response, 0, "text", 0, searchQueryMatcher)); field = new HighlightBuilder.Field("text"); Matcher hlQueryMatcher = equalTo("Testing the highlight query feature"); field.highlightQuery(matchQuery("text", "query")); highlightBuilder = new HighlightBuilder().field(field); search = prepareSearch("test").setQuery(QueryBuilders.matchQuery("text", "testing")).highlighter(highlightBuilder); - response = search.get(); - assertHighlight(response, 0, "text", 0, hlQueryMatcher); + assertResponse(search, response -> assertHighlight(response, 0, "text", 0, hlQueryMatcher)); // Make sure the highlightQuery is taken into account when it is set on the highlight context instead of the field highlightBuilder.highlightQuery(matchQuery("text", "query")); field.highlighterType(type).highlightQuery(null); - response = search.get(); - assertHighlight(response, 0, "text", 0, hlQueryMatcher); + assertResponse(search, response -> assertHighlight(response, 0, "text", 0, hlQueryMatcher)); } } @@ -2212,28 +2320,28 @@ public void testHighlightNoMatchSizeNumberOfFragments() { // if there's a match we only return the values with matches (whole value as number_of_fragments == 0) MatchQueryBuilder queryBuilder = QueryBuilders.matchQuery("text", "third fifth"); field.highlighterType("plain"); - SearchResponse response = prepareSearch("test").setQuery(queryBuilder).highlighter(new HighlightBuilder().field(field)).get(); - assertHighlight(response, 0, "text", 0, 2, equalTo("This is the third sentence. This is the fourth sentence.")); - assertHighlight(response, 0, "text", 1, 2, equalTo("This is the fifth sentence")); + assertResponse(prepareSearch("test").setQuery(queryBuilder).highlighter(new HighlightBuilder().field(field)), response -> { + assertHighlight(response, 0, "text", 0, 2, equalTo("This is the third sentence. This is the fourth sentence.")); + assertHighlight(response, 0, "text", 1, 2, equalTo("This is the fifth sentence")); + }); field.highlighterType("fvh"); - response = prepareSearch("test").setQuery(queryBuilder).highlighter(new HighlightBuilder().field(field)).get(); - assertHighlight(response, 0, "text", 0, 2, equalTo("This is the third sentence. This is the fourth sentence.")); - assertHighlight(response, 0, "text", 1, 2, equalTo("This is the fifth sentence")); - + assertResponse(prepareSearch("test").setQuery(queryBuilder).highlighter(new HighlightBuilder().field(field)), response -> { + assertHighlight(response, 0, "text", 0, 2, equalTo("This is the third sentence. This is the fourth sentence.")); + assertHighlight(response, 0, "text", 1, 2, equalTo("This is the fifth sentence")); + }); field.highlighterType("unified"); - response = prepareSearch("test").setQuery(queryBuilder).highlighter(new HighlightBuilder().field(field)).get(); - assertHighlight(response, 0, "text", 0, 2, equalTo("This is the third sentence. 
This is the fourth sentence.")); - assertHighlight(response, 0, "text", 1, 2, equalTo("This is the fifth sentence")); + assertResponse(prepareSearch("test").setQuery(queryBuilder).highlighter(new HighlightBuilder().field(field)), response -> { + assertHighlight(response, 0, "text", 0, 2, equalTo("This is the third sentence. This is the fourth sentence.")); + assertHighlight(response, 0, "text", 1, 2, equalTo("This is the fifth sentence")); + }); } public void testPostingsHighlighter() throws Exception { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test") - .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy quick dog") - .get(); + prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy quick dog").get(); refresh(); logger.info("--> highlighting and searching on field1"); @@ -2312,8 +2420,7 @@ public void testPostingsHighlighterNumberOfFragments() throws Exception { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( "field1", "The quick brown fox jumps over the lazy dog. The lazy red fox jumps over the quick dog. " @@ -2329,24 +2436,22 @@ public void testPostingsHighlighterNumberOfFragments() throws Exception { SearchSourceBuilder source = searchSource().query(termQuery("field1", "fox")) .highlighter(highlight().field(new Field("field1").numOfFragments(5).preTags("").postTags(""))); - SearchResponse searchResponse = client().search(new SearchRequest("test").source(source)).actionGet(); - - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertHighlight( - searchResponse, - 0, - "field1", - 0, - 2, - equalTo( - "The quick brown fox jumps over the lazy dog." - + " The lazy red fox jumps over the quick dog." - ) - ); - assertHighlight(searchResponse, 0, "field1", 1, 2, equalTo("The quick brown dog jumps over the lazy fox.")); - - client().prepareIndex("test") - .setId("2") + assertResponse(client().search(new SearchRequest("test").source(source)), response -> { + assertThat(response.getHits().getHits().length, equalTo(1)); + assertHighlight( + response, + 0, + "field1", + 0, + 2, + equalTo( + "The quick brown fox jumps over the lazy dog." + + " The lazy red fox jumps over the quick dog." + ) + ); + assertHighlight(response, 0, "field1", 1, 2, equalTo("The quick brown dog jumps over the lazy fox.")); + }); + prepareIndex("test").setId("2") .setSource( "field1", new String[] { @@ -2360,39 +2465,40 @@ public void testPostingsHighlighterNumberOfFragments() throws Exception { source = searchSource().query(termQuery("field1", "fox")) .highlighter(highlight().field(new Field("field1").numOfFragments(0).preTags("").postTags(""))); - searchResponse = client().search(new SearchRequest("test").source(source)).actionGet(); - assertHitCount(searchResponse, 2L); - - for (SearchHit searchHit : searchResponse.getHits()) { - if ("1".equals(searchHit.getId())) { - assertHighlight( - searchHit, - "field1", - 0, - 1, - equalTo( - "The quick brown fox jumps over the lazy dog. " - + "The lazy red fox jumps over the quick dog. " - + "The quick brown dog jumps over the lazy fox." - ) - ); - } else if ("2".equals(searchHit.getId())) { - assertHighlight( - searchHit, - "field1", - 0, - 3, - equalTo("The quick brown fox jumps over the lazy dog. 
Second sentence not finished") - ); - assertHighlight(searchHit, "field1", 1, 3, equalTo("The lazy red fox jumps over the quick dog.")); - assertHighlight(searchHit, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy fox.")); - } else { - fail("Only hits with id 1 and 2 are returned"); + assertResponse(client().search(new SearchRequest("test").source(source)), response -> { + assertHitCount(response, 2L); + + for (SearchHit searchHit : response.getHits()) { + if ("1".equals(searchHit.getId())) { + assertHighlight( + searchHit, + "field1", + 0, + 1, + equalTo( + "The quick brown fox jumps over the lazy dog. " + + "The lazy red fox jumps over the quick dog. " + + "The quick brown dog jumps over the lazy fox." + ) + ); + } else if ("2".equals(searchHit.getId())) { + assertHighlight( + searchHit, + "field1", + 0, + 3, + equalTo("The quick brown fox jumps over the lazy dog. Second sentence not finished") + ); + assertHighlight(searchHit, "field1", 1, 3, equalTo("The lazy red fox jumps over the quick dog.")); + assertHighlight(searchHit, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy fox.")); + } else { + fail("Only hits with id 1 and 2 are returned"); + } } - } + }); } - public void testMultiMatchQueryHighlight() throws IOException { + public void testMultiMatchQueryHighlight() throws Exception { XContentBuilder mapping = XContentFactory.jsonBuilder() .startObject() .startObject("_doc") @@ -2412,9 +2518,7 @@ public void testMultiMatchQueryHighlight() throws IOException { .endObject(); assertAcked(prepareCreate("test").setMapping(mapping)); ensureGreen(); - client().prepareIndex("test") - .setSource("field1", "The quick brown fox jumps over", "field2", "The quick brown fox jumps over") - .get(); + prepareIndex("test").setSource("field1", "The quick brown fox jumps over", "field2", "The quick brown fox jumps over").get(); refresh(); final int iters = scaledRandomIntBetween(20, 30); for (int i = 0; i < iters; i++) { @@ -2429,22 +2533,23 @@ public void testMultiMatchQueryHighlight() throws IOException { .field(new Field("field1").requireFieldMatch(true).preTags("").postTags("")) ); logger.info("Running multi-match type: [{}] highlight with type: [{}]", matchQueryType, highlighterType); - SearchResponse searchResponse = client().search(new SearchRequest("test").source(source)).actionGet(); - assertHitCount(searchResponse, 1L); - assertHighlight( - searchResponse, - 0, - "field1", - 0, - anyOf( - equalTo("The quick brown fox jumps over"), - equalTo("The quick brown fox jumps over") - ) - ); + assertResponse(client().search(new SearchRequest("test").source(source)), response -> { + assertHitCount(response, 1L); + assertHighlight( + response, + 0, + "field1", + 0, + anyOf( + equalTo("The quick brown fox jumps over"), + equalTo("The quick brown fox jumps over") + ) + ); + }); } } - public void testCombinedFieldsQueryHighlight() throws IOException { + public void testCombinedFieldsQueryHighlight() throws Exception { XContentBuilder mapping = XContentFactory.jsonBuilder() .startObject() .startObject("_doc") @@ -2465,9 +2570,7 @@ public void testCombinedFieldsQueryHighlight() throws IOException { assertAcked(prepareCreate("test").setMapping(mapping)); ensureGreen(); - client().prepareIndex("test") - .setSource("field1", "The quick brown fox jumps over", "field2", "The quick brown fox jumps over") - .get(); + prepareIndex("test").setSource("field1", "The quick brown fox jumps over", "field2", "The quick brown fox jumps over").get(); refresh(); for (String highlighterType : 
ALL_TYPES) { @@ -2478,15 +2581,16 @@ public void testCombinedFieldsQueryHighlight() throws IOException { .field(new Field("field1").requireFieldMatch(true).preTags("<field1>").postTags("</field1>")) ); - SearchResponse searchResponse = client().search(new SearchRequest("test").source(source)).actionGet(); - assertHitCount(searchResponse, 1L); - assertHighlight( - searchResponse, - 0, - "field1", - 0, - equalTo("<field1>The</field1> <field1>quick</field1> <field1>brown</field1> <field1>fox</field1> jumps over") - ); + assertResponse(client().search(new SearchRequest("test").source(source)), response -> { + assertHitCount(response, 1L); + assertHighlight( + response, + 0, + "field1", + 0, + equalTo("<field1>The</field1> <field1>quick</field1> <field1>brown</field1> <field1>fox</field1> jumps over") + ); + }); } } @@ -2494,49 +2598,47 @@ public void testPostingsHighlighterOrderByScore() throws Exception { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test") - .setSource( - "field1", - new String[] { - "This sentence contains one match, not that short. This sentence contains two sentence matches. " - + "This one contains no matches.", - "This is the second value's first sentence. This one contains no matches. " - + "This sentence contains three sentence occurrences (sentence).", - "One sentence match here and scored lower since the text is quite long, not that appealing. " - + "This one contains no matches." } - ) - .get(); + prepareIndex("test").setSource( + "field1", + new String[] { + "This sentence contains one match, not that short. This sentence contains two sentence matches. " + + "This one contains no matches.", + "This is the second value's first sentence. This one contains no matches. " + + "This sentence contains three sentence occurrences (sentence).", + "One sentence match here and scored lower since the text is quite long, not that appealing. " + + "This one contains no matches." } + ).get(); refresh(); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "sentence")) .highlighter(highlight().field("field1").order("score")); - SearchResponse searchResponse = client().search(new SearchRequest("test").source(source)).actionGet(); - - Map<String, HighlightField> highlightFieldMap = searchResponse.getHits().getAt(0).getHighlightFields(); - assertThat(highlightFieldMap.size(), equalTo(1)); - HighlightField field1 = highlightFieldMap.get("field1"); - assertThat(field1.fragments().length, equalTo(4)); - assertThat( - field1.fragments()[0].string(), - equalTo("This <em>sentence</em> contains three <em>sentence</em> occurrences (<em>sentence</em>).") - ); - assertThat( - field1.fragments()[1].string(), - equalTo( - "This <em>sentence</em> contains one match, not that short. " - + "This <em>sentence</em> contains two <em>sentence</em> matches." - ) - ); - assertThat( - field1.fragments()[2].string(), - equalTo("This is the second value's first <em>sentence</em>. 
This one contains no matches.") - ); - assertThat( - field1.fragments()[3].string(), - equalTo("One <em>sentence</em> match here and scored lower since the text is quite long, not that appealing.") - ); + assertResponse(client().search(new SearchRequest("test").source(source)), response -> { + Map<String, HighlightField> highlightFieldMap = response.getHits().getAt(0).getHighlightFields(); + assertThat(highlightFieldMap.size(), equalTo(1)); + HighlightField field1 = highlightFieldMap.get("field1"); + assertThat(field1.fragments().length, equalTo(4)); + assertThat( + field1.fragments()[0].string(), + equalTo("This <em>sentence</em> contains three <em>sentence</em> occurrences (<em>sentence</em>).") + ); + assertThat( + field1.fragments()[1].string(), + equalTo( + "This <em>sentence</em> contains one match, not that short. " + + "This <em>sentence</em> contains two <em>sentence</em> matches." + ) + ); + assertThat( + field1.fragments()[2].string(), + equalTo("This is the second value's first <em>sentence</em>. This one contains no matches.") + ); + assertThat( + field1.fragments()[3].string(), + equalTo("One <em>sentence</em> match here and scored lower since the text is quite long, not that appealing.") + ); + }); } public void testPostingsHighlighterEscapeHtml() throws Exception { @@ -2544,26 +2646,26 @@ public void testPostingsHighlighterEscapeHtml() throws Exception { IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5]; for (int i = 0; i < 5; i++) { - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource("title", "This is a html escaping highlighting test for *&? elasticsearch"); } indexRandom(true, indexRequestBuilders); - SearchResponse searchResponse = prepareSearch().setQuery(matchQuery("title", "test")) - .highlighter(new HighlightBuilder().field("title").encoder("html")) - .get(); - - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight( - searchResponse, - i, - "title", - 0, - 1, - equalTo("This is a html escaping highlighting <em>test</em> for *&amp;? elasticsearch") - ); - } + assertResponse( + prepareSearch().setQuery(matchQuery("title", "test")).highlighter(new HighlightBuilder().field("title").encoder("html")), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight( + response, + i, + "title", + 0, + 1, + equalTo("This is a html escaping highlighting <em>test</em> for *&amp;? elasticsearch") + ); + } + } + ); } public void testPostingsHighlighterMultiMapperWithStore() throws Exception { @@ -2592,35 +2694,39 @@ public void testPostingsHighlighterMultiMapperWithStore() throws Exception { ) ); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("title", "this is a test . Second sentence.").get(); + prepareIndex("test").setId("1").setSource("title", "this is a test . Second sentence.").get(); refresh(); // simple search on body with standard analyzer with a simple field query - SearchResponse searchResponse = prepareSearch() - // lets make sure we analyze the query and we highlight the resulting terms - .setQuery(matchQuery("title", "This is a Test")) - .highlighter(new HighlightBuilder().field("title")) - .get(); - - assertHitCount(searchResponse, 1L); - SearchHit hit = searchResponse.getHits().getAt(0); - // stopwords are not highlighted since not indexed - assertHighlight(hit, "title", 0, 1, equalTo("this is a <em>test</em> . 
Second sentence.")); - + assertResponse( + prepareSearch() + // lets make sure we analyze the query and we highlight the resulting terms + .setQuery(matchQuery("title", "This is a Test")) + .highlighter(new HighlightBuilder().field("title")), + response -> { + + assertHitCount(response, 1L); + SearchHit hit = response.getHits().getAt(0); + // stopwords are not highlighted since not indexed + assertHighlight(hit, "title", 0, 1, equalTo("this is a test . Second sentence.")); + } + ); // search on title.key and highlight on title - searchResponse = prepareSearch().setQuery(matchQuery("title.key", "this is a test")) - .highlighter(new HighlightBuilder().field("title.key")) - .get(); - assertHitCount(searchResponse, 1L); + assertResponse( + prepareSearch().setQuery(matchQuery("title.key", "this is a test")).highlighter(new HighlightBuilder().field("title.key")), + response -> { + assertHitCount(response, 1L); - // stopwords are now highlighted since we used only whitespace analyzer here - assertHighlight( - searchResponse, - 0, - "title.key", - 0, - 1, - equalTo("this is a test . Second sentence.") + // stopwords are now highlighted since we used only whitespace analyzer here + assertHighlight( + response, + 0, + "title.key", + 0, + 1, + equalTo("this is a test . Second sentence.") + ); + } ); } @@ -2651,7 +2757,7 @@ public void testPostingsHighlighterMultiMapperFromSource() throws Exception { ); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); + prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); // simple search on body with standard analyzer with a simple field query @@ -2695,8 +2801,7 @@ public void testPostingsHighlighterShouldFailIfNoOffsets() throws Exception { IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5]; for (int i = 0; i < indexRequestBuilders.length; i++) { - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource("title", "This is a test for the postings highlighter"); } indexRandom(true, indexRequestBuilders); @@ -2709,9 +2814,12 @@ public void testPostingsHighlighterShouldFailIfNoOffsets() throws Exception { public void testPostingsHighlighterBoostingQuery() throws IOException, ExecutionException, InterruptedException { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test") - .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") - .get(); + prepareIndex("test").setSource( + "field1", + "this is a test", + "field2", + "The quick brown fox jumps over the lazy dog! Second sentence." + ).get(); refresh(); logger.info("--> highlighting and searching on field1"); @@ -2750,9 +2858,12 @@ public void testPostingsHighlighterPrefixQuery() throws Exception { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test") - .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") - .get(); + prepareIndex("test").setSource( + "field1", + "this is a test", + "field2", + "The quick brown fox jumps over the lazy dog! Second sentence." 
+ ).get(); refresh(); logger.info("--> highlighting and searching on field2"); @@ -2771,9 +2882,12 @@ public void testPostingsHighlighterFuzzyQuery() throws Exception { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test") - .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") - .get(); + prepareIndex("test").setSource( + "field1", + "this is a test", + "field2", + "The quick brown fox jumps over the lazy dog! Second sentence." + ).get(); refresh(); logger.info("--> highlighting and searching on field2"); @@ -2792,9 +2906,12 @@ public void testPostingsHighlighterRegexpQuery() throws Exception { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test") - .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") - .get(); + prepareIndex("test").setSource( + "field1", + "this is a test", + "field2", + "The quick brown fox jumps over the lazy dog! Second sentence." + ).get(); refresh(); logger.info("--> highlighting and searching on field2"); @@ -2813,9 +2930,12 @@ public void testPostingsHighlighterWildcardQuery() throws Exception { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test") - .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") - .get(); + prepareIndex("test").setSource( + "field1", + "this is a test", + "field2", + "The quick brown fox jumps over the lazy dog! Second sentence." + ).get(); refresh(); logger.info("--> highlighting and searching on field2"); @@ -2830,24 +2950,18 @@ public void testPostingsHighlighterWildcardQuery() throws Exception { ); source = searchSource().query(wildcardQuery("field2", "qu*k")).highlighter(highlight().field("field2")); - SearchResponse searchResponse = prepareSearch("test").setSource(source).get(); - assertHitCount(searchResponse, 1L); + assertResponse(prepareSearch("test").setSource(source), response -> { + assertHitCount(response, 1L); - assertHighlight( - searchResponse, - 0, - "field2", - 0, - 1, - equalTo("The quick brown fox jumps over the lazy dog! Second sentence.") - ); + assertHighlight(response, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog! Second sentence.")); + }); } public void testPostingsHighlighterTermRangeQuery() throws Exception { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test").setSource("field1", "this is a test", "field2", "aaab").get(); + prepareIndex("test").setSource("field1", "this is a test", "field2", "aaab").get(); refresh(); logger.info("--> highlighting and searching on field2"); @@ -2860,9 +2974,12 @@ public void testPostingsHighlighterQueryString() throws Exception { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test") - .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") - .get(); + prepareIndex("test").setSource( + "field1", + "this is a test", + "field2", + "The quick brown fox jumps over the lazy dog! Second sentence." 
+ ).get(); refresh(); logger.info("--> highlighting and searching on field2"); @@ -2882,7 +2999,7 @@ public void testPostingsHighlighterRegexpQueryWithinConstantScoreQuery() throws assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); + prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); logger.info("--> highlighting and searching on field1"); @@ -2902,7 +3019,7 @@ public void testPostingsHighlighterMultiTermQueryMultipleLevels() throws Excepti assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); + prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); logger.info("--> highlighting and searching on field1"); @@ -2925,7 +3042,7 @@ public void testPostingsHighlighterPrefixQueryWithinBooleanQuery() throws Except assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); + prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); logger.info("--> highlighting and searching on field1"); @@ -2946,7 +3063,7 @@ public void testPostingsHighlighterQueryStringWithinFilteredQuery() throws Excep assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); + prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); logger.info("--> highlighting and searching on field1"); @@ -2976,8 +3093,7 @@ public void testPostingsHighlighterManyDocs() throws Exception { // (https://github.com/elastic/elasticsearch/issues/4103) String prefix = randomAlphaOfLengthBetween(5, 30); prefixes.put(String.valueOf(i), prefix); - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource("field1", "Sentence " + prefix + " test. Sentence two."); } logger.info("--> indexing docs"); @@ -2987,13 +3103,14 @@ public void testPostingsHighlighterManyDocs() throws Exception { SearchRequestBuilder searchRequestBuilder = prepareSearch().setSize(COUNT) .setQuery(termQuery("field1", "test")) .highlighter(new HighlightBuilder().field("field1")); - SearchResponse searchResponse = searchRequestBuilder.get(); - assertHitCount(searchResponse, COUNT); - assertThat(searchResponse.getHits().getHits().length, equalTo(COUNT)); - for (SearchHit hit : searchResponse.getHits()) { - String prefix = prefixes.get(hit.getId()); - assertHighlight(hit, "field1", 0, 1, equalTo("Sentence " + prefix + " test. Sentence two.")); - } + assertResponse(searchRequestBuilder, response -> { + assertHitCount(response, COUNT); + assertThat(response.getHits().getHits().length, equalTo(COUNT)); + for (SearchHit hit : response.getHits()) { + String prefix = prefixes.get(hit.getId()); + assertHighlight(hit, "field1", 0, 1, equalTo("Sentence " + prefix + " test. 
Sentence two.")); + } + }); } public void testDoesNotHighlightTypeName() throws Exception { @@ -3012,7 +3129,7 @@ public void testDoesNotHighlightTypeName() throws Exception { assertAcked(prepareCreate("test").setMapping(mapping)); ensureGreen(); - indexRandom(true, client().prepareIndex("test").setSource("foo", "test typename")); + indexRandom(true, prepareIndex("test").setSource("foo", "test typename")); for (String highlighter : ALL_TYPES) { assertHighlight( @@ -3044,7 +3161,7 @@ public void testDoesNotHighlightAliasFilters() throws Exception { assertAcked(indicesAdmin().prepareAliases().addAlias("test", "filtered_alias", matchQuery("foo", "japanese"))); ensureGreen(); - indexRandom(true, client().prepareIndex("test").setSource("foo", "test japanese")); + indexRandom(true, prepareIndex("test").setSource("foo", "test japanese")); for (String highlighter : ALL_TYPES) { assertHighlight( @@ -3174,8 +3291,7 @@ public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOExcepti mappings.endObject(); assertAcked(prepareCreate("test").setMapping(mappings)); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("text", "Arbitrary text field which will should not cause a failure").endObject()) .get(); refresh(); @@ -3186,12 +3302,15 @@ public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOExcepti .setCorners(61.10078883158897, -170.15625, -64.92354174306496, 118.47656249999999) ) .should(QueryBuilders.termQuery("text", "failure")); - SearchResponse search = prepareSearch().setSource( - new SearchSourceBuilder().query(query).highlighter(new HighlightBuilder().field("*").highlighterType(highlighterType)) - ).get(); - assertNoFailures(search); - assertThat(search.getHits().getTotalHits().value, equalTo(1L)); - assertThat(search.getHits().getAt(0).getHighlightFields().get("text").fragments().length, equalTo(1)); + assertNoFailuresAndResponse( + prepareSearch().setSource( + new SearchSourceBuilder().query(query).highlighter(new HighlightBuilder().field("*").highlighterType(highlighterType)) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).getHighlightFields().get("text").fragments().length, equalTo(1)); + } + ); } public void testGeoFieldHighlightingWhenQueryGetsRewritten() throws IOException { @@ -3213,8 +3332,7 @@ public void testGeoFieldHighlightingWhenQueryGetsRewritten() throws IOException assertAcked(prepareCreate("test").setMapping(mappings)); ensureYellow(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("jd", "some आवश्यकता है- आर्य समाज अनाथालय, 68 सिविल लाइन्स, बरेली को एक पुरूष" + " रस text") @@ -3253,19 +3371,19 @@ public void testKeywordFieldHighlighting() throws IOException { mappings.endObject(); assertAcked(prepareCreate("test").setMapping(mappings)); - client().prepareIndex("test") - .setId("1") - .setSource(jsonBuilder().startObject().field("keyword_field", "some text").endObject()) - .get(); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("keyword_field", "some text").endObject()).get(); refresh(); - SearchResponse search = prepareSearch().setSource( - new SearchSourceBuilder().query(QueryBuilders.matchQuery("keyword_field", "some text")) - .highlighter(new HighlightBuilder().field("*")) - ).get(); - assertNoFailures(search); - assertThat(search.getHits().getTotalHits().value, equalTo(1L)); - 
HighlightField highlightField = search.getHits().getAt(0).getHighlightFields().get("keyword_field"); - assertThat(highlightField.fragments()[0].string(), equalTo("some text")); + assertNoFailuresAndResponse( + prepareSearch().setSource( + new SearchSourceBuilder().query(QueryBuilders.matchQuery("keyword_field", "some text")) + .highlighter(new HighlightBuilder().field("*")) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + HighlightField highlightField = response.getHits().getAt(0).getHighlightFields().get("keyword_field"); + assertThat(highlightField.fragments()[0].string(), equalTo("some text")); + } + ); } public void testCopyToFields() throws Exception { @@ -3280,20 +3398,20 @@ public void testCopyToFields() throws Exception { b.endObject().endObject(); prepareCreate("test").setMapping(b).get(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("foo", "how now brown cow").endObject()) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); - SearchResponse response = prepareSearch().setQuery(matchQuery("foo_copy", "brown")) - .highlighter(new HighlightBuilder().field(new Field("foo_copy"))) - .get(); - - assertHitCount(response, 1); - HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo_copy"); - assertThat(field.fragments().length, equalTo(1)); - assertThat(field.fragments()[0].string(), equalTo("how now brown cow")); + assertResponse( + prepareSearch().setQuery(matchQuery("foo_copy", "brown")).highlighter(new HighlightBuilder().field(new Field("foo_copy"))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo_copy"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("how now brown cow")); + } + ); } public void testACopyFieldWithNestedQuery() throws Exception { @@ -3319,8 +3437,7 @@ public void testACopyFieldWithNestedQuery() throws Exception { ); prepareCreate("test").setMapping(mapping).get(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .startArray("foo") @@ -3336,35 +3453,39 @@ public void testACopyFieldWithNestedQuery() throws Exception { .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); - SearchResponse searchResponse = prepareSearch().setQuery(nestedQuery("foo", matchQuery("foo.text", "brown cow"), ScoreMode.None)) - .highlighter(new HighlightBuilder().field(new Field("foo_text").highlighterType("fvh")).requireFieldMatch(false)) - .get(); - assertHitCount(searchResponse, 1); - HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("foo_text"); - assertThat(field.fragments().length, equalTo(2)); - assertThat(field.fragments()[0].string(), equalTo("brown")); - assertThat(field.fragments()[1].string(), equalTo("cow")); + assertResponse( + prepareSearch().setQuery(nestedQuery("foo", matchQuery("foo.text", "brown cow"), ScoreMode.None)) + .highlighter(new HighlightBuilder().field(new Field("foo_text").highlighterType("fvh")).requireFieldMatch(false)), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo_text"); + assertThat(field.fragments().length, equalTo(2)); + assertThat(field.fragments()[0].string(), equalTo("brown")); + assertThat(field.fragments()[1].string(), equalTo("cow")); + } + ); } public void 
testFunctionScoreQueryHighlight() throws Exception { - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("text", "brown").endObject()) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); - SearchResponse searchResponse = prepareSearch().setQuery(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro"))) - .highlighter(new HighlightBuilder().field(new Field("text"))) - .get(); - assertHitCount(searchResponse, 1); - HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("text"); - assertThat(field.fragments().length, equalTo(1)); - assertThat(field.fragments()[0].string(), equalTo("brown")); + assertResponse( + prepareSearch().setQuery(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro"))) + .highlighter(new HighlightBuilder().field(new Field("text"))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("text"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("brown")); + } + ); } public void testFiltersFunctionScoreQueryHighlight() throws Exception { - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("text", "brown").field("enable", "yes").endObject()) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); @@ -3373,16 +3494,20 @@ public void testFiltersFunctionScoreQueryHighlight() throws Exception { new RandomScoreFunctionBuilder() ); - SearchResponse searchResponse = prepareSearch().setQuery( - new FunctionScoreQueryBuilder( - QueryBuilders.prefixQuery("text", "bro"), - new FunctionScoreQueryBuilder.FilterFunctionBuilder[] { filterBuilder } - ) - ).highlighter(new HighlightBuilder().field(new Field("text"))).get(); - assertHitCount(searchResponse, 1); - HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("text"); - assertThat(field.fragments().length, equalTo(1)); - assertThat(field.fragments()[0].string(), equalTo("brown")); + assertResponse( + prepareSearch().setQuery( + new FunctionScoreQueryBuilder( + QueryBuilders.prefixQuery("text", "bro"), + new FunctionScoreQueryBuilder.FilterFunctionBuilder[] { filterBuilder } + ) + ).highlighter(new HighlightBuilder().field(new Field("text"))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("text"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("brown")); + } + ); } public void testHighlightQueryRewriteDatesWithNow() throws Exception { @@ -3395,25 +3520,26 @@ public void testHighlightQueryRewriteDatesWithNow() throws Exception { DateFormatter formatter = DateFormatter.forPattern("strict_date_optional_time"); indexRandom( true, - client().prepareIndex("index-1").setId("1").setSource("d", formatter.format(now), "field", "hello world"), - client().prepareIndex("index-1").setId("2").setSource("d", formatter.format(now.minusDays(1)), "field", "hello"), - client().prepareIndex("index-1").setId("3").setSource("d", formatter.format(now.minusDays(2)), "field", "world") + prepareIndex("index-1").setId("1").setSource("d", formatter.format(now), "field", "hello world"), + prepareIndex("index-1").setId("2").setSource("d", formatter.format(now.minusDays(1)), "field", "hello"), + prepareIndex("index-1").setId("3").setSource("d", 
formatter.format(now.minusDays(2)), "field", "world") ); ensureSearchable("index-1"); for (int i = 0; i < 5; i++) { - final SearchResponse r1 = prepareSearch("index-1").addSort("d", SortOrder.DESC) - .setTrackScores(true) - .highlighter(highlight().field("field").preTags("").postTags("")) - .setQuery( - QueryBuilders.boolQuery() - .must(QueryBuilders.rangeQuery("d").gte("now-12h").lte("now").includeLower(true).includeUpper(true).boost(1.0f)) - .should(QueryBuilders.termQuery("field", "hello")) - ) - .get(); - - assertNoFailures(r1); - assertThat(r1.getHits().getTotalHits().value, equalTo(1L)); - assertHighlight(r1, 0, "field", 0, 1, equalTo("hello world")); + assertNoFailuresAndResponse( + prepareSearch("index-1").addSort("d", SortOrder.DESC) + .setTrackScores(true) + .highlighter(highlight().field("field").preTags("").postTags("")) + .setQuery( + QueryBuilders.boolQuery() + .must(QueryBuilders.rangeQuery("d").gte("now-12h").lte("now").includeLower(true).includeUpper(true).boost(1.0f)) + .should(QueryBuilders.termQuery("field", "hello")) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertHighlight(response, 0, "field", 0, 1, equalTo("hello world")); + } + ); } } @@ -3439,8 +3565,7 @@ public void testWithNestedQuery() throws Exception { ); prepareCreate("test").setMapping(mapping).get(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .startArray("foo") @@ -3458,51 +3583,63 @@ public void testWithNestedQuery() throws Exception { .get(); for (String type : new String[] { "unified", "plain" }) { - SearchResponse searchResponse = prepareSearch().setQuery( - nestedQuery("foo", matchQuery("foo.text", "brown cow"), ScoreMode.None) - ).highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))).get(); - assertHitCount(searchResponse, 1); - HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("foo.text"); - assertThat(field.fragments().length, equalTo(2)); - assertThat(field.fragments()[0].string(), equalTo("brown shoes")); - assertThat(field.fragments()[1].string(), equalTo("cow")); - - searchResponse = prepareSearch().setQuery(nestedQuery("foo", prefixQuery("foo.text", "bro"), ScoreMode.None)) - .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))) - .get(); - assertHitCount(searchResponse, 1); - field = searchResponse.getHits().getAt(0).getHighlightFields().get("foo.text"); - assertThat(field.fragments().length, equalTo(1)); - assertThat(field.fragments()[0].string(), equalTo("brown shoes")); - - searchResponse = prepareSearch().setQuery(nestedQuery("foo", matchPhraseQuery("foo.text", "brown shoes"), ScoreMode.None)) - .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))) - .get(); - assertHitCount(searchResponse, 1); - field = searchResponse.getHits().getAt(0).getHighlightFields().get("foo.text"); - assertThat(field.fragments().length, equalTo(1)); - assertThat(field.fragments()[0].string(), equalTo("brown shoes")); - - searchResponse = prepareSearch().setQuery(nestedQuery("foo", matchPhrasePrefixQuery("foo.text", "bro"), ScoreMode.None)) - .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))) - .get(); - assertHitCount(searchResponse, 1); - field = searchResponse.getHits().getAt(0).getHighlightFields().get("foo.text"); - assertThat(field.fragments().length, equalTo(1)); - assertThat(field.fragments()[0].string(), 
equalTo("brown shoes")); + assertResponse( + prepareSearch().setQuery(nestedQuery("foo", matchQuery("foo.text", "brown cow"), ScoreMode.None)) + .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo.text"); + assertThat(field.fragments().length, equalTo(2)); + assertThat(field.fragments()[0].string(), equalTo("brown shoes")); + assertThat(field.fragments()[1].string(), equalTo("cow")); + } + ); + assertResponse( + prepareSearch().setQuery(nestedQuery("foo", prefixQuery("foo.text", "bro"), ScoreMode.None)) + .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo.text"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("brown shoes")); + } + ); + assertResponse( + prepareSearch().setQuery(nestedQuery("foo", matchPhraseQuery("foo.text", "brown shoes"), ScoreMode.None)) + .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo.text"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("brown shoes")); + } + ); + assertResponse( + prepareSearch().setQuery(nestedQuery("foo", matchPhrasePrefixQuery("foo.text", "bro"), ScoreMode.None)) + .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo.text"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("brown shoes")); + } + ); } // For unified and fvh highlighters we just check that the nested query is correctly extracted // but we highlight the root text field since nested documents cannot be highlighted with postings nor term vectors // directly. 
for (String type : ALL_TYPES) { - SearchResponse searchResponse = prepareSearch().setQuery(nestedQuery("foo", prefixQuery("foo.text", "bro"), ScoreMode.None)) - .highlighter(new HighlightBuilder().field(new Field("text").highlighterType(type).requireFieldMatch(false))) - .get(); - assertHitCount(searchResponse, 1); - HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("text"); - assertThat(field.fragments().length, equalTo(1)); - assertThat(field.fragments()[0].string(), equalTo("brown")); + assertResponse( + prepareSearch().setQuery(nestedQuery("foo", prefixQuery("foo.text", "bro"), ScoreMode.None)) + .highlighter(new HighlightBuilder().field(new Field("text").highlighterType(type).requireFieldMatch(false))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("text"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("brown")); + } + ); } } @@ -3512,20 +3649,19 @@ public void testWithNormalizer() throws Exception { assertAcked(prepareCreate("test").setSettings(builder.build()).setMapping("keyword", "type=keyword,normalizer=my_normalizer")); ensureGreen(); - client().prepareIndex("test") - .setId("0") - .setSource("keyword", "Hello World") - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get(); + prepareIndex("test").setId("0").setSource("keyword", "Hello World").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); for (String highlighterType : new String[] { "unified", "plain" }) { - SearchResponse searchResponse = prepareSearch().setQuery(matchQuery("keyword", "hello world")) - .highlighter(new HighlightBuilder().field(new Field("keyword").highlighterType(highlighterType))) - .get(); - assertHitCount(searchResponse, 1); - HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("keyword"); - assertThat(field.fragments().length, equalTo(1)); - assertThat(field.fragments()[0].string(), equalTo("hello world")); + assertResponse( + prepareSearch().setQuery(matchQuery("keyword", "hello world")) + .highlighter(new HighlightBuilder().field(new Field("keyword").highlighterType(highlighterType))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("keyword"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("hello world")); + } + ); } } @@ -3533,18 +3669,20 @@ public void testDisableHighlightIdField() throws Exception { assertAcked(prepareCreate("test").setMapping("keyword", "type=keyword")); ensureGreen(); - client().prepareIndex("test") - .setId("d33f85bf1e51e84d9ab38948db9f3a068e1fe5294f1d8603914ac8c7bcc39ca1") + prepareIndex("test").setId("d33f85bf1e51e84d9ab38948db9f3a068e1fe5294f1d8603914ac8c7bcc39ca1") .setSource("keyword", "Hello World") .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); for (String highlighterType : new String[] { "plain", "unified" }) { - SearchResponse searchResponse = prepareSearch().setQuery( - matchQuery("_id", "d33f85bf1e51e84d9ab38948db9f3a068e1fe5294f1d8603914ac8c7bcc39ca1") - ).highlighter(new HighlightBuilder().field(new Field("*").highlighterType(highlighterType).requireFieldMatch(false))).get(); - assertHitCount(searchResponse, 1); - assertNull(searchResponse.getHits().getAt(0).getHighlightFields().get("_id")); + assertResponse( + prepareSearch().setQuery(matchQuery("_id", 
"d33f85bf1e51e84d9ab38948db9f3a068e1fe5294f1d8603914ac8c7bcc39ca1")) + .highlighter(new HighlightBuilder().field(new Field("*").highlighterType(highlighterType).requireFieldMatch(false))), + response -> { + assertHitCount(response, 1); + assertNull(response.getHits().getAt(0).getHighlightFields().get("_id")); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java index 480556b942ac8..02867e0cf6920 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -317,10 +317,10 @@ public void testWithIndexFilter() throws InterruptedException { assertAcked(prepareCreate("index-2").setMapping("timestamp", "type=date", "field1", "type=long")); List reqs = new ArrayList<>(); - reqs.add(client().prepareIndex("index-1").setSource("timestamp", "2015-07-08")); - reqs.add(client().prepareIndex("index-1").setSource("timestamp", "2018-07-08")); - reqs.add(client().prepareIndex("index-2").setSource("timestamp", "2019-10-12")); - reqs.add(client().prepareIndex("index-2").setSource("timestamp", "2020-07-08")); + reqs.add(prepareIndex("index-1").setSource("timestamp", "2015-07-08")); + reqs.add(prepareIndex("index-1").setSource("timestamp", "2018-07-08")); + reqs.add(prepareIndex("index-2").setSource("timestamp", "2019-10-12")); + reqs.add(prepareIndex("index-2").setSource("timestamp", "2020-07-08")); indexRandom(true, reqs); FieldCapabilitiesResponse response = client().prepareFieldCaps("index-*").setFields("*").get(); @@ -446,13 +446,13 @@ private void populateTimeRangeIndices() throws Exception { .setMapping("timestamp", "type=date", "field1", "type=long") ); List reqs = new ArrayList<>(); - reqs.add(client().prepareIndex("log-index-1").setSource("timestamp", "2015-07-08")); - reqs.add(client().prepareIndex("log-index-1").setSource("timestamp", "2018-07-08")); - reqs.add(client().prepareIndex("log-index-1").setSource("timestamp", "2020-03-03")); - reqs.add(client().prepareIndex("log-index-1").setSource("timestamp", "2020-09-09")); - reqs.add(client().prepareIndex("log-index-2").setSource("timestamp", "2019-10-12")); - reqs.add(client().prepareIndex("log-index-2").setSource("timestamp", "2020-02-02")); - reqs.add(client().prepareIndex("log-index-2").setSource("timestamp", "2020-10-10")); + reqs.add(prepareIndex("log-index-1").setSource("timestamp", "2015-07-08")); + reqs.add(prepareIndex("log-index-1").setSource("timestamp", "2018-07-08")); + reqs.add(prepareIndex("log-index-1").setSource("timestamp", "2020-03-03")); + reqs.add(prepareIndex("log-index-1").setSource("timestamp", "2020-09-09")); + reqs.add(prepareIndex("log-index-2").setSource("timestamp", "2019-10-12")); + reqs.add(prepareIndex("log-index-2").setSource("timestamp", "2020-02-02")); + reqs.add(prepareIndex("log-index-2").setSource("timestamp", "2020-10-10")); indexRandom(true, reqs); ensureGreen("log-index-1", "log-index-2"); indicesAdmin().prepareRefresh("log-index-1", "log-index-2").get(); @@ -549,8 +549,7 @@ private void moveOrCloseShardsOnNodes(String nodeName) throws Exception { assertNotNull(toNode); clusterAdmin().prepareReroute() .add(new MoveAllocationCommand(shardId.getIndexName(), shardId.id(), fromNode.getId(), toNode.getId())) - .execute() - .actionGet(); + .get(); } } } @@ -638,7 +637,7 @@ public void 
testManyIndicesWithSameMapping() { ensureGreen(indices); assertAcked(indicesAdmin().preparePutMapping(indicesWithExtraField).setSource("extra_field", "type=integer").get()); for (String index : indicesWithExtraField) { - client().prepareIndex(index).setSource("extra_field", randomIntBetween(1, 1000)).get(); + prepareIndex(index).setSource("extra_field", randomIntBetween(1, 1000)).get(); } FieldCapabilitiesResponse resp = client().execute(FieldCapabilitiesAction.INSTANCE, request).actionGet(); verifyResponse.accept(resp); @@ -664,7 +663,7 @@ public void testCancel() throws Exception { ) ); BlockingOnRewriteQueryBuilder.blockOnRewrite(); - PlainActionFuture<Response> future = PlainActionFuture.newFuture(); + PlainActionFuture<Response> future = new PlainActionFuture<>(); Request restRequest = new Request("POST", "/_field_caps?fields=*"); restRequest.setEntity(new StringEntity(""" { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java index e3c9558eba907..9ad6363d0e57d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -61,8 +60,10 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertCheckedResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -180,8 +181,7 @@ public void testStoredFields() throws Exception { indicesAdmin().preparePutMapping().setSource(mapping, XContentType.JSON).get(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject().field("field1", "value1").field("field2", "value2").field("field3", "value3").endObject() ) @@ -189,70 +189,70 @@ public void testStoredFields() throws Exception { indicesAdmin().prepareRefresh().get(); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("field1").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); - + 
assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("field1"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); + }); // field2 is not stored, check that it is not extracted from source. - searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("field2").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(0)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field2"), nullValue()); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("field3").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("*3").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addStoredField("*3") - .addStoredField("field1") - .addStoredField("field2") - .get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(2)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("field*").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(2)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("f*3").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("*").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - 
assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getSourceAsMap(), nullValue()); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(2)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("*").addStoredField("_source").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getSourceAsMap(), notNullValue()); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(2)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("field2"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(0)); + assertThat(response.getHits().getAt(0).getFields().get("field2"), nullValue()); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("field3"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("*3"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + }); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addStoredField("*3").addStoredField("field1").addStoredField("field2"), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); + } + ); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("field*"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("f*3"), response -> { + 
assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("*"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap(), nullValue()); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("*").addStoredField("_source"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap(), notNullValue()); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + }); } public void testScriptDocAndFields() throws Exception { @@ -274,22 +274,19 @@ public void testScriptDocAndFields() throws Exception { indicesAdmin().preparePutMapping().setSource(mapping, XContentType.JSON).get(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject().field("test", "value beck").field("num1", 1.0f).field("date", "1970-01-01T00:00:00").endObject() ) .get(); indicesAdmin().prepareFlush().get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject().field("test", "value beck").field("num1", 2.0f).field("date", "1970-01-01T00:00:25").endObject() ) .get(); indicesAdmin().prepareFlush().get(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject().field("test", "value beck").field("num1", 3.0f).field("date", "1970-01-01T00:02:00").endObject() ) @@ -297,64 +294,68 @@ public void testScriptDocAndFields() throws Exception { indicesAdmin().refresh(new RefreshRequest()).actionGet(); logger.info("running doc['num1'].value"); - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .addSort("num1", SortOrder.ASC) - .addScriptField("sNum1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap())) - .addScriptField( - "sNum1_field", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields['num1'].value", Collections.emptyMap()) - ) - .addScriptField( - "date1", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['date'].date.millis", Collections.emptyMap()) - ) - .get(); - - assertNoFailures(response); - - assertThat(response.getHits().getTotalHits().value, equalTo(3L)); - assertFalse(response.getHits().getAt(0).hasSource()); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - Set fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); - assertThat(fields, 
equalTo(newHashSet("sNum1", "sNum1_field", "date1"))); - assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(1.0)); - assertThat(response.getHits().getAt(0).getFields().get("sNum1_field").getValues().get(0), equalTo(1.0)); - assertThat(response.getHits().getAt(0).getFields().get("date1").getValues().get(0), equalTo(0L)); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); - assertThat(fields, equalTo(newHashSet("sNum1", "sNum1_field", "date1"))); - assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); - assertThat(response.getHits().getAt(1).getFields().get("sNum1_field").getValues().get(0), equalTo(2.0)); - assertThat(response.getHits().getAt(1).getFields().get("date1").getValues().get(0), equalTo(25000L)); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); - fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); - assertThat(fields, equalTo(newHashSet("sNum1", "sNum1_field", "date1"))); - assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); - assertThat(response.getHits().getAt(2).getFields().get("sNum1_field").getValues().get(0), equalTo(3.0)); - assertThat(response.getHits().getAt(2).getFields().get("date1").getValues().get(0), equalTo(120000L)); - + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort("num1", SortOrder.ASC) + .addScriptField( + "sNum1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap()) + ) + .addScriptField( + "sNum1_field", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields['num1'].value", Collections.emptyMap()) + ) + .addScriptField( + "date1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['date'].date.millis", Collections.emptyMap()) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertFalse(response.getHits().getAt(0).hasSource()); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + Set fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(newHashSet("sNum1", "sNum1_field", "date1"))); + assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(1.0)); + assertThat(response.getHits().getAt(0).getFields().get("sNum1_field").getValues().get(0), equalTo(1.0)); + assertThat(response.getHits().getAt(0).getFields().get("date1").getValues().get(0), equalTo(0L)); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(newHashSet("sNum1", "sNum1_field", "date1"))); + assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); + assertThat(response.getHits().getAt(1).getFields().get("sNum1_field").getValues().get(0), equalTo(2.0)); + assertThat(response.getHits().getAt(1).getFields().get("date1").getValues().get(0), equalTo(25000L)); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(newHashSet("sNum1", "sNum1_field", "date1"))); + assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); + 
assertThat(response.getHits().getAt(2).getFields().get("sNum1_field").getValues().get(0), equalTo(3.0)); + assertThat(response.getHits().getAt(2).getFields().get("date1").getValues().get(0), equalTo(120000L)); + } + ); logger.info("running doc['num1'].value * factor"); - response = prepareSearch().setQuery(matchAllQuery()) - .addSort("num1", SortOrder.ASC) - .addScriptField( - "sNum1", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value * factor", Map.of("factor", 2.0)) - ) - .get(); - - assertThat(response.getHits().getTotalHits().value, equalTo(3L)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); - assertThat(fields, equalTo(singleton("sNum1"))); - assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); - assertThat(fields, equalTo(singleton("sNum1"))); - assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(4.0)); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); - fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); - assertThat(fields, equalTo(singleton("sNum1"))); - assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(6.0)); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort("num1", SortOrder.ASC) + .addScriptField( + "sNum1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value * factor", Map.of("factor", 2.0)) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + Set fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(singleton("sNum1"))); + assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(singleton("sNum1"))); + assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(4.0)); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(singleton("sNum1"))); + assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(6.0)); + } + ); } public void testScriptFieldWithNanos() throws Exception { @@ -378,38 +379,36 @@ public void testScriptFieldWithNanos() throws Exception { indexRandom( true, false, - client().prepareIndex("test") - .setId("1") - .setSource(jsonBuilder().startObject().field("date", "1970-01-01T00:00:00.000Z").endObject()), - client().prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("date", date).endObject()) + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("date", "1970-01-01T00:00:00.000Z").endObject()), + prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("date", date).endObject()) ); - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .addSort("date", SortOrder.ASC) - .addScriptField( - "date1", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, 
"doc['date'].date.millis", Collections.emptyMap()) - ) - .addScriptField( - "date2", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['date'].date.nanos", Collections.emptyMap()) - ) - .get(); - - assertNoFailures(response); - - assertThat(response.getHits().getAt(0).getId(), is("1")); - assertThat(response.getHits().getAt(0).getFields().get("date1").getValues().get(0), equalTo(0L)); - assertThat(response.getHits().getAt(0).getFields().get("date2").getValues().get(0), equalTo(0L)); - assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo(0L)); - - assertThat(response.getHits().getAt(1).getId(), is("2")); - Instant instant = ZonedDateTime.parse(date).toInstant(); - long dateAsNanos = DateUtils.toLong(instant); - long dateAsMillis = instant.toEpochMilli(); - assertThat(response.getHits().getAt(1).getFields().get("date1").getValues().get(0), equalTo(dateAsMillis)); - assertThat(response.getHits().getAt(1).getFields().get("date2").getValues().get(0), equalTo(dateAsNanos)); - assertThat(response.getHits().getAt(1).getSortValues()[0], equalTo(dateAsNanos)); + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort("date", SortOrder.ASC) + .addScriptField( + "date1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['date'].date.millis", Collections.emptyMap()) + ) + .addScriptField( + "date2", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['date'].date.nanos", Collections.emptyMap()) + ), + response -> { + assertThat(response.getHits().getAt(0).getId(), is("1")); + assertThat(response.getHits().getAt(0).getFields().get("date1").getValues().get(0), equalTo(0L)); + assertThat(response.getHits().getAt(0).getFields().get("date2").getValues().get(0), equalTo(0L)); + assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo(0L)); + + assertThat(response.getHits().getAt(1).getId(), is("2")); + Instant instant = ZonedDateTime.parse(date).toInstant(); + long dateAsNanos = DateUtils.toLong(instant); + long dateAsMillis = instant.toEpochMilli(); + assertThat(response.getHits().getAt(1).getFields().get("date1").getValues().get(0), equalTo(dateAsMillis)); + assertThat(response.getHits().getAt(1).getFields().get("date2").getValues().get(0), equalTo(dateAsNanos)); + assertThat(response.getHits().getAt(1).getSortValues()[0], equalTo(dateAsNanos)); + } + ); } public void testIdBasedScriptFields() throws Exception { @@ -418,34 +417,32 @@ public void testIdBasedScriptFields() throws Exception { int numDocs = randomIntBetween(1, 30); IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource(jsonBuilder().startObject().field("num1", i).endObject()); } indexRandom(true, indexRequestBuilders); - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .addSort("num1", SortOrder.ASC) - .setSize(numDocs) - .addScriptField("id", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields._id.value", Collections.emptyMap())) - .get(); - - assertNoFailures(response); - - assertThat(response.getHits().getTotalHits().value, equalTo((long) numDocs)); - for (int i = 0; i < numDocs; i++) { - assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); - Set fields = new HashSet<>(response.getHits().getAt(i).getFields().keySet()); - assertThat(fields, equalTo(singleton("id"))); - 
assertThat(response.getHits().getAt(i).getFields().get("id").getValue(), equalTo(Integer.toString(i))); - } + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort("num1", SortOrder.ASC) + .setSize(numDocs) + .addScriptField("id", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields._id.value", Collections.emptyMap())), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo((long) numDocs)); + for (int i = 0; i < numDocs; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + Set fields = new HashSet<>(response.getHits().getAt(i).getFields().keySet()); + assertThat(fields, equalTo(singleton("id"))); + assertThat(response.getHits().getAt(i).getFields().get("id").getValue(), equalTo(Integer.toString(i))); + } + } + ); } public void testScriptFieldUsingSource() throws Exception { createIndex("test"); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .startObject("obj1") @@ -467,64 +464,69 @@ public void testScriptFieldUsingSource() throws Exception { .get(); indicesAdmin().refresh(new RefreshRequest()).actionGet(); - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .addScriptField("s_obj1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj1", Collections.emptyMap())) - .addScriptField( - "s_obj1_test", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj1.test", Collections.emptyMap()) - ) - .addScriptField("s_obj2", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj2", Collections.emptyMap())) - .addScriptField( - "s_obj2_arr2", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj2.arr2", Collections.emptyMap()) - ) - .addScriptField("s_arr3", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.arr3", Collections.emptyMap())) - .get(); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addScriptField("s_obj1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj1", Collections.emptyMap())) + .addScriptField( + "s_obj1_test", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj1.test", Collections.emptyMap()) + ) + .addScriptField("s_obj2", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj2", Collections.emptyMap())) + .addScriptField( + "s_obj2_arr2", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj2.arr2", Collections.emptyMap()) + ) + .addScriptField("s_arr3", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.arr3", Collections.emptyMap())), + response -> { - assertThat("Failures " + Arrays.toString(response.getShardFailures()), response.getShardFailures().length, equalTo(0)); + assertThat("Failures " + Arrays.toString(response.getShardFailures()), response.getShardFailures().length, equalTo(0)); - assertThat(response.getHits().getAt(0).field("s_obj1_test").getValue().toString(), equalTo("something")); + assertThat(response.getHits().getAt(0).field("s_obj1_test").getValue().toString(), equalTo("something")); - Map sObj1 = response.getHits().getAt(0).field("s_obj1").getValue(); - assertThat(sObj1.get("test").toString(), equalTo("something")); - assertThat(response.getHits().getAt(0).field("s_obj1_test").getValue().toString(), equalTo("something")); + Map sObj1 = response.getHits().getAt(0).field("s_obj1").getValue(); + assertThat(sObj1.get("test").toString(), equalTo("something")); + 
assertThat(response.getHits().getAt(0).field("s_obj1_test").getValue().toString(), equalTo("something")); - Map sObj2 = response.getHits().getAt(0).field("s_obj2").getValue(); - List sObj2Arr2 = (List) sObj2.get("arr2"); - assertThat(sObj2Arr2.size(), equalTo(2)); - assertThat(sObj2Arr2.get(0).toString(), equalTo("arr_value1")); - assertThat(sObj2Arr2.get(1).toString(), equalTo("arr_value2")); + Map sObj2 = response.getHits().getAt(0).field("s_obj2").getValue(); + List sObj2Arr2 = (List) sObj2.get("arr2"); + assertThat(sObj2Arr2.size(), equalTo(2)); + assertThat(sObj2Arr2.get(0).toString(), equalTo("arr_value1")); + assertThat(sObj2Arr2.get(1).toString(), equalTo("arr_value2")); - sObj2Arr2 = response.getHits().getAt(0).field("s_obj2_arr2").getValues(); - assertThat(sObj2Arr2.size(), equalTo(2)); - assertThat(sObj2Arr2.get(0).toString(), equalTo("arr_value1")); - assertThat(sObj2Arr2.get(1).toString(), equalTo("arr_value2")); + sObj2Arr2 = response.getHits().getAt(0).field("s_obj2_arr2").getValues(); + assertThat(sObj2Arr2.size(), equalTo(2)); + assertThat(sObj2Arr2.get(0).toString(), equalTo("arr_value1")); + assertThat(sObj2Arr2.get(1).toString(), equalTo("arr_value2")); - List sObj2Arr3 = response.getHits().getAt(0).field("s_arr3").getValues(); - assertThat(((Map) sObj2Arr3.get(0)).get("arr3_field1").toString(), equalTo("arr3_value1")); + List sObj2Arr3 = response.getHits().getAt(0).field("s_arr3").getValues(); + assertThat(((Map) sObj2Arr3.get(0)).get("arr3_field1").toString(), equalTo("arr3_value1")); + } + ); } public void testScriptFieldsForNullReturn() throws Exception { - client().prepareIndex("test").setId("1").setSource("foo", "bar").setRefreshPolicy("true").get(); - - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .addScriptField("test_script_1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "return null", Collections.emptyMap())) - .get(); - - assertNoFailures(response); - - DocumentField fieldObj = response.getHits().getAt(0).field("test_script_1"); - assertThat(fieldObj, notNullValue()); - List fieldValues = fieldObj.getValues(); - assertThat(fieldValues, hasSize(1)); - assertThat(fieldValues.get(0), nullValue()); + prepareIndex("test").setId("1").setSource("foo", "bar").setRefreshPolicy("true").get(); + + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .addScriptField( + "test_script_1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "return null", Collections.emptyMap()) + ), + response -> { + DocumentField fieldObj = response.getHits().getAt(0).field("test_script_1"); + assertThat(fieldObj, notNullValue()); + List fieldValues = fieldObj.getValues(); + assertThat(fieldValues, hasSize(1)); + assertThat(fieldValues.get(0), nullValue()); + } + ); } public void testPartialFields() throws Exception { createIndex("test"); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( XContentFactory.jsonBuilder() .startObject() @@ -605,8 +607,7 @@ public void testStoredFieldsWithoutSource() throws Exception { indicesAdmin().preparePutMapping().setSource(mapping, XContentType.JSON).get(); ZonedDateTime date = ZonedDateTime.of(2012, 3, 22, 0, 0, 0, 0, ZoneOffset.UTC); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("byte_field", (byte) 1) @@ -624,64 +625,65 @@ public void testStoredFieldsWithoutSource() throws Exception { indicesAdmin().prepareRefresh().get(); - SearchResponse searchResponse = 
prepareSearch().setQuery(matchAllQuery()) - .addStoredField("byte_field") - .addStoredField("short_field") - .addStoredField("integer_field") - .addStoredField("long_field") - .addStoredField("float_field") - .addStoredField("double_field") - .addStoredField("date_field") - .addStoredField("boolean_field") - .addStoredField("binary_field") - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - Set fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet()); - assertThat( - fields, - equalTo( - newHashSet( - "byte_field", - "short_field", - "integer_field", - "long_field", - "float_field", - "double_field", - "date_field", - "boolean_field", - "binary_field" - ) - ) + assertCheckedResponse( + prepareSearch().setQuery(matchAllQuery()) + .addStoredField("byte_field") + .addStoredField("short_field") + .addStoredField("integer_field") + .addStoredField("long_field") + .addStoredField("float_field") + .addStoredField("double_field") + .addStoredField("date_field") + .addStoredField("boolean_field") + .addStoredField("binary_field"), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + Set fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat( + fields, + equalTo( + newHashSet( + "byte_field", + "short_field", + "integer_field", + "long_field", + "float_field", + "double_field", + "date_field", + "boolean_field", + "binary_field" + ) + ) + ); + + SearchHit searchHit = response.getHits().getAt(0); + assertThat(searchHit.getFields().get("byte_field").getValue().toString(), equalTo("1")); + assertThat(searchHit.getFields().get("short_field").getValue().toString(), equalTo("2")); + assertThat(searchHit.getFields().get("integer_field").getValue(), equalTo((Object) 3)); + assertThat(searchHit.getFields().get("long_field").getValue(), equalTo((Object) 4L)); + assertThat(searchHit.getFields().get("float_field").getValue(), equalTo((Object) 5.0f)); + assertThat(searchHit.getFields().get("double_field").getValue(), equalTo((Object) 6.0d)); + String dateTime = DateFormatter.forPattern("date_optional_time").format(date); + assertThat(searchHit.getFields().get("date_field").getValue(), equalTo((Object) dateTime)); + assertThat(searchHit.getFields().get("boolean_field").getValue(), equalTo((Object) Boolean.TRUE)); + assertThat(searchHit.getFields().get("binary_field").getValue(), equalTo(new BytesArray("testing text".getBytes("UTF8")))); + } ); - - SearchHit searchHit = searchResponse.getHits().getAt(0); - assertThat(searchHit.getFields().get("byte_field").getValue().toString(), equalTo("1")); - assertThat(searchHit.getFields().get("short_field").getValue().toString(), equalTo("2")); - assertThat(searchHit.getFields().get("integer_field").getValue(), equalTo((Object) 3)); - assertThat(searchHit.getFields().get("long_field").getValue(), equalTo((Object) 4L)); - assertThat(searchHit.getFields().get("float_field").getValue(), equalTo((Object) 5.0f)); - assertThat(searchHit.getFields().get("double_field").getValue(), equalTo((Object) 6.0d)); - String dateTime = DateFormatter.forPattern("date_optional_time").format(date); - assertThat(searchHit.getFields().get("date_field").getValue(), equalTo((Object) dateTime)); - assertThat(searchHit.getFields().get("boolean_field").getValue(), equalTo((Object) Boolean.TRUE)); - 
assertThat(searchHit.getFields().get("binary_field").getValue(), equalTo(new BytesArray("testing text".getBytes("UTF8")))); } public void testSearchFieldsMetadata() throws Exception { - client().prepareIndex("my-index") - .setId("1") + prepareIndex("my-index").setId("1") .setRouting("1") .setSource(jsonBuilder().startObject().field("field1", "value").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); - SearchResponse searchResponse = prepareSearch("my-index").addStoredField("field1").addStoredField("_routing").get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getAt(0).field("field1"), nullValue()); - assertThat(searchResponse.getHits().getAt(0).field("_routing").getValue().toString(), equalTo("1")); + assertResponse(prepareSearch("my-index").addStoredField("field1").addStoredField("_routing"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).field("field1"), nullValue()); + assertThat(response.getHits().getAt(0).field("_routing").getValue().toString(), equalTo("1")); + }); } public void testGetFieldsComplexField() throws Exception { @@ -741,28 +743,31 @@ public void testGetFieldsComplexField() throws Exception { .endObject() ); - client().prepareIndex("my-index").setId("1").setRefreshPolicy(IMMEDIATE).setSource(source, XContentType.JSON).get(); + prepareIndex("my-index").setId("1").setRefreshPolicy(IMMEDIATE).setSource(source, XContentType.JSON).get(); String field = "field1.field2.field3.field4"; - SearchResponse searchResponse = prepareSearch("my-index").addStoredField(field).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getAt(0).field(field).getValues().size(), equalTo(2)); - assertThat(searchResponse.getHits().getAt(0).field(field).getValues().get(0).toString(), equalTo("value1")); - assertThat(searchResponse.getHits().getAt(0).field(field).getValues().get(1).toString(), equalTo("value2")); + assertResponse(prepareSearch("my-index").addStoredField(field), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).field(field).getValues().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).field(field).getValues().get(0).toString(), equalTo("value1")); + assertThat(response.getHits().getAt(0).field(field).getValues().get(1).toString(), equalTo("value2")); + }); } // see #8203 public void testSingleValueFieldDatatField() throws ExecutionException, InterruptedException { assertAcked(indicesAdmin().prepareCreate("test").setMapping("test_field", "type=keyword").get()); - indexRandom(true, client().prepareIndex("test").setId("1").setSource("test_field", "foobar")); + indexRandom(true, prepareIndex("test").setId("1").setSource("test_field", "foobar")); refresh(); - SearchResponse searchResponse = prepareSearch("test").setSource( - new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).docValueField("test_field") - ).get(); - assertHitCount(searchResponse, 1); - Map fields = searchResponse.getHits().getHits()[0].getFields(); - assertThat(fields.get("test_field").getValue(), equalTo("foobar")); + assertResponse( + prepareSearch("test").setSource(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).docValueField("test_field")), + response -> { + assertHitCount(response, 1); + Map fields = response.getHits().getHits()[0].getFields(); + assertThat(fields.get("test_field").getValue(), 
equalTo("foobar")); + } + ); } public void testDocValueFields() throws Exception { @@ -822,8 +827,7 @@ public void testDocValueFields() throws Exception { indicesAdmin().preparePutMapping().setSource(mapping, XContentType.JSON).get(); ZonedDateTime date = ZonedDateTime.of(2012, 3, 22, 0, 0, 0, 0, ZoneOffset.UTC); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("text_field", "foo") @@ -860,116 +864,116 @@ public void testDocValueFields() throws Exception { if (randomBoolean()) { builder.addDocValueField("*_field"); } - SearchResponse searchResponse = builder.get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - Set fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet()); - assertThat( - fields, - equalTo( - newHashSet( - "byte_field", - "short_field", - "integer_field", - "long_field", - "float_field", - "double_field", - "date_field", - "boolean_field", - "text_field", - "keyword_field", - "binary_field", - "ip_field" + assertResponse(builder, response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + Set fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat( + fields, + equalTo( + newHashSet( + "byte_field", + "short_field", + "integer_field", + "long_field", + "float_field", + "double_field", + "date_field", + "boolean_field", + "text_field", + "keyword_field", + "binary_field", + "ip_field" + ) ) - ) - ); + ); - assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValues(), equalTo(List.of(1L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValues(), equalTo(List.of(2L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValues(), equalTo(List.of(3L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValues(), equalTo(List.of(4L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValues(), equalTo(List.of(5.0))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValues(), equalTo(List.of(6.0d))); - assertThat( - searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(), - equalTo(DateFormatter.forPattern("date_optional_time").format(date)) - ); - assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValues(), equalTo(List.of(true))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValues(), equalTo(List.of("foo"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValues(), equalTo(List.of("foo"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValues(), equalTo(List.of("KmQ="))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValues(), equalTo(List.of("::1"))); - - builder = prepareSearch().setQuery(matchAllQuery()).addDocValueField("*field"); - searchResponse = builder.get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet()); - assertThat( - fields, - equalTo( - newHashSet( - "byte_field", - "short_field", - "integer_field", - 
"long_field", - "float_field", - "double_field", - "date_field", - "boolean_field", - "text_field", - "keyword_field", - "binary_field", - "ip_field" + assertThat(response.getHits().getAt(0).getFields().get("byte_field").getValues(), equalTo(List.of(1L))); + assertThat(response.getHits().getAt(0).getFields().get("short_field").getValues(), equalTo(List.of(2L))); + assertThat(response.getHits().getAt(0).getFields().get("integer_field").getValues(), equalTo(List.of(3L))); + assertThat(response.getHits().getAt(0).getFields().get("long_field").getValues(), equalTo(List.of(4L))); + assertThat(response.getHits().getAt(0).getFields().get("float_field").getValues(), equalTo(List.of(5.0))); + assertThat(response.getHits().getAt(0).getFields().get("double_field").getValues(), equalTo(List.of(6.0d))); + assertThat( + response.getHits().getAt(0).getFields().get("date_field").getValue(), + equalTo(DateFormatter.forPattern("date_optional_time").format(date)) + ); + assertThat(response.getHits().getAt(0).getFields().get("boolean_field").getValues(), equalTo(List.of(true))); + assertThat(response.getHits().getAt(0).getFields().get("text_field").getValues(), equalTo(List.of("foo"))); + assertThat(response.getHits().getAt(0).getFields().get("keyword_field").getValues(), equalTo(List.of("foo"))); + assertThat(response.getHits().getAt(0).getFields().get("binary_field").getValues(), equalTo(List.of("KmQ="))); + assertThat(response.getHits().getAt(0).getFields().get("ip_field").getValues(), equalTo(List.of("::1"))); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addDocValueField("*field"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + Set fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat( + fields, + equalTo( + newHashSet( + "byte_field", + "short_field", + "integer_field", + "long_field", + "float_field", + "double_field", + "date_field", + "boolean_field", + "text_field", + "keyword_field", + "binary_field", + "ip_field" + ) ) - ) - ); - - assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValues(), equalTo(List.of(1L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValues(), equalTo(List.of(2L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValues(), equalTo(List.of(3L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValues(), equalTo(List.of(4L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValues(), equalTo(List.of(5.0))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValues(), equalTo(List.of(6.0d))); - assertThat( - searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(), - equalTo(DateFormatter.forPattern("date_optional_time").format(date)) - ); - assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValues(), equalTo(List.of(true))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValues(), equalTo(List.of("foo"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValues(), equalTo(List.of("foo"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValues(), equalTo(List.of("KmQ="))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValues(), equalTo(List.of("::1"))); - - 
builder = prepareSearch().setQuery(matchAllQuery()) - .addDocValueField("byte_field", "#.0") - .addDocValueField("short_field", "#.0") - .addDocValueField("integer_field", "#.0") - .addDocValueField("long_field", "#.0") - .addDocValueField("float_field", "#.0") - .addDocValueField("double_field", "#.0") - .addDocValueField("date_field", "epoch_millis"); - searchResponse = builder.get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet()); - assertThat( - fields, - equalTo(newHashSet("byte_field", "short_field", "integer_field", "long_field", "float_field", "double_field", "date_field")) - ); + ); - assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValues(), equalTo(List.of("1.0"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValues(), equalTo(List.of("2.0"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValues(), equalTo(List.of("3.0"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValues(), equalTo(List.of("4.0"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValues(), equalTo(List.of("5.0"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValues(), equalTo(List.of("6.0"))); - assertThat( - searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(), - equalTo(DateFormatter.forPattern("epoch_millis").format(date)) + assertThat(response.getHits().getAt(0).getFields().get("byte_field").getValues(), equalTo(List.of(1L))); + assertThat(response.getHits().getAt(0).getFields().get("short_field").getValues(), equalTo(List.of(2L))); + assertThat(response.getHits().getAt(0).getFields().get("integer_field").getValues(), equalTo(List.of(3L))); + assertThat(response.getHits().getAt(0).getFields().get("long_field").getValues(), equalTo(List.of(4L))); + assertThat(response.getHits().getAt(0).getFields().get("float_field").getValues(), equalTo(List.of(5.0))); + assertThat(response.getHits().getAt(0).getFields().get("double_field").getValues(), equalTo(List.of(6.0d))); + assertThat( + response.getHits().getAt(0).getFields().get("date_field").getValue(), + equalTo(DateFormatter.forPattern("date_optional_time").format(date)) + ); + assertThat(response.getHits().getAt(0).getFields().get("boolean_field").getValues(), equalTo(List.of(true))); + assertThat(response.getHits().getAt(0).getFields().get("text_field").getValues(), equalTo(List.of("foo"))); + assertThat(response.getHits().getAt(0).getFields().get("keyword_field").getValues(), equalTo(List.of("foo"))); + assertThat(response.getHits().getAt(0).getFields().get("binary_field").getValues(), equalTo(List.of("KmQ="))); + assertThat(response.getHits().getAt(0).getFields().get("ip_field").getValues(), equalTo(List.of("::1"))); + }); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addDocValueField("byte_field", "#.0") + .addDocValueField("short_field", "#.0") + .addDocValueField("integer_field", "#.0") + .addDocValueField("long_field", "#.0") + .addDocValueField("float_field", "#.0") + .addDocValueField("double_field", "#.0") + .addDocValueField("date_field", "epoch_millis"), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + Set fields = new 
HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat( + fields, + equalTo( + newHashSet("byte_field", "short_field", "integer_field", "long_field", "float_field", "double_field", "date_field") + ) + ); + assertThat(response.getHits().getAt(0).getFields().get("byte_field").getValues(), equalTo(List.of("1.0"))); + assertThat(response.getHits().getAt(0).getFields().get("short_field").getValues(), equalTo(List.of("2.0"))); + assertThat(response.getHits().getAt(0).getFields().get("integer_field").getValues(), equalTo(List.of("3.0"))); + assertThat(response.getHits().getAt(0).getFields().get("long_field").getValues(), equalTo(List.of("4.0"))); + assertThat(response.getHits().getAt(0).getFields().get("float_field").getValues(), equalTo(List.of("5.0"))); + assertThat(response.getHits().getAt(0).getFields().get("double_field").getValues(), equalTo(List.of("6.0"))); + assertThat( + response.getHits().getAt(0).getFields().get("date_field").getValue(), + equalTo(DateFormatter.forPattern("epoch_millis").format(date)) + ); + } ); } @@ -994,8 +998,7 @@ public void testScriptFields() throws Exception { List reqs = new ArrayList<>(); for (int i = 0; i < numDocs; ++i) { reqs.add( - client().prepareIndex("index") - .setId(Integer.toString(i)) + prepareIndex("index").setId(Integer.toString(i)) .setSource( "s", Integer.toString(i), @@ -1021,18 +1024,18 @@ public void testScriptFields() throws Exception { new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + field + "']", Collections.emptyMap()) ); } - SearchResponse resp = req.get(); - assertNoFailures(resp); - for (SearchHit hit : resp.getHits().getHits()) { - final int id = Integer.parseInt(hit.getId()); - Map fields = hit.getFields(); - assertThat(fields.get("s").getValues(), equalTo(Collections.singletonList(Integer.toString(id)))); - assertThat(fields.get("l").getValues(), equalTo(Collections.singletonList((long) id))); - assertThat(fields.get("d").getValues(), equalTo(Collections.singletonList((double) id))); - assertThat(fields.get("ms").getValues(), equalTo(Arrays.asList(Integer.toString(id), Integer.toString(id + 1)))); - assertThat(fields.get("ml").getValues(), equalTo(Arrays.asList((long) id, id + 1L))); - assertThat(fields.get("md").getValues(), equalTo(Arrays.asList((double) id, id + 1d))); - } + assertNoFailuresAndResponse(req, response -> { + for (SearchHit hit : response.getHits().getHits()) { + final int id = Integer.parseInt(hit.getId()); + Map fields = hit.getFields(); + assertThat(fields.get("s").getValues(), equalTo(Collections.singletonList(Integer.toString(id)))); + assertThat(fields.get("l").getValues(), equalTo(Collections.singletonList((long) id))); + assertThat(fields.get("d").getValues(), equalTo(Collections.singletonList((double) id))); + assertThat(fields.get("ms").getValues(), equalTo(Arrays.asList(Integer.toString(id), Integer.toString(id + 1)))); + assertThat(fields.get("ml").getValues(), equalTo(Arrays.asList((long) id, id + 1L))); + assertThat(fields.get("md").getValues(), equalTo(Arrays.asList((double) id, id + 1d))); + } + }); } public void testDocValueFieldsWithFieldAlias() throws Exception { @@ -1071,30 +1074,31 @@ public void testDocValueFieldsWithFieldAlias() throws Exception { indexDoc("test", "1", "text_field", "foo", "date_field", formatter.format(date)); refresh("test"); - SearchRequestBuilder builder = prepareSearch().setQuery(matchAllQuery()) - .addDocValueField("text_field_alias") - .addDocValueField("date_field_alias") - .addDocValueField("date_field"); - SearchResponse 
searchResponse = builder.get(); - - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 1); - SearchHit hit = searchResponse.getHits().getAt(0); - - Map fields = hit.getFields(); - assertThat(fields.keySet(), equalTo(newHashSet("text_field_alias", "date_field_alias", "date_field"))); - - DocumentField textFieldAlias = fields.get("text_field_alias"); - assertThat(textFieldAlias.getName(), equalTo("text_field_alias")); - assertThat(textFieldAlias.getValue(), equalTo("foo")); - - DocumentField dateFieldAlias = fields.get("date_field_alias"); - assertThat(dateFieldAlias.getName(), equalTo("date_field_alias")); - assertThat(dateFieldAlias.getValue(), equalTo("1990-12-29")); - - DocumentField dateField = fields.get("date_field"); - assertThat(dateField.getName(), equalTo("date_field")); - assertThat(dateField.getValue(), equalTo("1990-12-29")); + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .addDocValueField("text_field_alias") + .addDocValueField("date_field_alias") + .addDocValueField("date_field"), + response -> { + assertHitCount(response, 1); + SearchHit hit = response.getHits().getAt(0); + + Map fields = hit.getFields(); + assertThat(fields.keySet(), equalTo(newHashSet("text_field_alias", "date_field_alias", "date_field"))); + + DocumentField textFieldAlias = fields.get("text_field_alias"); + assertThat(textFieldAlias.getName(), equalTo("text_field_alias")); + assertThat(textFieldAlias.getValue(), equalTo("foo")); + + DocumentField dateFieldAlias = fields.get("date_field_alias"); + assertThat(dateFieldAlias.getName(), equalTo("date_field_alias")); + assertThat(dateFieldAlias.getValue(), equalTo("1990-12-29")); + + DocumentField dateField = fields.get("date_field"); + assertThat(dateField.getName(), equalTo("date_field")); + assertThat(dateField.getValue(), equalTo("1990-12-29")); + } + ); } public void testWildcardDocValueFieldsWithFieldAlias() throws Exception { @@ -1133,27 +1137,28 @@ public void testWildcardDocValueFieldsWithFieldAlias() throws Exception { indexDoc("test", "1", "text_field", "foo", "date_field", formatter.format(date)); refresh("test"); - SearchRequestBuilder builder = prepareSearch().setQuery(matchAllQuery()).addDocValueField("*alias").addDocValueField("date_field"); - SearchResponse searchResponse = builder.get(); + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()).addDocValueField("*alias").addDocValueField("date_field"), + response -> { + assertHitCount(response, 1); + SearchHit hit = response.getHits().getAt(0); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 1); - SearchHit hit = searchResponse.getHits().getAt(0); + Map fields = hit.getFields(); + assertThat(fields.keySet(), equalTo(newHashSet("text_field_alias", "date_field_alias", "date_field"))); - Map fields = hit.getFields(); - assertThat(fields.keySet(), equalTo(newHashSet("text_field_alias", "date_field_alias", "date_field"))); + DocumentField textFieldAlias = fields.get("text_field_alias"); + assertThat(textFieldAlias.getName(), equalTo("text_field_alias")); + assertThat(textFieldAlias.getValue(), equalTo("foo")); - DocumentField textFieldAlias = fields.get("text_field_alias"); - assertThat(textFieldAlias.getName(), equalTo("text_field_alias")); - assertThat(textFieldAlias.getValue(), equalTo("foo")); + DocumentField dateFieldAlias = fields.get("date_field_alias"); + assertThat(dateFieldAlias.getName(), equalTo("date_field_alias")); + assertThat(dateFieldAlias.getValue(), equalTo("1990-12-29")); - DocumentField 
dateFieldAlias = fields.get("date_field_alias"); - assertThat(dateFieldAlias.getName(), equalTo("date_field_alias")); - assertThat(dateFieldAlias.getValue(), equalTo("1990-12-29")); - - DocumentField dateField = fields.get("date_field"); - assertThat(dateField.getName(), equalTo("date_field")); - assertThat(dateField.getValue(), equalTo("1990-12-29")); + DocumentField dateField = fields.get("date_field"); + assertThat(dateField.getName(), equalTo("date_field")); + assertThat(dateField.getValue(), equalTo("1990-12-29")); + } + ); } public void testStoredFieldsWithFieldAlias() throws Exception { @@ -1185,18 +1190,19 @@ public void testStoredFieldsWithFieldAlias() throws Exception { indexDoc("test", "1", "field1", "value1", "field2", "value2"); refresh("test"); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addStoredField("field1-alias") - .addStoredField("field2-alias") - .get(); - assertHitCount(searchResponse, 1L); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addStoredField("field1-alias").addStoredField("field2-alias"), + response -> { + assertHitCount(response, 1L); - SearchHit hit = searchResponse.getHits().getAt(0); - assertEquals(1, hit.getFields().size()); - assertTrue(hit.getFields().containsKey("field1-alias")); + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getFields().size()); + assertTrue(hit.getFields().containsKey("field1-alias")); - DocumentField field = hit.getFields().get("field1-alias"); - assertThat(field.getValue().toString(), equalTo("value1")); + DocumentField field = hit.getFields().get("field1-alias"); + assertThat(field.getValue().toString(), equalTo("value1")); + } + ); } public void testWildcardStoredFieldsWithFieldAlias() throws Exception { @@ -1228,19 +1234,20 @@ public void testWildcardStoredFieldsWithFieldAlias() throws Exception { indexDoc("test", "1", "field1", "value1", "field2", "value2"); refresh("test"); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("field*").get(); - assertHitCount(searchResponse, 1L); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("field*"), response -> { + assertHitCount(response, 1L); - SearchHit hit = searchResponse.getHits().getAt(0); - assertEquals(2, hit.getFields().size()); - assertTrue(hit.getFields().containsKey("field1")); - assertTrue(hit.getFields().containsKey("field1-alias")); + SearchHit hit = response.getHits().getAt(0); + assertEquals(2, hit.getFields().size()); + assertTrue(hit.getFields().containsKey("field1")); + assertTrue(hit.getFields().containsKey("field1-alias")); - DocumentField field = hit.getFields().get("field1"); - assertThat(field.getValue().toString(), equalTo("value1")); + DocumentField field = hit.getFields().get("field1"); + assertThat(field.getValue().toString(), equalTo("value1")); - DocumentField fieldAlias = hit.getFields().get("field1-alias"); - assertThat(fieldAlias.getValue().toString(), equalTo("value1")); + DocumentField fieldAlias = hit.getFields().get("field1-alias"); + assertThat(fieldAlias.getValue().toString(), equalTo("value1")); + }); } public void testLoadMetadata() throws Exception { @@ -1248,20 +1255,17 @@ public void testLoadMetadata() throws Exception { indexRandom( true, - client().prepareIndex("test") - .setId("1") - .setRouting("1") - .setSource(jsonBuilder().startObject().field("field1", "value").endObject()) + prepareIndex("test").setId("1").setRouting("1").setSource(jsonBuilder().startObject().field("field1", "value").endObject()) ); 
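// Illustrative sketch, not part of this change: every hunk in this patch applies the
// same mechanical rewrite. Instead of materializing a SearchResponse with .get() and
// asserting on it afterwards, the request builder is handed to a helper
// (assertResponse, assertNoFailuresAndResponse, assertCheckedResponse) that runs the
// assertions in a callback and releases the response when the callback returns. A
// minimal sketch of what such a helper does, assuming Elasticsearch's ref-counted
// ActionResponse conventions (signature simplified for illustration; not the
// verbatim ElasticsearchAssertions implementation):
//
//     static <R extends ActionResponse> void assertResponse(ActionRequestBuilder<?, R> builder, Consumer<R> consumer) {
//         R response = builder.get();    // execute the request
//         try {
//             consumer.accept(response); // run the caller's assertions
//         } finally {
//             response.decRef();         // release the response once asserted
//         }
//     }
//
// The hunk below, converting testLoadMetadata, is a typical instance of the rewrite.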
- SearchResponse response = prepareSearch("test").addStoredField("field1").get(); - assertNoFailures(response); - assertHitCount(response, 1); + assertNoFailuresAndResponse(prepareSearch("test").addStoredField("field1"), response -> { + assertHitCount(response, 1); - Map fields = response.getHits().getAt(0).getMetadataFields(); + Map fields = response.getHits().getAt(0).getMetadataFields(); - assertThat(fields.get("field1"), nullValue()); - assertThat(fields.get("_routing").getValue().toString(), equalTo("1")); - assertThat(response.getHits().getAt(0).getDocumentFields().size(), equalTo(0)); + assertThat(fields.get("field1"), nullValue()); + assertThat(fields.get("_routing").getValue().toString(), equalTo("1")); + assertThat(response.getHits().getAt(0).getDocumentFields().size(), equalTo(0)); + }); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java index faefeea0cb04e..eff2e8d3653c5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java @@ -8,12 +8,10 @@ package org.elasticsearch.search.functionscore; -import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.geo.GeoPoint; @@ -48,8 +46,10 @@ import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.linearDecayFunction; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.anyOf; @@ -87,9 +87,7 @@ public void testDistanceScoreGeoLinGaussExp() throws Exception { List indexBuilders = new ArrayList<>(); indexBuilders.add( - client().prepareIndex() - .setId("1") - .setIndex("test") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("test", "value") @@ -101,9 +99,7 @@ public void testDistanceScoreGeoLinGaussExp() throws Exception { ) ); indexBuilders.add( - client().prepareIndex() - .setId("2") - .setIndex("test") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("test", "value") @@ -118,9 +114,7 @@ public void testDistanceScoreGeoLinGaussExp() throws Exception { int numDummyDocs = 20; for (int i = 1; i <= numDummyDocs; i++) { indexBuilders.add( - client().prepareIndex() - 
.setId(Integer.toString(i + 3)) - .setIndex("test") + prepareIndex("test").setId(Integer.toString(i + 3)) .setSource( jsonBuilder().startObject() .field("test", "value") @@ -140,61 +134,65 @@ public void testDistanceScoreGeoLinGaussExp() throws Exception { lonlat.add(20f); lonlat.add(11f); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH).source(searchSource().query(baseQuery)) + assertHitCount( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH).source(searchSource().query(baseQuery)) + ), + (numDummyDocs + 2) ); - SearchResponse sr = response.actionGet(); - SearchHits sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source(searchSource().query(functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km")))) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source(searchSource().query(functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km")))) + ), + response -> { + assertHitCount(response, (numDummyDocs + 2)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); // Test Exp - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH).source(searchSource().query(baseQuery)) + assertHitCount( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH).source(searchSource().query(baseQuery)) + ), + (numDummyDocs + 2) ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source(searchSource().query(functionScoreQuery(baseQuery, linearDecayFunction("loc", lonlat, "1000km")))) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source(searchSource().query(functionScoreQuery(baseQuery, linearDecayFunction("loc", lonlat, "1000km")))) + ), + response -> { + assertHitCount(response, (numDummyDocs + 2)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); // Test Lin - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH).source(searchSource().query(baseQuery)) + assertHitCount( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH).source(searchSource().query(baseQuery)) + ), + (numDummyDocs + 2) ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - response = client().search( - new SearchRequest(new String[] 
{}).searchType(SearchType.QUERY_THEN_FETCH) - .source(searchSource().query(functionScoreQuery(baseQuery, exponentialDecayFunction("loc", lonlat, "1000km")))) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source(searchSource().query(functionScoreQuery(baseQuery, exponentialDecayFunction("loc", lonlat, "1000km")))) + ), + response -> { + assertHitCount(response, (numDummyDocs + 2)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); } public void testDistanceScoreGeoLinGaussExpWithOffset() throws Exception { @@ -218,25 +216,17 @@ public void testDistanceScoreGeoLinGaussExpWithOffset() throws Exception { // add two docs within offset List<IndexRequestBuilder> indexBuilders = new ArrayList<>(); indexBuilders.add( - client().prepareIndex() - .setId("1") - .setIndex("test") - .setSource(jsonBuilder().startObject().field("test", "value").field("num", 0.5).endObject()) + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("test", "value").field("num", 0.5).endObject()) ); indexBuilders.add( - client().prepareIndex() - .setId("2") - .setIndex("test") - .setSource(jsonBuilder().startObject().field("test", "value").field("num", 1.7).endObject()) + prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("test", "value").field("num", 1.7).endObject()) ); // add docs outside offset int numDummyDocs = 20; for (int i = 0; i < numDummyDocs; i++) { indexBuilders.add( - client().prepareIndex() - .setId(Integer.toString(i + 3)) - .setIndex("test") + prepareIndex("test").setId(Integer.toString(i + 3)) .setSource(jsonBuilder().startObject().field("test", "value").field("num", 3.0 + i).endObject()) ); } @@ -245,67 +235,76 @@ public void testDistanceScoreGeoLinGaussExpWithOffset() throws Exception { // Test Gauss - ActionFuture<SearchResponse> response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().size(numDummyDocs + 2) - .query( - functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 1.0, 5.0, 1.0)).boostMode( - CombineFunction.REPLACE + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().size(numDummyDocs + 2) + .query( + functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 1.0, 5.0, 1.0)).boostMode( + CombineFunction.REPLACE + ) ) - ) - ) + ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); + assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2"))); + assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2"))); + assertThat(sh.getAt(1).getScore(), equalTo(sh.getAt(0).getScore())); + for (int i = 0; i < numDummyDocs; i++) { + assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3))); + } + } ); - SearchResponse sr = response.actionGet(); - SearchHits sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getScore(), 
equalTo(sh.getAt(0).getScore())); - for (int i = 0; i < numDummyDocs; i++) { - assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3))); - } // Test Exp - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().size(numDummyDocs + 2) - .query( - functionScoreQuery(termQuery("test", "value"), exponentialDecayFunction("num", 1.0, 5.0, 1.0)).boostMode( - CombineFunction.REPLACE + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().size(numDummyDocs + 2) + .query( + functionScoreQuery(termQuery("test", "value"), exponentialDecayFunction("num", 1.0, 5.0, 1.0)).boostMode( + CombineFunction.REPLACE + ) ) - ) - ) + ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); + assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2"))); + assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2"))); + assertThat(sh.getAt(1).getScore(), equalTo(sh.getAt(0).getScore())); + for (int i = 0; i < numDummyDocs; i++) { + assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3))); + } + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getScore(), equalTo(sh.getAt(0).getScore())); - for (int i = 0; i < numDummyDocs; i++) { - assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3))); - } // Test Lin - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().size(numDummyDocs + 2) - .query( - functionScoreQuery(termQuery("test", "value"), linearDecayFunction("num", 1.0, 20.0, 1.0)).boostMode( - CombineFunction.REPLACE + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().size(numDummyDocs + 2) + .query( + functionScoreQuery(termQuery("test", "value"), linearDecayFunction("num", 1.0, 20.0, 1.0)).boostMode( + CombineFunction.REPLACE + ) ) - ) - ) + ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); + assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2"))); + assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2"))); + assertThat(sh.getAt(1).getScore(), equalTo(sh.getAt(0).getScore())); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getScore(), equalTo(sh.getAt(0).getScore())); } public void testBoostModeSettingWorks() throws Exception { @@ -330,9 +329,7 @@ public void testBoostModeSettingWorks() throws Exception { List indexBuilders = new ArrayList<>(); indexBuilders.add( - client().prepareIndex() - .setId("1") - .setIndex("test") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("test", "value value") @@ -344,9 +341,7 @@ public void testBoostModeSettingWorks() throws Exception { ) ); indexBuilders.add( - client().prepareIndex() - .setId("2") - 
.setIndex("test") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("test", "value") @@ -364,48 +359,56 @@ public void testBoostModeSettingWorks() throws Exception { lonlat.add(20f); lonlat.add(11f); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")).boostMode( - CombineFunction.MULTIPLY + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")).boostMode( + CombineFunction.MULTIPLY + ) ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (2))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat(sh.getAt(1).getId(), equalTo("2")); + } ); - SearchResponse sr = response.actionGet(); - SearchHits sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (2))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); - // Test Exp - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source(searchSource().query(termQuery("test", "value"))) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source(searchSource().query(termQuery("test", "value"))) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (2))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat(sh.getAt(1).getId(), equalTo("2")); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (2))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); - - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")).boostMode( - CombineFunction.REPLACE + + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")).boostMode( + CombineFunction.REPLACE + ) ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (2))); + assertThat(sh.getAt(0).getId(), equalTo("2")); + assertThat(sh.getAt(1).getId(), equalTo("1")); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (2))); - assertThat(sh.getAt(0).getId(), equalTo("2")); - assertThat(sh.getAt(1).getId(), equalTo("1")); } @@ -427,9 +430,7 @@ public void testParseGeoPoint() throws Exception { ) ); - client().prepareIndex() - .setId("1") - .setIndex("test") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("test", "value") @@ -446,34 +447,44 @@ public void testParseGeoPoint() throws Exception { ScoreFunctionBuilders.weightFactorFunction(randomIntBetween(1, 10)) ); GeoPoint point = new GeoPoint(20, 11); - ActionFuture response = client().search( - new SearchRequest(new String[] 
{}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("loc", point, "1000km")).boostMode(CombineFunction.REPLACE) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("loc", point, "1000km")).boostMode( + CombineFunction.REPLACE + ) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo(1.0, 1.e-5)); + } ); - SearchResponse sr = response.actionGet(); - SearchHits sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo(1.0, 1.e-5)); // this is equivalent to new GeoPoint(20, 11); just flipped so scores must be same float[] coords = { 11, 20 }; - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("loc", coords, "1000km")).boostMode(CombineFunction.REPLACE) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("loc", coords, "1000km")).boostMode( + CombineFunction.REPLACE + ) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo(1.0f, 1.e-5)); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo(1.0f, 1.e-5)); } public void testCombineModes() throws Exception { @@ -494,9 +505,7 @@ public void testCombineModes() throws Exception { ) ); - client().prepareIndex() - .setId("1") - .setIndex("test") + prepareIndex("test").setId("1") .setRefreshPolicy(IMMEDIATE) .setSource(jsonBuilder().startObject().field("test", "value value").field("num", 1.0).endObject()) .get(); @@ -505,95 +514,120 @@ public void testCombineModes() throws Exception { ScoreFunctionBuilders.weightFactorFunction(2) ); // decay score should return 0.5 for this function and baseQuery should return 2.0f as it's score - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( - CombineFunction.MULTIPLY + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( + CombineFunction.MULTIPLY + ) ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo(1.0, 1.e-5)); + } ); - SearchResponse sr = response.actionGet(); - SearchHits sh = 
sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo(1.0, 1.e-5)); - - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( - CombineFunction.REPLACE + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( + CombineFunction.REPLACE + ) ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo(0.5, 1.e-5)); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo(0.5, 1.e-5)); - - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.SUM) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( + CombineFunction.SUM + ) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo(2.0 + 0.5, 1.e-5)); + logger.info( + "--> Hit[0] {} Explanation:\n {}", + response.getHits().getAt(0).getId(), + response.getHits().getAt(0).getExplanation() + ); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo(2.0 + 0.5, 1.e-5)); - logger.info("--> Hit[0] {} Explanation:\n {}", sr.getHits().getAt(0).getId(), sr.getHits().getAt(0).getExplanation()); - - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.AVG) + + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( + CombineFunction.AVG + ) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo((2.0 + 0.5) / 2, 1.e-5)); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo((2.0 + 0.5) / 2, 1.e-5)); - - 
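The hunks above all follow the same migration: the old code obtained a SearchResponse via actionGet() and asserted on it inline, while the new code routes the request through the assertResponse helper from ElasticsearchAssertions (or assertNoFailuresAndResponse, which additionally checks for shard failures first), handing the response to a consumer and releasing it afterwards. As a rough sketch of what such a helper has to do, assuming a ref-counted SearchResponse — the exact signature and generics in ElasticsearchAssertions may differ:

    import java.util.function.Consumer;

    import org.elasticsearch.action.ActionFuture;
    import org.elasticsearch.action.search.SearchResponse;

    final class ResponseAssertions {
        // Sketch only: run the caller's assertions, then release the response
        // in a finally block so a failing assertion cannot leak the
        // ref-counted SearchResponse. The real helper lives in
        // org.elasticsearch.test.hamcrest.ElasticsearchAssertions.
        static void assertResponse(ActionFuture<SearchResponse> future, Consumer<SearchResponse> consumer) {
            SearchResponse response = future.actionGet();
            try {
                consumer.accept(response); // the lambda carries the assertions
            } finally {
                response.decRef(); // always release, on pass or fail
            }
        }

        private ResponseAssertions() {}
    }

This is why the replacements read as purely mechanical: each "SearchResponse sr = response.actionGet(); SearchHits sh = sr.getHits(); ..." block becomes a lambda whose body is the old assertion list, with the temporary sr/sh locals moved inside it.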
response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.MIN) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( + CombineFunction.MIN + ) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo(0.5, 1.e-5)); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo(0.5, 1.e-5)); - - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.MAX) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( + CombineFunction.MAX + ) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo(2.0, 1.e-5)); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo(2.0, 1.e-5)); - } public void testExceptionThrownIfScaleLE0() throws Exception { @@ -623,18 +657,18 @@ public void testExceptionThrownIfScaleLE0() throws Exception { ).actionGet(); refresh(); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query(functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "2013-05-28", "-1d"))) - ) + SearchPhaseExecutionException e = expectThrows( + SearchPhaseExecutionException.class, + () -> client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "2013-05-28", "-1d")) + ) + ) + ).actionGet() ); - try { - response.actionGet(); - fail("Expected SearchPhaseExecutionException"); - } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), is("all shards failed")); - } + assertThat(e.getMessage(), is("all shards failed")); } public void testParseDateMath() throws Exception { @@ -670,24 +704,23 @@ public void testParseDateMath() throws Exception { ).actionGet(); refresh(); - SearchResponse sr = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query(functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "now", "2d"))) - ) - ).get(); - - assertNoFailures(sr); - assertOrderedSearchHits(sr, "1", "2"); - - sr = client().search( - new SearchRequest(new String[] {}).source( - 
searchSource().query(functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "now-1d", "2d"))) - ) - ).get(); - - assertNoFailures(sr); - assertOrderedSearchHits(sr, "2", "1"); + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query(functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "now", "2d"))) + ) + ), + response -> assertOrderedSearchHits(response, "1", "2") + ); + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query(functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "now-1d", "2d"))) + ) + ), + response -> assertOrderedSearchHits(response, "2", "1") + ); } public void testValueMissingLin() throws Exception { @@ -729,32 +762,31 @@ public void testValueMissingLin() throws Exception { refresh(); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery( - baseQuery, - new FilterFunctionBuilder[] { - new FilterFunctionBuilder(linearDecayFunction("num1", "2013-05-28", "+3d")), - new FilterFunctionBuilder(linearDecayFunction("num2", "0.0", "1")) } - ).scoreMode(FunctionScoreQuery.ScoreMode.MULTIPLY) + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery( + baseQuery, + new FilterFunctionBuilder[] { + new FilterFunctionBuilder(linearDecayFunction("num1", "2013-05-28", "+3d")), + new FilterFunctionBuilder(linearDecayFunction("num2", "0.0", "1")) } + ).scoreMode(FunctionScoreQuery.ScoreMode.MULTIPLY) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getHits().length, equalTo(4)); + double[] scores = new double[4]; + for (int i = 0; i < sh.getHits().length; i++) { + scores[Integer.parseInt(sh.getAt(i).getId()) - 1] = sh.getAt(i).getScore(); + } + assertThat(scores[0], lessThan(scores[1])); + assertThat(scores[2], lessThan(scores[3])); + } ); - - SearchResponse sr = response.actionGet(); - - assertNoFailures(sr); - SearchHits sh = sr.getHits(); - assertThat(sh.getHits().length, equalTo(4)); - double[] scores = new double[4]; - for (int i = 0; i < sh.getHits().length; i++) { - scores[Integer.parseInt(sh.getAt(i).getId()) - 1] = sh.getAt(i).getScore(); - } - assertThat(scores[0], lessThan(scores[1])); - assertThat(scores[2], lessThan(scores[3])); - } public void testDateWithoutOrigin() throws Exception { @@ -810,32 +842,32 @@ public void testDateWithoutOrigin() throws Exception { refresh(); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery( - QueryBuilders.matchAllQuery(), - new FilterFunctionBuilder[] { - new FilterFunctionBuilder(linearDecayFunction("num1", null, "7000d")), - new FilterFunctionBuilder(gaussDecayFunction("num1", null, "1d")), - new FilterFunctionBuilder(exponentialDecayFunction("num1", null, "7000d")) } - ).scoreMode(FunctionScoreQuery.ScoreMode.MULTIPLY) + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery( + QueryBuilders.matchAllQuery(), + new FilterFunctionBuilder[] { + new FilterFunctionBuilder(linearDecayFunction("num1", null, "7000d")), + new 
FilterFunctionBuilder(gaussDecayFunction("num1", null, "1d")), + new FilterFunctionBuilder(exponentialDecayFunction("num1", null, "7000d")) } + ).scoreMode(FunctionScoreQuery.ScoreMode.MULTIPLY) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getHits().length, equalTo(3)); + double[] scores = new double[4]; + for (int i = 0; i < sh.getHits().length; i++) { + scores[Integer.parseInt(sh.getAt(i).getId()) - 1] = sh.getAt(i).getScore(); + } + assertThat(scores[1], lessThan(scores[0])); + assertThat(scores[2], lessThan(scores[1])); + } ); - - SearchResponse sr = response.actionGet(); - assertNoFailures(sr); - SearchHits sh = sr.getHits(); - assertThat(sh.getHits().length, equalTo(3)); - double[] scores = new double[4]; - for (int i = 0; i < sh.getHits().length; i++) { - scores[Integer.parseInt(sh.getAt(i).getId()) - 1] = sh.getAt(i).getScore(); - } - assertThat(scores[1], lessThan(scores[0])); - assertThat(scores[2], lessThan(scores[1])); - } public void testManyDocsLin() throws Exception { @@ -871,9 +903,7 @@ public void testManyDocsLin() throws Exception { String date = "2013-05-" + dayString; indexBuilders.add( - client().prepareIndex() - .setId(Integer.toString(i)) - .setIndex("test") + prepareIndex("test").setId(Integer.toString(i)) .setSource( jsonBuilder().startObject() .field("test", "value") @@ -891,33 +921,34 @@ public void testManyDocsLin() throws Exception { List lonlat = new ArrayList<>(); lonlat.add(100f); lonlat.add(110f); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().size(numDocs) - .query( - functionScoreQuery( - termQuery("test", "value"), - new FilterFunctionBuilder[] { - new FilterFunctionBuilder(linearDecayFunction("date", "2013-05-30", "+15d")), - new FilterFunctionBuilder(linearDecayFunction("geo", lonlat, "1000km")), - new FilterFunctionBuilder(linearDecayFunction("num", numDocs, numDocs / 2.0)) } - ).scoreMode(ScoreMode.MULTIPLY).boostMode(CombineFunction.REPLACE) - ) - ) + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().size(numDocs) + .query( + functionScoreQuery( + termQuery("test", "value"), + new FilterFunctionBuilder[] { + new FilterFunctionBuilder(linearDecayFunction("date", "2013-05-30", "+15d")), + new FilterFunctionBuilder(linearDecayFunction("geo", lonlat, "1000km")), + new FilterFunctionBuilder(linearDecayFunction("num", numDocs, numDocs / 2.0)) } + ).scoreMode(ScoreMode.MULTIPLY).boostMode(CombineFunction.REPLACE) + ) + ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getHits().length, equalTo(numDocs)); + double[] scores = new double[numDocs]; + for (int i = 0; i < numDocs; i++) { + scores[Integer.parseInt(sh.getAt(i).getId())] = sh.getAt(i).getScore(); + } + for (int i = 0; i < numDocs - 1; i++) { + assertThat(scores[i], lessThan(scores[i + 1])); + } + } ); - - SearchResponse sr = response.actionGet(); - assertNoFailures(sr); - SearchHits sh = sr.getHits(); - assertThat(sh.getHits().length, equalTo(numDocs)); - double[] scores = new double[numDocs]; - for (int i = 0; i < numDocs; i++) { - scores[Integer.parseInt(sh.getAt(i).getId())] = sh.getAt(i).getScore(); - } - for (int i = 0; i < numDocs - 1; i++) { - assertThat(scores[i], lessThan(scores[i + 1])); - } } public void testParsingExceptionIfFieldDoesNotExist() throws Exception { @@ -953,23 +984,22 @@ public void 
testParsingExceptionIfFieldDoesNotExist() throws Exception { List lonlat = new ArrayList<>(); lonlat.add(100f); lonlat.add(110f); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().size(numDocs) - .query( - functionScoreQuery(termQuery("test", "value"), linearDecayFunction("type.geo", lonlat, "1000km")).scoreMode( - FunctionScoreQuery.ScoreMode.MULTIPLY + + SearchPhaseExecutionException e = expectThrows( + SearchPhaseExecutionException.class, + () -> client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().size(numDocs) + .query( + functionScoreQuery(termQuery("test", "value"), linearDecayFunction("type.geo", lonlat, "1000km")).scoreMode( + FunctionScoreQuery.ScoreMode.MULTIPLY + ) ) - ) - ) + ) + ).actionGet() ); - try { - response.actionGet(); - fail("Expected SearchPhaseExecutionException"); - } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), is("all shards failed")); - } + assertThat(e.getMessage(), is("all shards failed")); } public void testParsingExceptionIfFieldTypeDoesNotMatch() throws Exception { @@ -996,20 +1026,20 @@ public void testParsingExceptionIfFieldTypeDoesNotMatch() throws Exception { ).actionGet(); refresh(); // so, we indexed a string field, but now we try to score a num field - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(termQuery("test", "value"), linearDecayFunction("num", 1.0, 0.5)).scoreMode(ScoreMode.MULTIPLY) + SearchPhaseExecutionException e = expectThrows( + SearchPhaseExecutionException.class, + () -> client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(termQuery("test", "value"), linearDecayFunction("num", 1.0, 0.5)).scoreMode( + ScoreMode.MULTIPLY + ) + ) ) - ) + ).actionGet() ); - try { - response.actionGet(); - fail("Expected SearchPhaseExecutionException"); - } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), is("all shards failed")); - } + assertThat(e.getMessage(), is("all shards failed")); } public void testNoQueryGiven() throws Exception { @@ -1033,15 +1063,17 @@ public void testNoQueryGiven() throws Exception { .actionGet(); refresh(); // so, we indexed a string field, but now we try to score a num field - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(linearDecayFunction("num", 1, 0.5)).scoreMode(FunctionScoreQuery.ScoreMode.MULTIPLY) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(linearDecayFunction("num", 1, 0.5)).scoreMode(FunctionScoreQuery.ScoreMode.MULTIPLY) + ) ) - ) + ), + response -> {} ); - response.actionGet(); } public void testMultiFieldOptions() throws Exception { @@ -1066,9 +1098,7 @@ public void testMultiFieldOptions() throws Exception { ); // Index for testing MIN and MAX - IndexRequestBuilder doc1 = client().prepareIndex() - .setId("1") - .setIndex("test") + IndexRequestBuilder doc1 = prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("test", "value") @@ -1084,9 +1114,7 @@ public void testMultiFieldOptions() throws Exception { 
.endArray() .endObject() ); - IndexRequestBuilder doc2 = client().prepareIndex() - .setId("2") - .setIndex("test") + IndexRequestBuilder doc2 = prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("test", "value") @@ -1099,80 +1127,87 @@ public void testMultiFieldOptions() throws Exception { indexRandom(true, doc1, doc2); - ActionFuture response = client().search(new SearchRequest(new String[] {}).source(searchSource().query(baseQuery))); - SearchResponse sr = response.actionGet(); - assertSearchHits(sr, "1", "2"); - SearchHits sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (2))); + assertResponse(client().search(new SearchRequest(new String[] {}).source(searchSource().query(baseQuery))), response -> { + assertSearchHits(response, "1", "2"); + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (2))); + }); List lonlat = new ArrayList<>(); lonlat.add(20f); lonlat.add(10f); - response = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km").setMultiValueMode(MultiValueMode.MIN)) + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query( + functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km").setMultiValueMode(MultiValueMode.MIN)) + ) ) - ) + ), + response -> { + assertSearchHits(response, "1", "2"); + SearchHits sh = response.getHits(); + + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat(sh.getAt(1).getId(), equalTo("2")); + } ); - sr = response.actionGet(); - assertSearchHits(sr, "1", "2"); - sh = sr.getHits(); - - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); - response = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km").setMultiValueMode(MultiValueMode.MAX)) + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query( + functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km").setMultiValueMode(MultiValueMode.MAX)) + ) ) - ) + ), + response -> { + assertSearchHits(response, "1", "2"); + SearchHits sh = response.getHits(); + + assertThat(sh.getAt(0).getId(), equalTo("2")); + assertThat(sh.getAt(1).getId(), equalTo("1")); + } ); - sr = response.actionGet(); - assertSearchHits(sr, "1", "2"); - sh = sr.getHits(); - - assertThat(sh.getAt(0).getId(), equalTo("2")); - assertThat(sh.getAt(1).getId(), equalTo("1")); // Now test AVG and SUM - doc1 = client().prepareIndex() - .setId("1") - .setIndex("test") + doc1 = prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject().field("test", "value").startArray("num").value(0.0).value(1.0).value(2.0).endArray().endObject() ); - doc2 = client().prepareIndex() - .setId("2") - .setIndex("test") - .setSource(jsonBuilder().startObject().field("test", "value").field("num", 1.0).endObject()); + doc2 = prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("test", "value").field("num", 1.0).endObject()); indexRandom(true, doc1, doc2); - response = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery(baseQuery, linearDecayFunction("num", "0", "10").setMultiValueMode(MultiValueMode.SUM)) + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + 
searchSource().query( + functionScoreQuery(baseQuery, linearDecayFunction("num", "0", "10").setMultiValueMode(MultiValueMode.SUM)) + ) ) - ) + ), + response -> { + assertSearchHits(response, "1", "2"); + SearchHits sh = response.getHits(); + + assertThat(sh.getAt(0).getId(), equalTo("2")); + assertThat(sh.getAt(1).getId(), equalTo("1")); + assertThat(1.0 - sh.getAt(0).getScore(), closeTo((1.0 - sh.getAt(1).getScore()) / 3.0, 1.e-6d)); + } ); - sr = response.actionGet(); - assertSearchHits(sr, "1", "2"); - sh = sr.getHits(); - - assertThat(sh.getAt(0).getId(), equalTo("2")); - assertThat(sh.getAt(1).getId(), equalTo("1")); - assertThat(1.0 - sh.getAt(0).getScore(), closeTo((1.0 - sh.getAt(1).getScore()) / 3.0, 1.e-6d)); - response = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery(baseQuery, linearDecayFunction("num", "0", "10").setMultiValueMode(MultiValueMode.AVG)) + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query( + functionScoreQuery(baseQuery, linearDecayFunction("num", "0", "10").setMultiValueMode(MultiValueMode.AVG)) + ) ) - ) + ), + response -> { + assertSearchHits(response, "1", "2"); + SearchHits sh = response.getHits(); + assertThat((double) (sh.getAt(0).getScore()), closeTo((sh.getAt(1).getScore()), 1.e-6d)); + } ); - sr = response.actionGet(); - assertSearchHits(sr, "1", "2"); - sh = sr.getHits(); - assertThat((double) (sh.getAt(0).getScore()), closeTo((sh.getAt(1).getScore()), 1.e-6d)); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java index e9ce09f7455a2..ee60888d7a0a8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java @@ -12,7 +12,6 @@ import org.apache.lucene.search.Explanation; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.lucene.search.function.CombineFunction; import org.elasticsearch.common.settings.Settings; @@ -41,12 +40,13 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ExecutionException; import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -112,40 +112,41 @@ protected Collection> nodePlugins() { return Arrays.asList(ExplainableScriptPlugin.class); } - public void testExplainScript() throws InterruptedException, IOException { + public void testExplainScript() throws InterruptedException, IOException, ExecutionException { List indexRequests = new 
ArrayList<>(); for (int i = 0; i < 20; i++) { indexRequests.add( - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setSource(jsonBuilder().startObject().field("number_field", i).field("text", "text").endObject()) ); } indexRandom(true, true, indexRequests); client().admin().indices().prepareRefresh().get(); ensureYellow(); - SearchResponse response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().explain(true) - .query( - functionScoreQuery( - termQuery("text", "text"), - scriptFunction(new Script(ScriptType.INLINE, "test", "explainable_script", Collections.emptyMap())) - ).boostMode(CombineFunction.REPLACE) - ) - ) - ).actionGet(); - - assertNoFailures(response); - SearchHits hits = response.getHits(); - assertThat(hits.getTotalHits().value, equalTo(20L)); - int idCounter = 19; - for (SearchHit hit : hits.getHits()) { - assertThat(hit.getId(), equalTo(Integer.toString(idCounter))); - assertThat(hit.getExplanation().toString(), containsString(Double.toString(idCounter))); - assertThat(hit.getExplanation().getDetails().length, equalTo(2)); - idCounter--; - } + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().explain(true) + .query( + functionScoreQuery( + termQuery("text", "text"), + scriptFunction(new Script(ScriptType.INLINE, "test", "explainable_script", Collections.emptyMap())) + ).boostMode(CombineFunction.REPLACE) + ) + ) + ), + response -> { + SearchHits hits = response.getHits(); + assertThat(hits.getTotalHits().value, equalTo(20L)); + int idCounter = 19; + for (SearchHit hit : hits.getHits()) { + assertThat(hit.getId(), equalTo(Integer.toString(idCounter))); + assertThat(hit.getExplanation().toString(), containsString(Double.toString(idCounter))); + assertThat(hit.getExplanation().getDetails().length, equalTo(2)); + idCounter--; + } + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueIT.java index 61cccfdf114b1..0a43255967dcd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueIT.java @@ -9,9 +9,9 @@ package org.elasticsearch.search.functionscore; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.lucene.search.function.FieldValueFactorFunction; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import java.io.IOException; @@ -20,8 +20,8 @@ import static org.elasticsearch.index.query.QueryBuilders.simpleQueryStringQuery; import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.fieldValueFactorFunction; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static 
org.elasticsearch.xcontent.XContentFactory.jsonBuilder; /** @@ -46,9 +46,9 @@ public void testFieldValueFactor() throws IOException { ) ); - client().prepareIndex("test").setId("1").setSource("test", 5, "body", "foo").get(); - client().prepareIndex("test").setId("2").setSource("test", 17, "body", "foo").get(); - client().prepareIndex("test").setId("3").setSource("body", "bar").get(); + prepareIndex("test").setId("1").setSource("test", 5, "body", "foo").get(); + prepareIndex("test").setId("2").setSource("test", 17, "body", "foo").get(); + prepareIndex("test").setId("3").setSource("body", "bar").get(); refresh(); @@ -88,10 +88,11 @@ public void testFieldValueFactor() throws IOException { // doc 3 doesn't have a "test" field, so an exception will be thrown try { - SearchResponse response = prepareSearch("test").setExplain(randomBoolean()) - .setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("test"))) - .get(); - assertFailures(response); + assertResponse( + prepareSearch("test").setExplain(randomBoolean()) + .setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("test"))), + ElasticsearchAssertions::assertFailures + ); } catch (SearchPhaseExecutionException e) { // We are expecting an exception, because 3 has no field } @@ -111,30 +112,32 @@ public void testFieldValueFactor() throws IOException { ); // field is not mapped but we're defaulting it to 100 so all documents should have the same score - SearchResponse response = prepareSearch("test").setExplain(randomBoolean()) - .setQuery( - functionScoreQuery( - matchAllQuery(), - fieldValueFactorFunction("notmapped").modifier(FieldValueFactorFunction.Modifier.RECIPROCAL).missing(100) - ) - ) - .get(); - assertEquals(response.getHits().getAt(0).getScore(), response.getHits().getAt(2).getScore(), 0); + assertResponse( + prepareSearch("test").setExplain(randomBoolean()) + .setQuery( + functionScoreQuery( + matchAllQuery(), + fieldValueFactorFunction("notmapped").modifier(FieldValueFactorFunction.Modifier.RECIPROCAL).missing(100) + ) + ), + response -> assertEquals(response.getHits().getAt(0).getScore(), response.getHits().getAt(2).getScore(), 0) + ); - client().prepareIndex("test").setId("2").setSource("test", -1, "body", "foo").get(); + prepareIndex("test").setId("2").setSource("test", -1, "body", "foo").get(); refresh(); // -1 divided by 0 is infinity, which should provoke an exception. 
try { - response = prepareSearch("test").setExplain(randomBoolean()) - .setQuery( - functionScoreQuery( - simpleQueryStringQuery("foo"), - fieldValueFactorFunction("test").modifier(FieldValueFactorFunction.Modifier.RECIPROCAL).factor(0) - ) - ) - .get(); - assertFailures(response); + assertResponse( + prepareSearch("test").setExplain(randomBoolean()) + .setQuery( + functionScoreQuery( + simpleQueryStringQuery("foo"), + fieldValueFactorFunction("test").modifier(FieldValueFactorFunction.Modifier.RECIPROCAL).factor(0) + ) + ), + ElasticsearchAssertions::assertFailures + ); } catch (SearchPhaseExecutionException e) { // This is fine, the query will throw an exception if executed // locally, instead of just having failures diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java index e32abeb481a2a..bcecc49c2d463 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java @@ -41,6 +41,8 @@ import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -76,7 +78,7 @@ protected Map, Object>> pluginScripts() { } } - public void testScriptScoresNested() throws IOException { + public void testScriptScoresNested() throws Exception { createIndex(INDEX); index(INDEX, "1", jsonBuilder().startObject().field("dummy_field", 1).endObject()); refresh(); @@ -84,39 +86,46 @@ public void testScriptScoresNested() throws IOException { Script scriptOne = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "1", Collections.emptyMap()); Script scriptTwo = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get score value", Collections.emptyMap()); - SearchResponse response = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery( - functionScoreQuery(functionScoreQuery(scriptFunction(scriptOne)), scriptFunction(scriptTwo)), - scriptFunction(scriptTwo) + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query( + functionScoreQuery( + functionScoreQuery(functionScoreQuery(scriptFunction(scriptOne)), scriptFunction(scriptTwo)), + scriptFunction(scriptTwo) + ) ) ) - ) - ).actionGet(); - assertNoFailures(response); - assertThat(response.getHits().getAt(0).getScore(), equalTo(1.0f)); + ), + response -> assertThat(response.getHits().getAt(0).getScore(), equalTo(1.0f)) + ); } - public void testScriptScoresWithAgg() throws IOException { + public void testScriptScoresWithAgg() throws Exception { createIndex(INDEX); index(INDEX, "1", jsonBuilder().startObject().field("dummy_field", 1).endObject()); refresh(); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get score value", Collections.emptyMap()); - SearchResponse response = client().search( - new SearchRequest(new 
String[] {}).source( - searchSource().query(functionScoreQuery(scriptFunction(script))).aggregation(terms("score_agg").script(script)) - ) - ).actionGet(); - assertNoFailures(response); - assertThat(response.getHits().getAt(0).getScore(), equalTo(1.0f)); - assertThat(((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getKeyAsString(), equalTo("1.0")); - assertThat(((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getDocCount(), is(1L)); + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query(functionScoreQuery(scriptFunction(script))).aggregation(terms("score_agg").script(script)) + ) + ), + response -> { + assertThat(response.getHits().getAt(0).getScore(), equalTo(1.0f)); + assertThat( + ((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getKeyAsString(), + equalTo("1.0") + ); + assertThat(((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getDocCount(), is(1L)); + } + ); } - public void testMinScoreFunctionScoreBasic() throws IOException { + public void testMinScoreFunctionScoreBasic() throws Exception { float score = randomValueOtherThanMany((f) -> Float.compare(f, 0) < 0, ESTestCase::randomFloat); float minScore = randomValueOtherThanMany((f) -> Float.compare(f, 0) < 0, ESTestCase::randomFloat); index( @@ -130,34 +139,42 @@ public void testMinScoreFunctionScoreBasic() throws IOException { ensureYellow(); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['random_score']", Collections.emptyMap()); - SearchResponse searchResponse = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query(functionScoreQuery(scriptFunction(script)).setMinScore(minScore)) - ) - ).actionGet(); - if (score < minScore) { - assertThat(searchResponse.getHits().getTotalHits().value, is(0L)); - } else { - assertThat(searchResponse.getHits().getTotalHits().value, is(1L)); - } + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query(functionScoreQuery(scriptFunction(script)).setMinScore(minScore)) + ) + ), + response -> { + if (score < minScore) { + assertThat(response.getHits().getTotalHits().value, is(0L)); + } else { + assertThat(response.getHits().getTotalHits().value, is(1L)); + } + } + ); - searchResponse = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery( - new MatchAllQueryBuilder(), - new FilterFunctionBuilder[] { - new FilterFunctionBuilder(scriptFunction(script)), - new FilterFunctionBuilder(scriptFunction(script)) } - ).scoreMode(FunctionScoreQuery.ScoreMode.AVG).setMinScore(minScore) + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query( + functionScoreQuery( + new MatchAllQueryBuilder(), + new FilterFunctionBuilder[] { + new FilterFunctionBuilder(scriptFunction(script)), + new FilterFunctionBuilder(scriptFunction(script)) } + ).scoreMode(FunctionScoreQuery.ScoreMode.AVG).setMinScore(minScore) + ) ) - ) - ).actionGet(); - if (score < minScore) { - assertThat(searchResponse.getHits().getTotalHits().value, is(0L)); - } else { - assertThat(searchResponse.getHits().getTotalHits().value, is(1L)); - } + ), + response -> { + if (score < minScore) { + assertThat(response.getHits().getTotalHits().value, is(0L)); + } else { + assertThat(response.getHits().getTotalHits().value, is(1L)); + } + } + ); } public void 
testMinScoreFunctionScoreManyDocsAndRandomMinScore() throws IOException, ExecutionException, InterruptedException { @@ -166,7 +183,7 @@ public void testMinScoreFunctionScoreManyDocsAndRandomMinScore() throws IOExcept int scoreOffset = randomIntBetween(0, 2 * numDocs); int minScore = randomIntBetween(0, 2 * numDocs); for (int i = 0; i < numDocs; i++) { - docs.add(client().prepareIndex(INDEX).setId(Integer.toString(i)).setSource("num", i + scoreOffset)); + docs.add(prepareIndex(INDEX).setId(Integer.toString(i)).setSource("num", i + scoreOffset)); } indexRandom(true, docs); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "return (doc['num'].value)", Collections.emptyMap()); @@ -178,26 +195,33 @@ public void testMinScoreFunctionScoreManyDocsAndRandomMinScore() throws IOExcept numMatchingDocs = numDocs; } - SearchResponse searchResponse = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query(functionScoreQuery(scriptFunction(script)).setMinScore(minScore)).size(numDocs) - ) - ).actionGet(); - assertMinScoreSearchResponses(numDocs, searchResponse, numMatchingDocs); - - searchResponse = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery( - new MatchAllQueryBuilder(), - new FilterFunctionBuilder[] { - new FilterFunctionBuilder(scriptFunction(script)), - new FilterFunctionBuilder(scriptFunction(script)) } - ).scoreMode(FunctionScoreQuery.ScoreMode.AVG).setMinScore(minScore) - ).size(numDocs) - ) - ).actionGet(); - assertMinScoreSearchResponses(numDocs, searchResponse, numMatchingDocs); + final int finalNumMatchingDocs = numMatchingDocs; + + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query(functionScoreQuery(scriptFunction(script)).setMinScore(minScore)).size(numDocs) + ) + ), + response -> assertMinScoreSearchResponses(numDocs, response, finalNumMatchingDocs) + ); + + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query( + functionScoreQuery( + new MatchAllQueryBuilder(), + new FilterFunctionBuilder[] { + new FilterFunctionBuilder(scriptFunction(script)), + new FilterFunctionBuilder(scriptFunction(script)) } + ).scoreMode(FunctionScoreQuery.ScoreMode.AVG).setMinScore(minScore) + ).size(numDocs) + ) + ), + response -> assertMinScoreSearchResponses(numDocs, response, finalNumMatchingDocs) + ); + } protected void assertMinScoreSearchResponses(int numDocs, SearchResponse searchResponse, int numMatchingDocs) { @@ -216,35 +240,38 @@ public void testWithEmptyFunctions() throws IOException, ExecutionException, Int index("test", "1", jsonBuilder().startObject().field("text", "test text").endObject()); refresh(); - SearchResponse termQuery = client().search( - new SearchRequest(new String[] {}).source(searchSource().explain(true).query(termQuery("text", "text"))) - ).get(); - assertNoFailures(termQuery); - assertThat(termQuery.getHits().getTotalHits().value, equalTo(1L)); - float termQueryScore = termQuery.getHits().getAt(0).getScore(); - + float[] termQueryScore = new float[1]; + assertNoFailuresAndResponse( + client().search(new SearchRequest(new String[] {}).source(searchSource().explain(true).query(termQuery("text", "text")))), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + termQueryScore[0] = response.getHits().getAt(0).getScore(); + } + ); for (CombineFunction combineFunction : CombineFunction.values()) { - testMinScoreApplied(combineFunction, 
termQueryScore); + testMinScoreApplied(combineFunction, termQueryScore[0]); } } protected void testMinScoreApplied(CombineFunction boostMode, float expectedScore) throws InterruptedException, ExecutionException { - SearchResponse response = client().search( - new SearchRequest(new String[] {}).source( - searchSource().explain(true).query(functionScoreQuery(termQuery("text", "text")).boostMode(boostMode).setMinScore(0.1f)) - ) - ).get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getAt(0).getScore(), equalTo(expectedScore)); - - response = client().search( - new SearchRequest(new String[] {}).source( - searchSource().explain(true).query(functionScoreQuery(termQuery("text", "text")).boostMode(boostMode).setMinScore(2f)) - ) - ).get(); - - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(0L)); + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().explain(true).query(functionScoreQuery(termQuery("text", "text")).boostMode(boostMode).setMinScore(0.1f)) + ) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).getScore(), equalTo(expectedScore)); + } + ); + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().explain(true).query(functionScoreQuery(termQuery("text", "text")).boostMode(boostMode).setMinScore(2f)) + ) + ), + response -> assertThat(response.getHits().getTotalHits().value, equalTo(0L)) + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java index 5c9c54a0d3b19..396af7e8501cf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java @@ -11,10 +11,8 @@ import org.apache.lucene.search.Explanation; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; -import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.Priority; import org.elasticsearch.common.bytes.BytesReference; @@ -29,7 +27,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import java.io.IOException; import java.util.Arrays; @@ -40,6 +37,7 @@ import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; @@ -81,19 +79,19 @@ public void testPlugin() throws Exception { client().admin().indices().prepareRefresh().get(); DecayFunctionBuilder gfb = new 
CustomDistanceScoreBuilder("num1", "2013-05-28", "+1d"); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source(searchSource().explain(false).query(functionScoreQuery(termQuery("test", "value"), gfb))) + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source(searchSource().explain(false).query(functionScoreQuery(termQuery("test", "value"), gfb))) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getHits().length, equalTo(2)); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat(sh.getAt(1).getId(), equalTo("2")); + } ); - SearchResponse sr = response.actionGet(); - ElasticsearchAssertions.assertNoFailures(sr); - SearchHits sh = sr.getHits(); - - assertThat(sh.getHits().length, equalTo(2)); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); - } public static class CustomDistanceScorePlugin extends Plugin implements SearchPlugin { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java index 14df03bb86e8d..c608c253c851b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java @@ -49,6 +49,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFourthHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThirdHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; @@ -66,35 +68,37 @@ public void testEnforceWindowSize() { // this int iters = scaledRandomIntBetween(10, 20); for (int i = 0; i < iters; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("f", Integer.toString(i)).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("f", Integer.toString(i)).get(); } refresh(); int numShards = getNumShards("test").numPrimaries; for (int j = 0; j < iters; j++) { - SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.matchAllQuery()) - .setRescorer( - new QueryRescorerBuilder( - functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.weightFactorFunction(100)).boostMode( - CombineFunction.REPLACE - ).queryName("hello world") - ).setQueryWeight(0.0f).setRescoreQueryWeight(1.0f), - 1 - ) - .setSize(randomIntBetween(2, 10)) - .get(); - assertNoFailures(searchResponse); - assertFirstHit(searchResponse, hasScore(100.f)); - int numDocsWith100AsAScore = 0; - for (int i = 0; i < searchResponse.getHits().getHits().length; i++) { - float score = searchResponse.getHits().getHits()[i].getScore(); - if (score == 100f) { - numDocsWith100AsAScore += 1; + assertNoFailuresAndResponse( + prepareSearch().setQuery(QueryBuilders.matchAllQuery()) + .setRescorer( + new QueryRescorerBuilder( + 
functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.weightFactorFunction(100)).boostMode( + CombineFunction.REPLACE + ).queryName("hello world") + ).setQueryWeight(0.0f).setRescoreQueryWeight(1.0f), + 1 + ) + .setSize(randomIntBetween(2, 10)), + response -> { + assertFirstHit(response, hasScore(100.f)); + int numDocsWith100AsAScore = 0; + for (int i = 0; i < response.getHits().getHits().length; i++) { + float score = response.getHits().getHits()[i].getScore(); + if (score == 100f) { + numDocsWith100AsAScore += 1; + } + } + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + // we cannot assert that they are equal since some shards might not have docs at all + assertThat(numDocsWith100AsAScore, lessThanOrEqualTo(numShards)); } - } - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - // we cannot assert that they are equal since some shards might not have docs at all - assertThat(numDocsWith100AsAScore, lessThanOrEqualTo(numShards)); + ); } } @@ -114,46 +118,47 @@ public void testRescorePhrase() throws Exception { ).setSettings(Settings.builder().put(indexSettings()).put("index.number_of_shards", 1)) ); - client().prepareIndex("test").setId("1").setSource("field1", "the quick brown fox").get(); - client().prepareIndex("test").setId("2").setSource("field1", "the quick lazy huge brown fox jumps over the tree ").get(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("1").setSource("field1", "the quick brown fox").get(); + prepareIndex("test").setId("2").setSource("field1", "the quick lazy huge brown fox jumps over the tree ").get(); + prepareIndex("test").setId("3") .setSource("field1", "quick huge brown", "field2", "the quick lazy huge brown fox jumps over the tree") .get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery( - QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR) - ) - .setRescorer( - new QueryRescorerBuilder(matchPhraseQuery("field1", "quick brown").slop(2).boost(4.0f)).setRescoreQueryWeight(2), - 5 - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("2")); - - searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) - .setRescorer(new QueryRescorerBuilder(matchPhraseQuery("field1", "the quick brown").slop(3)), 5) - .get(); - - assertHitCount(searchResponse, 3); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - assertThirdHit(searchResponse, hasId("3")); - - searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) - .setRescorer(new QueryRescorerBuilder(matchPhraseQuery("field1", "the quick brown")), 5) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - assertThirdHit(searchResponse, hasId("3")); + assertResponse( + 
prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) + .setRescorer( + new QueryRescorerBuilder(matchPhraseQuery("field1", "quick brown").slop(2).boost(4.0f)).setRescoreQueryWeight(2), + 5 + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("2")); + } + ); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) + .setRescorer(new QueryRescorerBuilder(matchPhraseQuery("field1", "the quick brown").slop(3)), 5), + response -> { + assertHitCount(response, 3); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("2")); + assertThirdHit(response, hasId("3")); + } + ); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) + .setRescorer(new QueryRescorerBuilder(matchPhraseQuery("field1", "the quick brown")), 5), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("2")); + assertThirdHit(response, hasId("3")); + } + ); } public void testMoreDocs() throws Exception { @@ -173,78 +178,77 @@ public void testMoreDocs() throws Exception { assertAcked(indicesAdmin().prepareCreate("test").setMapping(mapping).setSettings(builder.put("index.number_of_shards", 1))); - client().prepareIndex("test").setId("1").setSource("field1", "massachusetts avenue boston massachusetts").get(); - client().prepareIndex("test").setId("2").setSource("field1", "lexington avenue boston massachusetts").get(); - client().prepareIndex("test").setId("3").setSource("field1", "boston avenue lexington massachusetts").get(); + prepareIndex("test").setId("1").setSource("field1", "massachusetts avenue boston massachusetts").get(); + prepareIndex("test").setId("2").setSource("field1", "lexington avenue boston massachusetts").get(); + prepareIndex("test").setId("3").setSource("field1", "boston avenue lexington massachusetts").get(); indicesAdmin().prepareRefresh("test").get(); - client().prepareIndex("test").setId("4").setSource("field1", "boston road lexington massachusetts").get(); - client().prepareIndex("test").setId("5").setSource("field1", "lexington street lexington massachusetts").get(); - client().prepareIndex("test").setId("6").setSource("field1", "massachusetts avenue lexington massachusetts").get(); - client().prepareIndex("test").setId("7").setSource("field1", "bosten street san franciso california").get(); + prepareIndex("test").setId("4").setSource("field1", "boston road lexington massachusetts").get(); + prepareIndex("test").setId("5").setSource("field1", "lexington street lexington massachusetts").get(); + prepareIndex("test").setId("6").setSource("field1", "massachusetts avenue lexington massachusetts").get(); + prepareIndex("test").setId("7").setSource("field1", "bosten street san franciso california").get(); indicesAdmin().prepareRefresh("test").get(); - client().prepareIndex("test").setId("8").setSource("field1", "hollywood boulevard los angeles california").get(); - 
client().prepareIndex("test").setId("9").setSource("field1", "1st street boston massachussetts").get(); - client().prepareIndex("test").setId("10").setSource("field1", "1st street boston massachusetts").get(); + prepareIndex("test").setId("8").setSource("field1", "hollywood boulevard los angeles california").get(); + prepareIndex("test").setId("9").setSource("field1", "1st street boston massachussetts").get(); + prepareIndex("test").setId("10").setSource("field1", "1st street boston massachusetts").get(); indicesAdmin().prepareRefresh("test").get(); - client().prepareIndex("test").setId("11").setSource("field1", "2st street boston massachusetts").get(); - client().prepareIndex("test").setId("12").setSource("field1", "3st street boston massachusetts").get(); + prepareIndex("test").setId("11").setSource("field1", "2st street boston massachusetts").get(); + prepareIndex("test").setId("12").setSource("field1", "3st street boston massachusetts").get(); indicesAdmin().prepareRefresh("test").get(); - SearchResponse searchResponse = prepareSearch().setQuery( - QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(Operator.OR) - ) - .setFrom(0) - .setSize(5) - .setRescorer( - new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) - .setRescoreQueryWeight(2.0f), - 20 - ) - .get(); - - assertThat(searchResponse.getHits().getHits().length, equalTo(5)); - assertHitCount(searchResponse, 9); - assertFirstHit(searchResponse, hasId("2")); - assertSecondHit(searchResponse, hasId("6")); - assertThirdHit(searchResponse, hasId("3")); - - searchResponse = prepareSearch().setQuery( - QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(Operator.OR) - ) - .setFrom(0) - .setSize(5) - .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setRescorer( - new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) - .setRescoreQueryWeight(2.0f), - 20 - ) - .get(); - - assertThat(searchResponse.getHits().getHits().length, equalTo(5)); - assertHitCount(searchResponse, 9); - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("2")); - assertSecondHit(searchResponse, hasId("6")); - assertThirdHit(searchResponse, hasId("3")); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(Operator.OR)) + .setFrom(0) + .setSize(5) + .setRescorer( + new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) + .setRescoreQueryWeight(2.0f), + 20 + ), + response -> { + assertThat(response.getHits().getHits().length, equalTo(5)); + assertHitCount(response, 9); + assertFirstHit(response, hasId("2")); + assertSecondHit(response, hasId("6")); + assertThirdHit(response, hasId("3")); + } + ); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(Operator.OR)) + .setFrom(0) + .setSize(5) + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setRescorer( + new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) + .setRescoreQueryWeight(2.0f), + 20 + ), + response -> { + assertThat(response.getHits().getHits().length, equalTo(5)); + assertHitCount(response, 9); + assertThat(response.getHits().getMaxScore(), 
equalTo(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("2")); + assertSecondHit(response, hasId("6")); + assertThirdHit(response, hasId("3")); + } + ); // Make sure non-zero from works: - searchResponse = prepareSearch().setQuery( - QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(Operator.OR) - ) - .setFrom(2) - .setSize(5) - .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setRescorer( - new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) - .setRescoreQueryWeight(2.0f), - 20 - ) - .get(); - - assertThat(searchResponse.getHits().getHits().length, equalTo(5)); - assertHitCount(searchResponse, 9); - assertThat(searchResponse.getHits().getMaxScore(), greaterThan(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("3")); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(Operator.OR)) + .setFrom(2) + .setSize(5) + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setRescorer( + new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) + .setRescoreQueryWeight(2.0f), + 20 + ), + response -> { + assertThat(response.getHits().getHits().length, equalTo(5)); + assertHitCount(response, 9); + assertThat(response.getHits().getMaxScore(), greaterThan(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("3")); + } + ); } // Tests a rescore window smaller than number of hits: @@ -265,63 +269,66 @@ public void testSmallRescoreWindow() throws Exception { assertAcked(indicesAdmin().prepareCreate("test").setMapping(mapping).setSettings(builder.put("index.number_of_shards", 1))); - client().prepareIndex("test").setId("3").setSource("field1", "massachusetts").get(); - client().prepareIndex("test").setId("6").setSource("field1", "massachusetts avenue lexington massachusetts").get(); + prepareIndex("test").setId("3").setSource("field1", "massachusetts").get(); + prepareIndex("test").setId("6").setSource("field1", "massachusetts avenue lexington massachusetts").get(); indicesAdmin().prepareRefresh("test").get(); - client().prepareIndex("test").setId("1").setSource("field1", "lexington massachusetts avenue").get(); - client().prepareIndex("test").setId("2").setSource("field1", "lexington avenue boston massachusetts road").get(); + prepareIndex("test").setId("1").setSource("field1", "lexington massachusetts avenue").get(); + prepareIndex("test").setId("2").setSource("field1", "lexington avenue boston massachusetts road").get(); indicesAdmin().prepareRefresh("test").get(); - SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts")) - .setFrom(0) - .setSize(5) - .get(); - assertThat(searchResponse.getHits().getHits().length, equalTo(4)); - assertHitCount(searchResponse, 4); - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("3")); - assertSecondHit(searchResponse, hasId("6")); - assertThirdHit(searchResponse, hasId("1")); - assertFourthHit(searchResponse, hasId("2")); + assertResponse(prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts")).setFrom(0).setSize(5), response -> { + assertThat(response.getHits().getHits().length, equalTo(4)); + assertHitCount(response, 4); + assertThat(response.getHits().getMaxScore(), 
equalTo(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("3")); + assertSecondHit(response, hasId("6")); + assertThirdHit(response, hasId("1")); + assertFourthHit(response, hasId("2")); + }); // Now, rescore only top 2 hits w/ proximity: - searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts")) - .setFrom(0) - .setSize(5) - .setRescorer( - new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) - .setRescoreQueryWeight(2.0f), - 2 - ) - .get(); - // Only top 2 hits were re-ordered: - assertThat(searchResponse.getHits().getHits().length, equalTo(4)); - assertHitCount(searchResponse, 4); - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("6")); - assertSecondHit(searchResponse, hasId("3")); - assertThirdHit(searchResponse, hasId("1")); - assertFourthHit(searchResponse, hasId("2")); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts")) + .setFrom(0) + .setSize(5) + .setRescorer( + new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) + .setRescoreQueryWeight(2.0f), + 2 + ), + response -> { + // Only top 2 hits were re-ordered: + assertThat(response.getHits().getHits().length, equalTo(4)); + assertHitCount(response, 4); + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("6")); + assertSecondHit(response, hasId("3")); + assertThirdHit(response, hasId("1")); + assertFourthHit(response, hasId("2")); + } + ); // Now, rescore only top 3 hits w/ proximity: - searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts")) - .setFrom(0) - .setSize(5) - .setRescorer( - new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) - .setRescoreQueryWeight(2.0f), - 3 - ) - .get(); - - // Only top 3 hits were re-ordered: - assertThat(searchResponse.getHits().getHits().length, equalTo(4)); - assertHitCount(searchResponse, 4); - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("6")); - assertThirdHit(searchResponse, hasId("3")); - assertFourthHit(searchResponse, hasId("2")); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts")) + .setFrom(0) + .setSize(5) + .setRescorer( + new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) + .setRescoreQueryWeight(2.0f), + 3 + ), + response -> { + // Only top 3 hits were re-ordered: + assertThat(response.getHits().getHits().length, equalTo(4)); + assertHitCount(response, 4); + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("6")); + assertThirdHit(response, hasId("3")); + assertFourthHit(response, hasId("2")); + } + ); } // Tests a rescorer that penalizes the scores: @@ -342,42 +349,44 @@ public void testRescorerMadeScoresWorse() throws Exception { assertAcked(indicesAdmin().prepareCreate("test").setMapping(mapping).setSettings(builder.put("index.number_of_shards", 1))); - 
client().prepareIndex("test").setId("3").setSource("field1", "massachusetts").get(); - client().prepareIndex("test").setId("6").setSource("field1", "massachusetts avenue lexington massachusetts").get(); + prepareIndex("test").setId("3").setSource("field1", "massachusetts").get(); + prepareIndex("test").setId("6").setSource("field1", "massachusetts avenue lexington massachusetts").get(); indicesAdmin().prepareRefresh("test").get(); - client().prepareIndex("test").setId("1").setSource("field1", "lexington massachusetts avenue").get(); - client().prepareIndex("test").setId("2").setSource("field1", "lexington avenue boston massachusetts road").get(); + prepareIndex("test").setId("1").setSource("field1", "lexington massachusetts avenue").get(); + prepareIndex("test").setId("2").setSource("field1", "lexington avenue boston massachusetts road").get(); indicesAdmin().prepareRefresh("test").get(); - SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts").operator(Operator.OR)) - .setFrom(0) - .setSize(5) - .get(); - assertThat(searchResponse.getHits().getHits().length, equalTo(4)); - assertHitCount(searchResponse, 4); - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("3")); - assertSecondHit(searchResponse, hasId("6")); - assertThirdHit(searchResponse, hasId("1")); - assertFourthHit(searchResponse, hasId("2")); - + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts").operator(Operator.OR)).setFrom(0).setSize(5), + response -> { + assertThat(response.getHits().getHits().length, equalTo(4)); + assertHitCount(response, 4); + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("3")); + assertSecondHit(response, hasId("6")); + assertThirdHit(response, hasId("1")); + assertFourthHit(response, hasId("2")); + } + ); // Now, penalizing rescore (nothing matches the rescore query): - searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts").operator(Operator.OR)) - .setFrom(0) - .setSize(5) - .setRescorer( - new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(1.0f) - .setRescoreQueryWeight(-1f), - 3 - ) - .get(); - - // 6 and 1 got worse, and then the hit (2) outside the rescore window were sorted ahead: - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("3")); - assertSecondHit(searchResponse, hasId("2")); - assertThirdHit(searchResponse, hasId("6")); - assertFourthHit(searchResponse, hasId("1")); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts").operator(Operator.OR)) + .setFrom(0) + .setSize(5) + .setRescorer( + new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(1.0f) + .setRescoreQueryWeight(-1f), + 3 + ), + response -> { + // 6 and 1 got worse, and then the hit (2) outside the rescore window were sorted ahead: + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("3")); + assertSecondHit(response, hasId("2")); + assertThirdHit(response, hasId("6")); + assertFourthHit(response, hasId("1")); + } + ); } // Comparator that sorts hits and rescored hits in the same 
@@ -430,43 +439,46 @@ public void testEquivalence() throws Exception {
             int rescoreWindow = between(1, 3) * resultSize;
             String intToEnglish = English.intToEnglish(between(0, numDocs - 1));
             String query = intToEnglish.split(" ")[0];
-            SearchResponse rescored = prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH)
-                .setPreference("test") // ensure we hit the same shards for tie-breaking
-                .setQuery(QueryBuilders.matchQuery("field1", query).operator(Operator.OR))
-                .setFrom(0)
-                .setSize(resultSize)
-                .setRescorer(
-                    new QueryRescorerBuilder(constantScoreQuery(matchPhraseQuery("field1", intToEnglish).slop(3))).setQueryWeight(1.0f)
-                        // no weight - so we basically use the same score as the actual query
-                        .setRescoreQueryWeight(0.0f),
-                    rescoreWindow
-                )
-                .get();
-
-            SearchResponse plain = prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH)
-                .setPreference("test") // ensure we hit the same shards for tie-breaking
-                .setQuery(QueryBuilders.matchQuery("field1", query).operator(Operator.OR))
-                .setFrom(0)
-                .setSize(resultSize)
-                .get();
-
-            // check equivalence
-            assertEquivalent(query, plain, rescored);
-            rescored = prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH)
-                .setPreference("test") // ensure we hit the same shards for tie-breaking
-                .setQuery(QueryBuilders.matchQuery("field1", query).operator(Operator.OR))
-                .setFrom(0)
-                .setSize(resultSize)
-                .setRescorer(
-                    new QueryRescorerBuilder(constantScoreQuery(matchPhraseQuery("field1", "not in the index").slop(3))).setQueryWeight(
-                        1.0f
-                    ).setRescoreQueryWeight(1.0f),
-                    rescoreWindow
-                )
-                .get();
-            // check equivalence
-            assertEquivalent(query, plain, rescored);
+            assertResponse(
+                prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH)
+                    .setPreference("test") // ensure we hit the same shards for tie-breaking
+                    .setQuery(QueryBuilders.matchQuery("field1", query).operator(Operator.OR))
+                    .setFrom(0)
+                    .setSize(resultSize),
+                plain -> {
+                    assertResponse(
+                        prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH)
+                            .setPreference("test") // ensure we hit the same shards for tie-breaking
+                            .setQuery(QueryBuilders.matchQuery("field1", query).operator(Operator.OR))
+                            .setFrom(0)
+                            .setSize(resultSize)
+                            .setRescorer(
+                                new QueryRescorerBuilder(constantScoreQuery(matchPhraseQuery("field1", intToEnglish).slop(3)))
+                                    .setQueryWeight(1.0f)
+                                    // no weight - so we basically use the same score as the actual query
+                                    .setRescoreQueryWeight(0.0f),
+                                rescoreWindow
+                            ),
+                        rescored -> assertEquivalent(query, plain, rescored)
+                    ); // check equivalence
+
+                    assertResponse(
+                        prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH)
+                            .setPreference("test") // ensure we hit the same shards for tie-breaking
+                            .setQuery(QueryBuilders.matchQuery("field1", query).operator(Operator.OR))
+                            .setFrom(0)
+                            .setSize(resultSize)
+                            .setRescorer(
+                                new QueryRescorerBuilder(constantScoreQuery(matchPhraseQuery("field1", "not in the index").slop(3)))
+                                    .setQueryWeight(1.0f)
+                                    .setRescoreQueryWeight(1.0f),
+                                rescoreWindow
+                            ),
+                        rescored -> assertEquivalent(query, plain, rescored)
+                    ); // check equivalence
+                }
+            );
         }
     }
@@ -486,48 +498,50 @@ public void testExplain() throws Exception {
             )
         );
         ensureGreen();
-        client().prepareIndex("test").setId("1").setSource("field1", "the quick brown fox").get();
-        client().prepareIndex("test").setId("2").setSource("field1", "the quick lazy huge brown fox jumps over the tree").get();
-        client().prepareIndex("test")
-            .setId("3")
+        prepareIndex("test").setId("1").setSource("field1", "the quick brown fox").get();
+        prepareIndex("test").setId("2").setSource("field1", "the quick lazy huge brown fox jumps over the tree").get();
+        prepareIndex("test").setId("3")
             .setSource("field1", "quick huge brown", "field2", "the quick lazy huge brown fox jumps over the tree")
             .get();
         refresh();

         {
-            SearchResponse searchResponse = prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
-                .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR))
-                .setRescorer(
-                    new QueryRescorerBuilder(matchPhraseQuery("field1", "the quick brown").slop(2).boost(4.0f)).setQueryWeight(0.5f)
-                        .setRescoreQueryWeight(0.4f),
-                    5
-                )
-                .setExplain(true)
-                .get();
-            assertHitCount(searchResponse, 3);
-            assertFirstHit(searchResponse, hasId("1"));
-            assertSecondHit(searchResponse, hasId("2"));
-            assertThirdHit(searchResponse, hasId("3"));
-
-            for (int i = 0; i < 3; i++) {
-                assertThat(searchResponse.getHits().getAt(i).getExplanation(), notNullValue());
-                assertThat(searchResponse.getHits().getAt(i).getExplanation().isMatch(), equalTo(true));
-                assertThat(searchResponse.getHits().getAt(i).getExplanation().getDetails().length, equalTo(2));
-                assertThat(searchResponse.getHits().getAt(i).getExplanation().getDetails()[0].isMatch(), equalTo(true));
-                if (i == 2) {
-                    assertThat(searchResponse.getHits().getAt(i).getExplanation().getDetails()[1].getValue(), equalTo(0.5f));
-                } else {
-                    assertThat(searchResponse.getHits().getAt(i).getExplanation().getDescription(), equalTo("sum of:"));
-                    assertThat(
-                        searchResponse.getHits().getAt(i).getExplanation().getDetails()[0].getDetails()[1].getValue(),
-                        equalTo(0.5f)
-                    );
-                    assertThat(
-                        searchResponse.getHits().getAt(i).getExplanation().getDetails()[1].getDetails()[1].getValue(),
-                        equalTo(0.4f)
-                    );
+            assertResponse(
+                prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+                    .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR))
+                    .setRescorer(
+                        new QueryRescorerBuilder(matchPhraseQuery("field1", "the quick brown").slop(2).boost(4.0f)).setQueryWeight(0.5f)
+                            .setRescoreQueryWeight(0.4f),
+                        5
+                    )
+                    .setExplain(true),
+                response -> {
+                    assertHitCount(response, 3);
+                    assertFirstHit(response, hasId("1"));
+                    assertSecondHit(response, hasId("2"));
+                    assertThirdHit(response, hasId("3"));
+
+                    for (int i = 0; i < 3; i++) {
+                        assertThat(response.getHits().getAt(i).getExplanation(), notNullValue());
+                        assertThat(response.getHits().getAt(i).getExplanation().isMatch(), equalTo(true));
+                        assertThat(response.getHits().getAt(i).getExplanation().getDetails().length, equalTo(2));
+                        assertThat(response.getHits().getAt(i).getExplanation().getDetails()[0].isMatch(), equalTo(true));
+                        if (i == 2) {
+                            assertThat(response.getHits().getAt(i).getExplanation().getDetails()[1].getValue(), equalTo(0.5f));
+                        } else {
+                            assertThat(response.getHits().getAt(i).getExplanation().getDescription(), equalTo("sum of:"));
+                            assertThat(
+                                response.getHits().getAt(i).getExplanation().getDetails()[0].getDetails()[1].getValue(),
+                                equalTo(0.5f)
+                            );
+                            assertThat(
+                                response.getHits().getAt(i).getExplanation().getDetails()[1].getDetails()[1].getValue(),
+                                equalTo(0.4f)
+                            );
+                        }
+                    }
                 }
-            }
+            );
         }

         String[] scoreModes = new String[] { "max", "min", "avg", "total", "multiply", "" };
@@ -540,21 +554,26 @@ public void testExplain() throws Exception {
                 if ("".equals(scoreModes[innerMode]) == false) {
                     innerRescoreQuery.setScoreMode(QueryRescoreMode.fromString(scoreModes[innerMode]));
                 }
-
-                SearchResponse searchResponse = prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
-                    .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR))
-                    .setRescorer(innerRescoreQuery, 5)
-                    .setExplain(true)
-                    .get();
-                assertHitCount(searchResponse, 3);
-                assertFirstHit(searchResponse, hasId("1"));
-                assertSecondHit(searchResponse, hasId("2"));
-                assertThirdHit(searchResponse, hasId("3"));
-
-                for (int j = 0; j < 3; j++) {
-                    assertThat(searchResponse.getHits().getAt(j).getExplanation().getDescription(), equalTo(descriptionModes[innerMode]));
-                }
-
+                final int finalInnerMode = innerMode;
+                assertResponse(
+                    prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+                        .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR))
+                        .setRescorer(innerRescoreQuery, 5)
+                        .setExplain(true),
+                    response -> {
+                        assertHitCount(response, 3);
+                        assertFirstHit(response, hasId("1"));
+                        assertSecondHit(response, hasId("2"));
+                        assertThirdHit(response, hasId("3"));
+
+                        for (int j = 0; j < 3; j++) {
+                            assertThat(
+                                response.getHits().getAt(j).getExplanation().getDescription(),
+                                equalTo(descriptionModes[finalInnerMode])
+                            );
+                        }
+                    }
+                );
                 for (int outerMode = 0; outerMode < scoreModes.length; outerMode++) {
                     QueryRescorerBuilder outerRescoreQuery = new QueryRescorerBuilder(matchQuery("field1", "the quick brown").boost(4.0f))
                         .setQueryWeight(0.5f)
@@ -563,23 +582,29 @@ public void testExplain() throws Exception {
                     if ("".equals(scoreModes[outerMode]) == false) {
                         outerRescoreQuery.setScoreMode(QueryRescoreMode.fromString(scoreModes[outerMode]));
                     }
-
-                    searchResponse = prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
-                        .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR))
-                        .addRescorer(innerRescoreQuery, 5)
-                        .addRescorer(outerRescoreQuery.windowSize(10))
-                        .setExplain(true)
-                        .get();
-                    assertHitCount(searchResponse, 3);
-                    assertFirstHit(searchResponse, hasId("1"));
-                    assertSecondHit(searchResponse, hasId("2"));
-                    assertThirdHit(searchResponse, hasId("3"));
-
-                    for (int j = 0; j < 3; j++) {
-                        Explanation explanation = searchResponse.getHits().getAt(j).getExplanation();
-                        assertThat(explanation.getDescription(), equalTo(descriptionModes[outerMode]));
-                        assertThat(explanation.getDetails()[0].getDetails()[0].getDescription(), equalTo(descriptionModes[innerMode]));
-                    }
+                    final int finalOuterMode = outerMode;
+                    assertResponse(
+                        prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+                            .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR))
+                            .addRescorer(innerRescoreQuery, 5)
+                            .addRescorer(outerRescoreQuery.windowSize(10))
+                            .setExplain(true),
+                        response -> {
+                            assertHitCount(response, 3);
+                            assertFirstHit(response, hasId("1"));
+                            assertSecondHit(response, hasId("2"));
+                            assertThirdHit(response, hasId("3"));
+
+                            for (int j = 0; j < 3; j++) {
+                                Explanation explanation = response.getHits().getAt(j).getExplanation();
+                                assertThat(explanation.getDescription(), equalTo(descriptionModes[finalOuterMode]));
+                                assertThat(
+                                    explanation.getDetails()[0].getDetails()[0].getDescription(),
+                                    equalTo(descriptionModes[finalInnerMode])
+                                );
+                            }
+                        }
+                    );
                 }
             }
         }
@@ -617,58 +642,66 @@ public void testScoring() throws Exception {
                 if ("".equals(scoreMode) == false) {
                     rescoreQuery.setScoreMode(QueryRescoreMode.fromString(scoreMode));
                 }
-
-                SearchResponse rescored = prepareSearch().setPreference("test") // ensure we hit the same shards for tie-breaking
-                    .setFrom(0)
-                    .setSize(10)
-                    .setQuery(query)
-                    .setRescorer(rescoreQuery, 50)
-                    .get();
-
-                assertHitCount(rescored, 4);
-
-                assertThat(rescored.getHits().getMaxScore(), equalTo(rescored.getHits().getHits()[0].getScore()));
-                if ("total".equals(scoreMode) || "".equals(scoreMode)) {
-                    assertFirstHit(rescored, hasId(String.valueOf(i + 1)));
-                    assertSecondHit(rescored, hasId(String.valueOf(i)));
-                    assertThirdHit(rescored, hasId(String.valueOf(i + 2)));
-                    assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(3.0f * primaryWeight + 7.0f * secondaryWeight));
-                    assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(2.0f * primaryWeight + 5.0f * secondaryWeight));
-                    assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight));
-                    assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight + 0.0f * secondaryWeight));
-                } else if ("max".equals(scoreMode)) {
-                    assertFirstHit(rescored, hasId(String.valueOf(i + 1)));
-                    assertSecondHit(rescored, hasId(String.valueOf(i)));
-                    assertThirdHit(rescored, hasId(String.valueOf(i + 2)));
-                    assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(7.0f * secondaryWeight));
-                    assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(5.0f * secondaryWeight));
-                    assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight));
-                    assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight));
-                } else if ("min".equals(scoreMode)) {
-                    assertFirstHit(rescored, hasId(String.valueOf(i + 2)));
-                    assertSecondHit(rescored, hasId(String.valueOf(i + 1)));
-                    assertThirdHit(rescored, hasId(String.valueOf(i)));
-                    assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(5.0f * primaryWeight));
-                    assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(3.0f * primaryWeight));
-                    assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(2.0f * primaryWeight));
-                    assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.0f * secondaryWeight));
-                } else if ("avg".equals(scoreMode)) {
-                    assertFirstHit(rescored, hasId(String.valueOf(i + 1)));
-                    assertSecondHit(rescored, hasId(String.valueOf(i + 2)));
-                    assertThirdHit(rescored, hasId(String.valueOf(i)));
-                    assertThat(rescored.getHits().getHits()[0].getScore(), equalTo((3.0f * primaryWeight + 7.0f * secondaryWeight) / 2.0f));
-                    assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(5.0f * primaryWeight));
-                    assertThat(rescored.getHits().getHits()[2].getScore(), equalTo((2.0f * primaryWeight + 5.0f * secondaryWeight) / 2.0f));
-                    assertThat(rescored.getHits().getHits()[3].getScore(), equalTo((0.2f * primaryWeight) / 2.0f));
-                } else if ("multiply".equals(scoreMode)) {
-                    assertFirstHit(rescored, hasId(String.valueOf(i + 1)));
-                    assertSecondHit(rescored, hasId(String.valueOf(i)));
-                    assertThirdHit(rescored, hasId(String.valueOf(i + 2)));
-                    assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(3.0f * primaryWeight * 7.0f * secondaryWeight));
-                    assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(2.0f * primaryWeight * 5.0f * secondaryWeight));
-                    assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight));
-                    assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight * 0.0f * secondaryWeight));
-                }
+                final int finalI = i;
+                assertResponse(
+                    prepareSearch().setPreference("test") // ensure we hit the same shards for tie-breaking
+                        .setFrom(0)
+                        .setSize(10)
+                        .setQuery(query)
+                        .setRescorer(rescoreQuery, 50),
+                    rescored -> {
+                        assertHitCount(rescored, 4);
+
+                        assertThat(rescored.getHits().getMaxScore(), equalTo(rescored.getHits().getHits()[0].getScore()));
+                        if ("total".equals(scoreMode) || "".equals(scoreMode)) {
+                            assertFirstHit(rescored, hasId(String.valueOf(finalI + 1)));
+                            assertSecondHit(rescored, hasId(String.valueOf(finalI)));
+                            assertThirdHit(rescored, hasId(String.valueOf(finalI + 2)));
+                            assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(3.0f * primaryWeight + 7.0f * secondaryWeight));
+                            assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(2.0f * primaryWeight + 5.0f * secondaryWeight));
+                            assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight));
+                            assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight + 0.0f * secondaryWeight));
+                        } else if ("max".equals(scoreMode)) {
+                            assertFirstHit(rescored, hasId(String.valueOf(finalI + 1)));
+                            assertSecondHit(rescored, hasId(String.valueOf(finalI)));
+                            assertThirdHit(rescored, hasId(String.valueOf(finalI + 2)));
+                            assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(7.0f * secondaryWeight));
+                            assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(5.0f * secondaryWeight));
+                            assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight));
+                            assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight));
+                        } else if ("min".equals(scoreMode)) {
+                            assertFirstHit(rescored, hasId(String.valueOf(finalI + 2)));
+                            assertSecondHit(rescored, hasId(String.valueOf(finalI + 1)));
+                            assertThirdHit(rescored, hasId(String.valueOf(finalI)));
+                            assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(5.0f * primaryWeight));
+                            assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(3.0f * primaryWeight));
+                            assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(2.0f * primaryWeight));
+                            assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.0f * secondaryWeight));
+                        } else if ("avg".equals(scoreMode)) {
+                            assertFirstHit(rescored, hasId(String.valueOf(finalI + 1)));
+                            assertSecondHit(rescored, hasId(String.valueOf(finalI + 2)));
+                            assertThirdHit(rescored, hasId(String.valueOf(finalI)));
+                            assertThat(
+                                rescored.getHits().getHits()[0].getScore(),
+                                equalTo((3.0f * primaryWeight + 7.0f * secondaryWeight) / 2.0f)
+                            );
+                            assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(5.0f * primaryWeight));
+                            assertThat(
+                                rescored.getHits().getHits()[2].getScore(),
+                                equalTo((2.0f * primaryWeight + 5.0f * secondaryWeight) / 2.0f)
+                            );
+                            assertThat(rescored.getHits().getHits()[3].getScore(), equalTo((0.2f * primaryWeight) / 2.0f));
+                        } else if ("multiply".equals(scoreMode)) {
+                            assertFirstHit(rescored, hasId(String.valueOf(finalI + 1)));
+                            assertSecondHit(rescored, hasId(String.valueOf(finalI)));
+                            assertThirdHit(rescored, hasId(String.valueOf(finalI + 2)));
+                            assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(3.0f * primaryWeight * 7.0f * secondaryWeight));
+                            assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(2.0f * primaryWeight * 5.0f * secondaryWeight));
+                            assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight));
+                            assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight * 0.0f * secondaryWeight));
+                        }
+                    }
+                );
             }
         }
     }
@@ -688,13 +721,16 @@ public void testMultipleRescores() throws Exception {
         // First set the rescore window large enough that both rescores take effect
         SearchRequestBuilder request = prepareSearch();
         request.addRescorer(eightIsGreat, numDocs).addRescorer(sevenIsBetter, numDocs);
-        SearchResponse response = request.get();
-        assertFirstHit(response, hasId("7"));
-        assertSecondHit(response, hasId("8"));
+        assertResponse(request, response -> {
+            assertFirstHit(response, hasId("7"));
+            assertSecondHit(response, hasId("8"));
+        });

         // Now squash the second rescore window so it never gets to see a seven
-        response = request.setSize(1).clearRescorers().addRescorer(eightIsGreat, numDocs).addRescorer(sevenIsBetter, 1).get();
-        assertFirstHit(response, hasId("8"));
+        assertResponse(
+            request.setSize(1).clearRescorers().addRescorer(eightIsGreat, numDocs).addRescorer(sevenIsBetter, 1),
+            response -> assertFirstHit(response, hasId("8"))
+        );
         // We have no idea what the second hit will be because we didn't get a chance to look for seven

         // Now use one rescore to drag the number we're looking for into the window of another
@@ -709,11 +745,12 @@ public void testMultipleRescores() throws Exception {
             )
         ).setScoreMode(QueryRescoreMode.Total);
         request.clearRescorers().addRescorer(ninetyIsGood, numDocs).addRescorer(oneToo, 10);
-        response = request.setSize(2).get();
-        assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore()));
-        assertFirstHit(response, hasId("91"));
-        assertFirstHit(response, hasScore(2001.0f));
-        assertSecondHit(response, hasScore(1001.0f)); // Not sure which one it is but it is ninety something
+        assertResponse(request.setSize(2), response -> {
+            assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore()));
+            assertFirstHit(response, hasId("91"));
+            assertFirstHit(response, hasScore(2001.0f));
+            assertSecondHit(response, hasScore(1001.0f)); // Not sure which one it is but it is ninety something
+        });
     }

     private int indexRandomNumbers(String analyzer) throws Exception {
@@ -744,7 +781,7 @@ private int indexRandomNumbers(String analyzer, int shards, boolean dummyDocs) t
         int numDocs = randomIntBetween(100, 150);
         IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
         for (int i = 0; i < numDocs; i++) {
-            docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i));
+            docs[i] = prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i));
         }

         indexRandom(true, dummyDocs, docs);
@@ -756,7 +793,7 @@ private int indexRandomNumbers(String analyzer, int shards, boolean dummyDocs) t
     public void testFromSize() throws Exception {
         assertAcked(prepareCreate("test").setSettings(indexSettings(1, 0)));
         for (int i = 0; i < 5; i++) {
-            client().prepareIndex("test").setId("" + i).setSource("text", "hello world").get();
+            prepareIndex("test").setId("" + i).setSource("text", "hello world").get();
         }
         refresh();
@@ -772,7 +809,7 @@ public void testFromSize() throws Exception {
     public void testRescorePhaseWithInvalidSort() throws Exception {
         assertAcked(prepareCreate("test"));
         for (int i = 0; i < 5; i++) {
-            client().prepareIndex("test").setId("" + i).setSource("number", 0).get();
+            prepareIndex("test").setId("" + i).setSource("number", 0).get();
         }
         refresh();
@@ -797,14 +834,17 @@ public void testRescorePhaseWithInvalidSort() throws Exception {
         assertNotNull(exc.getCause());
         assertThat(exc.getCause().getMessage(), containsString("Cannot use [sort] option in conjunction with [rescore]."));

-        SearchResponse resp = prepareSearch().addSort(SortBuilders.scoreSort())
-            .setTrackScores(true)
-            .addRescorer(new QueryRescorerBuilder(matchAllQuery()).setRescoreQueryWeight(100.0f), 50)
-            .get();
-        assertThat(resp.getHits().getTotalHits().value, equalTo(5L));
-        assertThat(resp.getHits().getHits().length, equalTo(5));
-        for (SearchHit hit : resp.getHits().getHits()) {
-            assertThat(hit.getScore(), equalTo(101f));
-        }
+        assertResponse(
+            prepareSearch().addSort(SortBuilders.scoreSort())
+                .setTrackScores(true)
+                .addRescorer(new QueryRescorerBuilder(matchAllQuery()).setRescoreQueryWeight(100.0f), 50),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(5L));
+                assertThat(response.getHits().getHits().length, equalTo(5));
+                for (SearchHit hit : response.getHits().getHits()) {
+                    assertThat(hit.getScore(), equalTo(101f));
+                }
+            }
+        );
     }
 }
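The hunks above all apply one mechanical transformation: instead of capturing a SearchResponse from request.get() and asserting on it, the test hands its assertions to ElasticsearchAssertions.assertResponse as a lambda. The point of the callback shape is lifecycle management: SearchResponse is ref-counted, and the helper can release it once the assertions have run, so a test can no longer leak a response by returning early or throwing. The sketch below shows roughly what such a helper has to do; the class name ResponseAssertions and the method body are illustrative assumptions, not the actual implementation in ElasticsearchAssertions.

    import java.util.function.Consumer;

    import org.elasticsearch.action.ActionRequestBuilder;
    import org.elasticsearch.action.search.SearchResponse;

    final class ResponseAssertions {
        // Execute the request, run the caller's assertions, and always release
        // the ref-counted response, even when an assertion fails and throws.
        static void assertResponse(ActionRequestBuilder<?, SearchResponse> request, Consumer<SearchResponse> consumer) {
            SearchResponse response = request.get();
            try {
                consumer.accept(response);
            } finally {
                response.decRef();
            }
        }
    }

Read this way, the conversions are behavior-preserving: the queries and matchers are unchanged, and only the point at which the response is released moves from "never, explicitly" to "always, inside the helper".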
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java
index ef8ffcf0d806a..8f178397f508b 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java
@@ -8,7 +8,6 @@ package org.elasticsearch.search.functionscore;

 import org.apache.lucene.util.ArrayUtil;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.index.fielddata.ScriptDocValues;
 import org.elasticsearch.index.mapper.SeqNoFieldMapper;
 import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
@@ -37,6 +36,8 @@
 import static org.elasticsearch.script.MockScriptPlugin.NAME;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.allOf;
 import static org.hamcrest.Matchers.containsString;
@@ -97,35 +98,39 @@ public void testConsistentHitsWithSameSeed() throws Exception {
             preference = randomRealisticUnicodeOfLengthBetween(1, 10);
         }
         int innerIters = scaledRandomIntBetween(2, 5);
-        SearchHit[] hits = null;
+        final SearchHit[][] hits = new SearchHit[1][];
         for (int i = 0; i < innerIters; i++) {
-            SearchResponse searchResponse = prepareSearch().setSize(docCount) // get all docs otherwise we are prone to tie-breaking
-                .setPreference(preference)
-                .setQuery(functionScoreQuery(matchAllQuery(), randomFunction().seed(seed).setField("foo")))
-                .get();
-            assertThat(
-                "Failures " + Arrays.toString(searchResponse.getShardFailures()),
-                searchResponse.getShardFailures().length,
-                CoreMatchers.equalTo(0)
-            );
-            final int hitCount = searchResponse.getHits().getHits().length;
-            final SearchHit[] currentHits = searchResponse.getHits().getHits();
-            ArrayUtil.timSort(currentHits, (o1, o2) -> {
-                // for tie-breaking we have to resort here since if the score is
-                // identical we rely on collection order which might change.
-                int cmp = Float.compare(o1.getScore(), o2.getScore());
-                return cmp == 0 ? o1.getId().compareTo(o2.getId()) : cmp;
-            });
-            if (i == 0) {
-                assertThat(hits, nullValue());
-                hits = currentHits;
-            } else {
-                assertThat(hits.length, equalTo(searchResponse.getHits().getHits().length));
-                for (int j = 0; j < hitCount; j++) {
-                    assertThat("" + j, currentHits[j].getScore(), equalTo(hits[j].getScore()));
-                    assertThat("" + j, currentHits[j].getId(), equalTo(hits[j].getId()));
+            final int finalI = i;
+            assertResponse(
+                prepareSearch().setSize(docCount) // get all docs otherwise we are prone to tie-breaking
+                    .setPreference(preference)
+                    .setQuery(functionScoreQuery(matchAllQuery(), randomFunction().seed(seed).setField("foo"))),
+                response -> {
+                    assertThat(
+                        "Failures " + Arrays.toString(response.getShardFailures()),
+                        response.getShardFailures().length,
+                        CoreMatchers.equalTo(0)
+                    );
+                    final int hitCount = response.getHits().getHits().length;
+                    final SearchHit[] currentHits = response.getHits().getHits();
+                    ArrayUtil.timSort(currentHits, (o1, o2) -> {
+                        // for tie-breaking we have to resort here since if the score is
+                        // identical we rely on collection order which might change.
+                        int cmp = Float.compare(o1.getScore(), o2.getScore());
+                        return cmp == 0 ? o1.getId().compareTo(o2.getId()) : cmp;
+                    });
+                    if (finalI == 0) {
+                        assertThat(hits[0], nullValue());
+                        hits[0] = currentHits;
+                    } else {
+                        assertThat(hits[0].length, equalTo(response.getHits().getHits().length));
+                        for (int j = 0; j < hitCount; j++) {
+                            assertThat("" + j, currentHits[j].getScore(), equalTo(hits[0][j].getScore()));
+                            assertThat("" + j, currentHits[j].getId(), equalTo(hits[0][j].getId()));
+                        }
+                    }
                 }
-            }
+            );

             // randomly change some docs to get them in different segments
             int numDocsToChange = randomIntBetween(20, 50);
@@ -152,8 +157,7 @@ public void testScoreAccessWithinScript() throws Exception {

         int docCount = randomIntBetween(100, 200);
         for (int i = 0; i < docCount; i++) {
-            client().prepareIndex("test")
-                .setId("" + i)
+            prepareIndex("test").setId("" + i)
                 // we add 1 to the index field to make sure that the scripts below never compute log(0)
                 .setSource("body", randomFrom(Arrays.asList("foo", "bar", "baz")), "index", i + 1)
                 .get();
@@ -165,73 +169,88 @@ public void testScoreAccessWithinScript() throws Exception {

         // Test for accessing _score
         Script script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score))", params);
-        SearchResponse resp = prepareSearch("test").setQuery(
-            functionScoreQuery(
-                matchQuery("body", "foo"),
-                new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
-                    new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
-                    new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
-            )
-        ).get();
-        assertNoFailures(resp);
-        SearchHit firstHit = resp.getHits().getAt(0);
-        assertThat(firstHit.getScore(), greaterThan(1f));
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setQuery(
+                functionScoreQuery(
+                    matchQuery("body", "foo"),
+                    new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
+                        new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
+                        new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
+                )
+            ),
+            response -> {
+                SearchHit firstHit = response.getHits().getAt(0);
+                assertThat(firstHit.getScore(), greaterThan(1f));
+            }
+        );

         // Test for accessing _score.intValue()
         script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score.intValue()))", params);
-        resp = prepareSearch("test").setQuery(
-            functionScoreQuery(
-                matchQuery("body", "foo"),
-                new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
-                    new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
-                    new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
-            )
-        ).get();
-        assertNoFailures(resp);
-        firstHit = resp.getHits().getAt(0);
-        assertThat(firstHit.getScore(), greaterThan(1f));
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setQuery(
+                functionScoreQuery(
+                    matchQuery("body", "foo"),
+                    new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
+                        new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
+                        new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
+                )
+            ),
+            response -> {
+                SearchHit firstHit = response.getHits().getAt(0);
+                assertThat(firstHit.getScore(), greaterThan(1f));
+            }
+        );

         // Test for accessing _score.longValue()
         script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score.longValue()))", params);
-        resp = prepareSearch("test").setQuery(
-            functionScoreQuery(
-                matchQuery("body", "foo"),
-                new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
-                    new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
-                    new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
-            )
-        ).get();
-        assertNoFailures(resp);
-        firstHit = resp.getHits().getAt(0);
-        assertThat(firstHit.getScore(), greaterThan(1f));
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setQuery(
+                functionScoreQuery(
+                    matchQuery("body", "foo"),
+                    new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
+                        new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
+                        new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
+                )
+            ),
+            response -> {
+                SearchHit firstHit = response.getHits().getAt(0);
+                assertThat(firstHit.getScore(), greaterThan(1f));
+            }
+        );

         // Test for accessing _score.floatValue()
         script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score.floatValue()))", params);
-        resp = prepareSearch("test").setQuery(
-            functionScoreQuery(
-                matchQuery("body", "foo"),
-                new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
-                    new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
-                    new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
-            )
-        ).get();
-        assertNoFailures(resp);
-        firstHit = resp.getHits().getAt(0);
-        assertThat(firstHit.getScore(), greaterThan(1f));
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setQuery(
+                functionScoreQuery(
+                    matchQuery("body", "foo"),
+                    new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
+                        new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
+                        new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
+                )
+            ),
+            response -> {
+                SearchHit firstHit = response.getHits().getAt(0);
+                assertThat(firstHit.getScore(), greaterThan(1f));
+            }
+        );

         // Test for accessing _score.doubleValue()
         script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score.doubleValue()))", params);
-        resp = prepareSearch("test").setQuery(
-            functionScoreQuery(
-                matchQuery("body", "foo"),
-                new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
-                    new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
-                    new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
-            )
-        ).get();
-        assertNoFailures(resp);
-        firstHit = resp.getHits().getAt(0);
-        assertThat(firstHit.getScore(), greaterThan(1f));
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setQuery(
+                functionScoreQuery(
+                    matchQuery("body", "foo"),
+                    new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
+                        new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
+                        new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
+                )
+            ),
+            response -> {
+                SearchHit firstHit = response.getHits().getAt(0);
+                assertThat(firstHit.getScore(), greaterThan(1f));
+            }
+        );
     }

     public void testSeedReportedInExplain() throws Exception {
@@ -243,28 +262,33 @@ public void testSeedReportedInExplain() throws Exception {

         int seed = 12345678;

-        SearchResponse resp = prepareSearch("test").setQuery(
-            functionScoreQuery(matchAllQuery(), randomFunction().seed(seed).setField(SeqNoFieldMapper.NAME))
-        ).setExplain(true).get();
-        assertNoFailures(resp);
-        assertEquals(1, resp.getHits().getTotalHits().value);
-        SearchHit firstHit = resp.getHits().getAt(0);
-        assertThat(firstHit.getExplanation().toString(), containsString("" + seed));
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), randomFunction().seed(seed).setField(SeqNoFieldMapper.NAME)))
+                .setExplain(true),
+            response -> {
+                assertNoFailures(response);
+                assertEquals(1, response.getHits().getTotalHits().value);
+                SearchHit firstHit = response.getHits().getAt(0);
+                assertThat(firstHit.getExplanation().toString(), containsString("" + seed));
+            }
+        );
     }

     public void testNoDocs() throws Exception {
         createIndex("test");
         ensureGreen();

-        SearchResponse resp = prepareSearch("test").setQuery(
-            functionScoreQuery(matchAllQuery(), randomFunction().seed(1234).setField(SeqNoFieldMapper.NAME))
-        ).get();
-        assertNoFailures(resp);
-        assertEquals(0, resp.getHits().getTotalHits().value);
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setQuery(
+                functionScoreQuery(matchAllQuery(), randomFunction().seed(1234).setField(SeqNoFieldMapper.NAME))
+            ),
+            response -> assertEquals(0, response.getHits().getTotalHits().value)
+        );

-        resp = prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), randomFunction())).get();
-        assertNoFailures(resp);
-        assertEquals(0, resp.getHits().getTotalHits().value);
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), randomFunction())),
+            response -> assertEquals(0, response.getHits().getTotalHits().value)
+        );
     }

     public void testScoreRange() throws Exception {
@@ -280,14 +304,14 @@ public void testScoreRange() throws Exception {
         refresh();
         int iters = scaledRandomIntBetween(10, 20);
         for (int i = 0; i < iters; ++i) {
-            SearchResponse searchResponse = prepareSearch().setQuery(functionScoreQuery(matchAllQuery(), randomFunction()))
-                .setSize(docCount)
-                .get();
-
-            assertNoFailures(searchResponse);
-            for (SearchHit hit : searchResponse.getHits().getHits()) {
-                assertThat(hit.getScore(), allOf(greaterThanOrEqualTo(0.0f), lessThanOrEqualTo(1.0f)));
-            }
+            assertNoFailuresAndResponse(
+                prepareSearch().setQuery(functionScoreQuery(matchAllQuery(), randomFunction())).setSize(docCount),
+                response -> {
+                    for (SearchHit hit : response.getHits().getHits()) {
+                        assertThat(hit.getScore(), allOf(greaterThanOrEqualTo(0.0f), lessThanOrEqualTo(1.0f)));
+                    }
+                }
+            );
         }
     }
@@ -338,10 +362,10 @@ public void checkDistribution() throws Exception {

         for (int i = 0; i < count; i++) {
-            SearchResponse searchResponse = prepareSearch().setQuery(functionScoreQuery(matchAllQuery(), new RandomScoreFunctionBuilder()))
-                .get();
-
-            matrix[Integer.valueOf(searchResponse.getHits().getAt(0).getId())]++;
+            assertResponse(
+                prepareSearch().setQuery(functionScoreQuery(matchAllQuery(), new RandomScoreFunctionBuilder())),
+                response -> matrix[Integer.valueOf(response.getHits().getAt(0).getId())]++
+            );
         }

         int filled = 0;
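A side effect of moving assertions into lambdas is visible throughout RandomScoreFunctionIT: Java lambdas may only capture local variables that are effectively final. That is why the diff introduces copies such as final int finalI = i for loop indices, and why testConsistentHitsWithSameSeed turns SearchHit[] hits into a one-element SearchHit[1][] holder: the lambda cannot reassign a captured local, but it may mutate the contents of a captured array. A compact, self-contained illustration of both workarounds (all names here are invented for the example):

    import java.util.function.Consumer;

    class CaptureSketch {
        static void withValue(String value, Consumer<String> callback) {
            callback.accept(value);
        }

        public static void main(String[] args) {
            final String[] lastSeen = new String[1];   // mutable holder, like hits[0] in the test
            for (int i = 0; i < 3; i++) {
                final int finalI = i;                  // 'i' itself is reassigned, so not effectively final
                withValue("hit", value -> lastSeen[0] = value + finalI);
            }
            System.out.println(lastSeen[0]);           // prints "hit2"
        }
    }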
client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("name", "New York") @@ -60,8 +59,7 @@ protected void setupSuiteScopeCluster() throws Exception { .endObject() ), // to NY: 5.286 km - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("name", "Times Square") @@ -72,8 +70,7 @@ protected void setupSuiteScopeCluster() throws Exception { .endObject() ), // to NY: 0.4621 km - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .field("name", "Tribeca") @@ -84,8 +81,7 @@ protected void setupSuiteScopeCluster() throws Exception { .endObject() ), // to NY: 1.055 km - client().prepareIndex("test") - .setId("4") + prepareIndex("test").setId("4") .setSource( jsonBuilder().startObject() .field("name", "Wall Street") @@ -96,8 +92,7 @@ protected void setupSuiteScopeCluster() throws Exception { .endObject() ), // to NY: 1.258 km - client().prepareIndex("test") - .setId("5") + prepareIndex("test").setId("5") .setSource( jsonBuilder().startObject() .field("name", "Soho") @@ -108,8 +103,7 @@ protected void setupSuiteScopeCluster() throws Exception { .endObject() ), // to NY: 2.029 km - client().prepareIndex("test") - .setId("6") + prepareIndex("test").setId("6") .setSource( jsonBuilder().startObject() .field("name", "Greenwich Village") @@ -120,8 +114,7 @@ protected void setupSuiteScopeCluster() throws Exception { .endObject() ), // to NY: 8.572 km - client().prepareIndex("test") - .setId("7") + prepareIndex("test").setId("7") .setSource( jsonBuilder().startObject() .field("name", "Brooklyn") diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java index 415de06030938..d79bb903bdb6a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.MoreLikeThisQueryBuilder; @@ -41,8 +40,10 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; @@ -225,26 +226,36 @@ public void testMoreLikeThisWithAliases() throws Exception { ); 
logger.info("Running moreLikeThis on beta shard"); - SearchResponse response = prepareSearch("beta").setQuery( - new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "1") }).minTermFreq(1).minDocFreq(1) - ).get(); - assertHitCount(response, 1L); - assertThat(response.getHits().getAt(0).getId(), equalTo("3")); - + assertResponse( + prepareSearch("beta").setQuery( + new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "1") }).minTermFreq(1).minDocFreq(1) + ), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), equalTo("3")); + } + ); logger.info("Running moreLikeThis on release shard"); - response = prepareSearch("release").setQuery( - new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "1") }).minTermFreq(1).minDocFreq(1) - ).get(); - assertHitCount(response, 1L); - assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertResponse( + prepareSearch("release").setQuery( + new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "1") }).minTermFreq(1).minDocFreq(1) + ), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + } + ); logger.info("Running moreLikeThis on alias with node client"); - response = internalCluster().coordOnlyNodeClient() - .prepareSearch("beta") - .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "1") }).minTermFreq(1).minDocFreq(1)) - .get(); - assertHitCount(response, 1L); - assertThat(response.getHits().getAt(0).getId(), equalTo("3")); + assertResponse( + internalCluster().coordOnlyNodeClient() + .prepareSearch("beta") + .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "1") }).minTermFreq(1).minDocFreq(1)), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), equalTo("3")); + } + ); } // Issue #14944 @@ -267,17 +278,20 @@ public void testMoreLikeThisWithAliasesInLikeDocuments() throws Exception { ).actionGet(); refresh(indexName); - SearchResponse response = prepareSearch().setQuery( - new MoreLikeThisQueryBuilder(null, new Item[] { new Item(aliasName, "1") }).minTermFreq(1).minDocFreq(1) - ).get(); - assertHitCount(response, 2L); - assertThat(response.getHits().getAt(0).getId(), equalTo("3")); + assertResponse( + prepareSearch().setQuery( + new MoreLikeThisQueryBuilder(null, new Item[] { new Item(aliasName, "1") }).minTermFreq(1).minDocFreq(1) + ), + response -> { + assertHitCount(response, 2L); + assertThat(response.getHits().getAt(0).getId(), equalTo("3")); + } + ); } public void testMoreLikeThisIssue2197() throws Exception { indicesAdmin().prepareCreate("foo").get(); - client().prepareIndex("foo") - .setId("1") + prepareIndex("foo").setId("1") .setSource(jsonBuilder().startObject().startObject("foo").field("bar", "boz").endObject().endObject()) .get(); indicesAdmin().prepareRefresh("foo").get(); @@ -292,8 +306,7 @@ public void testMoreLikeWithCustomRouting() throws Exception { indicesAdmin().prepareCreate("foo").get(); ensureGreen(); - client().prepareIndex("foo") - .setId("1") + prepareIndex("foo").setId("1") .setSource(jsonBuilder().startObject().startObject("foo").field("bar", "boz").endObject().endObject()) .setRouting("2") .get(); @@ -307,8 +320,7 @@ public void testMoreLikeThisIssueRoutingNotSerialized() throws Exception { assertAcked(prepareCreate("foo", 2, indexSettings(2, 0))); ensureGreen(); - client().prepareIndex("foo") - .setId("1") + prepareIndex("foo").setId("1") 
.setSource(jsonBuilder().startObject().startObject("foo").field("bar", "boz").endObject().endObject()) .setRouting("4000") .get(); @@ -334,12 +346,10 @@ public void testNumericField() throws Exception { .endObject() ).get(); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("string_value", "lucene index").field("int_value", 1).endObject()) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource(jsonBuilder().startObject().field("string_value", "elasticsearch index").field("int_value", 42).endObject()) .get(); @@ -547,9 +557,9 @@ public void testSimpleMoreLikeThisIds() throws Exception { logger.info("Indexing..."); List builders = new ArrayList<>(); - builders.add(client().prepareIndex("test").setSource("text", "lucene").setId("1")); - builders.add(client().prepareIndex("test").setSource("text", "lucene release").setId("2")); - builders.add(client().prepareIndex("test").setSource("text", "apache lucene").setId("3")); + builders.add(prepareIndex("test").setSource("text", "lucene").setId("1")); + builders.add(prepareIndex("test").setSource("text", "lucene release").setId("2")); + builders.add(prepareIndex("test").setSource("text", "apache lucene").setId("3")); indexRandom(true, builders); logger.info("Running MoreLikeThis"); @@ -573,10 +583,10 @@ public void testMoreLikeThisMultiValueFields() throws Exception { String[] values = { "aaaa", "bbbb", "cccc", "dddd", "eeee", "ffff", "gggg", "hhhh", "iiii", "jjjj" }; List builders = new ArrayList<>(values.length + 1); // index one document with all the values - builders.add(client().prepareIndex("test").setId("0").setSource("text", values)); + builders.add(prepareIndex("test").setId("0").setSource("text", values)); // index each document with only one of the values for (int i = 0; i < values.length; i++) { - builders.add(client().prepareIndex("test").setId(String.valueOf(i + 1)).setSource("text", values[i])); + builders.add(prepareIndex("test").setId(String.valueOf(i + 1)).setSource("text", values[i])); } indexRandom(true, builders); @@ -608,7 +618,7 @@ public void testMinimumShouldMatch() throws ExecutionException, InterruptedExcep for (int j = 1; j <= 10 - i; j++) { text += j + " "; } - builders.add(client().prepareIndex("test").setId(i + "").setSource("text", text)); + builders.add(prepareIndex("test").setId(i + "").setSource("text", text)); } indexRandom(true, builders); @@ -620,13 +630,14 @@ public void testMinimumShouldMatch() throws ExecutionException, InterruptedExcep .minDocFreq(1) .minimumShouldMatch(minimumShouldMatch); logger.info("Testing with minimum_should_match = {}", minimumShouldMatch); - SearchResponse response = prepareSearch("test").setQuery(mltQuery).get(); - assertNoFailures(response); - if (minimumShouldMatch.equals("0%")) { - assertHitCount(response, 10); - } else { - assertHitCount(response, 11 - i); - } + final int finalI = i; + assertNoFailuresAndResponse(prepareSearch("test").setQuery(mltQuery), response -> { + if (minimumShouldMatch.equals("0%")) { + assertHitCount(response, 10); + } else { + assertHitCount(response, 11 - finalI); + } + }); } } @@ -642,7 +653,7 @@ public void testMoreLikeThisArtificialDocs() throws Exception { doc.field("field" + i, generateRandomStringArray(5, 10, false) + "a"); // make sure they are not all empty } doc.endObject(); - indexRandom(true, client().prepareIndex("test").setId("0").setSource(doc)); + indexRandom(true, 
prepareIndex("test").setId("0").setSource(doc)); logger.info("Checking the document matches ..."); // routing to ensure we hit the shard with the doc @@ -661,8 +672,7 @@ public void testMoreLikeThisMalformedArtificialDocs() throws Exception { logger.info("Creating an index with a single document ..."); indexRandom( true, - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("text", "Hello World!").field("date", "2009-01-01").endObject()) ); @@ -707,7 +717,7 @@ public void testMoreLikeThisUnlike() throws InterruptedException, IOException { logger.info("Indexing each field value of this document as a single document."); List builders = new ArrayList<>(); for (int i = 0; i < numFields; i++) { - builders.add(client().prepareIndex("test").setId(i + "").setSource("field" + i, i + "")); + builders.add(prepareIndex("test").setId(i + "").setSource("field" + i, i + "")); } indexRandom(true, builders); @@ -738,11 +748,9 @@ public void testSelectFields() throws IOException, ExecutionException, Interrupt indexRandom( true, - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("text", "hello world").field("text1", "elasticsearch").endObject()), - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource(jsonBuilder().startObject().field("text", "goodby moon").field("text1", "elasticsearch").endObject()) ); @@ -760,9 +768,9 @@ public void testSelectFields() throws IOException, ExecutionException, Interrupt } public void testWithRouting() throws IOException { - client().prepareIndex("index").setId("1").setRouting("3").setSource("text", "this is a document").get(); - client().prepareIndex("index").setId("2").setRouting("1").setSource("text", "this is another document").get(); - client().prepareIndex("index").setId("3").setRouting("4").setSource("text", "this is yet another document").get(); + prepareIndex("index").setId("1").setRouting("3").setSource("text", "this is a document").get(); + prepareIndex("index").setId("2").setRouting("1").setSource("text", "this is another document").get(); + prepareIndex("index").setId("3").setRouting("4").setSource("text", "this is yet another document").get(); refresh("index"); Item item = new Item("index", "2").routing("1"); @@ -773,8 +781,7 @@ public void testWithRouting() throws IOException { ); moreLikeThisQueryBuilder.minTermFreq(1); moreLikeThisQueryBuilder.minDocFreq(1); - SearchResponse searchResponse = prepareSearch("index").setQuery(moreLikeThisQueryBuilder).get(); - assertEquals(2, searchResponse.getHits().getTotalHits().value); + assertHitCount(prepareSearch("index").setQuery(moreLikeThisQueryBuilder), 2L); } // Issue #29678 diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/msearch/MultiSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/msearch/MultiSearchIT.java index aa418288b8ebf..7072594eab8ec 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/msearch/MultiSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/msearch/MultiSearchIT.java @@ -11,7 +11,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.search.MultiSearchRequest; -import org.elasticsearch.action.search.MultiSearchResponse; +import org.elasticsearch.action.search.MultiSearchResponse.Item; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.DummyQueryBuilder; @@ -23,6 +23,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; import static org.hamcrest.Matchers.equalTo; @@ -39,31 +40,33 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { public void testSimpleMultiSearch() { createIndex("test"); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("field", "xxx").get(); - client().prepareIndex("test").setId("2").setSource("field", "yyy").get(); + prepareIndex("test").setId("1").setSource("field", "xxx").get(); + prepareIndex("test").setId("2").setSource("field", "yyy").get(); refresh(); - MultiSearchResponse response = client().prepareMultiSearch() - .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "xxx"))) - .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "yyy"))) - .add(prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())) - .get(); - - for (MultiSearchResponse.Item item : response) { - assertNoFailures(item.getResponse()); - } - assertThat(response.getResponses().length, equalTo(3)); - assertHitCount(response.getResponses()[0].getResponse(), 1L); - assertHitCount(response.getResponses()[1].getResponse(), 1L); - assertHitCount(response.getResponses()[2].getResponse(), 2L); - assertFirstHit(response.getResponses()[0].getResponse(), hasId("1")); - assertFirstHit(response.getResponses()[1].getResponse(), hasId("2")); + assertResponse( + client().prepareMultiSearch() + .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "xxx"))) + .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "yyy"))) + .add(prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())), + response -> { + for (Item item : response) { + assertNoFailures(item.getResponse()); + } + assertThat(response.getResponses().length, equalTo(3)); + assertHitCount(response.getResponses()[0].getResponse(), 1L); + assertHitCount(response.getResponses()[1].getResponse(), 1L); + assertHitCount(response.getResponses()[2].getResponse(), 2L); + assertFirstHit(response.getResponses()[0].getResponse(), hasId("1")); + assertFirstHit(response.getResponses()[1].getResponse(), hasId("2")); + } + ); } - public void testSimpleMultiSearchMoreRequests() { + public void testSimpleMultiSearchMoreRequests() throws Exception { createIndex("test"); int numDocs = randomIntBetween(0, 16); for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); } refresh(); @@ -75,13 +78,13 @@ public void testSimpleMultiSearchMoreRequests() { for (int i = 0; i < numSearchRequests; i++) { request.add(prepareSearch("test")); } - - MultiSearchResponse response = client().multiSearch(request).actionGet(); - assertThat(response.getResponses().length, equalTo(numSearchRequests)); - for (MultiSearchResponse.Item item : response) { - assertNoFailures(item.getResponse()); - assertHitCount(item.getResponse(), numDocs); - } + assertResponse(client().multiSearch(request), response -> { + 
assertThat(response.getResponses().length, equalTo(numSearchRequests)); + for (Item item : response) { + assertNoFailures(item.getResponse()); + assertHitCount(item.getResponse(), numDocs); + } + }); } /** @@ -92,26 +95,28 @@ public void testCCSCheckCompatibility() throws Exception { TransportVersion transportVersion = TransportVersionUtils.getNextVersion(TransportVersions.MINIMUM_CCS_VERSION, true); createIndex("test"); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("field", "xxx").get(); - client().prepareIndex("test").setId("2").setSource("field", "yyy").get(); + prepareIndex("test").setId("1").setSource("field", "xxx").get(); + prepareIndex("test").setId("2").setSource("field", "yyy").get(); refresh(); - MultiSearchResponse response = client().prepareMultiSearch() - .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "xxx"))) - .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "yyy"))) - .add(prepareSearch("test").setQuery(new DummyQueryBuilder() { - @Override - public TransportVersion getMinimalSupportedVersion() { - return transportVersion; - } - })) - .get(); - - assertThat(response.getResponses().length, equalTo(3)); - assertHitCount(response.getResponses()[0].getResponse(), 1L); - assertHitCount(response.getResponses()[1].getResponse(), 1L); - assertTrue(response.getResponses()[2].isFailure()); - assertTrue( - response.getResponses()[2].getFailure().getMessage().contains("the 'search.check_ccs_compatibility' setting is enabled") + assertResponse( + client().prepareMultiSearch() + .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "xxx"))) + .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "yyy"))) + .add(prepareSearch("test").setQuery(new DummyQueryBuilder() { + @Override + public TransportVersion getMinimalSupportedVersion() { + return transportVersion; + } + })), + response -> { + assertThat(response.getResponses().length, equalTo(3)); + assertHitCount(response.getResponses()[0].getResponse(), 1L); + assertHitCount(response.getResponses()[1].getResponse(), 1L); + assertTrue(response.getResponses()[2].isFailure()); + assertTrue( + response.getResponses()[2].getFailure().getMessage().contains("the 'search.check_ccs_compatibility' setting is enabled") + ); + } ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/NestedWithMinScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/NestedWithMinScoreIT.java index e238a254b7843..245fb1651f4d6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/NestedWithMinScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/NestedWithMinScoreIT.java @@ -95,7 +95,7 @@ public void testNestedWithMinScore() throws Exception { doc.endArray(); doc.endObject(); - client().prepareIndex("test").setId("d1").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).setSource(doc).get(); + prepareIndex("test").setId("d1").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).setSource(doc).get(); final BoolQueryBuilder childQuery = new BoolQueryBuilder().filter( new MatchPhraseQueryBuilder("toolTracks.data", "cash dispenser, automated teller machine, automatic teller machine") ).filter(new RangeQueryBuilder("toolTracks.confidence").from(0.8)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java index 
736796d73f164..29a3e589e7923 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.settings.Settings; @@ -37,7 +36,10 @@ import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -51,13 +53,10 @@ public void testSimpleNested() throws Exception { ensureGreen(); // check on no data, see it works - SearchResponse searchResponse = prepareSearch("test").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); - searchResponse = prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); + assertHitCount(prepareSearch("test"), 0L); + assertHitCount(prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")), 0L); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("field1", "value1") @@ -78,36 +77,31 @@ public void testSimpleNested() throws Exception { waitForRelocation(ClusterHealthStatus.GREEN); GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.isExists(), equalTo(true)); - assertThat(getResponse.getSourceAsBytes(), notNullValue()); + assertThat(getResponse.getSourceAsBytesRef(), notNullValue()); refresh(); // check the numDocs assertDocumentCount("test", 3); - searchResponse = prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); + assertHitCount(prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")), 0L); // search for something that matches the nested doc, and see that we don't find the nested doc - searchResponse = prepareSearch("test").setQuery(matchAllQuery()).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - searchResponse = prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); + assertHitCount(prepareSearch("test"), 1L); + assertHitCount(prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")), 0L); // now, do a nested query - searchResponse = prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)) - .get(); - 
assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - - searchResponse = prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)) - .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); + assertHitCountAndNoFailures( + prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)), + 1L + ); + assertHitCountAndNoFailures( + prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)) + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH), + 1L + ); // add another doc, one that would match if it was not nested... - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("field1", "value1") @@ -128,40 +122,44 @@ public void testSimpleNested() throws Exception { refresh(); assertDocumentCount("test", 6); - searchResponse = prepareSearch("test").setQuery( - nestedQuery( - "nested1", - boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")), - ScoreMode.Avg - ) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); + assertHitCountAndNoFailures( + prepareSearch("test").setQuery( + nestedQuery( + "nested1", + boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")), + ScoreMode.Avg + ) + ), + 1L + ); // filter - searchResponse = prepareSearch("test").setQuery( - boolQuery().must(matchAllQuery()) - .mustNot( - nestedQuery( - "nested1", - boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")), - ScoreMode.Avg + assertHitCountAndNoFailures( + prepareSearch("test").setQuery( + boolQuery().must(matchAllQuery()) + .mustNot( + nestedQuery( + "nested1", + boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")), + ScoreMode.Avg + ) + ) - ) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); + ), + 1L + ); // check with type prefix - searchResponse = prepareSearch("test").setQuery( - nestedQuery( - "nested1", - boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")), - ScoreMode.Avg - ) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); + assertHitCountAndNoFailures( + prepareSearch("test").setQuery( + nestedQuery( + "nested1", + boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")), + ScoreMode.Avg + ) + ), + 1L + ); // check delete, so all is gone...
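
The fetch-then-assert blocks removed above all collapse into the assertHitCountAndNoFailures helper imported at the top of this file. A minimal sketch of what such a helper amounts to, assuming the ref-counted SearchResponse of this branch; the body below is illustrative, not the actual ElasticsearchAssertions implementation:

    import org.elasticsearch.action.search.SearchRequestBuilder;
    import org.elasticsearch.action.search.SearchResponse;

    import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.hamcrest.Matchers.equalTo;

    // Sketch: run the request once, make both assertions, and always release the
    // response so the test cannot leak it even when an assertion fails.
    static void assertHitCountAndNoFailures(SearchRequestBuilder request, long expectedHits) {
        SearchResponse response = request.get();
        try {
            assertNoFailures(response);
            assertThat(response.getHits().getTotalHits().value, equalTo(expectedHits));
        } finally {
            response.decRef();
        }
    }

Because the helper owns the whole response lifecycle, the call sites shrink to the request builder plus the expected count, which is exactly the shape of the converted assertions above and below.
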
DeleteResponse deleteResponse = client().prepareDelete("test", "2").get(); @@ -170,10 +168,10 @@ public void testSimpleNested() throws Exception { refresh(); assertDocumentCount("test", 3); - searchResponse = prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)) - .get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); + assertHitCountAndNoFailures( + prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)), + 1L + ); } public void testMultiNested() throws Exception { @@ -197,8 +195,7 @@ public void testMultiNested() throws Exception { ); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("field", "value") @@ -238,83 +235,87 @@ public void testMultiNested() throws Exception { assertDocumentCount("test", 7); // do some multi nested queries - SearchResponse searchResponse = prepareSearch("test").setQuery( - nestedQuery("nested1", termQuery("nested1.field1", "1"), ScoreMode.Avg) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - - searchResponse = prepareSearch("test").setQuery( - nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"), ScoreMode.Avg) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - - searchResponse = prepareSearch("test").setQuery( - nestedQuery( - "nested1", - boolQuery().must(termQuery("nested1.field1", "1")) - .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"), ScoreMode.Avg)), - ScoreMode.Avg - ) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - - searchResponse = prepareSearch("test").setQuery( - nestedQuery( - "nested1", - boolQuery().must(termQuery("nested1.field1", "1")) - .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "3"), ScoreMode.Avg)), - ScoreMode.Avg - ) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - - searchResponse = prepareSearch("test").setQuery( - nestedQuery( - "nested1", - boolQuery().must(termQuery("nested1.field1", "1")) - .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "4"), ScoreMode.Avg)), - ScoreMode.Avg - ) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); - - searchResponse = prepareSearch("test").setQuery( - nestedQuery( - "nested1", - boolQuery().must(termQuery("nested1.field1", "1")) - .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "5"), ScoreMode.Avg)), - ScoreMode.Avg - ) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); - - searchResponse = prepareSearch("test").setQuery( - nestedQuery( - "nested1", - boolQuery().must(termQuery("nested1.field1", "4")) - .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "5"), ScoreMode.Avg)), - ScoreMode.Avg - ) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - - searchResponse = prepareSearch("test").setQuery( - nestedQuery( - "nested1", - boolQuery().must(termQuery("nested1.field1", "4")) - 
.must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"), ScoreMode.Avg)), - ScoreMode.Avg - ) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); + assertHitCountAndNoFailures( + prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.field1", "1"), ScoreMode.Avg)), + 1L + ); + + assertHitCountAndNoFailures( + prepareSearch("test").setQuery(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"), ScoreMode.Avg)), + 1L + ); + + assertHitCountAndNoFailures( + prepareSearch("test").setQuery( + nestedQuery( + "nested1", + boolQuery().must(termQuery("nested1.field1", "1")) + .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"), ScoreMode.Avg)), + ScoreMode.Avg + ) + ), + 1L + ); + + assertHitCountAndNoFailures( + prepareSearch("test").setQuery( + nestedQuery( + "nested1", + boolQuery().must(termQuery("nested1.field1", "1")) + .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "3"), ScoreMode.Avg)), + ScoreMode.Avg + ) + ), + 1L + ); + + assertHitCountAndNoFailures( + prepareSearch("test").setQuery( + nestedQuery( + "nested1", + boolQuery().must(termQuery("nested1.field1", "1")) + .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "4"), ScoreMode.Avg)), + ScoreMode.Avg + ) + ), + 0L + ); + + assertHitCountAndNoFailures( + prepareSearch("test").setQuery( + nestedQuery( + "nested1", + boolQuery().must(termQuery("nested1.field1", "1")) + .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "5"), ScoreMode.Avg)), + ScoreMode.Avg + ) + ), + 0L + ); + + assertHitCountAndNoFailures( + prepareSearch("test").setQuery( + nestedQuery( + "nested1", + boolQuery().must(termQuery("nested1.field1", "4")) + .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "5"), ScoreMode.Avg)), + ScoreMode.Avg + ) + ), + 1L + ); + + assertHitCountAndNoFailures( + prepareSearch("test").setQuery( + nestedQuery( + "nested1", + boolQuery().must(termQuery("nested1.field1", "4")) + .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"), ScoreMode.Avg)), + ScoreMode.Avg + ) + ), + 0L + ); } // When IncludeNestedDocsQuery is wrapped in a FilteredQuery then a in-finite loop occurs b/c of a bug in @@ -343,8 +344,7 @@ public void testDeleteNestedDocsWithAlias() throws Exception { ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("field1", "value1") @@ -362,8 +362,7 @@ public void testDeleteNestedDocsWithAlias() throws Exception { ) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("field1", "value2") @@ -403,8 +402,7 @@ public void testExplain() throws Exception { ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("field1", "value1") @@ -421,14 +419,17 @@ public void testExplain() throws Exception { .setRefreshPolicy(IMMEDIATE) .get(); - SearchResponse searchResponse = prepareSearch("test").setQuery( - nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1"), ScoreMode.Total) - ).setExplain(true).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - Explanation explanation = searchResponse.getHits().getHits()[0].getExplanation(); - 
assertThat(explanation.getValue(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertThat(explanation.toString(), startsWith("0.36464313 = Score based on 2 child docs in range from 0 to 1")); + assertNoFailuresAndResponse( + prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1"), ScoreMode.Total)) + .setExplain(true), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + Explanation explanation = response.getHits().getHits()[0].getExplanation(); + assertThat(explanation.getValue(), equalTo(response.getHits().getHits()[0].getScore())); + assertThat(explanation.toString(), startsWith("0.36464313 = Score based on 2 child docs in range from 0 to 1")); + } + ); } public void testSimpleNestedSorting() throws Exception { @@ -454,8 +455,7 @@ public void testSimpleNestedSorting() throws Exception { ); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("field1", 1) @@ -470,8 +470,7 @@ public void testSimpleNestedSorting() throws Exception { .endObject() ) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("field1", 2) @@ -486,8 +485,7 @@ public void testSimpleNestedSorting() throws Exception { .endObject() ) .get(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .field("field1", 3) @@ -504,33 +502,32 @@ public void testSimpleNestedSorting() throws Exception { .get(); refresh(); - SearchResponse searchResponse = prepareSearch("test") - - .setQuery(QueryBuilders.matchAllQuery()) - .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.ASC).setNestedSort(new NestedSortBuilder("nested1"))) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("4")); - - searchResponse = prepareSearch("test") - - .setQuery(QueryBuilders.matchAllQuery()) - .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.DESC).setNestedSort(new NestedSortBuilder("nested1"))) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("5")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("4")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("2")); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) + .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.ASC).setNestedSort(new NestedSortBuilder("nested1"))), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits()[0].getId(), equalTo("2")); +
assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("4")); + } + ); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) + .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.DESC).setNestedSort(new NestedSortBuilder("nested1"))), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("5")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("4")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("2")); + } + ); } public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { @@ -558,8 +555,7 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { ); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("field1", 1) @@ -576,8 +572,7 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { .endObject() ) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("field1", 2) @@ -596,8 +591,7 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { .get(); // Doc with missing nested docs if nested filter is used refresh(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .field("field1", 3) @@ -628,16 +622,15 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { searchRequestBuilder.setScroll("10m"); } - SearchResponse searchResponse = searchRequestBuilder.get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("4")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("10")); - + assertResponse(searchRequestBuilder, response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits()[0].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("4")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("10")); + }); searchRequestBuilder = prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) .addSort( SortBuilders.fieldSort("nested1.field1") @@ 
-650,16 +643,16 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { searchRequestBuilder.setScroll("10m"); } - searchResponse = searchRequestBuilder.get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("10")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("5")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("2")); - client().prepareClearScroll().addScrollId("_all").get(); + assertResponse(searchRequestBuilder, response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits()[0].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("10")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("5")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("2")); + client().prepareClearScroll().addScrollId("_all").get(); + }); } public void testNestedSortWithMultiLevelFiltering() throws Exception { @@ -689,7 +682,7 @@ public void testNestedSortWithMultiLevelFiltering() throws Exception { }""")); ensureGreen(); - client().prepareIndex("test").setId("1").setSource(""" + prepareIndex("test").setId("1").setSource(""" { "acl": [ { @@ -739,7 +732,7 @@ public void testNestedSortWithMultiLevelFiltering() throws Exception { ] }""", XContentType.JSON).get(); - client().prepareIndex("test").setId("2").setSource(""" + prepareIndex("test").setId("2").setSource(""" { "acl": [ { @@ -788,101 +781,106 @@ public void testNestedSortWithMultiLevelFiltering() throws Exception { refresh(); // access id = 1, read, max value, asc, should use matt and shay - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("acl.operation.user.username") - .setNestedSort( - new NestedSortBuilder("acl").setFilter(QueryBuilders.termQuery("acl.access_id", "1")) - .setNestedSort( - new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "read")) - .setNestedSort(new NestedSortBuilder("acl.operation.user")) - ) - ) - .sortMode(SortMode.MAX) - .order(SortOrder.ASC) - ) - .get(); - - assertHitCount(searchResponse, 2); - assertThat(searchResponse.getHits().getHits().length, equalTo(2)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("matt")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("shay")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("acl.operation.user.username") + .setNestedSort( + new NestedSortBuilder("acl").setFilter(QueryBuilders.termQuery("acl.access_id", "1")) + .setNestedSort( + new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "read")) + .setNestedSort(new NestedSortBuilder("acl.operation.user")) + ) + ) + 
.sortMode(SortMode.MAX) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("matt")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("shay")); + } + ); // access id = 1, read, min value, asc, should now use adrien and luca - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("acl.operation.user.username") - .setNestedSort( - new NestedSortBuilder("acl").setFilter(QueryBuilders.termQuery("acl.access_id", "1")) - .setNestedSort( - new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "read")) - .setNestedSort(new NestedSortBuilder("acl.operation.user")) - ) - ) - .sortMode(SortMode.MIN) - .order(SortOrder.ASC) - ) - .get(); - - assertHitCount(searchResponse, 2); - assertThat(searchResponse.getHits().getHits().length, equalTo(2)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("adrien")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("luca")); - - // execute, by matt or luca, by user id, sort missing first - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("acl.operation.user.id") - .setNestedSort( - new NestedSortBuilder("acl").setNestedSort( - new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "execute")) + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("acl.operation.user.username") + .setNestedSort( + new NestedSortBuilder("acl").setFilter(QueryBuilders.termQuery("acl.access_id", "1")) .setNestedSort( - new NestedSortBuilder("acl.operation.user").setFilter( - QueryBuilders.termsQuery("acl.operation.user.username", "matt", "luca") - ) + new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "read")) + .setNestedSort(new NestedSortBuilder("acl.operation.user")) ) ) - ) - .missing("_first") - .sortMode(SortMode.MIN) - .order(SortOrder.DESC) - ) - .get(); - - assertHitCount(searchResponse, 2); - assertThat(searchResponse.getHits().getHits().length, equalTo(2)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); // missing first - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("1")); - + .sortMode(SortMode.MIN) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("adrien")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("luca")); + } + ); + // execute, by matt or luca, by user id, sort missing first + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + 
SortBuilders.fieldSort("acl.operation.user.id") + .setNestedSort( + new NestedSortBuilder("acl").setNestedSort( + new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "execute")) + .setNestedSort( + new NestedSortBuilder("acl.operation.user").setFilter( + QueryBuilders.termsQuery("acl.operation.user.username", "matt", "luca") + ) + ) + ) + ) + .missing("_first") + .sortMode(SortMode.MIN) + .order(SortOrder.DESC) + ), + response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); // missing first + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("1")); + } + ); // execute, by matt or luca, by username, sort missing last (default) - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("acl.operation.user.username") - .setNestedSort( - new NestedSortBuilder("acl").setNestedSort( - new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "execute")) - .setNestedSort( - new NestedSortBuilder("acl.operation.user").setFilter( - QueryBuilders.termsQuery("acl.operation.user.username", "matt", "luca") + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("acl.operation.user.username") + .setNestedSort( + new NestedSortBuilder("acl").setNestedSort( + new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "execute")) + .setNestedSort( + new NestedSortBuilder("acl.operation.user").setFilter( + QueryBuilders.termsQuery("acl.operation.user.username", "matt", "luca") + ) ) - ) + ) ) - ) - .sortMode(SortMode.MIN) - .order(SortOrder.DESC) - ) - .get(); - - assertHitCount(searchResponse, 2); - assertThat(searchResponse.getHits().getHits().length, equalTo(2)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("luca")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("1")); // missing last + .sortMode(SortMode.MIN) + .order(SortOrder.DESC) + ), + response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("luca")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("1")); // missing last + } + ); } // https://github.com/elastic/elasticsearch/issues/31554 @@ -914,7 +912,7 @@ public void testLeakingSortValues() throws Exception { """)); ensureGreen(); - client().prepareIndex("test").setId("1").setSource(""" + prepareIndex("test").setId("1").setSource(""" { "nested1": [ { @@ -928,7 +926,7 @@ public void testLeakingSortValues() throws Exception { ] }""", XContentType.JSON).get(); - client().prepareIndex("test").setId("2").setSource(""" + prepareIndex("test").setId("2").setSource(""" { "nested1": [ { @@ -944,22 +942,25 @@ public void testLeakingSortValues() throws Exception { refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(termQuery("_id", 2)) - .addSort( - SortBuilders.fieldSort("nested1.nested2.sortVal") - .setNestedSort( - new NestedSortBuilder("nested1").setNestedSort( - new 
NestedSortBuilder("nested1.nested2").setFilter(termQuery("nested1.nested2.nested2_keyword", "nested2_bar")) + assertResponse( + prepareSearch().setQuery(termQuery("_id", 2)) + .addSort( + SortBuilders.fieldSort("nested1.nested2.sortVal") + .setNestedSort( + new NestedSortBuilder("nested1").setNestedSort( + new NestedSortBuilder("nested1.nested2").setFilter( + termQuery("nested1.nested2.nested2_keyword", "nested2_bar") + ) + ) ) - ) - ) - .get(); - - assertHitCount(searchResponse, 1); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("2")); - + ), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("2")); + } + ); } public void testSortNestedWithNestedFilter() throws Exception { @@ -996,8 +997,7 @@ public void testSortNestedWithNestedFilter() throws Exception { ensureGreen(); // sum: 11 - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("grand_parent_values", 1L) @@ -1039,8 +1039,7 @@ public void testSortNestedWithNestedFilter() throws Exception { .get(); // sum: 7 - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("grand_parent_values", 2L) @@ -1082,8 +1081,7 @@ public void testSortNestedWithNestedFilter() throws Exception { .get(); // sum: 2 - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .field("grand_parent_values", 3L) @@ -1126,215 +1124,236 @@ public void testSortNestedWithNestedFilter() throws Exception { refresh(); // Without nested filter - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort(new NestedSortBuilder("parent.child")) - .order(SortOrder.ASC) - ) - .get(); - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("-3")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("-2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("-1")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort(new NestedSortBuilder("parent.child")) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("-3")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("-2")); + assertThat(response.getHits().getHits()[2].getId(), 
equalTo("1")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("-1")); + } + ); // With nested filter NestedSortBuilder nestedSort = new NestedSortBuilder("parent.child"); nestedSort.setFilter(QueryBuilders.termQuery("parent.child.filter", true)); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("parent.child.child_values").setNestedSort(nestedSort).order(SortOrder.ASC)) - .get(); - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("parent.child.child_values").setNestedSort(nestedSort).order(SortOrder.ASC)), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + } + ); // Nested path should be automatically detected, expect same results as above search request - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("parent.child.child_values").setNestedSort(nestedSort).order(SortOrder.ASC)) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("parent.child.child_values").setNestedSort(nestedSort).order(SortOrder.ASC)), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + } + ); 
nestedSort.setFilter(QueryBuilders.termQuery("parent.filter", false)); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("parent.parent_values").setNestedSort(nestedSort).order(SortOrder.ASC)) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort( - new NestedSortBuilder("parent").setFilter(QueryBuilders.termQuery("parent.filter", false)) - .setNestedSort(new NestedSortBuilder("parent.child")) - ) - .sortMode(SortMode.MAX) - .order(SortOrder.ASC) - ) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("4")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("6")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("parent.parent_values").setNestedSort(nestedSort).order(SortOrder.ASC)), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + } + ); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort( + new NestedSortBuilder("parent").setFilter(QueryBuilders.termQuery("parent.filter", false)) + .setNestedSort(new NestedSortBuilder("parent.child")) + ) + .sortMode(SortMode.MAX) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("3")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("4")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("1")); + 
assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("6")); + } + ); // Check if closest nested type is resolved - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_obj.value") - .setNestedSort(new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true))) - .order(SortOrder.ASC) - ) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_obj.value") + .setNestedSort( + new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true)) + ) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + } + ); // Sort mode: sum - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort(new NestedSortBuilder("parent.child")) - .sortMode(SortMode.SUM) - .order(SortOrder.ASC) - ) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("7")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("11")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort(new NestedSortBuilder("parent.child")) - .sortMode(SortMode.SUM) - .order(SortOrder.DESC) - ) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("11")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("7")); - 
assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("2")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort(new NestedSortBuilder("parent.child")) + .sortMode(SortMode.SUM) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("7")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("11")); + } + ); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort(new NestedSortBuilder("parent.child")) + .sortMode(SortMode.SUM) + .order(SortOrder.DESC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("11")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("7")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("2")); + } + ); // Sort mode: sum with filter - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort(new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true))) - .sortMode(SortMode.SUM) - .order(SortOrder.ASC) - ) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort( + new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true)) + ) + .sortMode(SortMode.SUM) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), 
equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + } + ); // Sort mode: avg - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort(new NestedSortBuilder("parent.child")) - .sortMode(SortMode.AVG) - .order(SortOrder.ASC) - ) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort(new NestedSortBuilder("parent.child")) - .sortMode(SortMode.AVG) - .order(SortOrder.DESC) - ) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("1")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort(new NestedSortBuilder("parent.child")) + .sortMode(SortMode.AVG) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + } + ); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort(new NestedSortBuilder("parent.child")) + .sortMode(SortMode.AVG) + .order(SortOrder.DESC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("3")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + 
assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("1")); + } + ); // Sort mode: avg with filter - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort(new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true))) - .sortMode(SortMode.AVG) - .order(SortOrder.ASC) - ) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort( + new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true)) + ) + .sortMode(SortMode.AVG) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + } + ); } // Issue #9305 @@ -1373,8 +1392,7 @@ public void testNestedSortingWithNestedFilterAsFilter() throws Exception { ) ); - DocWriteResponse indexResponse1 = client().prepareIndex("test") - .setId("1") + DocWriteResponse indexResponse1 = prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("officelocation", "gendale") @@ -1427,8 +1445,7 @@ public void testNestedSortingWithNestedFilterAsFilter() throws Exception { .get(); assertTrue(indexResponse1.getShardInfo().getSuccessful() > 0); - DocWriteResponse indexResponse2 = client().prepareIndex("test") - .setId("2") + DocWriteResponse indexResponse2 = prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("officelocation", "gendale") @@ -1482,27 +1499,30 @@ public void testNestedSortingWithNestedFilterAsFilter() throws Exception { assertTrue(indexResponse2.getShardInfo().getSuccessful() > 0); refresh(); - SearchResponse searchResponse = prepareSearch("test").addSort( - SortBuilders.fieldSort("users.first").setNestedSort(new NestedSortBuilder("users")).order(SortOrder.ASC) - ) - .addSort( - SortBuilders.fieldSort("users.first") - .order(SortOrder.ASC) - .setNestedSort( - new NestedSortBuilder("users").setFilter( - nestedQuery("users.workstations", termQuery("users.workstations.stationid", "s5"), ScoreMode.Avg) - ) - ) + assertNoFailuresAndResponse( + prepareSearch("test").addSort( + SortBuilders.fieldSort("users.first").setNestedSort(new NestedSortBuilder("users")).order(SortOrder.ASC) ) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 
2); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0].toString(), equalTo("fname1")); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[1].toString(), equalTo("fname1")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0].toString(), equalTo("fname1")); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[1].toString(), equalTo("fname3")); + .addSort( + SortBuilders.fieldSort("users.first") + .order(SortOrder.ASC) + .setNestedSort( + new NestedSortBuilder("users").setFilter( + nestedQuery("users.workstations", termQuery("users.workstations.stationid", "s5"), ScoreMode.Avg) + ) + ) + ), + response -> { + assertNoFailures(response); + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("fname1")); + assertThat(response.getHits().getAt(0).getSortValues()[1].toString(), equalTo("fname1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("fname1")); + assertThat(response.getHits().getAt(1).getSortValues()[1].toString(), equalTo("fname3")); + } + ); } public void testCheckFixedBitSetCache() throws Exception { @@ -1513,8 +1533,8 @@ public void testCheckFixedBitSetCache() throws Exception { } assertAcked(prepareCreate("test").setSettings(settingsBuilder)); - client().prepareIndex("test").setId("0").setSource("field", "value").get(); - client().prepareIndex("test").setId("1").setSource("field", "value").get(); + prepareIndex("test").setId("0").setSource("field", "value").get(); + prepareIndex("test").setId("1").setSource("field", "value").get(); refresh(); ensureSearchable("test"); @@ -1533,11 +1553,11 @@ public void testCheckFixedBitSetCache() throws Exception { .endArray() .endObject(); // index simple data - client().prepareIndex("test").setId("2").setSource(builder).get(); - client().prepareIndex("test").setId("3").setSource(builder).get(); - client().prepareIndex("test").setId("4").setSource(builder).get(); - client().prepareIndex("test").setId("5").setSource(builder).get(); - client().prepareIndex("test").setId("6").setSource(builder).get(); + prepareIndex("test").setId("2").setSource(builder).get(); + prepareIndex("test").setId("3").setSource(builder).get(); + prepareIndex("test").setId("4").setSource(builder).get(); + prepareIndex("test").setId("5").setSource(builder).get(); + prepareIndex("test").setId("6").setSource(builder).get(); refresh(); ensureSearchable("test"); @@ -1546,11 +1566,10 @@ public void testCheckFixedBitSetCache() throws Exception { assertThat(clusterStatsResponse.getIndicesStats().getSegments().getBitsetMemoryInBytes(), equalTo(0L)); // only when querying with nested the fixed bitsets are loaded - SearchResponse searchResponse = prepareSearch("test").setQuery( - nestedQuery("array1", termQuery("array1.field1", "value1"), ScoreMode.Avg) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(5L)); + assertHitCountAndNoFailures( + prepareSearch("test").setQuery(nestedQuery("array1", termQuery("array1.field1", "value1"), ScoreMode.Avg)), + 5L + ); } clusterStatsResponse = clusterAdmin().prepareClusterStats().get(); assertThat(clusterStatsResponse.getIndicesStats().getSegments().getBitsetMemoryInBytes(), 
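Two further shorthands show up in the hunks above: `prepareIndex("test")` is the base-test convenience for `client().prepareIndex("test")`, and `assertHitCountAndNoFailures` collapses the old run-then-assert triple (execute, `assertNoFailures`, compare `getTotalHits().value`) into one call that also releases the response. A sketch of how the combined assertion can be composed from the wrapper shown earlier; the real helpers live in `ElasticsearchAssertions`:

```java
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;

final class HitCountAssertionSketch {
    // Sketch: the combined helper is just assertResponse plus the two checks
    // that the removed lines used to perform by hand.
    static void assertHitCountAndNoFailures(SearchRequestBuilder builder, long expectedHits) {
        assertResponse(builder, (SearchResponse response) -> {
            assertNoFailures(response);
            assertThat(response.getHits().getTotalHits().value, equalTo(expectedHits));
        });
    }
}
```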
greaterThan(0L)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java index 9219641f1d3bf..3dd9e68cf08af 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.nested; import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.vectors.KnnSearchBuilder; @@ -18,6 +17,7 @@ import java.util.List; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -47,8 +47,7 @@ public void testSimpleNested() throws Exception { ); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .startArray("nested") @@ -63,13 +62,14 @@ public void testSimpleNested() throws Exception { waitForRelocation(ClusterHealthStatus.GREEN); GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.isExists(), equalTo(true)); - assertThat(getResponse.getSourceAsBytes(), notNullValue()); + assertThat(getResponse.getSourceAsBytesRef(), notNullValue()); refresh(); - SearchResponse searchResponse = prepareSearch("test").setKnnSearch( - List.of(new KnnSearchBuilder("nested.vector", new float[] { 1, 1, 1 }, 1, 1, null)) - ).setAllowPartialSearchResults(false).get(); - assertThat(searchResponse.getHits().getHits().length, greaterThan(0)); + assertResponse( + prepareSearch("test").setKnnSearch(List.of(new KnnSearchBuilder("nested.vector", new float[] { 1, 1, 1 }, 1, 1, null))) + .setAllowPartialSearchResults(false), + response -> assertThat(response.getHits().getHits().length, greaterThan(0)) + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java index 526d523bb0638..0acf9be574ffe 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.profile.aggregation; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; @@ -42,7 +41,7 @@ import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static 
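One more substitution, in the vector test just above: the null check on the fetched source now goes through `getSourceAsBytesRef()`. The difference is allocation, as this sketch illustrates:

```java
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.common.bytes.BytesReference;

final class SourceAccessSketch {
    // getSourceAsBytes() has to materialise a byte[] (typically copying the
    // stored source), while getSourceAsBytesRef() exposes the backing
    // BytesReference directly. A notNullValue() assertion needs no copy.
    static void compare(GetResponse get) {
        byte[] copied = get.getSourceAsBytes();            // allocates a fresh array
        BytesReference shared = get.getSourceAsBytesRef(); // no copy
        assert copied.length == shared.length();
    }
}
```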
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -106,14 +105,13 @@ protected void setupSuiteScopeCluster() throws Exception { for (int i = 0; i < 5; i++) { builders.add( - client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field(STRING_FIELD, randomFrom(randomStrings)) - .field(NUMBER_FIELD, randomIntBetween(0, 9)) - .field(TAG_FIELD, randomBoolean() ? "more" : "less") - .endObject() - ) + prepareIndex("idx").setSource( + jsonBuilder().startObject() + .field(STRING_FIELD, randomFrom(randomStrings)) + .field(NUMBER_FIELD, randomIntBetween(0, 9)) + .field(TAG_FIELD, randomBoolean() ? "more" : "less") + .endObject() + ) ); } @@ -122,110 +120,113 @@ protected void setupSuiteScopeCluster() throws Exception { } public void testSimpleProfile() { - SearchResponse response = prepareSearch("idx").setProfile(true) - .addAggregation(histogram("histo").field(NUMBER_FIELD).interval(1L)) - .get(); - assertNoFailures(response); - Map profileResults = response.getProfileResults(); - assertThat(profileResults, notNullValue()); - assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); - for (SearchProfileShardResult profileShardResult : profileResults.values()) { - assertThat(profileShardResult, notNullValue()); - AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); - assertThat(aggProfileResults, notNullValue()); - List aggProfileResultsList = aggProfileResults.getProfileResults(); - assertThat(aggProfileResultsList, notNullValue()); - assertThat(aggProfileResultsList.size(), equalTo(1)); - ProfileResult histoAggResult = aggProfileResultsList.get(0); - assertThat(histoAggResult, notNullValue()); - assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator")); - assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); - assertThat(histoAggResult.getProfiledChildren().size(), equalTo(0)); - assertThat(histoAggResult.getTime(), greaterThan(0L)); - Map breakdown = histoAggResult.getTimeBreakdown(); - assertThat(breakdown, notNullValue()); - assertThat(breakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(breakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(breakdown.get(COLLECT), greaterThan(0L)); - assertThat(breakdown.get(BUILD_AGGREGATION).longValue(), greaterThan(0L)); - assertThat(breakdown.get(REDUCE), equalTo(0L)); - assertMap( - histoAggResult.getDebugInfo(), - matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0)) - ); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setProfile(true).addAggregation(histogram("histo").field(NUMBER_FIELD).interval(1L)), + response -> { + Map profileResults = response.getProfileResults(); + assertThat(profileResults, notNullValue()); + assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); + for (SearchProfileShardResult profileShardResult : profileResults.values()) { + assertThat(profileShardResult, notNullValue()); + AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); + assertThat(aggProfileResults, notNullValue()); + List aggProfileResultsList = aggProfileResults.getProfileResults(); + assertThat(aggProfileResultsList, notNullValue()); + assertThat(aggProfileResultsList.size(), equalTo(1)); + ProfileResult histoAggResult = 
aggProfileResultsList.get(0); + assertThat(histoAggResult, notNullValue()); + assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator")); + assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); + assertThat(histoAggResult.getProfiledChildren().size(), equalTo(0)); + assertThat(histoAggResult.getTime(), greaterThan(0L)); + Map breakdown = histoAggResult.getTimeBreakdown(); + assertThat(breakdown, notNullValue()); + assertThat(breakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(breakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(breakdown.get(COLLECT), greaterThan(0L)); + assertThat(breakdown.get(BUILD_AGGREGATION).longValue(), greaterThan(0L)); + assertThat(breakdown.get(REDUCE), equalTo(0L)); + assertMap( + histoAggResult.getDebugInfo(), + matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0)) + ); + } + } + ); } public void testMultiLevelProfile() { - SearchResponse response = prepareSearch("idx").setProfile(true) - .addAggregation( - histogram("histo").field(NUMBER_FIELD) - .interval(1L) - .subAggregation( - terms("terms").field(TAG_FIELD) - .order(BucketOrder.aggregation("avg", false)) - .subAggregation(avg("avg").field(NUMBER_FIELD)) - ) - ) - .get(); - assertNoFailures(response); - Map profileResults = response.getProfileResults(); - assertThat(profileResults, notNullValue()); - assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); - for (SearchProfileShardResult profileShardResult : profileResults.values()) { - assertThat(profileShardResult, notNullValue()); - AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); - assertThat(aggProfileResults, notNullValue()); - List aggProfileResultsList = aggProfileResults.getProfileResults(); - assertThat(aggProfileResultsList, notNullValue()); - assertThat(aggProfileResultsList.size(), equalTo(1)); - ProfileResult histoAggResult = aggProfileResultsList.get(0); - assertThat(histoAggResult, notNullValue()); - assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator")); - assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); - assertThat(histoAggResult.getTime(), greaterThan(0L)); - Map histoBreakdown = histoAggResult.getTimeBreakdown(); - assertThat(histoBreakdown, notNullValue()); - assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(histoBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(histoBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(histoBreakdown.get(REDUCE), equalTo(0L)); - assertMap( - histoAggResult.getDebugInfo(), - matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0)) - ); - - ProfileResult termsAggResult = histoAggResult.getProfiledChildren().get(0); - assertThat(termsAggResult, notNullValue()); - assertThat(termsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); - assertThat(termsAggResult.getLuceneDescription(), equalTo("terms")); - assertThat(termsAggResult.getTime(), greaterThan(0L)); - Map termsBreakdown = termsAggResult.getTimeBreakdown(); - assertThat(termsBreakdown, notNullValue()); - assertThat(termsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(termsBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(termsBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(termsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - 
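Inside the new lambdas the per-aggregator profile checks stay byte-for-byte identical to the removed code, which makes the repetition easy to see. Purely as an illustration (not part of this change), the recurring time-breakdown block could be factored into a helper inside the test class, reusing the `BREAKDOWN_KEYS`, `INITIALIZE`, `COLLECT`, `BUILD_AGGREGATION`, and `REDUCE` constants it already defines:

```java
import java.util.Map;

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.notNullValue;

// Hypothetical condensation of the assertions repeated for every aggregator:
// every phase that ran must have taken time, while reduce happens on the
// coordinating node, so the shard-level reduce time must still be zero.
static void assertShardBreakdown(Map<String, Long> breakdown) {
    assertThat(breakdown, notNullValue());
    assertThat(breakdown.keySet(), equalTo(BREAKDOWN_KEYS));
    assertThat(breakdown.get(INITIALIZE), greaterThan(0L));
    assertThat(breakdown.get(COLLECT), greaterThan(0L));
    assertThat(breakdown.get(BUILD_AGGREGATION), greaterThan(0L));
    assertThat(breakdown.get(REDUCE), equalTo(0L));
}
```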
assertThat(termsBreakdown.get(REDUCE), equalTo(0L)); - assertRemapTermsDebugInfo(termsAggResult); - assertThat(termsAggResult.getProfiledChildren().size(), equalTo(1)); - - ProfileResult avgAggResult = termsAggResult.getProfiledChildren().get(0); - assertThat(avgAggResult, notNullValue()); - assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator")); - assertThat(avgAggResult.getLuceneDescription(), equalTo("avg")); - assertThat(avgAggResult.getTime(), greaterThan(0L)); - Map avgBreakdown = avgAggResult.getTimeBreakdown(); - assertThat(avgBreakdown, notNullValue()); - assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); - assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); - assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setProfile(true) + .addAggregation( + histogram("histo").field(NUMBER_FIELD) + .interval(1L) + .subAggregation( + terms("terms").field(TAG_FIELD) + .order(BucketOrder.aggregation("avg", false)) + .subAggregation(avg("avg").field(NUMBER_FIELD)) + ) + ), + response -> { + Map profileResults = response.getProfileResults(); + assertThat(profileResults, notNullValue()); + assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); + for (SearchProfileShardResult profileShardResult : profileResults.values()) { + assertThat(profileShardResult, notNullValue()); + AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); + assertThat(aggProfileResults, notNullValue()); + List aggProfileResultsList = aggProfileResults.getProfileResults(); + assertThat(aggProfileResultsList, notNullValue()); + assertThat(aggProfileResultsList.size(), equalTo(1)); + ProfileResult histoAggResult = aggProfileResultsList.get(0); + assertThat(histoAggResult, notNullValue()); + assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator")); + assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); + assertThat(histoAggResult.getTime(), greaterThan(0L)); + Map histoBreakdown = histoAggResult.getTimeBreakdown(); + assertThat(histoBreakdown, notNullValue()); + assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(histoBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(histoBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(histoBreakdown.get(REDUCE), equalTo(0L)); + assertMap( + histoAggResult.getDebugInfo(), + matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0)) + ); + + ProfileResult termsAggResult = histoAggResult.getProfiledChildren().get(0); + assertThat(termsAggResult, notNullValue()); + assertThat(termsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); + assertThat(termsAggResult.getLuceneDescription(), equalTo("terms")); + assertThat(termsAggResult.getTime(), greaterThan(0L)); + Map termsBreakdown = termsAggResult.getTimeBreakdown(); + assertThat(termsBreakdown, notNullValue()); + assertThat(termsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(termsBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(termsBreakdown.get(COLLECT), greaterThan(0L)); + 
assertThat(termsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(termsBreakdown.get(REDUCE), equalTo(0L)); + assertRemapTermsDebugInfo(termsAggResult); + assertThat(termsAggResult.getProfiledChildren().size(), equalTo(1)); + + ProfileResult avgAggResult = termsAggResult.getProfiledChildren().get(0); + assertThat(avgAggResult, notNullValue()); + assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator")); + assertThat(avgAggResult.getLuceneDescription(), equalTo("avg")); + assertThat(avgAggResult.getTime(), greaterThan(0L)); + Map avgBreakdown = avgAggResult.getTimeBreakdown(); + assertThat(avgBreakdown, notNullValue()); + assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); + assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); + assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); + } + } + ); } private void assertRemapTermsDebugInfo(ProfileResult termsAggResult, String... deferredAggregators) { @@ -243,375 +244,386 @@ private void assertRemapTermsDebugInfo(ProfileResult termsAggResult, String... d } public void testMultiLevelProfileBreadthFirst() { - SearchResponse response = prepareSearch("idx").setProfile(true) - .addAggregation( - histogram("histo").field(NUMBER_FIELD) - .interval(1L) - .subAggregation( - terms("terms").collectMode(SubAggCollectionMode.BREADTH_FIRST) - .field(TAG_FIELD) - .subAggregation(avg("avg").field(NUMBER_FIELD)) - ) - ) - .get(); - assertNoFailures(response); - Map profileResults = response.getProfileResults(); - assertThat(profileResults, notNullValue()); - assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); - for (SearchProfileShardResult profileShardResult : profileResults.values()) { - assertThat(profileShardResult, notNullValue()); - AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); - assertThat(aggProfileResults, notNullValue()); - List aggProfileResultsList = aggProfileResults.getProfileResults(); - assertThat(aggProfileResultsList, notNullValue()); - assertThat(aggProfileResultsList.size(), equalTo(1)); - ProfileResult histoAggResult = aggProfileResultsList.get(0); - assertThat(histoAggResult, notNullValue()); - assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator")); - assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); - assertThat(histoAggResult.getTime(), greaterThan(0L)); - Map histoBreakdown = histoAggResult.getTimeBreakdown(); - assertThat(histoBreakdown, notNullValue()); - assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(histoBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(histoBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(histoBreakdown.get(REDUCE), equalTo(0L)); - assertMap( - histoAggResult.getDebugInfo(), - matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0)) - ); - assertThat(histoAggResult.getProfiledChildren().size(), equalTo(1)); - - ProfileResult termsAggResult = histoAggResult.getProfiledChildren().get(0); - assertThat(termsAggResult, notNullValue()); - assertThat(termsAggResult.getQueryName(), 
equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); - assertThat(termsAggResult.getLuceneDescription(), equalTo("terms")); - assertThat(termsAggResult.getTime(), greaterThan(0L)); - Map termsBreakdown = termsAggResult.getTimeBreakdown(); - assertThat(termsBreakdown, notNullValue()); - assertThat(termsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(termsBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(termsBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(termsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(termsBreakdown.get(REDUCE), equalTo(0L)); - assertRemapTermsDebugInfo(termsAggResult, "avg"); - assertThat(termsAggResult.getProfiledChildren().size(), equalTo(1)); - - ProfileResult avgAggResult = termsAggResult.getProfiledChildren().get(0); - assertThat(avgAggResult, notNullValue()); - assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator")); - assertThat(avgAggResult.getLuceneDescription(), equalTo("avg")); - assertThat(avgAggResult.getTime(), greaterThan(0L)); - Map avgBreakdown = avgAggResult.getTimeBreakdown(); - assertThat(avgBreakdown, notNullValue()); - assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); - assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); - assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setProfile(true) + .addAggregation( + histogram("histo").field(NUMBER_FIELD) + .interval(1L) + .subAggregation( + terms("terms").collectMode(SubAggCollectionMode.BREADTH_FIRST) + .field(TAG_FIELD) + .subAggregation(avg("avg").field(NUMBER_FIELD)) + ) + ), + response -> { + Map profileResults = response.getProfileResults(); + assertThat(profileResults, notNullValue()); + assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); + for (SearchProfileShardResult profileShardResult : profileResults.values()) { + assertThat(profileShardResult, notNullValue()); + AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); + assertThat(aggProfileResults, notNullValue()); + List aggProfileResultsList = aggProfileResults.getProfileResults(); + assertThat(aggProfileResultsList, notNullValue()); + assertThat(aggProfileResultsList.size(), equalTo(1)); + ProfileResult histoAggResult = aggProfileResultsList.get(0); + assertThat(histoAggResult, notNullValue()); + assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator")); + assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); + assertThat(histoAggResult.getTime(), greaterThan(0L)); + Map histoBreakdown = histoAggResult.getTimeBreakdown(); + assertThat(histoBreakdown, notNullValue()); + assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(histoBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(histoBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(histoBreakdown.get(REDUCE), equalTo(0L)); + assertMap( + histoAggResult.getDebugInfo(), + matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0)) + ); + assertThat(histoAggResult.getProfiledChildren().size(), equalTo(1)); + + ProfileResult 
termsAggResult = histoAggResult.getProfiledChildren().get(0); + assertThat(termsAggResult, notNullValue()); + assertThat(termsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); + assertThat(termsAggResult.getLuceneDescription(), equalTo("terms")); + assertThat(termsAggResult.getTime(), greaterThan(0L)); + Map termsBreakdown = termsAggResult.getTimeBreakdown(); + assertThat(termsBreakdown, notNullValue()); + assertThat(termsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(termsBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(termsBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(termsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(termsBreakdown.get(REDUCE), equalTo(0L)); + assertRemapTermsDebugInfo(termsAggResult, "avg"); + assertThat(termsAggResult.getProfiledChildren().size(), equalTo(1)); + + ProfileResult avgAggResult = termsAggResult.getProfiledChildren().get(0); + assertThat(avgAggResult, notNullValue()); + assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator")); + assertThat(avgAggResult.getLuceneDescription(), equalTo("avg")); + assertThat(avgAggResult.getTime(), greaterThan(0L)); + Map avgBreakdown = avgAggResult.getTimeBreakdown(); + assertThat(avgBreakdown, notNullValue()); + assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); + assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); + assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); + } + } + ); } public void testDiversifiedAggProfile() { - SearchResponse response = prepareSearch("idx").setProfile(true) - .addAggregation( - diversifiedSampler("diversify").shardSize(10) - .field(STRING_FIELD) - .maxDocsPerValue(2) - .subAggregation(max("max").field(NUMBER_FIELD)) - ) - .get(); - assertNoFailures(response); - Map profileResults = response.getProfileResults(); - assertThat(profileResults, notNullValue()); - assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); - for (SearchProfileShardResult profileShardResult : profileResults.values()) { - assertThat(profileShardResult, notNullValue()); - AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); - assertThat(aggProfileResults, notNullValue()); - List aggProfileResultsList = aggProfileResults.getProfileResults(); - assertThat(aggProfileResultsList, notNullValue()); - assertThat(aggProfileResultsList.size(), equalTo(1)); - ProfileResult diversifyAggResult = aggProfileResultsList.get(0); - assertThat(diversifyAggResult, notNullValue()); - assertThat(diversifyAggResult.getQueryName(), equalTo(DiversifiedOrdinalsSamplerAggregator.class.getSimpleName())); - assertThat(diversifyAggResult.getLuceneDescription(), equalTo("diversify")); - assertThat(diversifyAggResult.getTime(), greaterThan(0L)); - Map diversifyBreakdown = diversifyAggResult.getTimeBreakdown(); - assertThat(diversifyBreakdown, notNullValue()); - assertThat(diversifyBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(diversifyBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(diversifyBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(diversifyBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(diversifyBreakdown.get(POST_COLLECTION), 
greaterThan(0L)); - assertThat(diversifyBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(diversifyBreakdown.get(REDUCE), equalTo(0L)); - assertMap(diversifyAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0)).entry(DEFERRED, List.of("max"))); - - ProfileResult maxAggResult = diversifyAggResult.getProfiledChildren().get(0); - assertThat(maxAggResult, notNullValue()); - assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator")); - assertThat(maxAggResult.getLuceneDescription(), equalTo("max")); - assertThat(maxAggResult.getTime(), greaterThan(0L)); - Map maxBreakdown = maxAggResult.getTimeBreakdown(); - assertThat(maxBreakdown, notNullValue()); - assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(diversifyBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(diversifyBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(diversifyBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(diversifyBreakdown.get(POST_COLLECTION), greaterThan(0L)); - assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(maxBreakdown.get(REDUCE), equalTo(0L)); - assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); - assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setProfile(true) + .addAggregation( + diversifiedSampler("diversify").shardSize(10) + .field(STRING_FIELD) + .maxDocsPerValue(2) + .subAggregation(max("max").field(NUMBER_FIELD)) + ), + response -> { + Map profileResults = response.getProfileResults(); + assertThat(profileResults, notNullValue()); + assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); + for (SearchProfileShardResult profileShardResult : profileResults.values()) { + assertThat(profileShardResult, notNullValue()); + AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); + assertThat(aggProfileResults, notNullValue()); + List aggProfileResultsList = aggProfileResults.getProfileResults(); + assertThat(aggProfileResultsList, notNullValue()); + assertThat(aggProfileResultsList.size(), equalTo(1)); + ProfileResult diversifyAggResult = aggProfileResultsList.get(0); + assertThat(diversifyAggResult, notNullValue()); + assertThat(diversifyAggResult.getQueryName(), equalTo(DiversifiedOrdinalsSamplerAggregator.class.getSimpleName())); + assertThat(diversifyAggResult.getLuceneDescription(), equalTo("diversify")); + assertThat(diversifyAggResult.getTime(), greaterThan(0L)); + Map diversifyBreakdown = diversifyAggResult.getTimeBreakdown(); + assertThat(diversifyBreakdown, notNullValue()); + assertThat(diversifyBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(diversifyBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(diversifyBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(diversifyBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(diversifyBreakdown.get(POST_COLLECTION), greaterThan(0L)); + assertThat(diversifyBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(diversifyBreakdown.get(REDUCE), equalTo(0L)); + assertMap( + diversifyAggResult.getDebugInfo(), + matchesMap().entry(BUILT_BUCKETS, greaterThan(0)).entry(DEFERRED, List.of("max")) + ); + + ProfileResult maxAggResult = diversifyAggResult.getProfiledChildren().get(0); + assertThat(maxAggResult, notNullValue()); + assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator")); + 
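The `DEFERRED` debug entry asserted above reflects how the diversified sampler runs: the `max` sub-aggregation is not collected in the first pass but replayed later over the sampled documents, so the profile lists it among the deferred aggregators (the breadth-first terms test earlier makes the same check for `avg` via `assertRemapTermsDebugInfo`). A small sketch of pulling that list out of a profile entry; the `"deferred_aggregators"` key name is my assumption about the debug-map layout:

```java
import java.util.List;
import java.util.Map;

import org.elasticsearch.search.profile.ProfileResult;

final class DeferredInfoSketch {
    // Read the names of deferred sub-aggregations from an aggregation
    // profile entry, defaulting to an empty list when nothing was deferred.
    @SuppressWarnings("unchecked")
    static List<String> deferredAggregators(ProfileResult result) {
        Map<String, Object> debug = result.getDebugInfo();
        return (List<String>) debug.getOrDefault("deferred_aggregators", List.of());
    }
}
```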
assertThat(maxAggResult.getLuceneDescription(), equalTo("max")); + assertThat(maxAggResult.getTime(), greaterThan(0L)); + Map maxBreakdown = maxAggResult.getTimeBreakdown(); + assertThat(maxBreakdown, notNullValue()); + assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(diversifyBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(diversifyBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(diversifyBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(diversifyBreakdown.get(POST_COLLECTION), greaterThan(0L)); + assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(maxBreakdown.get(REDUCE), equalTo(0L)); + assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); + assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); + } + } + ); } public void testComplexProfile() { - SearchResponse response = prepareSearch("idx").setProfile(true) - .addAggregation( - histogram("histo").field(NUMBER_FIELD) - .interval(1L) - .subAggregation( - terms("tags").field(TAG_FIELD) - .subAggregation(avg("avg").field(NUMBER_FIELD)) - .subAggregation(max("max").field(NUMBER_FIELD)) - ) - .subAggregation( - terms("strings").field(STRING_FIELD) - .subAggregation(avg("avg").field(NUMBER_FIELD)) - .subAggregation(max("max").field(NUMBER_FIELD)) - .subAggregation( - terms("tags").field(TAG_FIELD) - .subAggregation(avg("avg").field(NUMBER_FIELD)) - .subAggregation(max("max").field(NUMBER_FIELD)) - ) - ) - ) - .get(); - assertNoFailures(response); - Map profileResults = response.getProfileResults(); - assertThat(profileResults, notNullValue()); - assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); - for (SearchProfileShardResult profileShardResult : profileResults.values()) { - assertThat(profileShardResult, notNullValue()); - AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); - assertThat(aggProfileResults, notNullValue()); - List aggProfileResultsList = aggProfileResults.getProfileResults(); - assertThat(aggProfileResultsList, notNullValue()); - assertThat(aggProfileResultsList.size(), equalTo(1)); - ProfileResult histoAggResult = aggProfileResultsList.get(0); - assertThat(histoAggResult, notNullValue()); - assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator")); - assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); - assertThat(histoAggResult.getTime(), greaterThan(0L)); - Map histoBreakdown = histoAggResult.getTimeBreakdown(); - assertThat(histoBreakdown, notNullValue()); - assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(histoBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(histoBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(histoBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(histoBreakdown.get(POST_COLLECTION), greaterThan(0L)); - assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(histoBreakdown.get(REDUCE), equalTo(0L)); - assertMap( - histoAggResult.getDebugInfo(), - matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0)) - ); - assertThat(histoAggResult.getProfiledChildren().size(), equalTo(2)); - - Map histoAggResultSubAggregations = histoAggResult.getProfiledChildren() - .stream() - .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s)); - - ProfileResult tagsAggResult = histoAggResultSubAggregations.get("tags"); - assertThat(tagsAggResult, 
notNullValue()); - assertThat(tagsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); - assertThat(tagsAggResult.getTime(), greaterThan(0L)); - Map tagsBreakdown = tagsAggResult.getTimeBreakdown(); - assertThat(tagsBreakdown, notNullValue()); - assertThat(tagsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(tagsBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(tagsBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(tagsBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(tagsBreakdown.get(POST_COLLECTION), greaterThan(0L)); - assertThat(tagsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(tagsBreakdown.get(REDUCE), equalTo(0L)); - assertRemapTermsDebugInfo(tagsAggResult); - assertThat(tagsAggResult.getProfiledChildren().size(), equalTo(2)); - - Map tagsAggResultSubAggregations = tagsAggResult.getProfiledChildren() - .stream() - .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s)); - - ProfileResult avgAggResult = tagsAggResultSubAggregations.get("avg"); - assertThat(avgAggResult, notNullValue()); - assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator")); - assertThat(avgAggResult.getTime(), greaterThan(0L)); - Map avgBreakdown = avgAggResult.getTimeBreakdown(); - assertThat(avgBreakdown, notNullValue()); - assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(avgBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L)); - assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); - assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); - assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); - - ProfileResult maxAggResult = tagsAggResultSubAggregations.get("max"); - assertThat(maxAggResult, notNullValue()); - assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator")); - assertThat(maxAggResult.getTime(), greaterThan(0L)); - Map maxBreakdown = maxAggResult.getTimeBreakdown(); - assertThat(maxBreakdown, notNullValue()); - assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(maxBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L)); - assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(maxBreakdown.get(REDUCE), equalTo(0L)); - assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); - assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); - - ProfileResult stringsAggResult = histoAggResultSubAggregations.get("strings"); - assertThat(stringsAggResult, notNullValue()); - assertThat(stringsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); - assertThat(stringsAggResult.getTime(), greaterThan(0L)); - Map stringsBreakdown = stringsAggResult.getTimeBreakdown(); - assertThat(stringsBreakdown, notNullValue()); - assertThat(stringsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(stringsBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(stringsBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(stringsBreakdown.get(COLLECT), 
greaterThan(0L)); - assertThat(stringsBreakdown.get(POST_COLLECTION), greaterThan(0L)); - assertThat(stringsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(stringsBreakdown.get(REDUCE), equalTo(0L)); - assertRemapTermsDebugInfo(stringsAggResult); - assertThat(stringsAggResult.getProfiledChildren().size(), equalTo(3)); - - Map stringsAggResultSubAggregations = stringsAggResult.getProfiledChildren() - .stream() - .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s)); - - avgAggResult = stringsAggResultSubAggregations.get("avg"); - assertThat(avgAggResult, notNullValue()); - assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator")); - assertThat(avgAggResult.getTime(), greaterThan(0L)); - avgBreakdown = avgAggResult.getTimeBreakdown(); - assertThat(avgBreakdown, notNullValue()); - assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(avgBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L)); - assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); - assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); - assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); - - maxAggResult = stringsAggResultSubAggregations.get("max"); - assertThat(maxAggResult, notNullValue()); - assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator")); - assertThat(maxAggResult.getTime(), greaterThan(0L)); - maxBreakdown = maxAggResult.getTimeBreakdown(); - assertThat(maxBreakdown, notNullValue()); - assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(maxBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L)); - assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(maxBreakdown.get(REDUCE), equalTo(0L)); - assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); - assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); - - tagsAggResult = stringsAggResultSubAggregations.get("tags"); - assertThat(tagsAggResult, notNullValue()); - assertThat(tagsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); - assertThat(tagsAggResult.getLuceneDescription(), equalTo("tags")); - assertThat(tagsAggResult.getTime(), greaterThan(0L)); - tagsBreakdown = tagsAggResult.getTimeBreakdown(); - assertThat(tagsBreakdown, notNullValue()); - assertThat(tagsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(tagsBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(tagsBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(tagsBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(tagsBreakdown.get(POST_COLLECTION), greaterThan(0L)); - assertThat(tagsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(tagsBreakdown.get(REDUCE), equalTo(0L)); - assertRemapTermsDebugInfo(tagsAggResult); - assertThat(tagsAggResult.getProfiledChildren().size(), equalTo(2)); - - tagsAggResultSubAggregations = tagsAggResult.getProfiledChildren() - .stream() - .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s)); - - avgAggResult = 
tagsAggResultSubAggregations.get("avg"); - assertThat(avgAggResult, notNullValue()); - assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator")); - assertThat(avgAggResult.getTime(), greaterThan(0L)); - avgBreakdown = avgAggResult.getTimeBreakdown(); - assertThat(avgBreakdown, notNullValue()); - assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(avgBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L)); - assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); - assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); - assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); - - maxAggResult = tagsAggResultSubAggregations.get("max"); - assertThat(maxAggResult, notNullValue()); - assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator")); - assertThat(maxAggResult.getTime(), greaterThan(0L)); - maxBreakdown = maxAggResult.getTimeBreakdown(); - assertThat(maxBreakdown, notNullValue()); - assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); - assertThat(maxBreakdown.get(COLLECT), greaterThan(0L)); - assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L)); - assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); - assertThat(maxBreakdown.get(REDUCE), equalTo(0L)); - assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); - assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setProfile(true) + .addAggregation( + histogram("histo").field(NUMBER_FIELD) + .interval(1L) + .subAggregation( + terms("tags").field(TAG_FIELD) + .subAggregation(avg("avg").field(NUMBER_FIELD)) + .subAggregation(max("max").field(NUMBER_FIELD)) + ) + .subAggregation( + terms("strings").field(STRING_FIELD) + .subAggregation(avg("avg").field(NUMBER_FIELD)) + .subAggregation(max("max").field(NUMBER_FIELD)) + .subAggregation( + terms("tags").field(TAG_FIELD) + .subAggregation(avg("avg").field(NUMBER_FIELD)) + .subAggregation(max("max").field(NUMBER_FIELD)) + ) + ) + ), + response -> { + Map profileResults = response.getProfileResults(); + assertThat(profileResults, notNullValue()); + assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); + for (SearchProfileShardResult profileShardResult : profileResults.values()) { + assertThat(profileShardResult, notNullValue()); + AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); + assertThat(aggProfileResults, notNullValue()); + List aggProfileResultsList = aggProfileResults.getProfileResults(); + assertThat(aggProfileResultsList, notNullValue()); + assertThat(aggProfileResultsList.size(), equalTo(1)); + ProfileResult histoAggResult = aggProfileResultsList.get(0); + assertThat(histoAggResult, notNullValue()); + assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator")); + assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); + assertThat(histoAggResult.getTime(), greaterThan(0L)); + Map histoBreakdown = histoAggResult.getTimeBreakdown(); + assertThat(histoBreakdown, notNullValue()); + 
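testComplexProfile looks its sub-aggregations up through a map keyed by `getLuceneDescription()` rather than by position, so the assertions do not depend on the order in which children are reported. The idiom, extracted as a sketch:

```java
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

import org.elasticsearch.search.profile.ProfileResult;

final class ProfileTreeSketch {
    // Index the profiled children of one aggregator by name (the Lucene
    // description carries the aggregation name in these profiles).
    static Map<String, ProfileResult> childrenByName(ProfileResult parent) {
        return parent.getProfiledChildren()
            .stream()
            .collect(Collectors.toMap(ProfileResult::getLuceneDescription, Function.identity()));
    }
}
```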
assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(histoBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(histoBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(histoBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(histoBreakdown.get(POST_COLLECTION), greaterThan(0L)); + assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(histoBreakdown.get(REDUCE), equalTo(0L)); + assertMap( + histoAggResult.getDebugInfo(), + matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0)) + ); + assertThat(histoAggResult.getProfiledChildren().size(), equalTo(2)); + + Map histoAggResultSubAggregations = histoAggResult.getProfiledChildren() + .stream() + .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s)); + + ProfileResult tagsAggResult = histoAggResultSubAggregations.get("tags"); + assertThat(tagsAggResult, notNullValue()); + assertThat(tagsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); + assertThat(tagsAggResult.getTime(), greaterThan(0L)); + Map tagsBreakdown = tagsAggResult.getTimeBreakdown(); + assertThat(tagsBreakdown, notNullValue()); + assertThat(tagsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(tagsBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(tagsBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(tagsBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(tagsBreakdown.get(POST_COLLECTION), greaterThan(0L)); + assertThat(tagsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(tagsBreakdown.get(REDUCE), equalTo(0L)); + assertRemapTermsDebugInfo(tagsAggResult); + assertThat(tagsAggResult.getProfiledChildren().size(), equalTo(2)); + + Map tagsAggResultSubAggregations = tagsAggResult.getProfiledChildren() + .stream() + .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s)); + + ProfileResult avgAggResult = tagsAggResultSubAggregations.get("avg"); + assertThat(avgAggResult, notNullValue()); + assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator")); + assertThat(avgAggResult.getTime(), greaterThan(0L)); + Map avgBreakdown = avgAggResult.getTimeBreakdown(); + assertThat(avgBreakdown, notNullValue()); + assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(avgBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L)); + assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); + assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); + assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); + + ProfileResult maxAggResult = tagsAggResultSubAggregations.get("max"); + assertThat(maxAggResult, notNullValue()); + assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator")); + assertThat(maxAggResult.getTime(), greaterThan(0L)); + Map maxBreakdown = maxAggResult.getTimeBreakdown(); + assertThat(maxBreakdown, notNullValue()); + assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(maxBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L)); + 
assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(maxBreakdown.get(REDUCE), equalTo(0L)); + assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); + assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); + + ProfileResult stringsAggResult = histoAggResultSubAggregations.get("strings"); + assertThat(stringsAggResult, notNullValue()); + assertThat(stringsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); + assertThat(stringsAggResult.getTime(), greaterThan(0L)); + Map stringsBreakdown = stringsAggResult.getTimeBreakdown(); + assertThat(stringsBreakdown, notNullValue()); + assertThat(stringsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(stringsBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(stringsBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(stringsBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(stringsBreakdown.get(POST_COLLECTION), greaterThan(0L)); + assertThat(stringsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(stringsBreakdown.get(REDUCE), equalTo(0L)); + assertRemapTermsDebugInfo(stringsAggResult); + assertThat(stringsAggResult.getProfiledChildren().size(), equalTo(3)); + + Map stringsAggResultSubAggregations = stringsAggResult.getProfiledChildren() + .stream() + .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s)); + + avgAggResult = stringsAggResultSubAggregations.get("avg"); + assertThat(avgAggResult, notNullValue()); + assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator")); + assertThat(avgAggResult.getTime(), greaterThan(0L)); + avgBreakdown = avgAggResult.getTimeBreakdown(); + assertThat(avgBreakdown, notNullValue()); + assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(avgBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L)); + assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); + assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); + assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); + + maxAggResult = stringsAggResultSubAggregations.get("max"); + assertThat(maxAggResult, notNullValue()); + assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator")); + assertThat(maxAggResult.getTime(), greaterThan(0L)); + maxBreakdown = maxAggResult.getTimeBreakdown(); + assertThat(maxBreakdown, notNullValue()); + assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(maxBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L)); + assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(maxBreakdown.get(REDUCE), equalTo(0L)); + assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); + assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); + + tagsAggResult = stringsAggResultSubAggregations.get("tags"); + assertThat(tagsAggResult, notNullValue()); + assertThat(tagsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); + 
assertThat(tagsAggResult.getLuceneDescription(), equalTo("tags")); + assertThat(tagsAggResult.getTime(), greaterThan(0L)); + tagsBreakdown = tagsAggResult.getTimeBreakdown(); + assertThat(tagsBreakdown, notNullValue()); + assertThat(tagsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(tagsBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(tagsBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(tagsBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(tagsBreakdown.get(POST_COLLECTION), greaterThan(0L)); + assertThat(tagsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(tagsBreakdown.get(REDUCE), equalTo(0L)); + assertRemapTermsDebugInfo(tagsAggResult); + assertThat(tagsAggResult.getProfiledChildren().size(), equalTo(2)); + + tagsAggResultSubAggregations = tagsAggResult.getProfiledChildren() + .stream() + .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s)); + + avgAggResult = tagsAggResultSubAggregations.get("avg"); + assertThat(avgAggResult, notNullValue()); + assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator")); + assertThat(avgAggResult.getTime(), greaterThan(0L)); + avgBreakdown = avgAggResult.getTimeBreakdown(); + assertThat(avgBreakdown, notNullValue()); + assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(avgBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L)); + assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); + assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); + assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); + + maxAggResult = tagsAggResultSubAggregations.get("max"); + assertThat(maxAggResult, notNullValue()); + assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator")); + assertThat(maxAggResult.getTime(), greaterThan(0L)); + maxBreakdown = maxAggResult.getTimeBreakdown(); + assertThat(maxBreakdown, notNullValue()); + assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); + assertThat(maxBreakdown.get(COLLECT), greaterThan(0L)); + assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L)); + assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); + assertThat(maxBreakdown.get(REDUCE), equalTo(0L)); + assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); + assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); + } + } + ); } public void testNoProfile() { - SearchResponse response = prepareSearch("idx").setProfile(false) - .addAggregation( - histogram("histo").field(NUMBER_FIELD) - .interval(1L) - .subAggregation( - terms("tags").field(TAG_FIELD) - .subAggregation(avg("avg").field(NUMBER_FIELD)) - .subAggregation(max("max").field(NUMBER_FIELD)) - ) - .subAggregation( - terms("strings").field(STRING_FIELD) - .subAggregation(avg("avg").field(NUMBER_FIELD)) - .subAggregation(max("max").field(NUMBER_FIELD)) - .subAggregation( - terms("tags").field(TAG_FIELD) - .subAggregation(avg("avg").field(NUMBER_FIELD)) - .subAggregation(max("max").field(NUMBER_FIELD)) - ) - ) - ) - .get(); - assertNoFailures(response); - Map profileResults = response.getProfileResults(); - 
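The rewritten test above relies on the callback overloads from ElasticsearchAssertions: the request builder is executed, the assertions run inside the lambda, and the ref-counted SearchResponse is released when the callback returns. A minimal sketch of the pattern under assumed names (hypothetical index "items" and field "price"; static imports as in the diff):

    // Sketch only: "items"/"price"/"price_avg" are placeholders, not names from this PR.
    assertNoFailuresAndResponse(
        prepareSearch("items").setProfile(true).addAggregation(avg("price_avg").field("price")),
        response -> assertThat(response.getProfileResults(), notNullValue())
    );
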
     public void testNoProfile() {
-        SearchResponse response = prepareSearch("idx").setProfile(false)
-            .addAggregation(
-                histogram("histo").field(NUMBER_FIELD)
-                    .interval(1L)
-                    .subAggregation(
-                        terms("tags").field(TAG_FIELD)
-                            .subAggregation(avg("avg").field(NUMBER_FIELD))
-                            .subAggregation(max("max").field(NUMBER_FIELD))
-                    )
-                    .subAggregation(
-                        terms("strings").field(STRING_FIELD)
-                            .subAggregation(avg("avg").field(NUMBER_FIELD))
-                            .subAggregation(max("max").field(NUMBER_FIELD))
-                            .subAggregation(
-                                terms("tags").field(TAG_FIELD)
-                                    .subAggregation(avg("avg").field(NUMBER_FIELD))
-                                    .subAggregation(max("max").field(NUMBER_FIELD))
-                            )
-                    )
-            )
-            .get();
-        assertNoFailures(response);
-        Map<String, SearchProfileShardResult> profileResults = response.getProfileResults();
-        assertThat(profileResults, notNullValue());
-        assertThat(profileResults.size(), equalTo(0));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setProfile(false)
+                .addAggregation(
+                    histogram("histo").field(NUMBER_FIELD)
+                        .interval(1L)
+                        .subAggregation(
+                            terms("tags").field(TAG_FIELD)
+                                .subAggregation(avg("avg").field(NUMBER_FIELD))
+                                .subAggregation(max("max").field(NUMBER_FIELD))
+                        )
+                        .subAggregation(
+                            terms("strings").field(STRING_FIELD)
+                                .subAggregation(avg("avg").field(NUMBER_FIELD))
+                                .subAggregation(max("max").field(NUMBER_FIELD))
+                                .subAggregation(
+                                    terms("tags").field(TAG_FIELD)
+                                        .subAggregation(avg("avg").field(NUMBER_FIELD))
+                                        .subAggregation(max("max").field(NUMBER_FIELD))
+                                )
+                        )
+                ),
+            response -> {
+                Map<String, SearchProfileShardResult> profileResults = response.getProfileResults();
+                assertThat(profileResults, notNullValue());
+                assertThat(profileResults.size(), equalTo(0));
+            }
+        );
     }
 
     /**
@@ -630,66 +642,70 @@ public void testFilterByFilter() throws InterruptedException, IOException {
         List<IndexRequestBuilder> builders = new ArrayList<>();
         for (int i = 0; i < RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS * 2; i++) {
             String date = Instant.ofEpochSecond(i).toString();
-            builders.add(client().prepareIndex("dateidx").setSource(jsonBuilder().startObject().field("date", date).endObject()));
+            builders.add(prepareIndex("dateidx").setSource(jsonBuilder().startObject().field("date", date).endObject()));
         }
         indexRandom(true, false, builders);
 
-        SearchResponse response = prepareSearch("dateidx").setProfile(true)
-            .addAggregation(
-                new DateHistogramAggregationBuilder("histo").field("date")
-                    .calendarInterval(DateHistogramInterval.MONTH)
-                    // Add a sub-agg so we don't get to use metadata. That's great and all, but it outputs less debugging info for us to
-                    // verify.
-                    .subAggregation(new MaxAggregationBuilder("m").field("date"))
-            )
-            .get();
-        assertNoFailures(response);
-        Map<String, SearchProfileShardResult> profileResults = response.getProfileResults();
-        assertThat(profileResults, notNullValue());
-        assertThat(profileResults.size(), equalTo(getNumShards("dateidx").numPrimaries));
-        for (SearchProfileShardResult profileShardResult : profileResults.values()) {
-            assertThat(profileShardResult, notNullValue());
-            AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults();
-            assertThat(aggProfileResults, notNullValue());
-            List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults();
-            assertThat(aggProfileResultsList, notNullValue());
-            assertThat(aggProfileResultsList.size(), equalTo(1));
-            ProfileResult histoAggResult = aggProfileResultsList.get(0);
-            assertThat(histoAggResult, notNullValue());
-            assertThat(histoAggResult.getQueryName(), equalTo("DateHistogramAggregator.FromDateRange"));
-            assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
-            assertThat(histoAggResult.getProfiledChildren().size(), equalTo(1));
-            assertThat(histoAggResult.getTime(), greaterThan(0L));
-            Map<String, Long> breakdown = histoAggResult.getTimeBreakdown();
-            assertThat(breakdown, notNullValue());
-            assertThat(breakdown.keySet(), equalTo(BREAKDOWN_KEYS));
-            assertThat(breakdown.get(INITIALIZE), greaterThan(0L));
-            assertThat(breakdown.get(COLLECT), equalTo(0L));
-            assertThat(breakdown.get(BUILD_AGGREGATION).longValue(), greaterThan(0L));
-            assertThat(breakdown.get(REDUCE), equalTo(0L));
-            assertMap(
-                histoAggResult.getDebugInfo(),
-                matchesMap().entry(BUILT_BUCKETS, greaterThan(0))
-                    .entry("delegate", "RangeAggregator.FromFilters")
-                    .entry(
-                        "delegate_debug",
-                        matchesMap().entry("average_docs_per_range", equalTo(RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS * 2))
-                            .entry("ranges", 1)
-                            .entry("delegate", "FilterByFilterAggregator")
+        assertNoFailuresAndResponse(
+            prepareSearch("dateidx").setProfile(true)
+                .addAggregation(
+                    new DateHistogramAggregationBuilder("histo").field("date")
+                        .calendarInterval(DateHistogramInterval.MONTH)
+                        // Add a sub-agg so we don't get to use metadata. That's great and all, but it outputs less debugging info for us to
+                        // verify.
+                        .subAggregation(new MaxAggregationBuilder("m").field("date"))
+                ),
+            response -> {
+                Map<String, SearchProfileShardResult> profileResults = response.getProfileResults();
+                assertThat(profileResults, notNullValue());
+                assertThat(profileResults.size(), equalTo(getNumShards("dateidx").numPrimaries));
+                for (SearchProfileShardResult profileShardResult : profileResults.values()) {
+                    assertThat(profileShardResult, notNullValue());
+                    AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults();
+                    assertThat(aggProfileResults, notNullValue());
+                    List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults();
+                    assertThat(aggProfileResultsList, notNullValue());
+                    assertThat(aggProfileResultsList.size(), equalTo(1));
+                    ProfileResult histoAggResult = aggProfileResultsList.get(0);
+                    assertThat(histoAggResult, notNullValue());
+                    assertThat(histoAggResult.getQueryName(), equalTo("DateHistogramAggregator.FromDateRange"));
+                    assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
+                    assertThat(histoAggResult.getProfiledChildren().size(), equalTo(1));
+                    assertThat(histoAggResult.getTime(), greaterThan(0L));
+                    Map<String, Long> breakdown = histoAggResult.getTimeBreakdown();
+                    assertThat(breakdown, notNullValue());
+                    assertThat(breakdown.keySet(), equalTo(BREAKDOWN_KEYS));
+                    assertThat(breakdown.get(INITIALIZE), greaterThan(0L));
+                    assertThat(breakdown.get(COLLECT), equalTo(0L));
+                    assertThat(breakdown.get(BUILD_AGGREGATION).longValue(), greaterThan(0L));
+                    assertThat(breakdown.get(REDUCE), equalTo(0L));
+                    assertMap(
+                        histoAggResult.getDebugInfo(),
+                        matchesMap().entry(BUILT_BUCKETS, greaterThan(0))
+                            .entry("delegate", "RangeAggregator.FromFilters")
                             .entry(
                                 "delegate_debug",
-                                matchesMap().entry("segments_with_deleted_docs", greaterThanOrEqualTo(0))
-                                    .entry("segments_with_doc_count_field", 0)
-                                    .entry("segments_counted", 0)
-                                    .entry("segments_collected", greaterThan(0))
+                                matchesMap().entry("average_docs_per_range", equalTo(RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS * 2))
+                                    .entry("ranges", 1)
+                                    .entry("delegate", "FilterByFilterAggregator")
                                     .entry(
-                                        "filters",
-                                        matchesList().item(matchesMap().entry("query", "*:*").entry("segments_counted_in_constant_time", 0))
+                                        "delegate_debug",
+                                        matchesMap().entry("segments_with_deleted_docs", greaterThanOrEqualTo(0))
+                                            .entry("segments_with_doc_count_field", 0)
+                                            .entry("segments_counted", 0)
+                                            .entry("segments_collected", greaterThan(0))
+                                            .entry(
+                                                "filters",
+                                                matchesList().item(
+                                                    matchesMap().entry("query", "*:*").entry("segments_counted_in_constant_time", 0)
+                                                )
+                                            )
                                     )
                             )
-                    )
-            );
-        }
+                    );
+                }
+            }
+        );
     }
 
     public void testDateHistogramFilterByFilterDisabled() throws InterruptedException, IOException {
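The nested matchers in the hunk above come from the test framework's MapMatcher/ListMatcher helpers: assertMap compares the aggregator's debug map structurally, so a delegation chain such as RangeAggregator.FromFilters wrapping FilterByFilterAggregator can be pinned level by level. A reduced sketch of the idea; the extraOk() call (which tolerates unchecked extra keys) is an assumption about MapMatcher and does not appear in this PR:

    // Sketch: verify only the delegation chain; extraOk() is assumed MapMatcher API.
    assertMap(
        histoAggResult.getDebugInfo(),
        matchesMap().extraOk()
            .entry("delegate", "RangeAggregator.FromFilters")
            .entry("delegate_debug", matchesMap().extraOk().entry("delegate", "FilterByFilterAggregator"))
    );
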
prepareIndex("date_filter_by_filter_disabled").setSource(jsonBuilder().startObject().field("date", date).endObject()) ); } indexRandom(true, false, builders); - SearchResponse response = prepareSearch("date_filter_by_filter_disabled").setProfile(true) - .addAggregation(new DateHistogramAggregationBuilder("histo").field("date").calendarInterval(DateHistogramInterval.MONTH)) - .get(); - assertNoFailures(response); - Map profileResults = response.getProfileResults(); - assertThat(profileResults, notNullValue()); - assertThat(profileResults.size(), equalTo(getNumShards("date_filter_by_filter_disabled").numPrimaries)); - for (SearchProfileShardResult profileShardResult : profileResults.values()) { - assertThat(profileShardResult, notNullValue()); - AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); - assertThat(aggProfileResults, notNullValue()); - List aggProfileResultsList = aggProfileResults.getProfileResults(); - assertThat(aggProfileResultsList, notNullValue()); - assertThat(aggProfileResultsList.size(), equalTo(1)); - ProfileResult histoAggResult = aggProfileResultsList.get(0); - assertThat(histoAggResult, notNullValue()); - assertThat(histoAggResult.getQueryName(), equalTo("DateHistogramAggregator.FromDateRange")); - assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); - assertThat(histoAggResult.getProfiledChildren().size(), equalTo(0)); - assertThat(histoAggResult.getTime(), greaterThan(0L)); - Map breakdown = histoAggResult.getTimeBreakdown(); - assertMap( - breakdown, - matchesMap().entry(INITIALIZE, greaterThan(0L)) - .entry(INITIALIZE + "_count", greaterThan(0L)) - .entry(BUILD_LEAF_COLLECTOR, greaterThan(0L)) - .entry(BUILD_LEAF_COLLECTOR + "_count", greaterThan(0L)) - .entry(COLLECT, greaterThan(0L)) - .entry(COLLECT + "_count", greaterThan(0L)) - .entry(POST_COLLECTION, greaterThan(0L)) - .entry(POST_COLLECTION + "_count", 1L) - .entry(BUILD_AGGREGATION, greaterThan(0L)) - .entry(BUILD_AGGREGATION + "_count", greaterThan(0L)) - .entry(REDUCE, 0L) - .entry(REDUCE + "_count", 0L) - ); - Map debug = histoAggResult.getDebugInfo(); - assertMap( - debug, - matchesMap().entry("delegate", "RangeAggregator.NoOverlap") - .entry("built_buckets", 1) - .entry( - "delegate_debug", - matchesMap().entry("ranges", 1) - .entry("average_docs_per_range", 10000.0) - .entry("singletons", greaterThan(0)) - .entry("non-singletons", 0) - ) - ); - } + assertNoFailuresAndResponse( + prepareSearch("date_filter_by_filter_disabled").setProfile(true) + .addAggregation( + new DateHistogramAggregationBuilder("histo").field("date").calendarInterval(DateHistogramInterval.MONTH) + ), + response -> { + Map profileResults = response.getProfileResults(); + assertThat(profileResults, notNullValue()); + assertThat(profileResults.size(), equalTo(getNumShards("date_filter_by_filter_disabled").numPrimaries)); + for (SearchProfileShardResult profileShardResult : profileResults.values()) { + assertThat(profileShardResult, notNullValue()); + AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); + assertThat(aggProfileResults, notNullValue()); + List aggProfileResultsList = aggProfileResults.getProfileResults(); + assertThat(aggProfileResultsList, notNullValue()); + assertThat(aggProfileResultsList.size(), equalTo(1)); + ProfileResult histoAggResult = aggProfileResultsList.get(0); + assertThat(histoAggResult, notNullValue()); + assertThat(histoAggResult.getQueryName(), 
equalTo("DateHistogramAggregator.FromDateRange")); + assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); + assertThat(histoAggResult.getProfiledChildren().size(), equalTo(0)); + assertThat(histoAggResult.getTime(), greaterThan(0L)); + Map breakdown = histoAggResult.getTimeBreakdown(); + assertMap( + breakdown, + matchesMap().entry(INITIALIZE, greaterThan(0L)) + .entry(INITIALIZE + "_count", greaterThan(0L)) + .entry(BUILD_LEAF_COLLECTOR, greaterThan(0L)) + .entry(BUILD_LEAF_COLLECTOR + "_count", greaterThan(0L)) + .entry(COLLECT, greaterThan(0L)) + .entry(COLLECT + "_count", greaterThan(0L)) + .entry(POST_COLLECTION, greaterThan(0L)) + .entry(POST_COLLECTION + "_count", 1L) + .entry(BUILD_AGGREGATION, greaterThan(0L)) + .entry(BUILD_AGGREGATION + "_count", greaterThan(0L)) + .entry(REDUCE, 0L) + .entry(REDUCE + "_count", 0L) + ); + Map debug = histoAggResult.getDebugInfo(); + assertMap( + debug, + matchesMap().entry("delegate", "RangeAggregator.NoOverlap") + .entry("built_buckets", 1) + .entry( + "delegate_debug", + matchesMap().entry("ranges", 1) + .entry("average_docs_per_range", 10000.0) + .entry("singletons", greaterThan(0)) + .entry("non-singletons", 0) + ) + ); + } + } + ); } finally { updateClusterSettings(Settings.builder().putNull(SearchService.ENABLE_REWRITE_AGGS_TO_FILTER_BY_FILTER.getKey())); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java index f7b2b0f4443d3..c6d3a6733d2fc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java @@ -10,7 +10,6 @@ import org.apache.lucene.tests.util.English; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.profile.ProfileResult; @@ -28,6 +27,7 @@ import static org.elasticsearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -50,8 +50,7 @@ public void testProfileDfs() throws Exception { int numDocs = randomIntBetween(10, 50); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex(indexName) - .setId(String.valueOf(i)) + docs[i] = prepareIndex(indexName).setId(String.valueOf(i)) .setSource( textField, English.intToEnglish(i), @@ -67,53 +66,55 @@ public void testProfileDfs() throws Exception { for (int i = 0; i < iters; i++) { QueryBuilder q = randomQueryBuilder(List.of(textField), List.of(numericField), numDocs, 3); logger.info("Query: {}", q); - SearchResponse resp = prepareSearch().setQuery(q) - .setTrackTotalHits(true) - .setProfile(true) - .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setKnnSearch( - randomList( - 2, - 5, - () -> new KnnSearchBuilder( - vectorField, - new float[] { randomFloat(), randomFloat(), randomFloat() }, - randomIntBetween(5, 10), - 50, - randomBoolean() ? 
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java
index f7b2b0f4443d3..c6d3a6733d2fc 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java
@@ -10,7 +10,6 @@
 
 import org.apache.lucene.tests.util.English;
 import org.elasticsearch.action.index.IndexRequestBuilder;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.SearchType;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.search.profile.ProfileResult;
@@ -28,6 +27,7 @@
 
 import static org.elasticsearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.hamcrest.Matchers.emptyOrNullString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
@@ -50,8 +50,7 @@ public void testProfileDfs() throws Exception {
         int numDocs = randomIntBetween(10, 50);
         IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
         for (int i = 0; i < numDocs; i++) {
-            docs[i] = client().prepareIndex(indexName)
-                .setId(String.valueOf(i))
+            docs[i] = prepareIndex(indexName).setId(String.valueOf(i))
                 .setSource(
                     textField,
                     English.intToEnglish(i),
@@ -67,53 +66,55 @@ public void testProfileDfs() throws Exception {
         for (int i = 0; i < iters; i++) {
             QueryBuilder q = randomQueryBuilder(List.of(textField), List.of(numericField), numDocs, 3);
             logger.info("Query: {}", q);
-            SearchResponse resp = prepareSearch().setQuery(q)
-                .setTrackTotalHits(true)
-                .setProfile(true)
-                .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
-                .setKnnSearch(
-                    randomList(
-                        2,
-                        5,
-                        () -> new KnnSearchBuilder(
-                            vectorField,
-                            new float[] { randomFloat(), randomFloat(), randomFloat() },
-                            randomIntBetween(5, 10),
-                            50,
-                            randomBoolean() ? null : randomFloat()
+            assertResponse(
+                prepareSearch().setQuery(q)
+                    .setTrackTotalHits(true)
+                    .setProfile(true)
+                    .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+                    .setKnnSearch(
+                        randomList(
+                            2,
+                            5,
+                            () -> new KnnSearchBuilder(
+                                vectorField,
+                                new float[] { randomFloat(), randomFloat(), randomFloat() },
+                                randomIntBetween(5, 10),
+                                50,
+                                randomBoolean() ? null : randomFloat()
+                            )
                         )
-                    )
-                )
-                .get();
-
-            assertNotNull("Profile response element should not be null", resp.getProfileResults());
-            assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0));
-            for (Map.Entry<String, SearchProfileShardResult> shard : resp.getProfileResults().entrySet()) {
-                for (QueryProfileShardResult searchProfiles : shard.getValue().getQueryProfileResults()) {
-                    for (ProfileResult result : searchProfiles.getQueryResults()) {
-                        assertNotNull(result.getQueryName());
-                        assertNotNull(result.getLuceneDescription());
-                        assertThat(result.getTime(), greaterThan(0L));
-                    }
-                    CollectorResult result = searchProfiles.getCollectorResult();
-                    assertThat(result.getName(), is(not(emptyOrNullString())));
-                    assertThat(result.getTime(), greaterThan(0L));
-                }
-                SearchProfileDfsPhaseResult searchProfileDfsPhaseResult = shard.getValue().getSearchProfileDfsPhaseResult();
-                assertThat(searchProfileDfsPhaseResult, is(notNullValue()));
-                for (QueryProfileShardResult queryProfileShardResult : searchProfileDfsPhaseResult.getQueryProfileShardResult()) {
-                    for (ProfileResult result : queryProfileShardResult.getQueryResults()) {
-                        assertNotNull(result.getQueryName());
-                        assertNotNull(result.getLuceneDescription());
-                        assertThat(result.getTime(), greaterThan(0L));
+                    ),
+                response -> {
+                    assertNotNull("Profile response element should not be null", response.getProfileResults());
+                    assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0));
+                    for (Map.Entry<String, SearchProfileShardResult> shard : response.getProfileResults().entrySet()) {
+                        for (QueryProfileShardResult searchProfiles : shard.getValue().getQueryProfileResults()) {
+                            for (ProfileResult result : searchProfiles.getQueryResults()) {
+                                assertNotNull(result.getQueryName());
+                                assertNotNull(result.getLuceneDescription());
+                                assertThat(result.getTime(), greaterThan(0L));
+                            }
+                            CollectorResult result = searchProfiles.getCollectorResult();
+                            assertThat(result.getName(), is(not(emptyOrNullString())));
+                            assertThat(result.getTime(), greaterThan(0L));
+                        }
+                        SearchProfileDfsPhaseResult searchProfileDfsPhaseResult = shard.getValue().getSearchProfileDfsPhaseResult();
+                        assertThat(searchProfileDfsPhaseResult, is(notNullValue()));
+                        for (QueryProfileShardResult queryProfileShardResult : searchProfileDfsPhaseResult.getQueryProfileShardResult()) {
+                            for (ProfileResult result : queryProfileShardResult.getQueryResults()) {
+                                assertNotNull(result.getQueryName());
+                                assertNotNull(result.getLuceneDescription());
+                                assertThat(result.getTime(), greaterThan(0L));
+                            }
+                            CollectorResult result = queryProfileShardResult.getCollectorResult();
+                            assertThat(result.getName(), is(not(emptyOrNullString())));
+                            assertThat(result.getTime(), greaterThan(0L));
+                        }
+                        ProfileResult statsResult = searchProfileDfsPhaseResult.getDfsShardResult();
+                        assertThat(statsResult.getQueryName(), equalTo("statistics"));
                     }
-                    CollectorResult result = queryProfileShardResult.getCollectorResult();
-                    assertThat(result.getName(), is(not(emptyOrNullString())));
-                    assertThat(result.getTime(), greaterThan(0L));
                 }
-                ProfileResult statsResult = searchProfileDfsPhaseResult.getDfsShardResult();
-                assertThat(statsResult.getQueryName(), equalTo("statistics"));
-            }
+            );
         }
     }
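DfsProfilerIT exercises the DFS-phase profile: with SearchType.DFS_QUERY_THEN_FETCH plus kNN, each shard's SearchProfileShardResult also carries a SearchProfileDfsPhaseResult whose terminal entry is the "statistics" timing. A trimmed sketch of the traversal under assumed names (the index and the in-scope query q are placeholders; the getters are the ones used above):

    // Sketch: only the DFS-phase portion of the full test, placeholder index name.
    assertResponse(
        prepareSearch("knn-idx").setProfile(true).setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setQuery(q),
        response -> {
            for (Map.Entry<String, SearchProfileShardResult> shard : response.getProfileResults().entrySet()) {
                SearchProfileDfsPhaseResult dfs = shard.getValue().getSearchProfileDfsPhaseResult();
                assertThat(dfs, is(notNullValue()));
                assertThat(dfs.getDfsShardResult().getQueryName(), equalTo("statistics"));
            }
        }
    );
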
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java
index e7b02faede9b1..e02bed8409bc4 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java
@@ -10,7 +10,7 @@
 
 import org.apache.lucene.tests.util.English;
 import org.elasticsearch.action.index.IndexRequestBuilder;
-import org.elasticsearch.action.search.MultiSearchResponse;
+import org.elasticsearch.action.search.MultiSearchResponse.Item;
 import org.elasticsearch.action.search.SearchRequestBuilder;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.SearchType;
@@ -30,6 +30,7 @@
 import java.util.Set;
 
 import static org.elasticsearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.hamcrest.Matchers.emptyOrNullString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
@@ -49,7 +50,7 @@ public void testProfileQuery() throws Exception {
         int numDocs = randomIntBetween(100, 150);
         IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
         for (int i = 0; i < numDocs; i++) {
-            docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i);
+            docs[i] = prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i);
         }
 
         List<String> stringFields = Arrays.asList("field1");
@@ -62,29 +63,26 @@ public void testProfileQuery() throws Exception {
         for (int i = 0; i < iters; i++) {
             QueryBuilder q = randomQueryBuilder(stringFields, numericFields, numDocs, 3);
             logger.info("Query: {}", q);
-
-            SearchResponse resp = prepareSearch().setQuery(q)
-                .setTrackTotalHits(true)
-                .setProfile(true)
-                .setSearchType(SearchType.QUERY_THEN_FETCH)
-                .get();
-
-            assertNotNull("Profile response element should not be null", resp.getProfileResults());
-            assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0));
-            for (Map.Entry<String, SearchProfileShardResult> shard : resp.getProfileResults().entrySet()) {
-                for (QueryProfileShardResult searchProfiles : shard.getValue().getQueryProfileResults()) {
-                    for (ProfileResult result : searchProfiles.getQueryResults()) {
-                        assertNotNull(result.getQueryName());
-                        assertNotNull(result.getLuceneDescription());
-                        assertThat(result.getTime(), greaterThan(0L));
+            assertResponse(
+                prepareSearch().setQuery(q).setTrackTotalHits(true).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH),
+                response -> {
+                    assertNotNull("Profile response element should not be null", response.getProfileResults());
+                    assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0));
+                    for (Map.Entry<String, SearchProfileShardResult> shard : response.getProfileResults().entrySet()) {
+                        for (QueryProfileShardResult searchProfiles : shard.getValue().getQueryProfileResults()) {
+                            for (ProfileResult result : searchProfiles.getQueryResults()) {
+                                assertNotNull(result.getQueryName());
+                                assertNotNull(result.getLuceneDescription());
+                                assertThat(result.getTime(), greaterThan(0L));
+                            }
+
+                            CollectorResult result = searchProfiles.getCollectorResult();
+                            assertThat(result.getName(), is(not(emptyOrNullString())));
+                            assertThat(result.getTime(), greaterThan(0L));
+                        }
                     }
                 }
-
-                CollectorResult result = searchProfiles.getCollectorResult();
-                assertThat(result.getName(), is(not(emptyOrNullString())));
-                assertThat(result.getTime(), greaterThan(0L));
             }
-        }
-
+            );
         }
     }
 
@@ -100,8 +98,7 @@ public void testProfileMatchesRegular() throws Exception {
         int numDocs = randomIntBetween(100, 150);
         IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
         for (int i = 0; i < numDocs; i++) {
-            docs[i] = client().prepareIndex("test")
-                .setId(String.valueOf(i))
+            docs[i] = prepareIndex("test").setId(String.valueOf(i))
                 .setSource("id", String.valueOf(i), "field1", English.intToEnglish(i), "field2", i);
         }
 
@@ -126,47 +123,52 @@ public void testProfileMatchesRegular() throws Exception {
             .setSearchType(SearchType.QUERY_THEN_FETCH)
             .setRequestCache(false);
 
-        MultiSearchResponse.Item[] responses = client().prepareMultiSearch().add(vanilla).add(profile).get().getResponses();
-
-        SearchResponse vanillaResponse = responses[0].getResponse();
-        SearchResponse profileResponse = responses[1].getResponse();
-
-        assertThat(vanillaResponse.getFailedShards(), equalTo(0));
-        assertThat(profileResponse.getFailedShards(), equalTo(0));
-        assertThat(vanillaResponse.getSuccessfulShards(), equalTo(profileResponse.getSuccessfulShards()));
-
-        float vanillaMaxScore = vanillaResponse.getHits().getMaxScore();
-        float profileMaxScore = profileResponse.getHits().getMaxScore();
-        if (Float.isNaN(vanillaMaxScore)) {
-            assertTrue("Vanilla maxScore is NaN but Profile is not [" + profileMaxScore + "]", Float.isNaN(profileMaxScore));
-        } else {
-            assertEquals(
-                "Profile maxScore of [" + profileMaxScore + "] is not close to Vanilla maxScore [" + vanillaMaxScore + "]",
-                vanillaMaxScore,
-                profileMaxScore,
-                0.001
-            );
-        }
+        assertResponse(client().prepareMultiSearch().add(vanilla).add(profile), response -> {
+            Item[] responses = response.getResponses();
 
-        if (vanillaResponse.getHits().getTotalHits().value != profileResponse.getHits().getTotalHits().value) {
-            Set<SearchHit> vanillaSet = new HashSet<>(Arrays.asList(vanillaResponse.getHits().getHits()));
-            Set<SearchHit> profileSet = new HashSet<>(Arrays.asList(profileResponse.getHits().getHits()));
-            if (vanillaResponse.getHits().getTotalHits().value > profileResponse.getHits().getTotalHits().value) {
-                vanillaSet.removeAll(profileSet);
-                fail("Vanilla hits were larger than profile hits. Non-overlapping elements were: " + vanillaSet.toString());
+            SearchResponse vanillaResponse = responses[0].getResponse();
+            SearchResponse profileResponse = responses[1].getResponse();
+
+            assertThat(vanillaResponse.getFailedShards(), equalTo(0));
+            assertThat(profileResponse.getFailedShards(), equalTo(0));
+            assertThat(vanillaResponse.getSuccessfulShards(), equalTo(profileResponse.getSuccessfulShards()));
+
+            float vanillaMaxScore = vanillaResponse.getHits().getMaxScore();
+            float profileMaxScore = profileResponse.getHits().getMaxScore();
+            if (Float.isNaN(vanillaMaxScore)) {
+                assertTrue("Vanilla maxScore is NaN but Profile is not [" + profileMaxScore + "]", Float.isNaN(profileMaxScore));
             } else {
-                profileSet.removeAll(vanillaSet);
-                fail("Profile hits were larger than vanilla hits. Non-overlapping elements were: " + profileSet.toString());
+                assertEquals(
+                    "Profile maxScore of [" + profileMaxScore + "] is not close to Vanilla maxScore [" + vanillaMaxScore + "]",
+                    vanillaMaxScore,
+                    profileMaxScore,
+                    0.001
+                );
             }
-        }
 
-        SearchHit[] vanillaHits = vanillaResponse.getHits().getHits();
-        SearchHit[] profileHits = profileResponse.getHits().getHits();
+            if (vanillaResponse.getHits().getTotalHits().value != profileResponse.getHits().getTotalHits().value) {
+                Set<SearchHit> vanillaSet = new HashSet<>(Arrays.asList(vanillaResponse.getHits().getHits()));
+                Set<SearchHit> profileSet = new HashSet<>(Arrays.asList(profileResponse.getHits().getHits()));
+                if (vanillaResponse.getHits().getTotalHits().value > profileResponse.getHits().getTotalHits().value) {
+                    vanillaSet.removeAll(profileSet);
+                    fail("Vanilla hits were larger than profile hits. Non-overlapping elements were: " + vanillaSet.toString());
+                } else {
+                    profileSet.removeAll(vanillaSet);
+                    fail("Profile hits were larger than vanilla hits. Non-overlapping elements were: " + profileSet.toString());
+                }
+            }
 
-        for (int j = 0; j < vanillaHits.length; j++) {
-            assertThat("Profile hit #" + j + " has a different ID from Vanilla", vanillaHits[j].getId(), equalTo(profileHits[j].getId()));
-        }
+            SearchHit[] vanillaHits = vanillaResponse.getHits().getHits();
+            SearchHit[] profileHits = profileResponse.getHits().getHits();
+
+            for (int j = 0; j < vanillaHits.length; j++) {
+                assertThat(
+                    "Profile hit #" + j + " has a different ID from Vanilla",
+                    vanillaHits[j].getId(),
+                    equalTo(profileHits[j].getId())
+                );
+            }
+        });
     }
 
     /**
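Because testProfileMatchesRegular compares a vanilla and a profiled copy of the same random query inside one multi-search, the assertion lambda receives the whole MultiSearchResponse rather than a single SearchResponse. The comparison skeleton, assuming vanilla and profile are the two SearchRequestBuilder instances built above (the equality assertion is a simplification of the test's richer diagnostics):

    // Sketch of the multi-search comparison pattern used above.
    assertResponse(client().prepareMultiSearch().add(vanilla).add(profile), response -> {
        Item[] items = response.getResponses();
        SearchResponse vanillaResponse = items[0].getResponse();
        SearchResponse profileResponse = items[1].getResponse();
        assertThat(vanillaResponse.getHits().getTotalHits().value, equalTo(profileResponse.getHits().getTotalHits().value));
    });
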
"field1:one"); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } + }); } /** @@ -217,7 +219,7 @@ public void testBool() throws Exception { int numDocs = randomIntBetween(100, 150); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); + docs[i] = prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); } indexRandom(true, docs); @@ -226,45 +228,44 @@ public void testBool() throws Exception { .must(QueryBuilders.matchQuery("field1", "one")) .must(QueryBuilders.matchQuery("field1", "two")); - SearchResponse resp = prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH).get(); + assertResponse(prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { + Map p = response.getProfileResults(); + assertNotNull(p); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - Map p = resp.getProfileResults(); - assertNotNull(p); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertEquals(result.getQueryName(), "BooleanQuery"); + assertEquals(result.getLuceneDescription(), "+field1:one +field1:two"); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + assertEquals(result.getProfiledChildren().size(), 2); + + // Check the children + List children = result.getProfiledChildren(); + assertEquals(children.size(), 2); + + ProfileResult childProfile = children.get(0); + assertEquals(childProfile.getQueryName(), "TermQuery"); + assertEquals(childProfile.getLuceneDescription(), "field1:one"); + assertThat(childProfile.getTime(), greaterThan(0L)); + assertNotNull(childProfile.getTimeBreakdown()); + assertEquals(childProfile.getProfiledChildren().size(), 0); + + childProfile = children.get(1); + assertEquals(childProfile.getQueryName(), "TermQuery"); + assertEquals(childProfile.getLuceneDescription(), "field1:two"); + assertThat(childProfile.getTime(), greaterThan(0L)); + assertNotNull(childProfile.getTimeBreakdown()); + } - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertEquals(result.getQueryName(), "BooleanQuery"); - assertEquals(result.getLuceneDescription(), "+field1:one +field1:two"); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); - assertEquals(result.getProfiledChildren().size(), 2); - - // Check the children - List children = 
result.getProfiledChildren(); - assertEquals(children.size(), 2); - - ProfileResult childProfile = children.get(0); - assertEquals(childProfile.getQueryName(), "TermQuery"); - assertEquals(childProfile.getLuceneDescription(), "field1:one"); - assertThat(childProfile.getTime(), greaterThan(0L)); - assertNotNull(childProfile.getTimeBreakdown()); - assertEquals(childProfile.getProfiledChildren().size(), 0); - - childProfile = children.get(1); - assertEquals(childProfile.getQueryName(), "TermQuery"); - assertEquals(childProfile.getLuceneDescription(), "field1:two"); - assertThat(childProfile.getTime(), greaterThan(0L)); - assertNotNull(childProfile.getTimeBreakdown()); } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } - + }); } /** @@ -277,7 +278,7 @@ public void testEmptyBool() throws Exception { int numDocs = randomIntBetween(100, 150); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); + docs[i] = prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); } indexRandom(true, docs); @@ -287,25 +288,25 @@ public void testEmptyBool() throws Exception { QueryBuilder q = QueryBuilders.boolQuery(); logger.info("Query: {}", q); - SearchResponse resp = prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH).get(); + assertResponse(prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { + assertNotNull("Profile response element should not be null", response.getProfileResults()); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - assertNotNull("Profile response element should not be null", resp.getProfileResults()); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } + }); } /** @@ -320,7 +321,7 @@ public void testCollapsingBool() throws Exception { int numDocs = randomIntBetween(100, 150); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = 
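For compound queries the profile tree mirrors the query tree: the BooleanQuery node above reports its two TermQuery clauses through getProfiledChildren(), and each child carries its own timing and breakdown. A compact way to express the same check, assuming searchProfiles is in scope as in the loops above:

    // Sketch: walk the profiled children of the first query node.
    ProfileResult bool = searchProfiles.getQueryResults().get(0);
    assertEquals("BooleanQuery", bool.getQueryName());
    for (ProfileResult clause : bool.getProfiledChildren()) {
        assertEquals("TermQuery", clause.getQueryName());
        assertThat(clause.getTime(), greaterThan(0L));
        assertNotNull(clause.getTimeBreakdown());
    }
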
client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); + docs[i] = prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); } indexRandom(true, docs); @@ -332,25 +333,25 @@ public void testCollapsingBool() throws Exception { logger.info("Query: {}", q); - SearchResponse resp = prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH).get(); + assertResponse(prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { + assertNotNull("Profile response element should not be null", response.getProfileResults()); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - assertNotNull("Profile response element should not be null", resp.getProfileResults()); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } + }); } public void testBoosting() throws Exception { @@ -360,7 +361,7 @@ public void testBoosting() throws Exception { int numDocs = randomIntBetween(100, 150); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); + docs[i] = prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); } indexRandom(true, docs); @@ -372,25 +373,25 @@ public void testBoosting() throws Exception { .negativeBoost(randomFloat()); logger.info("Query: {}", q); - SearchResponse resp = prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH).get(); + assertResponse(prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { + assertNotNull("Profile response element should not be null", response.getProfileResults()); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - assertNotNull("Profile response element should not be null", resp.getProfileResults()); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult 
searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } + }); } public void testDisMaxRange() throws Exception { @@ -400,7 +401,7 @@ public void testDisMaxRange() throws Exception { int numDocs = randomIntBetween(100, 150); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); + docs[i] = prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); } indexRandom(true, docs); @@ -412,25 +413,25 @@ public void testDisMaxRange() throws Exception { .add(QueryBuilders.rangeQuery("field2").from(null).to(73).includeLower(true).includeUpper(true)); logger.info("Query: {}", q); - SearchResponse resp = prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH).get(); + assertResponse(prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { + assertNotNull("Profile response element should not be null", response.getProfileResults()); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - assertNotNull("Profile response element should not be null", resp.getProfileResults()); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), 
greaterThan(0L)); } - } + }); } public void testRange() throws Exception { @@ -440,7 +441,7 @@ public void testRange() throws Exception { int numDocs = randomIntBetween(100, 150); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); + docs[i] = prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); } indexRandom(true, docs); @@ -451,25 +452,25 @@ public void testRange() throws Exception { logger.info("Query: {}", q.toString()); - SearchResponse resp = prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH).get(); + assertResponse(prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { + assertNotNull("Profile response element should not be null", response.getProfileResults()); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - assertNotNull("Profile response element should not be null", resp.getProfileResults()); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } + }); } public void testPhrase() throws Exception { @@ -479,8 +480,7 @@ public void testPhrase() throws Exception { int numDocs = randomIntBetween(100, 150); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex("test") - .setId(String.valueOf(i)) + docs[i] = prepareIndex("test").setId(String.valueOf(i)) .setSource("field1", English.intToEnglish(i) + " " + English.intToEnglish(i + 1), "field2", i); } @@ -492,36 +492,35 @@ public void testPhrase() throws Exception { logger.info("Query: {}", q); - SearchResponse resp = prepareSearch().setQuery(q) - .setIndices("test") - .setProfile(true) - .setSearchType(SearchType.QUERY_THEN_FETCH) - .get(); + assertResponse( + prepareSearch().setQuery(q).setIndices("test").setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), + response -> { + if (response.getShardFailures().length > 0) { + for (ShardSearchFailure f : response.getShardFailures()) { + logger.error("Shard search failure: {}", f); + } + fail(); + } - if (resp.getShardFailures().length > 0) { - for (ShardSearchFailure 
f : resp.getShardFailures()) { - logger.error("Shard search failure: {}", f); - } - fail(); - } + assertNotNull("Profile response element should not be null", response.getProfileResults()); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - assertNotNull("Profile response element should not be null", resp.getProfileResults()); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); - assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); + assertThat(result.getTime(), greaterThan(0L)); + } } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } + ); } /** @@ -534,7 +533,7 @@ public void testNoProfile() throws Exception { int numDocs = randomIntBetween(100, 150); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); + docs[i] = prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); } indexRandom(true, docs); @@ -543,8 +542,9 @@ public void testNoProfile() throws Exception { logger.info("Query: {}", q); - SearchResponse resp = prepareSearch().setQuery(q).setProfile(false).get(); - assertThat("Profile response element should be an empty map", resp.getProfileResults().size(), equalTo(0)); + assertResponse( + prepareSearch().setQuery(q).setProfile(false), + response -> assertThat("Profile response element should be an empty map", response.getProfileResults().size(), equalTo(0)) + ); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/ExistsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/ExistsIT.java index 099100a7a67e3..81c612107e44a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/ExistsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/ExistsIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.explain.ExplainResponse; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchHit; @@ -30,7 +29,9 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; public class ExistsIT extends ESIntegTestCase { @@ -94,7 +95,7 @@ public void testExists() throws Exception { emptyMap() }; List reqs = new ArrayList<>(); for (Map source : sources) { - reqs.add(client().prepareIndex("idx").setSource(source)); + reqs.add(prepareIndex("idx").setSource(source)); } // We do NOT index dummy documents, otherwise the type for these dummy documents // would have _field_names indexed while the current type might not which might @@ -113,46 +114,46 @@ public void testExists() throws Exception { expected.put("vec", 2); final long numDocs = sources.length; - SearchResponse allDocs = prepareSearch("idx").setSize(sources.length).get(); - assertNoFailures(allDocs); - assertHitCount(allDocs, numDocs); - for (Map.Entry entry : expected.entrySet()) { - final String fieldName = entry.getKey(); - final int count = entry.getValue(); - // exists - SearchResponse resp = prepareSearch("idx").setQuery(QueryBuilders.existsQuery(fieldName)).get(); - assertNoFailures(resp); - try { - assertEquals( - String.format( - Locale.ROOT, - "exists(%s, %d) mapping: %s response: %s", - fieldName, - count, - Strings.toString(mapping), - resp - ), - count, - resp.getHits().getTotalHits().value - ); - } catch (AssertionError e) { - for (SearchHit searchHit : allDocs.getHits()) { - final String index = searchHit.getIndex(); - final String id = searchHit.getId(); - final ExplainResponse explanation = client().prepareExplain(index, id) - .setQuery(QueryBuilders.existsQuery(fieldName)) - .get(); - logger.info( - "Explanation for [{}] / [{}] / [{}]: [{}]", - fieldName, - id, - searchHit.getSourceAsString(), - explanation.getExplanation() - ); - } - throw e; + assertNoFailuresAndResponse(prepareSearch("idx").setSize(sources.length), allDocs -> { + assertHitCount(allDocs, numDocs); + for (Map.Entry entry : expected.entrySet()) { + final String fieldName = entry.getKey(); + final int count = entry.getValue(); + // exists + assertNoFailuresAndResponse(prepareSearch("idx").setQuery(QueryBuilders.existsQuery(fieldName)), response -> { + try { + assertEquals( + String.format( + Locale.ROOT, + "exists(%s, %d) mapping: %s response: %s", + fieldName, + count, + Strings.toString(mapping), + response + ), + count, + response.getHits().getTotalHits().value + ); + } catch (AssertionError e) { + for (SearchHit searchHit : allDocs.getHits()) { + final String index = searchHit.getIndex(); + final String id = searchHit.getId(); + final ExplainResponse explanation = client().prepareExplain(index, id) + .setQuery(QueryBuilders.existsQuery(fieldName)) + .get(); + logger.info( + "Explanation for [{}] / [{}] / [{}]: [{}]", + fieldName, + id, + searchHit.getSourceAsString(), + explanation.getExplanation() + ); + } + throw e; + } + }); } - } + }); } public void testFieldAlias() throws Exception { @@ -182,11 +183,11 @@ public void testFieldAlias() throws Exception { ensureGreen("idx"); List indexRequests = new ArrayList<>(); - indexRequests.add(client().prepareIndex("idx").setSource(emptyMap())); - indexRequests.add(client().prepareIndex("idx").setSource(emptyMap())); - indexRequests.add(client().prepareIndex("idx").setSource("bar", 3)); - 
indexRequests.add(client().prepareIndex("idx").setSource("foo", singletonMap("bar", 2.718))); - indexRequests.add(client().prepareIndex("idx").setSource("foo", singletonMap("bar", 6.283))); + indexRequests.add(prepareIndex("idx").setSource(emptyMap())); + indexRequests.add(prepareIndex("idx").setSource(emptyMap())); + indexRequests.add(prepareIndex("idx").setSource("bar", 3)); + indexRequests.add(prepareIndex("idx").setSource("foo", singletonMap("bar", 2.718))); + indexRequests.add(prepareIndex("idx").setSource("foo", singletonMap("bar", 6.283))); indexRandom(true, false, indexRequests); Map expected = new LinkedHashMap<>(); @@ -198,10 +199,7 @@ public void testFieldAlias() throws Exception { for (Map.Entry entry : expected.entrySet()) { String fieldName = entry.getKey(); int expectedCount = entry.getValue(); - - SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.existsQuery(fieldName)).get(); - assertNoFailures(response); - assertHitCount(response, expectedCount); + assertHitCountAndNoFailures(prepareSearch("idx").setQuery(QueryBuilders.existsQuery(fieldName)), expectedCount); } } @@ -225,14 +223,12 @@ public void testFieldAliasWithNoDocValues() throws Exception { ensureGreen("idx"); List indexRequests = new ArrayList<>(); - indexRequests.add(client().prepareIndex("idx").setSource(emptyMap())); - indexRequests.add(client().prepareIndex("idx").setSource(emptyMap())); - indexRequests.add(client().prepareIndex("idx").setSource("foo", 3)); - indexRequests.add(client().prepareIndex("idx").setSource("foo", 43)); + indexRequests.add(prepareIndex("idx").setSource(emptyMap())); + indexRequests.add(prepareIndex("idx").setSource(emptyMap())); + indexRequests.add(prepareIndex("idx").setSource("foo", 3)); + indexRequests.add(prepareIndex("idx").setSource("foo", 43)); indexRandom(true, false, indexRequests); - SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.existsQuery("foo-alias")).get(); - assertNoFailures(response); - assertHitCount(response, 2); + assertHitCountAndNoFailures(prepareSearch("idx").setQuery(QueryBuilders.existsQuery("foo-alias")), 2L); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/IntervalQueriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/IntervalQueriesIT.java index 1e18c0ca3c59c..50a1924843e74 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/IntervalQueriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/IntervalQueriesIT.java @@ -12,7 +12,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.KeywordTokenizer; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.index.analysis.AnalyzerProvider; import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.query.IntervalQueryBuilder; @@ -30,6 +29,7 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; public class IntervalQueriesIT extends ESIntegTestCase { @@ -51,15 +51,16 @@ public void testEmptyIntervalsWithNestedMappings() throws InterruptedException { indexRandom( true, - client().prepareIndex("nested").setId("1").setSource("text", "the quick brown fox jumps"), - client().prepareIndex("nested").setId("2").setSource("text", "quick brown"), - 
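Where a test only needed "no shard failures" plus a hit-count check, three statements collapse into a single assertHitCountAndNoFailures call, as in testFieldAlias and testFieldAliasWithNoDocValues here. The shape of the call, taken from this diff (the count is a long because it is compared against getTotalHits().value):

```java
// Before: execute, then assert in two steps while holding the response.
SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.existsQuery("foo-alias")).get();
assertNoFailures(response);
assertHitCount(response, 2);

// After: one call executes the search, checks for shard failures, verifies
// the total hit count, and (presumably) releases the response when done.
assertHitCountAndNoFailures(prepareSearch("idx").setQuery(QueryBuilders.existsQuery("foo-alias")), 2L);
```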
client().prepareIndex("nested").setId("3").setSource("text", "quick") + prepareIndex("nested").setId("1").setSource("text", "the quick brown fox jumps"), + prepareIndex("nested").setId("2").setSource("text", "quick brown"), + prepareIndex("nested").setId("3").setSource("text", "quick") ); - SearchResponse resp = prepareSearch("nested").setQuery( - new IntervalQueryBuilder("empty_text", new IntervalsSourceProvider.Match("an empty query", 0, true, null, null, null)) - ).get(); - assertEquals(0, resp.getFailedShards()); + assertNoFailures( + prepareSearch("nested").setQuery( + new IntervalQueryBuilder("empty_text", new IntervalsSourceProvider.Match("an empty query", 0, true, null, null, null)) + ) + ); } private static class EmptyAnalyzer extends Analyzer { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/MultiMatchQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/MultiMatchQueryIT.java index f251ab5cb6269..2d77e170abdc5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/MultiMatchQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/MultiMatchQueryIT.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; @@ -53,6 +54,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; @@ -88,8 +91,7 @@ public void init() throws Exception { int numDocs = scaledRandomIntBetween(50, 100); List builders = new ArrayList<>(); builders.add( - client().prepareIndex("test") - .setId("theone") + prepareIndex("test").setId("theone") .setSource( "id", "theone", @@ -108,8 +110,7 @@ public void init() throws Exception { ) ); builders.add( - client().prepareIndex("test") - .setId("theother") + prepareIndex("test").setId("theother") .setSource( "id", "theother", @@ -127,8 +128,7 @@ public void init() throws Exception { ); builders.add( - client().prepareIndex("test") - .setId("ultimate1") + prepareIndex("test").setId("ultimate1") .setSource( "id", "ultimate1", @@ -145,8 +145,7 @@ public void init() throws Exception { ) ); builders.add( - client().prepareIndex("test") - .setId("ultimate2") + prepareIndex("test").setId("ultimate2") .setSource( "full_name", "Man the Ultimate Ninja", @@ -162,8 +161,7 @@ public void init() throws Exception { ); builders.add( - client().prepareIndex("test") - .setId("anotherhero") + prepareIndex("test").setId("anotherhero") .setSource( "id", "anotherhero", @@ -181,8 +179,7 @@ public void init() throws Exception { ); builders.add( - client().prepareIndex("test") - .setId("nowHero") + 
prepareIndex("test").setId("nowHero") .setSource( "id", "nowHero", @@ -209,8 +206,7 @@ public void init() throws Exception { String first = RandomPicks.randomFrom(random(), firstNames); String last = randomPickExcept(lastNames, first); builders.add( - client().prepareIndex("test") - .setId("" + i) + prepareIndex("test").setId("" + i) .setSource( "id", i, @@ -267,72 +263,91 @@ private XContentBuilder createMapping() throws IOException { public void testDefaults() throws ExecutionException, InterruptedException { MatchQueryParser.Type type = MatchQueryParser.Type.BOOLEAN; - SearchResponse searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category").operator(Operator.OR) - ) - ).get(); - Set topNIds = Sets.newHashSet("theone", "theother"); - for (int i = 0; i < searchResponse.getHits().getHits().length; i++) { - topNIds.remove(searchResponse.getHits().getAt(i).getId()); - // very likely that we hit a random doc that has the same score so orders are random since - // the doc id is the tie-breaker - } - assertThat(topNIds, empty()); - assertThat(searchResponse.getHits().getHits()[0].getScore(), greaterThan(searchResponse.getHits().getHits()[1].getScore())); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category").operator(Operator.OR) - .type(type) - ) - ).get(); - assertFirstHit(searchResponse, anyOf(hasId("theone"), hasId("theother"))); - assertThat(searchResponse.getHits().getHits()[0].getScore(), greaterThan(searchResponse.getHits().getHits()[1].getScore())); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("marvel hero", "full_name", "first_name", "last_name", "category").operator(Operator.OR).type(type) - ) - ).get(); - assertFirstHit(searchResponse, hasId("theother")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").operator(Operator.AND).type(type) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").operator(Operator.AND).type(type) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category").operator(Operator.OR) + ) + ), + response -> { + Set topNIds = Sets.newHashSet("theone", "theother"); + for (int i = 0; i < response.getHits().getHits().length; i++) { + topNIds.remove(response.getHits().getAt(i).getId()); + // very likely that we hit a random doc that has the same score so orders are random since + // the doc id is the tie-breaker + } + assertThat(topNIds, empty()); + assertThat(response.getHits().getHits()[0].getScore(), greaterThan(response.getHits().getHits()[1].getScore())); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category").operator(Operator.OR) + .type(type) + ) + ), + response -> { + assertFirstHit(response, anyOf(hasId("theone"), hasId("theother"))); + 
assertThat(response.getHits().getHits()[0].getScore(), greaterThan(response.getHits().getHits()[1].getScore())); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("marvel hero", "full_name", "first_name", "last_name", "category").operator(Operator.OR).type(type) + ) + ), + response -> assertFirstHit(response, hasId("theother")) + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").operator(Operator.AND).type(type) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").operator(Operator.AND).type(type) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); } public void testPhraseType() { - SearchResponse searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("Man the Ultimate", "full_name_phrase", "first_name_phrase", "last_name_phrase", "category_phrase") - .operator(Operator.OR) - .type(MatchQueryParser.Type.PHRASE) - ) - ).get(); - assertFirstHit(searchResponse, hasId("ultimate2")); - assertHitCount(searchResponse, 1L); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("Captain", "full_name_phrase", "first_name_phrase", "last_name_phrase", "category_phrase").operator( - Operator.OR - ).type(MatchQueryParser.Type.PHRASE) - ) - ).get(); - assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(1L)); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("Man the Ultimate", "full_name_phrase", "first_name_phrase", "last_name_phrase", "category_phrase") + .operator(Operator.OR) + .type(MatchQueryParser.Type.PHRASE) + ) + ), + response -> { + assertFirstHit(response, hasId("ultimate2")); + assertHitCount(response, 1L); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("Captain", "full_name_phrase", "first_name_phrase", "last_name_phrase", "category_phrase").operator( + Operator.OR + ).type(MatchQueryParser.Type.PHRASE) + ) + ), + response -> assertThat(response.getHits().getTotalHits().value, greaterThan(1L)) + ); assertSearchHitsWithoutFailures( prepareSearch("test").setQuery( @@ -348,14 +363,15 @@ public void testPhraseType() { } public void testSingleField() throws NoSuchFieldException, IllegalAccessException { - SearchResponse searchResponse = prepareSearch("test").setQuery(randomizeType(multiMatchQuery("15", "skill"))).get(); - assertNoFailures(searchResponse); - assertFirstHit(searchResponse, hasId("theone")); + assertNoFailuresAndResponse( + prepareSearch("test").setQuery(randomizeType(multiMatchQuery("15", "skill"))), + response -> assertFirstHit(response, hasId("theone")) + ); - searchResponse = prepareSearch("test").setQuery(randomizeType(multiMatchQuery("15", "skill", "int-field")).analyzer("category")) - .get(); - assertNoFailures(searchResponse); - assertFirstHit(searchResponse, hasId("theone")); + assertNoFailuresAndResponse( + prepareSearch("test").setQuery(randomizeType(multiMatchQuery("15", "skill", "int-field")).analyzer("category")), + response -> assertFirstHit(response, hasId("theone")) + ); String[] fields = { "full_name", @@ -393,34 +409,39 @@ public void testSingleField() throws 
NoSuchFieldException, IllegalAccessExceptio builder.append(RandomPicks.randomFrom(random(), query)).append(" "); } MultiMatchQueryBuilder multiMatchQueryBuilder = randomizeType(multiMatchQuery(builder.toString(), field)); - SearchResponse multiMatchResp = prepareSearch("test") - // id sort field is a tie, in case hits have the same score, - // the hits will be sorted the same consistently - .addSort("_score", SortOrder.DESC) - .addSort("id", SortOrder.ASC) - .setQuery(multiMatchQueryBuilder) - .get(); - MatchQueryBuilder matchQueryBuilder = QueryBuilders.matchQuery(field, builder.toString()); - - SearchResponse matchResp = prepareSearch("test") - // id tie sort - .addSort("_score", SortOrder.DESC) - .addSort("id", SortOrder.ASC) - .setQuery(matchQueryBuilder) - .get(); - assertThat( - "field: " + field + " query: " + builder.toString(), - multiMatchResp.getHits().getTotalHits().value, - equalTo(matchResp.getHits().getTotalHits().value) + assertResponse( + prepareSearch("test") + // id sort field is a tie, in case hits have the same score, + // the hits will be sorted the same consistently + .addSort("_score", SortOrder.DESC) + .addSort("id", SortOrder.ASC) + .setQuery(multiMatchQueryBuilder), + multiMatchResp -> { + MatchQueryBuilder matchQueryBuilder = QueryBuilders.matchQuery(field, builder.toString()); + assertResponse( + prepareSearch("test") + // id tie sort + .addSort("_score", SortOrder.DESC) + .addSort("id", SortOrder.ASC) + .setQuery(matchQueryBuilder), + matchResp -> { + assertThat( + "field: " + field + " query: " + builder.toString(), + multiMatchResp.getHits().getTotalHits().value, + equalTo(matchResp.getHits().getTotalHits().value) + ); + SearchHits hits = multiMatchResp.getHits(); + if (field.startsWith("missing")) { + assertEquals(0, hits.getHits().length); + } + for (int j = 0; j < hits.getHits().length; j++) { + assertThat(hits.getHits()[j].getScore(), equalTo(matchResp.getHits().getHits()[j].getScore())); + assertThat(hits.getHits()[j].getId(), equalTo(matchResp.getHits().getHits()[j].getId())); + } + } + ); + } ); - SearchHits hits = multiMatchResp.getHits(); - if (field.startsWith("missing")) { - assertEquals(0, hits.getHits().length); - } - for (int j = 0; j < hits.getHits().length; j++) { - assertThat(hits.getHits()[j].getScore(), equalTo(matchResp.getHits().getHits()[j].getScore())); - assertThat(hits.getHits()[j].getId(), equalTo(matchResp.getHits().getHits()[j].getId())); - } } } @@ -435,23 +456,24 @@ public void testEquivalence() { MultiMatchQueryBuilder multiMatchQueryBuilder = randomBoolean() ? multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category") : multiMatchQuery("marvel hero captain america", "*_name", randomBoolean() ? 
"category" : "categ*"); - SearchResponse left = prepareSearch("test").setSize(numDocs) - .addSort(SortBuilders.scoreSort()) - .addSort(SortBuilders.fieldSort("id")) - .setQuery(randomizeType(multiMatchQueryBuilder.operator(Operator.OR).type(type))) - .get(); - - SearchResponse right = prepareSearch("test").setSize(numDocs) - .addSort(SortBuilders.scoreSort()) - .addSort(SortBuilders.fieldSort("id")) - .setQuery( - disMaxQuery().add(matchQuery("full_name", "marvel hero captain america")) - .add(matchQuery("first_name", "marvel hero captain america")) - .add(matchQuery("last_name", "marvel hero captain america")) - .add(matchQuery("category", "marvel hero captain america")) + assertResponse( + prepareSearch("test").setSize(numDocs) + .addSort(SortBuilders.scoreSort()) + .addSort(SortBuilders.fieldSort("id")) + .setQuery(randomizeType(multiMatchQueryBuilder.operator(Operator.OR).type(type))), + left -> assertResponse( + prepareSearch("test").setSize(numDocs) + .addSort(SortBuilders.scoreSort()) + .addSort(SortBuilders.fieldSort("id")) + .setQuery( + disMaxQuery().add(matchQuery("full_name", "marvel hero captain america")) + .add(matchQuery("first_name", "marvel hero captain america")) + .add(matchQuery("last_name", "marvel hero captain america")) + .add(matchQuery("category", "marvel hero captain america")) + ), + right -> assertEquivalent("marvel hero captain america", left, right) ) - .get(); - assertEquivalent("marvel hero captain america", left, right); + ); } { @@ -461,64 +483,68 @@ public void testEquivalence() { MultiMatchQueryBuilder multiMatchQueryBuilder = randomBoolean() ? multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category") : multiMatchQuery("captain america", "*_name", randomBoolean() ? "category" : "categ*"); - SearchResponse left = prepareSearch("test").setSize(numDocs) - .addSort(SortBuilders.scoreSort()) - .addSort(SortBuilders.fieldSort("id")) - .setQuery( - randomizeType(multiMatchQueryBuilder.operator(op).tieBreaker(1.0f).minimumShouldMatch(minShouldMatch).type(type)) - ) - .get(); - - SearchResponse right = prepareSearch("test").setSize(numDocs) - .addSort(SortBuilders.scoreSort()) - .addSort(SortBuilders.fieldSort("id")) - .setQuery( - boolQuery().minimumShouldMatch(minShouldMatch) - .should( - randomBoolean() - ? termQuery("full_name", "captain america") - : matchQuery("full_name", "captain america").operator(op) + assertResponse( + prepareSearch("test").setSize(numDocs) + .addSort(SortBuilders.scoreSort()) + .addSort(SortBuilders.fieldSort("id")) + .setQuery( + randomizeType( + multiMatchQueryBuilder.operator(op).tieBreaker(1.0f).minimumShouldMatch(minShouldMatch).type(type) ) - .should(matchQuery("first_name", "captain america").operator(op)) - .should(matchQuery("last_name", "captain america").operator(op)) - .should(matchQuery("category", "captain america").operator(op)) + ), + left -> assertResponse( + prepareSearch("test").setSize(numDocs) + .addSort(SortBuilders.scoreSort()) + .addSort(SortBuilders.fieldSort("id")) + .setQuery( + boolQuery().minimumShouldMatch(minShouldMatch) + .should( + randomBoolean() + ? 
termQuery("full_name", "captain america") + : matchQuery("full_name", "captain america").operator(op) + ) + .should(matchQuery("first_name", "captain america").operator(op)) + .should(matchQuery("last_name", "captain america").operator(op)) + .should(matchQuery("category", "captain america").operator(op)) + ), + right -> assertEquivalent("captain america", left, right) ) - .get(); - assertEquivalent("captain america", left, right); + ); } { String minShouldMatch = randomBoolean() ? null : "" + between(0, 1); - SearchResponse left = prepareSearch("test").setSize(numDocs) - .addSort(SortBuilders.scoreSort()) - .addSort(SortBuilders.fieldSort("id")) - .setQuery( - randomizeType( - multiMatchQuery("capta", "full_name", "first_name", "last_name", "category").type( - MatchQueryParser.Type.PHRASE_PREFIX - ).tieBreaker(1.0f).minimumShouldMatch(minShouldMatch) - ) - ) - .get(); - - SearchResponse right = prepareSearch("test").setSize(numDocs) - .addSort(SortBuilders.scoreSort()) - .addSort(SortBuilders.fieldSort("id")) - .setQuery( - boolQuery().minimumShouldMatch(minShouldMatch) - .should(matchPhrasePrefixQuery("full_name", "capta")) - .should(matchPhrasePrefixQuery("first_name", "capta")) - .should(matchPhrasePrefixQuery("last_name", "capta")) - .should(matchPhrasePrefixQuery("category", "capta")) + assertResponse( + prepareSearch("test").setSize(numDocs) + .addSort(SortBuilders.scoreSort()) + .addSort(SortBuilders.fieldSort("id")) + .setQuery( + randomizeType( + multiMatchQuery("capta", "full_name", "first_name", "last_name", "category").type( + MatchQueryParser.Type.PHRASE_PREFIX + ).tieBreaker(1.0f).minimumShouldMatch(minShouldMatch) + ) + ), + left -> assertResponse( + prepareSearch("test").setSize(numDocs) + .addSort(SortBuilders.scoreSort()) + .addSort(SortBuilders.fieldSort("id")) + .setQuery( + boolQuery().minimumShouldMatch(minShouldMatch) + .should(matchPhrasePrefixQuery("full_name", "capta")) + .should(matchPhrasePrefixQuery("first_name", "capta")) + .should(matchPhrasePrefixQuery("last_name", "capta")) + .should(matchPhrasePrefixQuery("category", "capta")) + ), + right -> assertEquivalent("capta", left, right) ) - .get(); - assertEquivalent("capta", left, right); + ); } { String minShouldMatch = randomBoolean() ? 
null : "" + between(0, 1); - SearchResponse left; + SearchRequestBuilder leftSearch; if (randomBoolean()) { - left = prepareSearch("test").setSize(numDocs) + leftSearch = prepareSearch("test").setSize(numDocs) .addSort(SortBuilders.scoreSort()) .addSort(SortBuilders.fieldSort("id")) .setQuery( @@ -527,10 +553,9 @@ public void testEquivalence() { MatchQueryParser.Type.PHRASE ).minimumShouldMatch(minShouldMatch) ) - ) - .get(); + ); } else { - left = prepareSearch("test").setSize(numDocs) + leftSearch = prepareSearch("test").setSize(numDocs) .addSort(SortBuilders.scoreSort()) .addSort(SortBuilders.fieldSort("id")) .setQuery( @@ -539,163 +564,206 @@ public void testEquivalence() { MatchQueryParser.Type.PHRASE ).tieBreaker(1.0f).minimumShouldMatch(minShouldMatch) ) - ) - .get(); + ); } - SearchResponse right = prepareSearch("test").setSize(numDocs) - .addSort(SortBuilders.scoreSort()) - .addSort(SortBuilders.fieldSort("id")) - .setQuery( - boolQuery().minimumShouldMatch(minShouldMatch) - .should(matchPhraseQuery("full_name", "captain america")) - .should(matchPhraseQuery("first_name", "captain america")) - .should(matchPhraseQuery("last_name", "captain america")) - .should(matchPhraseQuery("category", "captain america")) + assertResponse( + leftSearch, + left -> assertResponse( + prepareSearch("test").setSize(numDocs) + .addSort(SortBuilders.scoreSort()) + .addSort(SortBuilders.fieldSort("id")) + .setQuery( + boolQuery().minimumShouldMatch(minShouldMatch) + .should(matchPhraseQuery("full_name", "captain america")) + .should(matchPhraseQuery("first_name", "captain america")) + .should(matchPhraseQuery("last_name", "captain america")) + .should(matchPhraseQuery("category", "captain america")) + ), + right -> assertEquivalent("captain america", left, right) ) - .get(); - assertEquivalent("captain america", left, right); + ); } } } public void testCrossFieldMode() throws ExecutionException, InterruptedException { - SearchResponse searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america", "full_name", "first_name", "last_name").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) - .operator(Operator.OR) - ) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).operator(Operator.OR) - ) - ).get(); - assertFirstHit(searchResponse, hasId("theother")); - assertSecondHit(searchResponse, hasId("theone")); - assertThat(searchResponse.getHits().getHits()[0].getScore(), greaterThan(searchResponse.getHits().getHits()[1].getScore())); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("marvel hero", "full_name", "first_name", "last_name", "category").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).operator(Operator.OR) - ) - ).get(); - assertFirstHit(searchResponse, hasId("theother")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).operator(Operator.AND) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america 15", "full_name", "first_name", "last_name", "category", "skill").type( - 
MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).analyzer("category").lenient(true).operator(Operator.AND) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america 15", "full_name", "first_name", "last_name", "category", "skill", "int-field").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).analyzer("category").lenient(true).operator(Operator.AND) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america 15", "skill", "full_name", "first_name", "last_name", "category", "int-field").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).analyzer("category").lenient(true).operator(Operator.AND) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america 15", "first_name", "last_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) - .lenient(true) - .analyzer("category") - ) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category")) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("25 15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category")) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("25 15", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category") - ) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("25 15", "first_name", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) - .analyzer("category") - ) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("25 15", "int-field", "skill", "first_name").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) - .analyzer("category") - ) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("25 15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) - .analyzer("category") - ) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).analyzer("category").operator(Operator.OR) - ) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america", "full_name", "first_name", "last_name").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).operator(Operator.OR) + ) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("marvel hero captain 
america", "full_name", "first_name", "last_name", "category").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).operator(Operator.OR) + ) + ), + response -> { + assertFirstHit(response, hasId("theother")); + assertSecondHit(response, hasId("theone")); + assertThat(response.getHits().getHits()[0].getScore(), greaterThan(response.getHits().getHits()[1].getScore())); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("marvel hero", "full_name", "first_name", "last_name", "category").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).operator(Operator.OR) + ) + ), + response -> assertFirstHit(response, hasId("theother")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).operator(Operator.AND) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america 15", "full_name", "first_name", "last_name", "category", "skill").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).analyzer("category").lenient(true).operator(Operator.AND) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america 15", "full_name", "first_name", "last_name", "category", "skill", "int-field").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).analyzer("category").lenient(true).operator(Operator.AND) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america 15", "skill", "full_name", "first_name", "last_name", "category", "int-field").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).analyzer("category").lenient(true).operator(Operator.AND) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america 15", "first_name", "last_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) + .lenient(true) + .analyzer("category") + ) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType(multiMatchQuery("15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category")) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType(multiMatchQuery("25 15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category")) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("25 15", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category") + ) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("25 15", "first_name", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) + .analyzer("category") + ) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + 
assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("25 15", "int-field", "skill", "first_name").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) + .analyzer("category") + ) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("25 15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) + .analyzer("category") + ) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).analyzer("category").operator(Operator.OR) + ) + ), + response -> assertFirstHit(response, hasId("theone")) + ); // test group based on analyzer -- all fields are grouped into a cross field search - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).analyzer("category").operator(Operator.AND) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).analyzer("category").operator(Operator.AND) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); // counter example assertHitCount( prepareSearch("test").setQuery( @@ -721,83 +789,112 @@ public void testCrossFieldMode() throws ExecutionException, InterruptedException ); // test if boosts work - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("the ultimate", "full_name", "first_name", "category").field("last_name", 10) - .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) - .operator(Operator.AND) - ) - ).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("ultimate1")); // has ultimate in the last_name and that is boosted - assertSecondHit(searchResponse, hasId("ultimate2")); - assertThat(searchResponse.getHits().getHits()[0].getScore(), greaterThan(searchResponse.getHits().getHits()[1].getScore())); - + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("the ultimate", "full_name", "first_name", "category").field("last_name", 10) + .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) + .operator(Operator.AND) + ) + ), + response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasId("ultimate1")); // has ultimate in the last_name and that is boosted + assertSecondHit(response, hasId("ultimate2")); + assertThat(response.getHits().getHits()[0].getScore(), greaterThan(response.getHits().getHits()[1].getScore())); + } + ); // since we try to treat the matching fields as one field scores are very similar but we have a small bias towards the // more frequent field that acts as a tie-breaker internally - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("the ultimate", "full_name", "first_name", "last_name", "category").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).operator(Operator.AND) - ) - ).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("ultimate2")); - 
assertSecondHit(searchResponse, hasId("ultimate1")); - assertThat(searchResponse.getHits().getHits()[0].getScore(), greaterThan(searchResponse.getHits().getHits()[1].getScore())); - + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("the ultimate", "full_name", "first_name", "last_name", "category").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).operator(Operator.AND) + ) + ), + response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasId("ultimate2")); + assertSecondHit(response, hasId("ultimate1")); + assertThat(response.getHits().getHits()[0].getScore(), greaterThan(response.getHits().getHits()[1].getScore())); + } + ); // Test group based on numeric fields - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("15", "skill", "first_name").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - + assertResponse( + prepareSearch("test").setQuery(randomizeType(multiMatchQuery("15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS))), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType(multiMatchQuery("15", "skill", "first_name").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); // Two numeric fields together caused trouble at one point! - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("15", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("alpha 15", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).lenient(true)) - ).get(); - /* - * Doesn't find the one because "alpha 15" isn't a number and we don't - * break on spaces. 
- */ - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("ultimate1")); - + assertResponse( + prepareSearch("test").setQuery( + randomizeType(multiMatchQuery("15", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType(multiMatchQuery("15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("alpha 15", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).lenient(true) + ) + ), + response -> { + /* + * Doesn't find the one because "alpha 15" isn't a number and we don't + * break on spaces. + */ + assertHitCount(response, 1L); + assertFirstHit(response, hasId("ultimate1")); + } + ); // Lenient wasn't always properly lenient with two numeric fields - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("alpha 15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).lenient(true) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("ultimate1")); - + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("alpha 15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) + .lenient(true) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("ultimate1")); + } + ); // Check that cross fields works with date fields - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("now", "f*", "date").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)).lenient(true) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("nowHero")); + assertResponse( + prepareSearch("test").setQuery( + randomizeType(multiMatchQuery("now", "f*", "date").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)).lenient(true) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("nowHero")); + } + ); } /** @@ -815,18 +912,25 @@ public void testFuzzyFieldLevelBoosting() throws InterruptedException, Execution assertAcked(builder.setMapping("title", "type=text", "body", "type=text")); ensureGreen(); List builders = new ArrayList<>(); - builders.add(client().prepareIndex(idx).setId("1").setSource("title", "foo", "body", "bar")); - builders.add(client().prepareIndex(idx).setId("2").setSource("title", "bar", "body", "foo")); + builders.add(prepareIndex(idx).setId("1").setSource("title", "foo", "body", "bar")); + builders.add(prepareIndex(idx).setId("2").setSource("title", "bar", "body", "foo")); indexRandom(true, false, builders); - SearchResponse searchResponse = prepareSearch(idx).setExplain(true) - .setQuery(multiMatchQuery("foo").field("title", 100).field("body").fuzziness(Fuzziness.ZERO)) - .get(); - SearchHit[] hits = searchResponse.getHits().getHits(); - assertNotEquals("both documents should be on different shards", hits[0].getShard().getShardId(), hits[1].getShard().getShardId()); - assertEquals("1", hits[0].getId()); - assertEquals("2", hits[1].getId()); - assertThat(hits[0].getScore(), greaterThan(hits[1].getScore())); + assertResponse( + prepareSearch(idx).setExplain(true) + 
.setQuery(multiMatchQuery("foo").field("title", 100).field("body").fuzziness(Fuzziness.ZERO)), + response -> { + SearchHit[] hits = response.getHits().getHits(); + assertNotEquals( + "both documents should be on different shards", + hits[0].getShard().getShardId(), + hits[1].getShard().getShardId() + ); + assertEquals("1", hits[0].getId()); + assertEquals("2", hits[1].getId()); + assertThat(hits[0].getScore(), greaterThan(hits[1].getScore())); + } + ); } private static void assertEquivalent(String query, SearchResponse left, SearchResponse right) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java index 882e18eb593aa..d8787b6ef7b16 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.Operator; import org.elasticsearch.search.SearchHit; @@ -28,7 +27,8 @@ import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -44,106 +44,93 @@ public void setup() throws Exception { public void testBasicAllQuery() throws Exception { List reqs = new ArrayList<>(); - reqs.add(client().prepareIndex("test").setId("1").setSource("f1", "foo bar baz")); - reqs.add(client().prepareIndex("test").setId("2").setSource("f2", "Bar")); - reqs.add(client().prepareIndex("test").setId("3").setSource("f3", "foo bar baz")); + reqs.add(prepareIndex("test").setId("1").setSource("f1", "foo bar baz")); + reqs.add(prepareIndex("test").setId("2").setSource("f2", "Bar")); + reqs.add(prepareIndex("test").setId("3").setSource("f3", "foo bar baz")); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(queryStringQuery("foo")).get(); - assertHitCount(resp, 2L); - assertHits(resp.getHits(), "1", "3"); - - resp = prepareSearch("test").setQuery(queryStringQuery("bar")).get(); - assertHitCount(resp, 2L); - assertHits(resp.getHits(), "1", "3"); - - resp = prepareSearch("test").setQuery(queryStringQuery("Bar")).get(); - assertHitCount(resp, 3L); - assertHits(resp.getHits(), "1", "2", "3"); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("foo")), response -> { + assertHitCount(response, 2L); + assertHits(response.getHits(), "1", "3"); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("bar")), response -> { + assertHitCount(response, 2L); + assertHits(response.getHits(), "1", "3"); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("Bar")), response -> { + assertHitCount(response, 3L); + assertHits(response.getHits(), "1", 
"2", "3"); + }); } public void testWithDate() throws Exception { List reqs = new ArrayList<>(); - reqs.add(client().prepareIndex("test").setId("1").setSource("f1", "foo", "f_date", "2015/09/02")); - reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01")); + reqs.add(prepareIndex("test").setId("1").setSource("f1", "foo", "f_date", "2015/09/02")); + reqs.add(prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01")); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(queryStringQuery("foo bar")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\"")).get(); - assertHits(resp.getHits(), "1"); - assertHitCount(resp, 1L); - - resp = prepareSearch("test").setQuery(queryStringQuery("bar \"2015/09/02\"")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\" \"2015/09/01\"")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("foo bar")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\"")), response -> { + assertHits(response.getHits(), "1"); + assertHitCount(response, 1L); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("bar \"2015/09/02\"")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\" \"2015/09/01\"")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); } public void testWithLotsOfTypes() throws Exception { List reqs = new ArrayList<>(); - reqs.add( - client().prepareIndex("test").setId("1").setSource("f1", "foo", "f_date", "2015/09/02", "f_float", "1.7", "f_ip", "127.0.0.1") - ); - reqs.add( - client().prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01", "f_float", "1.8", "f_ip", "127.0.0.2") - ); + reqs.add(prepareIndex("test").setId("1").setSource("f1", "foo", "f_date", "2015/09/02", "f_float", "1.7", "f_ip", "127.0.0.1")); + reqs.add(prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01", "f_float", "1.8", "f_ip", "127.0.0.2")); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(queryStringQuery("foo bar")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\"")).get(); - assertHits(resp.getHits(), "1"); - assertHitCount(resp, 1L); - - resp = prepareSearch("test").setQuery(queryStringQuery("127.0.0.2 \"2015/09/02\"")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(queryStringQuery("127.0.0.1 OR 1.8")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("foo bar")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\"")), response -> { + assertHits(response.getHits(), "1"); + assertHitCount(response, 1L); + }); + 
assertResponse(prepareSearch("test").setQuery(queryStringQuery("127.0.0.2 \"2015/09/02\"")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("127.0.0.1 OR 1.8")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); } public void testDocWithAllTypes() throws Exception { List reqs = new ArrayList<>(); String docBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-example-document.json"); - reqs.add(client().prepareIndex("test").setId("1").setSource(docBody, XContentType.JSON)); + reqs.add(prepareIndex("test").setId("1").setSource(docBody, XContentType.JSON)); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(queryStringQuery("foo")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("Bar")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("Baz")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("19")).get(); - assertHits(resp.getHits(), "1"); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("foo")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("Bar")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("Baz")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("19")), response -> assertHits(response.getHits(), "1")); // nested doesn't match because it's hidden - resp = prepareSearch("test").setQuery(queryStringQuery("1476383971")).get(); - assertHits(resp.getHits(), "1"); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("1476383971")), response -> assertHits(response.getHits(), "1")); // bool doesn't match - resp = prepareSearch("test").setQuery(queryStringQuery("7")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("23")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("1293")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("42")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("1.7")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("1.5")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("127.0.0.1")).get(); - assertHits(resp.getHits(), "1"); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("7")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("23")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("1293")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("42")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("1.7")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("1.5")), response -> assertHits(response.getHits(), "1")); + 
assertResponse(prepareSearch("test").setQuery(queryStringQuery("127.0.0.1")), response -> assertHits(response.getHits(), "1")); // binary doesn't match // suggest doesn't match // geo_point doesn't match @@ -151,22 +138,23 @@ public void testDocWithAllTypes() throws Exception { public void testKeywordWithWhitespace() throws Exception { List reqs = new ArrayList<>(); - reqs.add(client().prepareIndex("test").setId("1").setSource("f2", "Foo Bar")); - reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar")); - reqs.add(client().prepareIndex("test").setId("3").setSource("f1", "foo bar")); + reqs.add(prepareIndex("test").setId("1").setSource("f2", "Foo Bar")); + reqs.add(prepareIndex("test").setId("2").setSource("f1", "bar")); + reqs.add(prepareIndex("test").setId("3").setSource("f1", "foo bar")); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(queryStringQuery("foo")).get(); - assertHits(resp.getHits(), "3"); - assertHitCount(resp, 1L); - - resp = prepareSearch("test").setQuery(queryStringQuery("bar")).get(); - assertHits(resp.getHits(), "2", "3"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(queryStringQuery("Foo Bar")).get(); - assertHits(resp.getHits(), "1", "2", "3"); - assertHitCount(resp, 3L); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("foo")), response -> { + assertHits(response.getHits(), "3"); + assertHitCount(response, 1L); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("bar")), response -> { + assertHits(response.getHits(), "2", "3"); + assertHitCount(response, 2L); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("Foo Bar")), response -> { + assertHits(response.getHits(), "1", "2", "3"); + assertHitCount(response, 3L); + }); } public void testAllFields() throws Exception { @@ -177,20 +165,21 @@ public void testAllFields() throws Exception { ensureGreen("test_1"); List reqs = new ArrayList<>(); - reqs.add(client().prepareIndex("test_1").setId("1").setSource("f1", "foo", "f2", "eggplant")); + reqs.add(prepareIndex("test_1").setId("1").setSource("f1", "foo", "f2", "eggplant")); indexRandom(true, false, reqs); assertHitCount(prepareSearch("test_1").setQuery(queryStringQuery("foo eggplant").defaultOperator(Operator.AND)), 0L); - SearchResponse resp = prepareSearch("test_1").setQuery(queryStringQuery("foo eggplant").defaultOperator(Operator.OR)).get(); - assertHits(resp.getHits(), "1"); - assertHitCount(resp, 1L); + assertResponse(prepareSearch("test_1").setQuery(queryStringQuery("foo eggplant").defaultOperator(Operator.OR)), response -> { + assertHits(response.getHits(), "1"); + assertHitCount(response, 1L); + }); } public void testPhraseQueryOnFieldWithNoPositions() throws Exception { List reqs = new ArrayList<>(); - reqs.add(client().prepareIndex("test").setId("1").setSource("f1", "foo bar", "f4", "eggplant parmesan")); - reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "foo bar", "f4", "chicken parmesan")); + reqs.add(prepareIndex("test").setId("1").setSource("f1", "foo bar", "f4", "eggplant parmesan")); + reqs.add(prepareIndex("test").setId("2").setSource("f1", "foo bar", "f4", "chicken parmesan")); indexRandom(true, false, reqs); assertHitCount(prepareSearch("test").setQuery(queryStringQuery("\"eggplant parmesan\"").lenient(true)), 0L); @@ -222,58 +211,54 @@ public void testAllFieldsWithSpecifiedLeniency() throws IOException { public void testFieldAlias() throws Exception { List indexRequests = new ArrayList<>(); - 
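testDocWithAllTypes above becomes a column of one-line assertResponse calls that differ only in the query string. This is not what the patch does, but it is worth noting the helper composes with ordinary control flow, so the same checks could be driven from a list; a hypothetical compaction:

```java
import java.util.List;

// Hypothetical compaction of the repeated single-hit checks; each iteration
// opens and releases its own response via the helper.
for (String q : List.of("foo", "Bar", "Baz", "19", "1476383971", "7", "23", "1293", "42", "1.7", "1.5", "127.0.0.1")) {
    assertResponse(prepareSearch("test").setQuery(queryStringQuery(q)), response -> assertHits(response.getHits(), "1"));
}
```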
indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); - indexRequests.add(client().prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); - indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); + indexRequests.add(prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); + indexRequests.add(prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); + indexRequests.add(prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); indexRandom(true, false, indexRequests); - SearchResponse response = prepareSearch("test").setQuery(queryStringQuery("value").field("f3_alias")).get(); - - assertNoFailures(response); - assertHitCount(response, 2); - assertHits(response.getHits(), "2", "3"); + assertNoFailuresAndResponse(prepareSearch("test").setQuery(queryStringQuery("value").field("f3_alias")), response -> { + assertHitCount(response, 2); + assertHits(response.getHits(), "2", "3"); + }); } public void testFieldAliasWithEmbeddedFieldNames() throws Exception { List indexRequests = new ArrayList<>(); - indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); - indexRequests.add(client().prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); - indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); + indexRequests.add(prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); + indexRequests.add(prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); + indexRequests.add(prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); indexRandom(true, false, indexRequests); - SearchResponse response = prepareSearch("test").setQuery(queryStringQuery("f3_alias:value AND f2:three")).get(); - - assertNoFailures(response); - assertHitCount(response, 1); - assertHits(response.getHits(), "3"); + assertNoFailuresAndResponse(prepareSearch("test").setQuery(queryStringQuery("f3_alias:value AND f2:three")), response -> { + assertHitCount(response, 1); + assertHits(response.getHits(), "3"); + }); } public void testFieldAliasWithWildcardField() throws Exception { List indexRequests = new ArrayList<>(); - indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); - indexRequests.add(client().prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); - indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); + indexRequests.add(prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); + indexRequests.add(prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); + indexRequests.add(prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); indexRandom(true, false, indexRequests); - SearchResponse response = prepareSearch("test").setQuery(queryStringQuery("value").field("f3_*")).get(); - - assertNoFailures(response); - assertHitCount(response, 2); - assertHits(response.getHits(), "2", "3"); + assertNoFailuresAndResponse(prepareSearch("test").setQuery(queryStringQuery("value").field("f3_*")), response -> { + assertHitCount(response, 2); + assertHits(response.getHits(), "2", "3"); + }); } public void testFieldAliasOnDisallowedFieldType() throws Exception { List indexRequests = new ArrayList<>(); - 
indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); + indexRequests.add(prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); indexRandom(true, false, indexRequests); // The wildcard field matches aliases for both a text and geo_point field. // By default, the geo_point field should be ignored when building the query. - SearchResponse response = prepareSearch("test").setQuery(queryStringQuery("text").field("f*_alias")).get(); - - assertNoFailures(response); - assertHitCount(response, 1); - assertHits(response.getHits(), "1"); + assertNoFailuresAndResponse(prepareSearch("test").setQuery(queryStringQuery("text").field("f*_alias")), response -> { + assertHitCount(response, 1); + assertHits(response.getHits(), "1"); + }); } private void assertHits(SearchHits hits, String... ids) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/ScriptScoreQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/ScriptScoreQueryIT.java index c9c7c2a56eea9..0a35c33673343 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/ScriptScoreQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/ScriptScoreQueryIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.query; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.query.QueryBuilder; @@ -32,6 +31,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThirdHit; @@ -66,31 +66,35 @@ public void testScriptScore() { assertAcked(prepareCreate("test-index").setMapping("field1", "type=text", "field2", "type=double")); int docCount = 10; for (int i = 1; i <= docCount; i++) { - client().prepareIndex("test-index").setId("" + i).setSource("field1", "text" + (i % 2), "field2", i).get(); + prepareIndex("test-index").setId("" + i).setSource("field1", "text" + (i % 2), "field2", i).get(); } refresh(); Map<String, Object> params = new HashMap<>(); params.put("param1", 0.1); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['field2'].value * param1", params); - SearchResponse resp = prepareSearch("test-index").setQuery(scriptScoreQuery(matchQuery("field1", "text0"), script)).get(); - assertNoFailures(resp); - assertOrderedSearchHits(resp, "10", "8", "6", "4", "2"); - assertFirstHit(resp, hasScore(1.0f)); - assertSecondHit(resp, hasScore(0.8f)); - assertThirdHit(resp, hasScore(0.6f)); + assertNoFailuresAndResponse( + prepareSearch("test-index").setQuery(scriptScoreQuery(matchQuery("field1", "text0"), script)), + response -> { + assertOrderedSearchHits(response, "10", "8", "6", "4", "2"); + assertFirstHit(response, hasScore(1.0f)); + assertSecondHit(response, hasScore(0.8f)); + assertThirdHit(response, hasScore(0.6f)); + } + ); // applying min score -
resp = prepareSearch("test-index").setQuery(scriptScoreQuery(matchQuery("field1", "text0"), script).setMinScore(0.6f)).get(); - assertNoFailures(resp); - assertOrderedSearchHits(resp, "10", "8", "6"); + assertNoFailuresAndResponse( + prepareSearch("test-index").setQuery(scriptScoreQuery(matchQuery("field1", "text0"), script).setMinScore(0.6f)), + response -> assertOrderedSearchHits(response, "10", "8", "6") + ); } public void testScriptScoreBoolQuery() { assertAcked(prepareCreate("test-index").setMapping("field1", "type=text", "field2", "type=double")); int docCount = 10; for (int i = 1; i <= docCount; i++) { - client().prepareIndex("test-index").setId("" + i).setSource("field1", "text" + i, "field2", i).get(); + prepareIndex("test-index").setId("" + i).setSource("field1", "text" + i, "field2", i).get(); } refresh(); @@ -98,11 +102,11 @@ public void testScriptScoreBoolQuery() { params.put("param1", 0.1); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['field2'].value * param1", params); QueryBuilder boolQuery = boolQuery().should(matchQuery("field1", "text1")).should(matchQuery("field1", "text10")); - SearchResponse resp = prepareSearch("test-index").setQuery(scriptScoreQuery(boolQuery, script)).get(); - assertNoFailures(resp); - assertOrderedSearchHits(resp, "10", "1"); - assertFirstHit(resp, hasScore(1.0f)); - assertSecondHit(resp, hasScore(0.1f)); + assertNoFailuresAndResponse(prepareSearch("test-index").setQuery(scriptScoreQuery(boolQuery, script)), response -> { + assertOrderedSearchHits(response, "10", "1"); + assertFirstHit(response, hasScore(1.0f)); + assertSecondHit(response, hasScore(0.1f)); + }); } // test that when the internal query is rewritten script_score works well @@ -111,16 +115,17 @@ public void testRewrittenQuery() { prepareCreate("test-index2").setSettings(Settings.builder().put("index.number_of_shards", 1)) .setMapping("field1", "type=date", "field2", "type=double") ); - client().prepareIndex("test-index2").setId("1").setSource("field1", "2019-09-01", "field2", 1).get(); - client().prepareIndex("test-index2").setId("2").setSource("field1", "2019-10-01", "field2", 2).get(); - client().prepareIndex("test-index2").setId("3").setSource("field1", "2019-11-01", "field2", 3).get(); + prepareIndex("test-index2").setId("1").setSource("field1", "2019-09-01", "field2", 1).get(); + prepareIndex("test-index2").setId("2").setSource("field1", "2019-10-01", "field2", 2).get(); + prepareIndex("test-index2").setId("3").setSource("field1", "2019-11-01", "field2", 3).get(); refresh(); RangeQueryBuilder rangeQB = new RangeQueryBuilder("field1").from("2019-01-01"); // the query should be rewritten to from:null Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['field2'].value * param1", Map.of("param1", 0.1)); - SearchResponse resp = prepareSearch("test-index2").setQuery(scriptScoreQuery(rangeQB, script)).get(); - assertNoFailures(resp); - assertOrderedSearchHits(resp, "3", "2", "1"); + assertNoFailuresAndResponse( + prepareSearch("test-index2").setQuery(scriptScoreQuery(rangeQB, script)), + response -> assertOrderedSearchHits(response, "3", "2", "1") + ); } public void testDisallowExpensiveQueries() { @@ -128,7 +133,7 @@ public void testDisallowExpensiveQueries() { assertAcked(prepareCreate("test-index").setMapping("field1", "type=text", "field2", "type=double")); int docCount = 10; for (int i = 1; i <= docCount; i++) { - client().prepareIndex("test-index").setId("" + i).setSource("field1", "text" + (i % 2), "field2", i).get(); + 
prepareIndex("test-index").setId("" + i).setSource("field1", "text" + (i % 2), "field2", i).get(); } refresh(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java index 918746021f381..ea2decff18cd0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -17,7 +17,6 @@ import org.apache.lucene.util.AttributeSource; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.document.DocumentField; @@ -106,6 +105,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; @@ -141,9 +142,9 @@ public void testEmptyQueryString() throws ExecutionException, InterruptedExcepti createIndex("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("field1", "the quick brown fox jumps"), - client().prepareIndex("test").setId("2").setSource("field1", "quick brown"), - client().prepareIndex("test").setId("3").setSource("field1", "quick") + prepareIndex("test").setId("1").setSource("field1", "the quick brown fox jumps"), + prepareIndex("test").setId("2").setSource("field1", "quick brown"), + prepareIndex("test").setId("3").setSource("field1", "quick") ); assertHitCount(prepareSearch().setQuery(queryStringQuery("quick")), 3L); @@ -153,9 +154,9 @@ public void testEmptyQueryString() throws ExecutionException, InterruptedExcepti // see https://github.com/elastic/elasticsearch/issues/3177 public void testIssue3177() { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); - client().prepareIndex("test").setId("2").setSource("field1", "value2").get(); - client().prepareIndex("test").setId("3").setSource("field1", "value3").get(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); + prepareIndex("test").setId("2").setSource("field1", "value2").get(); + prepareIndex("test").setId("3").setSource("field1", "value3").get(); ensureGreen(); waitForRelocation(); forceMerge(); @@ -185,8 +186,8 @@ public void testIndexOptions() throws Exception { assertAcked(prepareCreate("test").setMapping("field1", "type=text,index_options=docs")); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("field1", "quick brown fox", "field2", "quick brown fox"), - client().prepareIndex("test").setId("2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox") + 
prepareIndex("test").setId("1").setSource("field1", "quick brown fox", "field2", "quick brown fox"), + prepareIndex("test").setId("2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox") ); assertHitCount(prepareSearch().setQuery(matchPhraseQuery("field2", "quick brown").slop(0)), 1L); @@ -204,44 +205,55 @@ public void testConstantScoreQuery() throws Exception { createIndex("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("field1", "quick brown fox", "field2", "quick brown fox"), - client().prepareIndex("test").setId("2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox") + prepareIndex("test").setId("1").setSource("field1", "quick brown fox", "field2", "quick brown fox"), + prepareIndex("test").setId("2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox") ); - SearchResponse searchResponse = prepareSearch().setQuery(constantScoreQuery(matchQuery("field1", "quick"))).get(); - assertHitCount(searchResponse, 2L); - for (SearchHit searchHit : searchResponse.getHits().getHits()) { - assertThat(searchHit, hasScore(1.0f)); - } - - searchResponse = prepareSearch("test").setQuery( - boolQuery().must(matchAllQuery()).must(constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + random().nextFloat())) - ).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasScore(searchResponse.getHits().getAt(1).getScore())); - - prepareSearch("test").setQuery(constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + random().nextFloat())).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasScore(searchResponse.getHits().getAt(1).getScore())); - - searchResponse = prepareSearch("test").setQuery( - constantScoreQuery( - boolQuery().must(matchAllQuery()) - .must( - constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + (random.nextBoolean() ? 0.0f : random.nextFloat())) - ) - ) - ).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasScore(searchResponse.getHits().getAt(1).getScore())); - for (SearchHit searchHit : searchResponse.getHits().getHits()) { - assertThat(searchHit, hasScore(1.0f)); - } - + assertResponse(prepareSearch().setQuery(constantScoreQuery(matchQuery("field1", "quick"))), response -> { + assertHitCount(response, 2L); + for (SearchHit searchHit : response.getHits().getHits()) { + assertThat(searchHit, hasScore(1.0f)); + } + }); + assertResponse( + prepareSearch("test").setQuery( + boolQuery().must(matchAllQuery()).must(constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + random().nextFloat())) + ), + response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasScore(response.getHits().getAt(1).getScore())); + } + ); + assertResponse( + prepareSearch("test").setQuery(constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + random().nextFloat())), + response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasScore(response.getHits().getAt(1).getScore())); + } + ); + assertResponse( + prepareSearch("test").setQuery( + constantScoreQuery( + boolQuery().must(matchAllQuery()) + .must( + constantScoreQuery(matchQuery("field1", "quick")).boost( + 1.0f + (random.nextBoolean() ? 
0.0f : random.nextFloat()) + ) + ) + ) + ), + response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasScore(response.getHits().getAt(1).getScore())); + for (SearchHit searchHit : response.getHits().getHits()) { + assertThat(searchHit, hasScore(1.0f)); + } + } + ); int num = scaledRandomIntBetween(100, 200); IndexRequestBuilder[] builders = new IndexRequestBuilder[num]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test_1").setId("" + i).setSource("f", English.intToEnglish(i)); + builders[i] = prepareIndex("test_1").setId("" + i).setSource("f", English.intToEnglish(i)); } createIndex("test_1"); indexRandom(true, builders); @@ -249,53 +261,57 @@ public void testConstantScoreQuery() throws Exception { int queryRounds = scaledRandomIntBetween(10, 20); for (int i = 0; i < queryRounds; i++) { MatchQueryBuilder matchQuery = matchQuery("f", English.intToEnglish(between(0, num))); - searchResponse = prepareSearch("test_1").setQuery(constantScoreQuery(matchQuery)).setSize(num).get(); - long totalHits = searchResponse.getHits().getTotalHits().value; - SearchHits hits = searchResponse.getHits(); - for (SearchHit searchHit : hits) { - assertThat(searchHit, hasScore(1.0f)); - } - searchResponse = prepareSearch("test_1").setQuery( - boolQuery().must(matchAllQuery()) - .must(constantScoreQuery(matchQuery).boost(1.0f + (random.nextBoolean() ? 0.0f : random.nextFloat()))) - ).setSize(num).get(); - hits = searchResponse.getHits(); - assertThat(hits.getTotalHits().value, equalTo(totalHits)); - if (totalHits > 1) { - float expected = hits.getAt(0).getScore(); + final long[] constantScoreTotalHits = new long[1]; + assertResponse(prepareSearch("test_1").setQuery(constantScoreQuery(matchQuery)).setSize(num), response -> { + constantScoreTotalHits[0] = response.getHits().getTotalHits().value; + SearchHits hits = response.getHits(); for (SearchHit searchHit : hits) { - assertThat(searchHit, hasScore(expected)); + assertThat(searchHit, hasScore(1.0f)); } - } + }); + assertResponse( + prepareSearch("test_1").setQuery( + boolQuery().must(matchAllQuery()) + .must(constantScoreQuery(matchQuery).boost(1.0f + (random.nextBoolean() ? 
0.0f : random.nextFloat()))) + ).setSize(num), + response -> { + SearchHits hits = response.getHits(); + assertThat(hits.getTotalHits().value, equalTo(constantScoreTotalHits[0])); + if (constantScoreTotalHits[0] > 1) { + float expected = hits.getAt(0).getScore(); + for (SearchHit searchHit : hits) { + assertThat(searchHit, hasScore(expected)); + } + } + } + ); } } // see #3521 public void testAllDocsQueryString() throws InterruptedException, ExecutionException { createIndex("test"); - indexRandom( - true, - client().prepareIndex("test").setId("1").setSource("foo", "bar"), - client().prepareIndex("test").setId("2").setSource("foo", "bar") - ); + indexRandom(true, prepareIndex("test").setId("1").setSource("foo", "bar"), prepareIndex("test").setId("2").setSource("foo", "bar")); int iters = scaledRandomIntBetween(100, 200); for (int i = 0; i < iters; i++) { assertHitCount(prepareSearch("test").setQuery(queryStringQuery("*:*^10.0").boost(10.0f)), 2L); - SearchResponse searchResponse = prepareSearch("test").setQuery( - boolQuery().must(matchAllQuery()).must(constantScoreQuery(matchAllQuery())) - ).get(); - assertHitCount(searchResponse, 2L); - assertThat((double) searchResponse.getHits().getAt(0).getScore(), closeTo(2.0, 0.1)); - assertThat((double) searchResponse.getHits().getAt(1).getScore(), closeTo(2.0, 0.1)); + assertResponse( + prepareSearch("test").setQuery(boolQuery().must(matchAllQuery()).must(constantScoreQuery(matchAllQuery()))), + response -> { + assertHitCount(response, 2L); + assertThat((double) response.getHits().getAt(0).getScore(), closeTo(2.0, 0.1)); + assertThat((double) response.getHits().getAt(1).getScore(), closeTo(2.0, 0.1)); + } + ); } } public void testQueryStringAnalyzedWildcard() throws Exception { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("field1", "value_1", "field2", "value_2").get(); + prepareIndex("test").setId("1").setSource("field1", "value_1", "field2", "value_2").get(); refresh(); assertHitCount(prepareSearch().setQuery(queryStringQuery("value*")), 1L); @@ -308,7 +324,7 @@ public void testQueryStringAnalyzedWildcard() throws Exception { public void testLowercaseExpandedTerms() { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("field1", "value_1", "field2", "value_2").get(); + prepareIndex("test").setId("1").setSource("field1", "value_1", "field2", "value_2").get(); refresh(); assertHitCount(prepareSearch().setQuery(queryStringQuery("VALUE_3~1")), 1L); @@ -326,7 +342,7 @@ public void testDateRangeInQueryString() { ZonedDateTime now = ZonedDateTime.now(ZoneOffset.UTC); String aMonthAgo = DateTimeFormatter.ISO_LOCAL_DATE.format(now.minusMonths(1)); String aMonthFromNow = DateTimeFormatter.ISO_LOCAL_DATE.format(now.plusMonths(1)); - client().prepareIndex("test").setId("1").setSource("past", aMonthAgo, "future", aMonthFromNow).get(); + prepareIndex("test").setId("1").setSource("past", aMonthAgo, "future", aMonthFromNow).get(); refresh(); assertHitCount(prepareSearch().setQuery(queryStringQuery("past:[now-2M/d TO now/d]")), 1L); @@ -349,7 +365,7 @@ public void testDateRangeInQueryStringWithTimeZone_7880() { ZoneId timeZone = randomZone(); String now = DateFormatter.forPattern("strict_date_optional_time").format(Instant.now().atZone(timeZone)); logger.info(" --> Using time_zone [{}], now is [{}]", timeZone.getId(), now); - client().prepareIndex("test").setId("1").setSource("past", now).get(); + prepareIndex("test").setId("1").setSource("past", now).get(); refresh(); 
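// A note on the conversion pattern used throughout this change (illustrative commentary, not part of the diff):
// assertResponse(request, consumer) executes the search, hands the SearchResponse to the consumer, and releases it
// afterwards, so the test never keeps a response reference alive past its assertions. Because Java lambdas may only
// capture effectively final locals, testConstantScoreQuery above carries the hit count out of the callback through a
// one-element final long[]. A minimal sketch of the same idiom, with invented names:
//
//     final long[] totalHits = new long[1];
//     assertResponse(prepareSearch("test").setQuery(matchAllQuery()), response -> {
//         totalHits[0] = response.getHits().getTotalHits().value; // written inside the callback
//     });
//     // totalHits[0] can now feed assertions in later, separate callbacks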
assertHitCount(prepareSearch().setQuery(queryStringQuery("past:[now-1m/m TO now+1m/m]").timeZone(timeZone.getId())), 1L); @@ -361,8 +377,8 @@ public void testDateRangeInQueryStringWithTimeZone_10477() { // as with dynamic mappings some shards might be lacking behind and parse a different query assertAcked(prepareCreate("test").setMapping("past", "type=date")); - client().prepareIndex("test").setId("1").setSource("past", "2015-04-05T23:00:00+0000").get(); - client().prepareIndex("test").setId("2").setSource("past", "2015-04-06T00:00:00+0000").get(); + prepareIndex("test").setId("1").setSource("past", "2015-04-05T23:00:00+0000").get(); + prepareIndex("test").setId("2").setSource("past", "2015-04-06T00:00:00+0000").get(); refresh(); // Timezone set with dates @@ -389,9 +405,9 @@ public void testIdsQueryTestsIdIndexed() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("field1", "value1"), - client().prepareIndex("test").setId("2").setSource("field1", "value2"), - client().prepareIndex("test").setId("3").setSource("field1", "value3") + prepareIndex("test").setId("1").setSource("field1", "value1"), + prepareIndex("test").setId("2").setSource("field1", "value2"), + prepareIndex("test").setId("3").setSource("field1", "value3") ); assertSearchHitsWithoutFailures(prepareSearch().setQuery(constantScoreQuery(idsQuery().addIds("1", "3"))), "1", "3"); @@ -408,7 +424,7 @@ public void testTermIndexQuery() throws Exception { for (String indexName : indexNames) { assertAcked(indicesAdmin().prepareCreate(indexName)); - indexRandom(true, client().prepareIndex(indexName).setId(indexName + "1").setSource("field1", "value1")); + indexRandom(true, prepareIndex(indexName).setId(indexName + "1").setSource("field1", "value1")); } @@ -431,8 +447,7 @@ public void testFilterExistsMissing() throws Exception { indexRandom( true, - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .startObject("obj1") @@ -443,8 +458,7 @@ public void testFilterExistsMissing() throws Exception { .field("field2", "value2_1") .endObject() ), - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .startObject("obj1") @@ -454,8 +468,7 @@ public void testFilterExistsMissing() throws Exception { .field("field1", "value1_2") .endObject() ), - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .startObject("obj2") @@ -465,8 +478,7 @@ public void testFilterExistsMissing() throws Exception { .field("field2", "value2_3") .endObject() ), - client().prepareIndex("test") - .setId("4") + prepareIndex("test").setId("4") .setSource( jsonBuilder().startObject() .startObject("obj2") @@ -492,7 +504,7 @@ public void testFilterExistsMissing() throws Exception { public void testPassQueryOrFilterAsJSONString() throws Exception { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("field1", "value1_1", "field2", "value2_1").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("field1", "value1_1", "field2", "value2_1").setRefreshPolicy(IMMEDIATE).get(); WrapperQueryBuilder wrapper = new WrapperQueryBuilder("{ \"term\" : { \"field1\" : \"value1_1\" } }"); assertHitCount(prepareSearch().setQuery(wrapper), 1L); @@ -507,7 +519,7 @@ public void testPassQueryOrFilterAsJSONString() throws Exception { public void testFiltersWithCustomCacheKey() throws Exception { createIndex("test"); - 
client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); refresh(); assertHitCount(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))), 1L); assertHitCount(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))), 1L); @@ -520,19 +532,20 @@ public void testMatchQueryNumeric() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("long", 1L, "double", 1.0d), - client().prepareIndex("test").setId("2").setSource("long", 2L, "double", 2.0d), - client().prepareIndex("test").setId("3").setSource("long", 3L, "double", 3.0d) - ); - - SearchResponse searchResponse = prepareSearch().setQuery(matchQuery("long", "1")).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - - searchResponse = prepareSearch().setQuery(matchQuery("double", "2")).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("2")); - expectThrows(SearchPhaseExecutionException.class, () -> prepareSearch().setQuery(matchQuery("double", "2 3 4")).get()); + prepareIndex("test").setId("1").setSource("long", 1L, "double", 1.0d), + prepareIndex("test").setId("2").setSource("long", 2L, "double", 2.0d), + prepareIndex("test").setId("3").setSource("long", 3L, "double", 3.0d) + ); + + assertResponse(prepareSearch().setQuery(matchQuery("long", "1")), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch().setQuery(matchQuery("double", "2")), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("2")); + expectThrows(SearchPhaseExecutionException.class, () -> prepareSearch().setQuery(matchQuery("double", "2 3 4")).get()); + }); } public void testMatchQueryFuzzy() throws Exception { @@ -540,8 +553,8 @@ public void testMatchQueryFuzzy() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("text", "Unit"), - client().prepareIndex("test").setId("2").setSource("text", "Unity") + prepareIndex("test").setId("1").setSource("text", "Unit"), + prepareIndex("test").setId("2").setSource("text", "Unity") ); assertHitCount(prepareSearch().setQuery(matchQuery("text", "uniy").fuzziness(Fuzziness.fromEdits(0))), 0L); @@ -564,9 +577,9 @@ public void testMultiMatchQuery() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value4", "field3", "value3"), - client().prepareIndex("test").setId("2").setSource("field1", "value2", "field2", "value5", "field3", "value2"), - client().prepareIndex("test").setId("3").setSource("field1", "value3", "field2", "value6", "field3", "value1") + prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value4", "field3", "value3"), + prepareIndex("test").setId("2").setSource("field1", "value2", "field2", "value5", "field3", "value2"), + prepareIndex("test").setId("3").setSource("field1", "value3", "field2", "value6", "field3", "value1") ); MultiMatchQueryBuilder builder = multiMatchQuery("value1 value2 value4", "field1", "field2"); @@ -594,12 +607,12 @@ public void testMultiMatchQuery() throws Exception { builder = multiMatchQuery("value1").field("field1").field("field3", 1.5f).operator(Operator.AND); // Operator only applies on terms // inside a field! Fields are // always OR-ed together. 
- SearchResponse searchResponse = prepareSearch().setQuery(builder).get(); - assertHitCount(searchResponse, 2L); - assertSearchHits(searchResponse, "3", "1"); - + assertResponse(prepareSearch().setQuery(builder), response -> { + assertHitCount(response, 2L); + assertSearchHits(response, "3", "1"); + }); // Test lenient - client().prepareIndex("test").setId("3").setSource("field1", "value7", "field2", "value8", "field4", 5).get(); + prepareIndex("test").setId("3").setSource("field1", "value7", "field2", "value8", "field4", 5).get(); refresh(); builder = multiMatchQuery("value1", "field1", "field2", "field4"); @@ -607,19 +620,23 @@ public void testMultiMatchQuery() throws Exception { // when the number for shards is randomized and we expect failures // we can either run into partial or total failures depending on the current number of shards Matcher reasonMatcher = containsString("NumberFormatException: For input string: \"value1\""); - ShardSearchFailure[] shardFailures; try { - prepareSearch().setQuery(builder).get(); - shardFailures = searchResponse.getShardFailures(); - assertThat("Expected shard failures, got none", shardFailures, not(emptyArray())); + assertResponse(prepareSearch().setQuery(builder), response -> { + ShardSearchFailure[] shardFailures = response.getShardFailures(); + assertThat("Expected shard failures, got none", shardFailures, not(emptyArray())); + for (ShardSearchFailure shardSearchFailure : shardFailures) { + assertThat(shardSearchFailure.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(shardSearchFailure.reason(), reasonMatcher); + } + }); + } catch (SearchPhaseExecutionException e) { assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); - shardFailures = e.shardFailures(); - } - - for (ShardSearchFailure shardSearchFailure : shardFailures) { - assertThat(shardSearchFailure.status(), equalTo(RestStatus.BAD_REQUEST)); - assertThat(shardSearchFailure.reason(), reasonMatcher); + ShardSearchFailure[] shardFailures = e.shardFailures(); + for (ShardSearchFailure shardSearchFailure : shardFailures) { + assertThat(shardSearchFailure.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(shardSearchFailure.reason(), reasonMatcher); + } } builder.lenient(true); @@ -628,8 +645,8 @@ public void testMultiMatchQuery() throws Exception { public void testMatchQueryZeroTermsQuery() { assertAcked(prepareCreate("test").setMapping("field1", "type=text,analyzer=classic", "field2", "type=text,analyzer=classic")); - client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); - client().prepareIndex("test").setId("2").setSource("field1", "value2").get(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); + prepareIndex("test").setId("2").setSource("field1", "value2").get(); refresh(); BoolQueryBuilder boolQuery = boolQuery().must(matchQuery("field1", "a").zeroTermsQuery(ZeroTermsQueryOption.NONE)) @@ -646,8 +663,8 @@ public void testMatchQueryZeroTermsQuery() { public void testMultiMatchQueryZeroTermsQuery() { assertAcked(prepareCreate("test").setMapping("field1", "type=text,analyzer=classic", "field2", "type=text,analyzer=classic")); - client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); - client().prepareIndex("test").setId("2").setSource("field1", "value3", "field2", "value4").get(); + prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); + prepareIndex("test").setId("2").setSource("field1", "value3", "field2", "value4").get(); refresh(); BoolQueryBuilder 
boolQuery = boolQuery().must(multiMatchQuery("a", "field1", "field2").zeroTermsQuery(ZeroTermsQueryOption.NONE)) @@ -665,42 +682,43 @@ public void testMultiMatchQueryZeroTermsQuery() { public void testMultiMatchQueryMinShouldMatch() { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("field1", new String[] { "value1", "value2", "value3" }).get(); - client().prepareIndex("test").setId("2").setSource("field2", "value1").get(); + prepareIndex("test").setId("1").setSource("field1", new String[] { "value1", "value2", "value3" }).get(); + prepareIndex("test").setId("2").setSource("field2", "value1").get(); refresh(); MultiMatchQueryBuilder multiMatchQuery = multiMatchQuery("value1 value2 foo", "field1", "field2"); multiMatchQuery.minimumShouldMatch("70%"); - SearchResponse searchResponse = prepareSearch().setQuery(multiMatchQuery).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - + assertResponse(prepareSearch().setQuery(multiMatchQuery), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); multiMatchQuery.minimumShouldMatch("30%"); - searchResponse = prepareSearch().setQuery(multiMatchQuery).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - + assertResponse(prepareSearch().setQuery(multiMatchQuery), response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("2")); + }); multiMatchQuery.minimumShouldMatch("70%"); - searchResponse = prepareSearch().setQuery(multiMatchQuery).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - + assertResponse(prepareSearch().setQuery(multiMatchQuery), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); multiMatchQuery.minimumShouldMatch("30%"); - searchResponse = prepareSearch().setQuery(multiMatchQuery).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - + assertResponse(prepareSearch().setQuery(multiMatchQuery), response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("2")); + }); multiMatchQuery = multiMatchQuery("value1 value2 bar", "field1"); multiMatchQuery.minimumShouldMatch("100%"); assertHitCount(prepareSearch().setQuery(multiMatchQuery), 0L); multiMatchQuery.minimumShouldMatch("70%"); - searchResponse = prepareSearch().setQuery(multiMatchQuery).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); + assertResponse(prepareSearch().setQuery(multiMatchQuery), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); // Min should match > # optional clauses returns no docs. 
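// Making the comment above concrete (illustrative only): the three query terms produce at most three optional
// clauses per field, so the minimumShouldMatch("4") set below can never be satisfied and the search is expected to
// return no documents:
//
//     multiMatchQuery("value1 value2 value3", "field1", "field2").minimumShouldMatch("4")
//     // 3 optional clauses < 4 required matches -> zero hits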
multiMatchQuery = multiMatchQuery("value1 value2 value3", "field1", "field2"); multiMatchQuery.minimumShouldMatch("4"); @@ -709,16 +727,16 @@ public void testMultiMatchQueryMinShouldMatch() { public void testBoolQueryMinShouldMatchBiggerThanNumberOfShouldClauses() throws IOException { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("field1", new String[] { "value1", "value2", "value3" }).get(); - client().prepareIndex("test").setId("2").setSource("field2", "value1").get(); + prepareIndex("test").setId("1").setSource("field1", new String[] { "value1", "value2", "value3" }).get(); + prepareIndex("test").setId("2").setSource("field2", "value1").get(); refresh(); BoolQueryBuilder boolQuery = boolQuery().must(termQuery("field1", "value1")) .should(boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2")).minimumShouldMatch(3)); - SearchResponse searchResponse = prepareSearch().setQuery(boolQuery).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - + assertResponse(prepareSearch().setQuery(boolQuery), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); boolQuery = boolQuery().must(termQuery("field1", "value1")) .should(boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2")).minimumShouldMatch(1)) // Only one should clause is defined, returns no docs. @@ -728,10 +746,10 @@ public void testBoolQueryMinShouldMatchBiggerThanNumberOfShouldClauses() throws boolQuery = boolQuery().should(termQuery("field1", "value1")) .should(boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2")).minimumShouldMatch(3)) .minimumShouldMatch(1); - searchResponse = prepareSearch().setQuery(boolQuery).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - + assertResponse(prepareSearch().setQuery(boolQuery), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); boolQuery = boolQuery().must(termQuery("field1", "value1")) .must(boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2")).minimumShouldMatch(3)); assertHitCount(prepareSearch().setQuery(boolQuery), 0L); @@ -739,14 +757,14 @@ public void testBoolQueryMinShouldMatchBiggerThanNumberOfShouldClauses() throws public void testFuzzyQueryString() { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("str", "kimchy", "date", "2012-02-01", "num", 12).get(); - client().prepareIndex("test").setId("2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get(); + prepareIndex("test").setId("1").setSource("str", "kimchy", "date", "2012-02-01", "num", 12).get(); + prepareIndex("test").setId("2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(queryStringQuery("str:kimcy~1")).get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); + assertNoFailuresAndResponse(prepareSearch().setQuery(queryStringQuery("str:kimcy~1")), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); } @TestIssueLogging( @@ -760,38 +778,37 @@ public void testQuotedQueryStringWithBoost() throws InterruptedException { indexRandom( true, false, - client().prepareIndex("test").setId("1").setSource("important", "phrase match", "less_important", "nothing important"), - 
client().prepareIndex("test").setId("2").setSource("important", "nothing important", "less_important", "phrase match") + prepareIndex("test").setId("1").setSource("important", "phrase match", "less_important", "nothing important"), + prepareIndex("test").setId("2").setSource("important", "nothing important", "less_important", "phrase match") ); - SearchResponse searchResponse = prepareSearch().setQuery( - queryStringQuery("\"phrase match\"").field("important", boost).field("less_important") - ).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - assertThat( - (double) searchResponse.getHits().getAt(0).getScore(), - closeTo(boost * searchResponse.getHits().getAt(1).getScore(), .1) + assertResponse( + prepareSearch().setQuery(queryStringQuery("\"phrase match\"").field("important", boost).field("less_important")), + response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("2")); + assertThat((double) response.getHits().getAt(0).getScore(), closeTo(boost * response.getHits().getAt(1).getScore(), .1)); + } ); } public void testSpecialRangeSyntaxInQueryString() { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("str", "kimchy", "date", "2012-02-01", "num", 12).get(); - client().prepareIndex("test").setId("2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get(); + prepareIndex("test").setId("1").setSource("str", "kimchy", "date", "2012-02-01", "num", 12).get(); + prepareIndex("test").setId("2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(queryStringQuery("num:>19")).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("2")); - + assertResponse(prepareSearch().setQuery(queryStringQuery("num:>19")), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("2")); + }); assertHitCount(prepareSearch().setQuery(queryStringQuery("num:>20")), 0L); - searchResponse = prepareSearch().setQuery(queryStringQuery("num:>=20")).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("2")); - + assertResponse(prepareSearch().setQuery(queryStringQuery("num:>=20")), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("2")); + }); assertHitCount(prepareSearch().setQuery(queryStringQuery("num:>11")), 2L); assertHitCount(prepareSearch().setQuery(queryStringQuery("num:<20")), 1L); assertHitCount(prepareSearch().setQuery(queryStringQuery("num:<=20")), 2L); @@ -803,10 +820,10 @@ public void testEmptytermsQuery() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("term", "1"), - client().prepareIndex("test").setId("2").setSource("term", "2"), - client().prepareIndex("test").setId("3").setSource("term", "3"), - client().prepareIndex("test").setId("4").setSource("term", "4") + prepareIndex("test").setId("1").setSource("term", "1"), + prepareIndex("test").setId("2").setSource("term", "2"), + prepareIndex("test").setId("3").setSource("term", "3"), + prepareIndex("test").setId("4").setSource("term", "4") ); assertHitCount(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("term", new String[0]))), 0L); assertHitCount(prepareSearch("test").setQuery(idsQuery()), 0L); @@ -817,10 +834,10 @@ public void testTermsQuery() throws Exception { indexRandom( true, - 
client().prepareIndex("test").setId("1").setSource("str", "1", "lng", 1L, "dbl", 1.0d), - client().prepareIndex("test").setId("2").setSource("str", "2", "lng", 2L, "dbl", 2.0d), - client().prepareIndex("test").setId("3").setSource("str", "3", "lng", 3L, "dbl", 3.0d), - client().prepareIndex("test").setId("4").setSource("str", "4", "lng", 4L, "dbl", 4.0d) + prepareIndex("test").setId("1").setSource("str", "1", "lng", 1L, "dbl", 1.0d), + prepareIndex("test").setId("2").setSource("str", "2", "lng", 2L, "dbl", 2.0d), + prepareIndex("test").setId("3").setSource("str", "3", "lng", 3L, "dbl", 3.0d), + prepareIndex("test").setId("4").setSource("str", "4", "lng", 4L, "dbl", 4.0d) ); assertSearchHitsWithoutFailures(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("str", "1", "4"))), "1", "4"); assertSearchHitsWithoutFailures( @@ -877,12 +894,11 @@ public void testTermsLookupFilter() throws Exception { indexRandom( true, - client().prepareIndex("lookup").setId("1").setSource("terms", new String[] { "1", "3" }), - client().prepareIndex("lookup").setId("2").setSource("terms", new String[] { "2" }), - client().prepareIndex("lookup").setId("3").setSource("terms", new String[] { "2", "4" }), - client().prepareIndex("lookup").setId("4").setSource("other", "value"), - client().prepareIndex("lookup2") - .setId("1") + prepareIndex("lookup").setId("1").setSource("terms", new String[] { "1", "3" }), + prepareIndex("lookup").setId("2").setSource("terms", new String[] { "2" }), + prepareIndex("lookup").setId("3").setSource("terms", new String[] { "2", "4" }), + prepareIndex("lookup").setId("4").setSource("other", "value"), + prepareIndex("lookup2").setId("1") .setSource( XContentFactory.jsonBuilder() .startObject() @@ -896,8 +912,7 @@ public void testTermsLookupFilter() throws Exception { .endArray() .endObject() ), - client().prepareIndex("lookup2") - .setId("2") + prepareIndex("lookup2").setId("2") .setSource( XContentFactory.jsonBuilder() .startObject() @@ -908,8 +923,7 @@ public void testTermsLookupFilter() throws Exception { .endArray() .endObject() ), - client().prepareIndex("lookup2") - .setId("3") + prepareIndex("lookup2").setId("3") .setSource( XContentFactory.jsonBuilder() .startObject() @@ -923,11 +937,11 @@ public void testTermsLookupFilter() throws Exception { .endArray() .endObject() ), - client().prepareIndex("lookup3").setId("1").setSource("terms", new String[] { "1", "3" }), - client().prepareIndex("test").setId("1").setSource("term", "1"), - client().prepareIndex("test").setId("2").setSource("term", "2"), - client().prepareIndex("test").setId("3").setSource("term", "3"), - client().prepareIndex("test").setId("4").setSource("term", "4") + prepareIndex("lookup3").setId("1").setSource("terms", new String[] { "1", "3" }), + prepareIndex("test").setId("1").setSource("term", "1"), + prepareIndex("test").setId("2").setSource("term", "2"), + prepareIndex("test").setId("3").setSource("term", "3"), + prepareIndex("test").setId("4").setSource("term", "4") ); assertSearchHitsWithoutFailures( prepareSearch("test").setQuery(termsLookupQuery("term", new TermsLookup("lookup", "1", "terms"))), @@ -991,30 +1005,31 @@ public void testTermsLookupFilter() throws Exception { public void testBasicQueryById() throws Exception { assertAcked(prepareCreate("test")); - client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); - client().prepareIndex("test").setId("2").setSource("field1", "value2").get(); - client().prepareIndex("test").setId("3").setSource("field1", "value3").get(); + 
prepareIndex("test").setId("1").setSource("field1", "value1").get(); + prepareIndex("test").setId("2").setSource("field1", "value2").get(); + prepareIndex("test").setId("3").setSource("field1", "value3").get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(idsQuery().addIds("1", "2")).get(); - assertHitCount(searchResponse, 2L); - assertThat(searchResponse.getHits().getHits().length, equalTo(2)); - - searchResponse = prepareSearch().setQuery(idsQuery().addIds("1")).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - - searchResponse = prepareSearch().setQuery(idsQuery().addIds("1", "2")).get(); - assertHitCount(searchResponse, 2L); - assertThat(searchResponse.getHits().getHits().length, equalTo(2)); - - searchResponse = prepareSearch().setQuery(idsQuery().addIds("1")).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - - searchResponse = prepareSearch().setQuery(idsQuery().addIds("1", "2", "3", "4")).get(); - assertHitCount(searchResponse, 3L); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); + assertResponse(prepareSearch().setQuery(idsQuery().addIds("1", "2")), response -> { + assertHitCount(response, 2L); + assertThat(response.getHits().getHits().length, equalTo(2)); + }); + assertResponse(prepareSearch().setQuery(idsQuery().addIds("1")), response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getHits().length, equalTo(1)); + }); + assertResponse(prepareSearch().setQuery(idsQuery().addIds("1", "2")), response -> { + assertHitCount(response, 2L); + assertThat(response.getHits().getHits().length, equalTo(2)); + }); + assertResponse(prepareSearch().setQuery(idsQuery().addIds("1")), response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getHits().length, equalTo(1)); + }); + assertResponse(prepareSearch().setQuery(idsQuery().addIds("1", "2", "3", "4")), response -> { + assertHitCount(response, 3L); + assertThat(response.getHits().getHits().length, equalTo(3)); + }); } public void testNumericTermsAndRanges() throws Exception { @@ -1035,102 +1050,119 @@ public void testNumericTermsAndRanges() throws Exception { ) ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource("num_byte", 1, "num_short", 1, "num_integer", 1, "num_long", 1, "num_float", 1, "num_double", 1) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource("num_byte", 2, "num_short", 2, "num_integer", 2, "num_long", 2, "num_float", 2, "num_double", 2) .get(); - client().prepareIndex("test") - .setId("17") + prepareIndex("test").setId("17") .setSource("num_byte", 17, "num_short", 17, "num_integer", 17, "num_long", 17, "num_float", 17, "num_double", 17) .get(); refresh(); - SearchResponse searchResponse; logger.info("--> term query on 1"); - searchResponse = prepareSearch("test").setQuery(termQuery("num_byte", 1)).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termQuery("num_short", 1)).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termQuery("num_integer", 1)).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termQuery("num_long", 1)).get(); - 
assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termQuery("num_float", 1)).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termQuery("num_double", 1)).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - + assertResponse(prepareSearch("test").setQuery(termQuery("num_byte", 1)), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termQuery("num_short", 1)), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termQuery("num_integer", 1)), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termQuery("num_long", 1)), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termQuery("num_float", 1)), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termQuery("num_double", 1)), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); logger.info("--> terms query on 1"); - searchResponse = prepareSearch("test").setQuery(termsQuery("num_byte", new int[] { 1 })).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termsQuery("num_short", new int[] { 1 })).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termsQuery("num_integer", new int[] { 1 })).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termsQuery("num_long", new int[] { 1 })).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termsQuery("num_float", new double[] { 1 })).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termsQuery("num_double", new double[] { 1 })).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - + assertResponse(prepareSearch("test").setQuery(termsQuery("num_byte", new int[] { 1 })), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termsQuery("num_short", new int[] { 1 })), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termsQuery("num_integer", new int[] { 1 })), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termsQuery("num_long", new int[] { 1 })), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termsQuery("num_float", new double[] { 1 })), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + 
assertResponse(prepareSearch("test").setQuery(termsQuery("num_double", new double[] { 1 })), response -> {
+            assertHitCount(response, 1L);
+            assertFirstHit(response, hasId("1"));
+        });
         logger.info("--> term filter on 1");
-        searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_byte", 1))).get();
-        assertHitCount(searchResponse, 1L);
-        assertFirstHit(searchResponse, hasId("1"));
-        searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_short", 1))).get();
-        assertHitCount(searchResponse, 1L);
-        assertFirstHit(searchResponse, hasId("1"));
-        searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_integer", 1))).get();
-        assertHitCount(searchResponse, 1L);
-        assertFirstHit(searchResponse, hasId("1"));
-        searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_long", 1))).get();
-        assertHitCount(searchResponse, 1L);
-        assertFirstHit(searchResponse, hasId("1"));
-        searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_float", 1))).get();
-        assertHitCount(searchResponse, 1L);
-        assertFirstHit(searchResponse, hasId("1"));
-        searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_double", 1))).get();
-        assertHitCount(searchResponse, 1L);
-        assertFirstHit(searchResponse, hasId("1"));
-
+        assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_byte", 1))), response -> {
+            assertHitCount(response, 1L);
+            assertFirstHit(response, hasId("1"));
+        });
+        assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_short", 1))), response -> {
+            assertHitCount(response, 1L);
+            assertFirstHit(response, hasId("1"));
+        });
+        assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_integer", 1))), response -> {
+            assertHitCount(response, 1L);
+            assertFirstHit(response, hasId("1"));
+        });
+        assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_long", 1))), response -> {
+            assertHitCount(response, 1L);
+            assertFirstHit(response, hasId("1"));
+        });
+        assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_float", 1))), response -> {
+            assertHitCount(response, 1L);
+            assertFirstHit(response, hasId("1"));
+        });
+        assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_double", 1))), response -> {
+            assertHitCount(response, 1L);
+            assertFirstHit(response, hasId("1"));
+        });
         logger.info("--> terms filter on 1");
-        searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_byte", new int[] { 1 }))).get();
-        assertHitCount(searchResponse, 1L);
-        assertFirstHit(searchResponse, hasId("1"));
-        searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_short", new int[] { 1 }))).get();
-        assertHitCount(searchResponse, 1L);
-        assertFirstHit(searchResponse, hasId("1"));
-        searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_integer", new int[] { 1 }))).get();
-        assertHitCount(searchResponse, 1L);
-        assertFirstHit(searchResponse, hasId("1"));
-        searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_long", new int[] { 1 }))).get();
-        assertHitCount(searchResponse, 1L);
-        assertFirstHit(searchResponse, hasId("1"));
-        searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_float", new int[] { 1 }))).get();
-        assertHitCount(searchResponse, 1L);
-        assertFirstHit(searchResponse, hasId("1"));
-        searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_double", new int[] { 1 }))).get();
-        assertHitCount(searchResponse, 1L);
-        assertFirstHit(searchResponse, hasId("1"));
+        assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_byte", new int[] { 1 }))), response -> {
+            assertHitCount(response, 1L);
+            assertFirstHit(response, hasId("1"));
+        });
+        assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_short", new int[] { 1 }))), response -> {
+            assertHitCount(response, 1L);
+            assertFirstHit(response, hasId("1"));
+        });
+        assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_integer", new int[] { 1 }))), response -> {
+            assertHitCount(response, 1L);
+            assertFirstHit(response, hasId("1"));
+        });
+        assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_long", new int[] { 1 }))), response -> {
+            assertHitCount(response, 1L);
+            assertFirstHit(response, hasId("1"));
+        });
+        assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_float", new int[] { 1 }))), response -> {
+            assertHitCount(response, 1L);
+            assertFirstHit(response, hasId("1"));
+        });
+        assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_double", new int[] { 1 }))), response -> {
+            assertHitCount(response, 1L);
+            assertFirstHit(response, hasId("1"));
+        });
     }
 
     public void testNumericRangeFilter_2826() throws Exception {
@@ -1151,10 +1183,10 @@ public void testNumericRangeFilter_2826() throws Exception {
             )
         );
 
-        client().prepareIndex("test").setId("1").setSource("field1", "test1", "num_long", 1).get();
-        client().prepareIndex("test").setId("2").setSource("field1", "test1", "num_long", 2).get();
-        client().prepareIndex("test").setId("3").setSource("field1", "test2", "num_long", 3).get();
-        client().prepareIndex("test").setId("4").setSource("field1", "test2", "num_long", 4).get();
+        prepareIndex("test").setId("1").setSource("field1", "test1", "num_long", 1).get();
+        prepareIndex("test").setId("2").setSource("field1", "test1", "num_long", 2).get();
+        prepareIndex("test").setId("3").setSource("field1", "test2", "num_long", 3).get();
+        prepareIndex("test").setId("4").setSource("field1", "test2", "num_long", 4).get();
         refresh();
 
         assertHitCount(
@@ -1193,10 +1225,10 @@ public void testMustNot() throws InterruptedException {
 
         indexRandom(
             true,
-            client().prepareIndex("test").setId("1").setSource("description", "foo other anything bar"),
-            client().prepareIndex("test").setId("2").setSource("description", "foo other anything"),
-            client().prepareIndex("test").setId("3").setSource("description", "foo other"),
-            client().prepareIndex("test").setId("4").setSource("description", "foo")
+            prepareIndex("test").setId("1").setSource("description", "foo other anything bar"),
+            prepareIndex("test").setId("2").setSource("description", "foo other anything"),
+            prepareIndex("test").setId("3").setSource("description", "foo other"),
+            prepareIndex("test").setId("4").setSource("description", "foo")
         );
 
         assertHitCount(prepareSearch("test").setQuery(matchAllQuery()).setSearchType(SearchType.DFS_QUERY_THEN_FETCH), 4L);
@@ -1210,10 +1242,7 @@ public void testIntervals() throws InterruptedException {
         createIndex("test");
 
-        indexRandom(
-            true,
-            client().prepareIndex("test").setId("1").setSource("description", "it's cold outside, there's no kind of atmosphere")
-        );
+        indexRandom(true, prepareIndex("test").setId("1").setSource("description", "it's cold outside, there's no kind of atmosphere"));
 
         String json = """
             {
@@ -1238,8 +1267,7 @@ public void testIntervals() throws InterruptedException {
                 }
             }
             }""";
-        SearchResponse response = prepareSearch("test").setQuery(wrapperQuery(json)).get();
-        assertHitCount(response, 1L);
+        assertHitCount(prepareSearch("test").setQuery(wrapperQuery(json)), 1L);
     }
 
     // see #2994
@@ -1248,10 +1276,10 @@ public void testSimpleSpan() throws IOException, ExecutionException, Interrupted
 
         indexRandom(
             true,
-            client().prepareIndex("test").setId("1").setSource("description", "foo other anything bar"),
-            client().prepareIndex("test").setId("2").setSource("description", "foo other anything"),
-            client().prepareIndex("test").setId("3").setSource("description", "foo other"),
-            client().prepareIndex("test").setId("4").setSource("description", "foo")
+            prepareIndex("test").setId("1").setSource("description", "foo other anything bar"),
+            prepareIndex("test").setId("2").setSource("description", "foo other anything"),
+            prepareIndex("test").setId("3").setSource("description", "foo other"),
+            prepareIndex("test").setId("4").setSource("description", "foo")
         );
 
         assertHitCount(prepareSearch("test").setQuery(spanOrQuery(spanTermQuery("description", "bar"))), 1L);
@@ -1266,10 +1294,10 @@ public void testSimpleSpan() throws IOException, ExecutionException, Interrupted
 
     public void testSpanMultiTermQuery() throws IOException {
         createIndex("test");
 
-        client().prepareIndex("test").setId("1").setSource("description", "foo other anything bar", "count", 1).get();
-        client().prepareIndex("test").setId("2").setSource("description", "foo other anything", "count", 2).get();
-        client().prepareIndex("test").setId("3").setSource("description", "foo other", "count", 3).get();
-        client().prepareIndex("test").setId("4").setSource("description", "fop", "count", 4).get();
+        prepareIndex("test").setId("1").setSource("description", "foo other anything bar", "count", 1).get();
+        prepareIndex("test").setId("2").setSource("description", "foo other anything", "count", 2).get();
+        prepareIndex("test").setId("3").setSource("description", "foo other", "count", 3).get();
+        prepareIndex("test").setId("4").setSource("description", "fop", "count", 4).get();
         refresh();
 
         assertHitCount(prepareSearch("test").setQuery(spanOrQuery(spanMultiTermQueryBuilder(fuzzyQuery("description", "fop")))), 4);
@@ -1287,8 +1315,8 @@ public void testSpanMultiTermQuery() throws IOException {
 
     public void testSpanNot() throws IOException, ExecutionException, InterruptedException {
         createIndex("test");
 
-        client().prepareIndex("test").setId("1").setSource("description", "the quick brown fox jumped over the lazy dog").get();
-        client().prepareIndex("test").setId("2").setSource("description", "the quick black fox leaped over the sleeping dog").get();
+        prepareIndex("test").setId("1").setSource("description", "the quick brown fox jumped over the lazy dog").get();
+        prepareIndex("test").setId("2").setSource("description", "the quick black fox leaped over the sleeping dog").get();
         refresh();
 
         assertHitCount(
@@ -1352,23 +1380,19 @@ public void testSimpleDFSQuery() throws IOException {
             )
         );
 
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
             .setRouting("Y")
             .setSource("online", false, "bs", "Y", "ts", System.currentTimeMillis() - 100, "type", "s")
             .get();
-        client().prepareIndex("test")
-            .setId("2")
+        prepareIndex("test").setId("2")
             .setRouting("X")
             .setSource("online", true, "bs", "X", "ts", System.currentTimeMillis() - 10000000, "type", "s")
             .get();
-        client().prepareIndex("test")
-            .setId("3")
+        prepareIndex("test").setId("3")
             .setRouting(randomAlphaOfLength(2))
             .setSource("online", false, "ts", System.currentTimeMillis() - 100, "type", "bs")
             .get();
-        client().prepareIndex("test")
-            .setId("4")
+        prepareIndex("test").setId("4")
             .setRouting(randomAlphaOfLength(2))
             .setSource("online", true, "ts", System.currentTimeMillis() - 123123, "type", "bs")
             .get();
@@ -1397,7 +1421,7 @@
     }
 
     public void testMultiFieldQueryString() {
-        client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get();
 
         logger.info("regular");
         assertHitCount(prepareSearch("test").setQuery(queryStringQuery("value1").field("field1").field("field2")), 1);
@@ -1420,7 +1444,7 @@ public void testMultiMatchLenientIssue3797() {
         createIndex("test");
-        client().prepareIndex("test").setId("1").setSource("field1", 123, "field2", "value2").get();
+        prepareIndex("test").setId("1").setSource("field1", 123, "field2", "value2").get();
         refresh();
 
         assertHitCount(prepareSearch("test").setQuery(multiMatchQuery("value2", "field2").field("field1", 2).lenient(true)), 1L);
@@ -1431,25 +1455,29 @@ public void testMinScore() throws ExecutionException, InterruptedException {
         createIndex("test");
 
-        client().prepareIndex("test").setId("1").setSource("score", 1.5).get();
-        client().prepareIndex("test").setId("2").setSource("score", 1.0).get();
-        client().prepareIndex("test").setId("3").setSource("score", 2.0).get();
-        client().prepareIndex("test").setId("4").setSource("score", 0.5).get();
+        prepareIndex("test").setId("1").setSource("score", 1.5).get();
+        prepareIndex("test").setId("2").setSource("score", 1.0).get();
+        prepareIndex("test").setId("3").setSource("score", 2.0).get();
+        prepareIndex("test").setId("4").setSource("score", 0.5).get();
         refresh();
 
-        SearchResponse searchResponse = prepareSearch("test").setQuery(
-            functionScoreQuery(ScoreFunctionBuilders.fieldValueFactorFunction("score").missing(1.0)).setMinScore(1.5f)
-        ).get();
-        assertHitCount(searchResponse, 2);
-        assertFirstHit(searchResponse, hasId("3"));
-        assertSecondHit(searchResponse, hasId("1"));
+        assertResponse(
+            prepareSearch("test").setQuery(
+                functionScoreQuery(ScoreFunctionBuilders.fieldValueFactorFunction("score").missing(1.0)).setMinScore(1.5f)
+            ),
+            response -> {
+                assertHitCount(response, 2);
+                assertFirstHit(response, hasId("3"));
+                assertSecondHit(response, hasId("1"));
+            }
+        );
     }
 
     public void testQueryStringWithSlopAndFields() {
         assertAcked(prepareCreate("test"));
 
-        client().prepareIndex("test").setId("1").setSource("desc", "one two three", "type", "customer").get();
-        client().prepareIndex("test").setId("2").setSource("desc", "one two three", "type", "product").get();
+        prepareIndex("test").setId("1").setSource("desc", "one two three", "type", "customer").get();
+        prepareIndex("test").setId("2").setSource("desc", "one two three", "type", "product").get();
         refresh();
 
         assertHitCount(prepareSearch("test").setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")), 2);
@@ -1480,12 +1508,12 @@ public void testDateProvidedAsNumber() throws InterruptedException {
         assertAcked(indicesAdmin().preparePutMapping("test").setSource("field", "type=date,format=epoch_millis").get());
         indexRandom(
             true,
-            client().prepareIndex("test").setId("1").setSource("field", 1000000000001L),
-            client().prepareIndex("test").setId("2").setSource("field", 1000000000000L),
-            client().prepareIndex("test").setId("3").setSource("field", 999999999999L),
-            client().prepareIndex("test").setId("4").setSource("field", 1000000000002L),
-            client().prepareIndex("test").setId("5").setSource("field", 1000000000003L),
-            client().prepareIndex("test").setId("6").setSource("field", 999999999999L)
+            prepareIndex("test").setId("1").setSource("field", 1000000000001L),
+            prepareIndex("test").setId("2").setSource("field", 1000000000000L),
+            prepareIndex("test").setId("3").setSource("field", 999999999999L),
+            prepareIndex("test").setId("4").setSource("field", 1000000000002L),
+            prepareIndex("test").setId("5").setSource("field", 1000000000003L),
+            prepareIndex("test").setId("6").setSource("field", 999999999999L)
         );
 
         assertHitCount(prepareSearch("test").setSize(0).setQuery(rangeQuery("field").gte(1000000000000L)), 4);
@@ -1497,74 +1525,104 @@ public void testRangeQueryWithTimeZone() throws Exception {
 
         indexRandom(
             true,
-            client().prepareIndex("test").setId("1").setSource("date", "2014-01-01", "num", 1),
-            client().prepareIndex("test").setId("2").setSource("date", "2013-12-31T23:00:00", "num", 2),
-            client().prepareIndex("test").setId("3").setSource("date", "2014-01-01T01:00:00", "num", 3),
+            prepareIndex("test").setId("1").setSource("date", "2014-01-01", "num", 1),
+            prepareIndex("test").setId("2").setSource("date", "2013-12-31T23:00:00", "num", 2),
+            prepareIndex("test").setId("3").setSource("date", "2014-01-01T01:00:00", "num", 3),
             // Now in UTC+1
-            client().prepareIndex("test")
-                .setId("4")
+            prepareIndex("test").setId("4")
                 .setSource("date", Instant.now().atZone(ZoneOffset.ofHours(1)).toInstant().toEpochMilli(), "num", 4)
         );
 
-        SearchResponse searchResponse = prepareSearch("test").setQuery(
-            QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00").to("2014-01-01T00:59:00")
-        ).get();
-        assertHitCount(searchResponse, 1L);
-        assertThat(searchResponse.getHits().getAt(0).getId(), is("1"));
-        searchResponse = prepareSearch("test").setQuery(
-            QueryBuilders.rangeQuery("date").from("2013-12-31T23:00:00").to("2013-12-31T23:59:00")
-        ).get();
-        assertHitCount(searchResponse, 1L);
-        assertThat(searchResponse.getHits().getAt(0).getId(), is("2"));
-        searchResponse = prepareSearch("test").setQuery(
-            QueryBuilders.rangeQuery("date").from("2014-01-01T01:00:00").to("2014-01-01T01:59:00")
-        ).get();
-        assertHitCount(searchResponse, 1L);
-        assertThat(searchResponse.getHits().getAt(0).getId(), is("3"));
-
+        assertResponse(
+            prepareSearch("test").setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00").to("2014-01-01T00:59:00")),
+            response -> {
+                assertHitCount(response, 1L);
+                assertThat(response.getHits().getAt(0).getId(), is("1"));
+            }
+        );
+        assertResponse(
+            prepareSearch("test").setQuery(QueryBuilders.rangeQuery("date").from("2013-12-31T23:00:00").to("2013-12-31T23:59:00")),
+            response -> {
+                assertHitCount(response, 1L);
+                assertThat(response.getHits().getAt(0).getId(), is("2"));
+            }
+        );
+        assertResponse(
+            prepareSearch("test").setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01T01:00:00").to("2014-01-01T01:59:00")),
+            response -> {
+                assertHitCount(response, 1L);
+                assertThat(response.getHits().getAt(0).getId(), is("3"));
+            }
        );
         // We explicitly define a time zone in the from/to dates so whatever the time zone is, it won't be used
-        searchResponse = prepareSearch("test").setQuery(
-            QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00Z").to("2014-01-01T00:59:00Z").timeZone("+10:00")
-        ).get();
-        assertHitCount(searchResponse, 1L);
-        assertThat(searchResponse.getHits().getAt(0).getId(), is("1"));
-        searchResponse = prepareSearch("test").setQuery(
-            QueryBuilders.rangeQuery("date").from("2013-12-31T23:00:00Z").to("2013-12-31T23:59:00Z").timeZone("+10:00")
-        ).get();
-        assertHitCount(searchResponse, 1L);
-        assertThat(searchResponse.getHits().getAt(0).getId(), is("2"));
-        searchResponse = prepareSearch("test").setQuery(
-            QueryBuilders.rangeQuery("date").from("2014-01-01T01:00:00Z").to("2014-01-01T01:59:00Z").timeZone("+10:00")
-        ).get();
-        assertHitCount(searchResponse, 1L);
-        assertThat(searchResponse.getHits().getAt(0).getId(), is("3"));
-
+        assertResponse(
+            prepareSearch("test").setQuery(
+                QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00Z").to("2014-01-01T00:59:00Z").timeZone("+10:00")
+            ),
+            response -> {
+                assertHitCount(response, 1L);
+                assertThat(response.getHits().getAt(0).getId(), is("1"));
+            }
+        );
+        assertResponse(
+            prepareSearch("test").setQuery(
+                QueryBuilders.rangeQuery("date").from("2013-12-31T23:00:00Z").to("2013-12-31T23:59:00Z").timeZone("+10:00")
+            ),
+            response -> {
+                assertHitCount(response, 1L);
+                assertThat(response.getHits().getAt(0).getId(), is("2"));
+            }
+        );
+        assertResponse(
+            prepareSearch("test").setQuery(
+                QueryBuilders.rangeQuery("date").from("2014-01-01T01:00:00Z").to("2014-01-01T01:59:00Z").timeZone("+10:00")
+            ),
+            response -> {
+                assertHitCount(response, 1L);
+                assertThat(response.getHits().getAt(0).getId(), is("3"));
+            }
+        );
         // We define a time zone to be applied to the filter and from/to have no time zone
-        searchResponse = prepareSearch("test").setQuery(
-            QueryBuilders.rangeQuery("date").from("2014-01-01T03:00:00").to("2014-01-01T03:59:00").timeZone("+03:00")
-        ).get();
-        assertHitCount(searchResponse, 1L);
-        assertThat(searchResponse.getHits().getAt(0).getId(), is("1"));
-        searchResponse = prepareSearch("test").setQuery(
-            QueryBuilders.rangeQuery("date").from("2014-01-01T02:00:00").to("2014-01-01T02:59:00").timeZone("+03:00")
-        ).get();
-        assertHitCount(searchResponse, 1L);
-        assertThat(searchResponse.getHits().getAt(0).getId(), is("2"));
-        searchResponse = prepareSearch("test").setQuery(
-            QueryBuilders.rangeQuery("date").from("2014-01-01T04:00:00").to("2014-01-01T04:59:00").timeZone("+03:00")
-        ).get();
-        assertHitCount(searchResponse, 1L);
-        assertThat(searchResponse.getHits().getAt(0).getId(), is("3"));
-
-        searchResponse = prepareSearch("test").setQuery(
-            QueryBuilders.rangeQuery("date").from("2014-01-01").to("2014-01-01T00:59:00").timeZone("-01:00")
-        ).get();
-        assertHitCount(searchResponse, 1L);
-        assertThat(searchResponse.getHits().getAt(0).getId(), is("3"));
-
-        searchResponse = prepareSearch("test").setQuery(QueryBuilders.rangeQuery("date").from("now/d-1d").timeZone("+01:00")).get();
-        assertHitCount(searchResponse, 1L);
-        assertThat(searchResponse.getHits().getAt(0).getId(), is("4"));
+        assertResponse(
+            prepareSearch("test").setQuery(
+                QueryBuilders.rangeQuery("date").from("2014-01-01T03:00:00").to("2014-01-01T03:59:00").timeZone("+03:00")
+            ),
+            response -> {
+                assertHitCount(response, 1L);
+                assertThat(response.getHits().getAt(0).getId(), is("1"));
+            }
+        );
+        assertResponse(
+            prepareSearch("test").setQuery(
+                QueryBuilders.rangeQuery("date").from("2014-01-01T02:00:00").to("2014-01-01T02:59:00").timeZone("+03:00")
+            ),
+            response -> {
+                assertHitCount(response, 1L);
+                assertThat(response.getHits().getAt(0).getId(), is("2"));
+            }
+        );
+        assertResponse(
+            prepareSearch("test").setQuery(
+                QueryBuilders.rangeQuery("date").from("2014-01-01T04:00:00").to("2014-01-01T04:59:00").timeZone("+03:00")
+            ),
+            response -> {
+                assertHitCount(response, 1L);
+                assertThat(response.getHits().getAt(0).getId(), is("3"));
+            }
+        );
+        assertResponse(
+            prepareSearch("test").setQuery(
+                QueryBuilders.rangeQuery("date").from("2014-01-01").to("2014-01-01T00:59:00").timeZone("-01:00")
+            ),
+            response -> {
+                assertHitCount(response, 1L);
+                assertThat(response.getHits().getAt(0).getId(), is("3"));
+            }
+        );
+        assertResponse(prepareSearch("test").setQuery(QueryBuilders.rangeQuery("date").from("now/d-1d").timeZone("+01:00")), response -> {
+            assertHitCount(response, 1L);
+            assertThat(response.getHits().getAt(0).getId(), is("4"));
+        });
     }
 
     /**
@@ -1595,8 +1653,8 @@ public void testRangeQueryWithLocaleMapping() throws Exception {
 
         indexRandom(
             true,
-            client().prepareIndex("test").setId("1").setSource("date_field", "Mi, 06 Dez 2000 02:55:00 -0800"),
-            client().prepareIndex("test").setId("2").setSource("date_field", "Do, 07 Dez 2000 02:55:00 -0800")
+            prepareIndex("test").setId("1").setSource("date_field", "Mi, 06 Dez 2000 02:55:00 -0800"),
+            prepareIndex("test").setId("2").setSource("date_field", "Do, 07 Dez 2000 02:55:00 -0800")
         );
 
         assertHitCount(
@@ -1614,7 +1672,7 @@
     }
 
     public void testSearchEmptyDoc() {
-        client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get();
+        prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get();
         refresh();
 
         assertHitCount(prepareSearch().setQuery(matchAllQuery()), 1L);
@@ -1624,8 +1682,8 @@ public void testMatchPhrasePrefixQuery() throws ExecutionException, InterruptedE
         createIndex("test1");
         indexRandom(
             true,
-            client().prepareIndex("test1").setId("1").setSource("field", "Johnnie Walker Black Label"),
-            client().prepareIndex("test1").setId("2").setSource("field", "trying out Elasticsearch")
+            prepareIndex("test1").setId("1").setSource("field", "Johnnie Walker Black Label"),
+            prepareIndex("test1").setId("2").setSource("field", "trying out Elasticsearch")
         );
 
         assertSearchHitsWithoutFailures(prepareSearch().setQuery(matchPhrasePrefixQuery("field", "Johnnie la").slop(between(2, 5))), "1");
@@ -1635,39 +1693,41 @@ public void testQueryStringParserCache() throws Exception {
         createIndex("test");
-        indexRandom(true, false, client().prepareIndex("test").setId("1").setSource("nameTokens", "xyz"));
-
-        SearchResponse response = prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
-            .setQuery(QueryBuilders.queryStringQuery("xyz").boost(100))
-            .get();
-        assertThat(response.getHits().getTotalHits().value, equalTo(1L));
-        assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
-
-        float first = response.getHits().getAt(0).getScore();
+        indexRandom(true, false, prepareIndex("test").setId("1").setSource("nameTokens", "xyz"));
+        final float[] first = new float[1];
+        assertResponse(
+            prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setQuery(QueryBuilders.queryStringQuery("xyz").boost(100)),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(1L));
+                assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
+                first[0] = response.getHits().getAt(0).getScore();
+            }
+        );
         for (int i = 0; i < 100; i++) {
-            response = prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
-                .setQuery(QueryBuilders.queryStringQuery("xyz").boost(100))
-                .get();
-
-            assertThat(response.getHits().getTotalHits().value, equalTo(1L));
-            assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
-            float actual = response.getHits().getAt(0).getScore();
-            assertThat(i + " expected: " + first + " actual: " + actual, Float.compare(first, actual), equalTo(0));
+            final int finalI = i;
+            assertResponse(
+                prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+                    .setQuery(QueryBuilders.queryStringQuery("xyz").boost(100)),
+                response -> {
+                    assertThat(response.getHits().getTotalHits().value, equalTo(1L));
+                    assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
+                    float actual = response.getHits().getAt(0).getScore();
+                    assertThat(finalI + " expected: " + first[0] + " actual: " + actual, Float.compare(first[0], actual), equalTo(0));
+                }
+            );
         }
     }
 
     public void testRangeQueryRangeFields_24744() throws Exception {
         assertAcked(prepareCreate("test").setMapping("int_range", "type=integer_range"));
 
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
             .setSource(jsonBuilder().startObject().startObject("int_range").field("gte", 10).field("lte", 20).endObject().endObject())
             .get();
         refresh();
 
         RangeQueryBuilder range = new RangeQueryBuilder("int_range").relation("intersects").from(Integer.MIN_VALUE).to(Integer.MAX_VALUE);
-        SearchResponse searchResponse = prepareSearch("test").setQuery(range).get();
-        assertHitCount(searchResponse, 1);
+        assertHitCount(prepareSearch("test").setQuery(range), 1L);
     }
 
     public void testNestedQueryWithFieldAlias() throws Exception {
@@ -1728,21 +1788,20 @@ public void testFieldAliasesForMetaFields() throws Exception {
             .endObject();
         assertAcked(prepareCreate("test").setMapping(mapping));
 
-        IndexRequestBuilder indexRequest = client().prepareIndex("test").setId("1").setRouting("custom").setSource("field", "value");
+        IndexRequestBuilder indexRequest = prepareIndex("test").setId("1").setRouting("custom").setSource("field", "value");
         indexRandom(true, false, indexRequest);
 
         updateClusterSettings(Settings.builder().put(IndicesService.INDICES_ID_FIELD_DATA_ENABLED_SETTING.getKey(), true));
         try {
-            SearchResponse searchResponse = prepareSearch().setQuery(termQuery("routing-alias", "custom"))
-                .addDocValueField("id-alias")
-                .get();
-            assertHitCount(searchResponse, 1L);
+            assertResponse(prepareSearch().setQuery(termQuery("routing-alias", "custom")).addDocValueField("id-alias"), response -> {
+                assertHitCount(response, 1L);
 
-            SearchHit hit = searchResponse.getHits().getAt(0);
-            assertEquals(2, hit.getFields().size());
-            assertTrue(hit.getFields().containsKey("id-alias"));
+                SearchHit hit = response.getHits().getAt(0);
+                assertEquals(2, hit.getFields().size());
+                assertTrue(hit.getFields().containsKey("id-alias"));
 
-            DocumentField field = hit.getFields().get("id-alias");
-            assertThat(field.getValue().toString(), equalTo("1"));
+                DocumentField field = hit.getFields().get("id-alias");
+                assertThat(field.getValue().toString(), equalTo("1"));
+            });
         } finally {
             // unset cluster setting
             updateClusterSettings(Settings.builder().putNull(IndicesService.INDICES_ID_FIELD_DATA_ENABLED_SETTING.getKey()));
@@ -1762,7 +1821,7 @@ public void testWildcardQueryNormalizationOnKeywordField() {
                 .build()
             ).setMapping("field1", "type=keyword,normalizer=lowercase_normalizer")
         );
-        client().prepareIndex("test").setId("1").setSource("field1", "Bbb Aaa").get();
+        prepareIndex("test").setId("1").setSource("field1", "Bbb Aaa").get();
         refresh();
 
         {
@@ -1787,7 +1846,7 @@ public void testWildcardQueryNormalizationOnTextField() {
                 .build()
             ).setMapping("field1", "type=text,analyzer=lowercase_analyzer")
         );
-        client().prepareIndex("test").setId("1").setSource("field1", "Bbb Aaa").get();
+        prepareIndex("test").setId("1").setSource("field1", "Bbb Aaa").get();
         refresh();
 
         {
@@ -1817,7 +1876,7 @@ public void testWildcardQueryNormalizationKeywordSpecialCharacters() {
                 .build()
             ).setMapping("field", "type=keyword,normalizer=no_wildcard")
         );
-        client().prepareIndex("test").setId("1").setSource("field", "label-1").get();
+        prepareIndex("test").setId("1").setSource("field", "label-1").get();
         refresh();
 
         WildcardQueryBuilder wildCardQuery = wildcardQuery("field", "la*");
@@ -1869,7 +1928,7 @@ public Map<String, AnalysisProvider<TokenizerFactory>> getTokenizers() {
      */
     public void testIssueFuzzyInsideSpanMulti() {
         createIndex("test");
-        client().prepareIndex("test").setId("1").setSource("field", "foobarbaz").get();
+        prepareIndex("test").setId("1").setSource("field", "foobarbaz").get();
         ensureGreen();
         refresh();
 
@@ -1881,16 +1940,17 @@ public void testFetchIdFieldQuery() {
         createIndex("test");
         int docCount = randomIntBetween(10, 50);
         for (int i = 0; i < docCount; i++) {
-            client().prepareIndex("test").setSource("field", "foobarbaz").get();
+            prepareIndex("test").setSource("field", "foobarbaz").get();
         }
         ensureGreen();
         refresh();
 
-        SearchResponse response = prepareSearch("test").addFetchField("_id").setSize(docCount).get();
-        SearchHit[] hits = response.getHits().getHits();
-        assertEquals(docCount, hits.length);
-        for (SearchHit hit : hits) {
-            assertNotNull(hit.getFields().get("_id").getValue());
-        }
+        assertResponse(prepareSearch("test").addFetchField("_id").setSize(docCount), response -> {
+            SearchHit[] hits = response.getHits().getHits();
+            assertEquals(docCount, hits.length);
+            for (SearchHit hit : hits) {
+                assertNotNull(hit.getFields().get("_id").getValue());
+            }
+        });
     }
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java
index 78d98b76b9bc8..449777580b691 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java
@@ -14,7 +14,6 @@
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.analysis.PreConfiguredTokenFilter;
@@ -49,7 +48,8 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId;
@@ -77,36 +77,43 @@ public void testSimpleQueryString() throws ExecutionException, InterruptedExcept
         indexRandom(
             true,
             false,
-            client().prepareIndex("test").setId("1").setSource("body", "foo"),
-            client().prepareIndex("test").setId("2").setSource("body", "bar"),
-            client().prepareIndex("test").setId("3").setSource("body", "foo bar"),
-            client().prepareIndex("test").setId("4").setSource("body", "quux baz eggplant"),
-            client().prepareIndex("test").setId("5").setSource("body", "quux baz spaghetti"),
-            client().prepareIndex("test").setId("6").setSource("otherbody", "spaghetti")
+            prepareIndex("test").setId("1").setSource("body", "foo"),
+            prepareIndex("test").setId("2").setSource("body", "bar"),
+            prepareIndex("test").setId("3").setSource("body", "foo bar"),
+            prepareIndex("test").setId("4").setSource("body", "quux baz eggplant"),
+            prepareIndex("test").setId("5").setSource("body", "quux baz spaghetti"),
+            prepareIndex("test").setId("6").setSource("otherbody", "spaghetti")
         );
 
         assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("foo bar")), "1", "2", "3");
         // Tests boost value setting. In this case doc 1 should always be ranked above the other
         // two matches.
-        SearchResponse searchResponse = prepareSearch().setQuery(
-            boolQuery().should(simpleQueryStringQuery("\"foo bar\"").boost(10.0f)).should(termQuery("body", "eggplant"))
-        ).get();
-        assertHitCount(searchResponse, 2L);
-        assertFirstHit(searchResponse, hasId("3"));
-
+        assertResponse(
+            prepareSearch().setQuery(
+                boolQuery().should(simpleQueryStringQuery("\"foo bar\"").boost(10.0f)).should(termQuery("body", "eggplant"))
+            ),
+            response -> {
+                assertHitCount(response, 2L);
+                assertFirstHit(response, hasId("3"));
+            }
+        );
         assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("foo bar").defaultOperator(Operator.AND)), "3");
         assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("\"quux baz\" +(eggplant | spaghetti)")), "4", "5");
         assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("eggplants").analyzer("mock_snowball")), "4");
 
-        searchResponse = prepareSearch().setQuery(
-            simpleQueryStringQuery("spaghetti").field("body", 1000.0f).field("otherbody", 2.0f).queryName("myquery")
-        ).get();
-        assertHitCount(searchResponse, 2L);
-        assertFirstHit(searchResponse, hasId("5"));
-        assertSearchHits(searchResponse, "5", "6");
-        assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo("myquery"));
+        assertResponse(
+            prepareSearch().setQuery(
+                simpleQueryStringQuery("spaghetti").field("body", 1000.0f).field("otherbody", 2.0f).queryName("myquery")
+            ),
+            response -> {
+                assertHitCount(response, 2L);
+                assertFirstHit(response, hasId("5"));
+                assertSearchHits(response, "5", "6");
+                assertThat(response.getHits().getAt(0).getMatchedQueries()[0], equalTo("myquery"));
+            }
+        );
         assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("spaghetti").field("*body")), "5", "6");
     }
 
@@ -117,10 +124,10 @@ public void testSimpleQueryStringMinimumShouldMatch() throws Exception {
         indexRandom(
             true,
             false,
-            client().prepareIndex("test").setId("1").setSource("body", "foo"),
-            client().prepareIndex("test").setId("2").setSource("body", "bar"),
-            client().prepareIndex("test").setId("3").setSource("body", "foo bar"),
-            client().prepareIndex("test").setId("4").setSource("body", "foo baz bar")
+            prepareIndex("test").setId("1").setSource("body", "foo"),
+            prepareIndex("test").setId("2").setSource("body", "bar"),
+            prepareIndex("test").setId("3").setSource("body", "foo bar"),
+            prepareIndex("test").setId("4").setSource("body", "foo baz bar")
         );
 
         logger.info("--> query 1");
@@ -152,10 +159,10 @@ public void testSimpleQueryStringMinimumShouldMatch() throws Exception {
         indexRandom(
             true,
             false,
-            client().prepareIndex("test").setId("5").setSource("body2", "foo", "other", "foo"),
-            client().prepareIndex("test").setId("6").setSource("body2", "bar", "other", "foo"),
-            client().prepareIndex("test").setId("7").setSource("body2", "foo bar", "other", "foo"),
-            client().prepareIndex("test").setId("8").setSource("body2", "foo baz bar", "other", "foo")
+            prepareIndex("test").setId("5").setSource("body2", "foo", "other", "foo"),
+            prepareIndex("test").setId("6").setSource("body2", "bar", "other", "foo"),
+            prepareIndex("test").setId("7").setSource("body2", "foo bar", "other", "foo"),
+            prepareIndex("test").setId("8").setSource("body2", "foo baz bar", "other", "foo")
         );
 
         logger.info("--> query 5");
@@ -205,7 +212,7 @@ public void testNestedFieldSimpleQueryString() throws IOException {
                 .endObject()
             )
         );
-        client().prepareIndex("test").setId("1").setSource("body", "foo bar baz").get();
+        prepareIndex("test").setId("1").setSource("body", "foo bar baz").get();
         refresh();
 
         assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("foo bar baz").field("body")), "1");
@@ -218,12 +225,12 @@ public void testSimpleQueryStringFlags() throws ExecutionException, InterruptedE
         createIndex("test");
         indexRandom(
             true,
-            client().prepareIndex("test").setId("1").setSource("body", "foo"),
-            client().prepareIndex("test").setId("2").setSource("body", "bar"),
-            client().prepareIndex("test").setId("3").setSource("body", "foo bar"),
-            client().prepareIndex("test").setId("4").setSource("body", "quux baz eggplant"),
-            client().prepareIndex("test").setId("5").setSource("body", "quux baz spaghetti"),
-            client().prepareIndex("test").setId("6").setSource("otherbody", "spaghetti")
+            prepareIndex("test").setId("1").setSource("body", "foo"),
+            prepareIndex("test").setId("2").setSource("body", "bar"),
+            prepareIndex("test").setId("3").setSource("body", "foo bar"),
+            prepareIndex("test").setId("4").setSource("body", "quux baz eggplant"),
+            prepareIndex("test").setId("5").setSource("body", "quux baz spaghetti"),
+            prepareIndex("test").setId("6").setSource("otherbody", "spaghetti")
         );
 
         assertSearchHitsWithoutFailures(
@@ -276,17 +283,19 @@ public void testSimpleQueryStringLenient() throws ExecutionException, Interrupte
         createIndex("test1", "test2");
         indexRandom(
             true,
-            client().prepareIndex("test1").setId("1").setSource("field", "foo"),
-            client().prepareIndex("test2").setId("10").setSource("field", 5)
+            prepareIndex("test1").setId("1").setSource("field", "foo"),
+            prepareIndex("test2").setId("10").setSource("field", 5)
         );
         refresh();
 
-        SearchResponse searchResponse = prepareSearch().setAllowPartialSearchResults(true)
-            .setQuery(simpleQueryStringQuery("foo").field("field"))
-            .get();
-        assertFailures(searchResponse);
-        assertHitCount(searchResponse, 1L);
-        assertSearchHits(searchResponse, "1");
+        assertResponse(
+            prepareSearch().setAllowPartialSearchResults(true).setQuery(simpleQueryStringQuery("foo").field("field")),
+            response -> {
+                assertFailures(response);
+                assertHitCount(response, 1L);
+                assertSearchHits(response, "1");
+            }
+        );
         assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("foo").field("field").lenient(true)), "1");
     }
 
@@ -295,8 +304,8 @@ public void testSimpleQueryStringLenient() throws ExecutionException, Interrupte
     public void testLenientFlagBeingTooLenient() throws Exception {
         indexRandom(
             true,
-            client().prepareIndex("test").setId("1").setSource("num", 1, "body", "foo bar baz"),
-            client().prepareIndex("test").setId("2").setSource("num", 2, "body", "eggplant spaghetti lasagna")
+            prepareIndex("test").setId("1").setSource("num", 1, "body", "foo bar baz"),
+            prepareIndex("test").setId("2").setSource("num", 2, "body", "eggplant spaghetti lasagna")
         );
 
         BoolQueryBuilder q = boolQuery().should(simpleQueryStringQuery("bar").field("num").field("body").lenient(true));
@@ -320,22 +329,22 @@ public void testSimpleQueryStringAnalyzeWildcard() throws ExecutionException, In
         CreateIndexRequestBuilder mappingRequest = indicesAdmin().prepareCreate("test1").setMapping(mapping);
         mappingRequest.get();
-        indexRandom(true, client().prepareIndex("test1").setId("1").setSource("location", "Köln"));
+        indexRandom(true, prepareIndex("test1").setId("1").setSource("location", "Köln"));
         refresh();
 
         assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("Köln*").field("location")), "1");
     }
 
     public void testSimpleQueryStringUsesFieldAnalyzer() throws Exception {
-        client().prepareIndex("test").setId("1").setSource("foo", 123, "bar", "abc").get();
-        client().prepareIndex("test").setId("2").setSource("foo", 234, "bar", "bcd").get();
+        prepareIndex("test").setId("1").setSource("foo", 123, "bar", "abc").get();
+        prepareIndex("test").setId("2").setSource("foo", 234, "bar", "bcd").get();
         refresh();
 
         assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("123").field("foo").field("bar")), "1");
     }
 
     public void testSimpleQueryStringOnIndexMetaField() throws Exception {
-        client().prepareIndex("test").setId("1").setSource("foo", 123, "bar", "abc").get();
-        client().prepareIndex("test").setId("2").setSource("foo", 234, "bar", "bcd").get();
+        prepareIndex("test").setId("1").setSource("foo", 123, "bar", "abc").get();
+        prepareIndex("test").setId("2").setSource("foo", 234, "bar", "bcd").get();
         refresh();
 
         assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("test").field("_index")), "1", "2");
     }
 
@@ -356,7 +365,7 @@ public void testEmptySimpleQueryStringWithAnalysis() throws Exception {
         CreateIndexRequestBuilder mappingRequest = indicesAdmin().prepareCreate("test1").setMapping(mapping);
         mappingRequest.get();
-        indexRandom(true, client().prepareIndex("test1").setId("1").setSource("body", "Some Text"));
+        indexRandom(true, prepareIndex("test1").setId("1").setSource("body", "Some Text"));
         refresh();
 
         assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("the*").field("body")));
@@ -368,22 +377,23 @@ public void testBasicAllQuery() throws Exception {
         ensureGreen("test");
 
         List<IndexRequestBuilder> reqs = new ArrayList<>();
-        reqs.add(client().prepareIndex("test").setId("1").setSource("f1", "foo bar baz"));
-        reqs.add(client().prepareIndex("test").setId("2").setSource("f2", "Bar"));
-        reqs.add(client().prepareIndex("test").setId("3").setSource("f3", "foo bar baz"));
+        reqs.add(prepareIndex("test").setId("1").setSource("f1", "foo bar baz"));
+        reqs.add(prepareIndex("test").setId("2").setSource("f2", "Bar"));
+        reqs.add(prepareIndex("test").setId("3").setSource("f3", "foo bar baz"));
         indexRandom(true, false, reqs);
 
-        SearchResponse resp = prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get();
-        assertHitCount(resp, 2L);
-        assertHits(resp.getHits(), "1", "3");
-
-        resp = prepareSearch("test").setQuery(simpleQueryStringQuery("bar")).get();
-        assertHitCount(resp, 2L);
-        assertHits(resp.getHits(), "1", "3");
-
-        resp = prepareSearch("test").setQuery(simpleQueryStringQuery("Bar")).get();
-        assertHitCount(resp, 3L);
-        assertHits(resp.getHits(), "1", "2", "3");
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("foo")), response -> {
+            assertHitCount(response, 2L);
+            assertHits(response.getHits(), "1", "3");
+        });
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("bar")), response -> {
+            assertHitCount(response, 2L);
+            assertHits(response.getHits(), "1", "3");
+        });
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("Bar")), response -> {
+            assertHitCount(response, 3L);
+            assertHits(response.getHits(), "1", "2", "3");
+        });
     }
 
     public void testWithDate() throws Exception {
@@ -392,25 +402,26 @@ public void testWithDate() throws Exception {
         ensureGreen("test");
 
         List<IndexRequestBuilder> reqs = new ArrayList<>();
-        reqs.add(client().prepareIndex("test").setId("1").setSource("f1", "foo", "f_date", "2015/09/02"));
-        reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01"));
+        reqs.add(prepareIndex("test").setId("1").setSource("f1", "foo", "f_date", "2015/09/02"));
+        reqs.add(prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01"));
         indexRandom(true, false, reqs);
 
-        SearchResponse resp = prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")).get();
-        assertHits(resp.getHits(), "1", "2");
-        assertHitCount(resp, 2L);
-
-        resp = prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\"")).get();
-        assertHits(resp.getHits(), "1");
-        assertHitCount(resp, 1L);
-
-        resp = prepareSearch("test").setQuery(simpleQueryStringQuery("bar \"2015/09/02\"")).get();
-        assertHits(resp.getHits(), "1", "2");
-        assertHitCount(resp, 2L);
-
-        resp = prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\" \"2015/09/01\"")).get();
-        assertHits(resp.getHits(), "1", "2");
-        assertHitCount(resp, 2L);
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")), response -> {
+            assertHits(response.getHits(), "1", "2");
+            assertHitCount(response, 2L);
+        });
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\"")), response -> {
+            assertHits(response.getHits(), "1");
+            assertHitCount(response, 1L);
+        });
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("bar \"2015/09/02\"")), response -> {
+            assertHits(response.getHits(), "1", "2");
+            assertHitCount(response, 2L);
+        });
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\" \"2015/09/01\"")), response -> {
+            assertHits(response.getHits(), "1", "2");
+            assertHitCount(response, 2L);
+        });
     }
 
     public void testWithLotsOfTypes() throws Exception {
@@ -419,29 +430,26 @@ public void testWithLotsOfTypes() throws Exception {
         ensureGreen("test");
 
         List<IndexRequestBuilder> reqs = new ArrayList<>();
-        reqs.add(
-            client().prepareIndex("test").setId("1").setSource("f1", "foo", "f_date", "2015/09/02", "f_float", "1.7", "f_ip", "127.0.0.1")
-        );
-        reqs.add(
-            client().prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01", "f_float", "1.8", "f_ip", "127.0.0.2")
-        );
+        reqs.add(prepareIndex("test").setId("1").setSource("f1", "foo", "f_date", "2015/09/02", "f_float", "1.7", "f_ip", "127.0.0.1"));
+        reqs.add(prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01", "f_float", "1.8", "f_ip", "127.0.0.2"));
         indexRandom(true, false, reqs);
 
-        SearchResponse resp = prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")).get();
-        assertHits(resp.getHits(), "1", "2");
-        assertHitCount(resp, 2L);
-
-        resp = prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\"")).get();
-        assertHits(resp.getHits(), "1");
-        assertHitCount(resp, 1L);
-
-        resp = prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.2 \"2015/09/02\"")).get();
-        assertHits(resp.getHits(), "1", "2");
-        assertHitCount(resp, 2L);
-
-        resp = prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.1 1.8")).get();
-        assertHits(resp.getHits(), "1", "2");
-        assertHitCount(resp, 2L);
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")), response -> {
+            assertHits(response.getHits(), "1", "2");
+            assertHitCount(response, 2L);
+        });
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\"")), response -> {
+            assertHits(response.getHits(), "1");
+            assertHitCount(response, 1L);
+        });
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.2 \"2015/09/02\"")), response -> {
+            assertHits(response.getHits(), "1", "2");
+            assertHitCount(response, 2L);
+        });
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.1 1.8")), response -> {
+            assertHits(response.getHits(), "1", "2");
+            assertHitCount(response, 2L);
+        });
     }
 
     public void testDocWithAllTypes() throws Exception {
@@ -451,42 +459,38 @@ public void testDocWithAllTypes() throws Exception {
 
         List<IndexRequestBuilder> reqs = new ArrayList<>();
         String docBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-example-document.json");
-        reqs.add(client().prepareIndex("test").setId("1").setSource(docBody, XContentType.JSON));
+        reqs.add(prepareIndex("test").setId("1").setSource(docBody, XContentType.JSON));
         indexRandom(true, false, reqs);
 
-        SearchResponse resp = prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get();
-        assertHits(resp.getHits(), "1");
-        resp = prepareSearch("test").setQuery(simpleQueryStringQuery("Bar")).get();
-        assertHits(resp.getHits(), "1");
-        resp = prepareSearch("test").setQuery(simpleQueryStringQuery("Baz")).get();
-        assertHits(resp.getHits(), "1");
-        resp = prepareSearch("test").setQuery(simpleQueryStringQuery("19")).get();
-        assertHits(resp.getHits(), "1");
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("foo")), response -> assertHits(response.getHits(), "1"));
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("Bar")), response -> assertHits(response.getHits(), "1"));
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("Baz")), response -> assertHits(response.getHits(), "1"));
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("19")), response -> assertHits(response.getHits(), "1"));
         // nested doesn't match because it's hidden
-        resp = prepareSearch("test").setQuery(simpleQueryStringQuery("1476383971")).get();
-        assertHits(resp.getHits(), "1");
+        assertResponse(
+            prepareSearch("test").setQuery(simpleQueryStringQuery("1476383971")),
+            response -> assertHits(response.getHits(), "1")
+        );
         // bool doesn't match
-        resp = prepareSearch("test").setQuery(simpleQueryStringQuery("7")).get();
-        assertHits(resp.getHits(), "1");
-        resp = prepareSearch("test").setQuery(simpleQueryStringQuery("23")).get();
-        assertHits(resp.getHits(), "1");
-        resp = prepareSearch("test").setQuery(simpleQueryStringQuery("1293")).get();
-        assertHits(resp.getHits(), "1");
-        resp = prepareSearch("test").setQuery(simpleQueryStringQuery("42")).get();
-        assertHits(resp.getHits(), "1");
-        resp = prepareSearch("test").setQuery(simpleQueryStringQuery("1.7")).get();
-        assertHits(resp.getHits(), "1");
-        resp = prepareSearch("test").setQuery(simpleQueryStringQuery("1.5")).get();
-        assertHits(resp.getHits(), "1");
-        resp = prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.1")).get();
-        assertHits(resp.getHits(), "1");
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("7")), response -> assertHits(response.getHits(), "1"));
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("23")), response -> assertHits(response.getHits(), "1"));
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("1293")), response -> assertHits(response.getHits(), "1"));
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("42")), response -> assertHits(response.getHits(), "1"));
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("1.7")), response -> assertHits(response.getHits(), "1"));
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("1.5")), response -> assertHits(response.getHits(), "1"));
+        assertResponse(
+            prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.1")),
+            response -> assertHits(response.getHits(), "1")
+        );
         // binary doesn't match
         // suggest doesn't match
         // geo_point doesn't match
         // geo_shape doesn't match
 
-        resp = prepareSearch("test").setQuery(simpleQueryStringQuery("foo Bar 19 127.0.0.1").defaultOperator(Operator.AND)).get();
-        assertHits(resp.getHits(), "1");
+        assertResponse(
+            prepareSearch("test").setQuery(simpleQueryStringQuery("foo Bar 19 127.0.0.1").defaultOperator(Operator.AND)),
+            response -> assertHits(response.getHits(), "1")
+        );
     }
 
     public void testKeywordWithWhitespace() throws Exception {
@@ -495,18 +499,19 @@ public void testKeywordWithWhitespace() throws Exception {
         ensureGreen("test");
 
         List<IndexRequestBuilder> reqs = new ArrayList<>();
-        reqs.add(client().prepareIndex("test").setId("1").setSource("f2", "Foo Bar"));
-        reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar"));
-        reqs.add(client().prepareIndex("test").setId("3").setSource("f1", "foo bar"));
+        reqs.add(prepareIndex("test").setId("1").setSource("f2", "Foo Bar"));
+        reqs.add(prepareIndex("test").setId("2").setSource("f1", "bar"));
+        reqs.add(prepareIndex("test").setId("3").setSource("f1", "foo bar"));
         indexRandom(true, false, reqs);
 
-        SearchResponse resp = prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get();
-        assertHits(resp.getHits(), "3");
-        assertHitCount(resp, 1L);
-
-        resp = prepareSearch("test").setQuery(simpleQueryStringQuery("bar")).get();
-        assertHits(resp.getHits(), "2", "3");
-        assertHitCount(resp, 2L);
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("foo")), response -> {
+            assertHits(response.getHits(), "3");
+            assertHitCount(response, 1L);
+        });
+        assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("bar")), response -> {
+            assertHits(response.getHits(), "2", "3");
+            assertHitCount(response, 2L);
+        });
     }
 
     public void testAllFieldsWithSpecifiedLeniency() throws Exception {
@@ -515,7 +520,7 @@ public void testAllFieldsWithSpecifiedLeniency() throws Exception {
         ensureGreen("test");
 
         List<IndexRequestBuilder> reqs = new ArrayList<>();
-        reqs.add(client().prepareIndex("test").setId("1").setSource("f_long", 1));
+        reqs.add(prepareIndex("test").setId("1").setSource("f_long", 1));
         indexRandom(true, false, reqs);
 
         SearchPhaseExecutionException e = expectThrows(
@@ -531,16 +536,15 @@ public void testFieldAlias() throws Exception {
         ensureGreen("test");
 
         List<IndexRequestBuilder> indexRequests = new ArrayList<>();
-        indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one"));
-        indexRequests.add(client().prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two"));
-        indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three"));
+        indexRequests.add(prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one"));
+        indexRequests.add(prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two"));
+        indexRequests.add(prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three"));
         indexRandom(true, false, indexRequests);
 
-        SearchResponse response = prepareSearch("test").setQuery(simpleQueryStringQuery("value").field("f3_alias")).get();
-
-        assertNoFailures(response);
-        assertHitCount(response, 2);
-        assertHits(response.getHits(), "2", "3");
+        assertNoFailuresAndResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("value").field("f3_alias")), response -> {
+            assertHitCount(response, 2);
+            assertHits(response.getHits(), "2", "3");
+        });
     }
 
     public void testFieldAliasWithWildcardField() throws Exception {
@@ -549,16 +553,15 @@ public void testFieldAliasWithWildcardField() throws Exception {
         ensureGreen("test");
 
         List<IndexRequestBuilder> indexRequests = new ArrayList<>();
-        indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one"));
-        indexRequests.add(client().prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two"));
-        indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three"));
+        indexRequests.add(prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one"));
+        indexRequests.add(prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two"));
+        indexRequests.add(prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three"));
         indexRandom(true, false, indexRequests);
 
-        SearchResponse response = prepareSearch("test").setQuery(simpleQueryStringQuery("value").field("f3_*")).get();
-
-        assertNoFailures(response);
-        assertHitCount(response, 2);
-        assertHits(response.getHits(), "2", "3");
+        assertNoFailuresAndResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("value").field("f3_*")), response -> {
+            assertHitCount(response, 2);
+            assertHits(response.getHits(), "2", "3");
+        });
     }
 
     public void testFieldAliasOnDisallowedFieldType() throws Exception {
@@ -567,16 +570,15 @@ public void testFieldAliasOnDisallowedFieldType() throws Exception {
         ensureGreen("test");
 
         List<IndexRequestBuilder> indexRequests = new ArrayList<>();
-        indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one"));
+        indexRequests.add(prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one"));
         indexRandom(true, false, indexRequests);
 
         // The wildcard field matches aliases for both a text and boolean field.
         // By default, the boolean field should be ignored when building the query.
-        SearchResponse response = prepareSearch("test").setQuery(queryStringQuery("text").field("f*_alias")).get();
-
-        assertNoFailures(response);
-        assertHitCount(response, 1);
-        assertHits(response.getHits(), "1");
+        assertNoFailuresAndResponse(prepareSearch("test").setQuery(queryStringQuery("text").field("f*_alias")), response -> {
+            assertHitCount(response, 1);
+            assertHits(response.getHits(), "1");
+        });
     }
 
     private void assertHits(SearchHits hits, String... ids) {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java
index 1d13bea9e0639..20b9ce38254c3 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java
@@ -11,7 +11,6 @@
 import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
 import org.elasticsearch.action.search.SearchRequestBuilder;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
@@ -32,6 +31,7 @@
 import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
 import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
@@ -55,7 +55,7 @@ public void testStopOneNodePreferenceWithRedState() throws IOException {
         assertAcked(prepareCreate("test").setSettings(indexSettings(cluster().numDataNodes() + 2, 0)));
         ensureGreen();
         for (int i = 0; i < 10; i++) {
-            client().prepareIndex("test").setId("" + i).setSource("field1", "value1").get();
+            prepareIndex("test").setId("" + i).setSource("field1", "value1").get();
         }
         refresh();
         internalCluster().stopRandomDataNode();
@@ -67,21 +67,25 @@ public void testStopOneNodePreferenceWithRedState() throws IOException {
             "_prefer_nodes:somenode,server2" };
         for (String pref : preferences) {
             logger.info("--> Testing out preference={}", pref);
-            SearchResponse searchResponse = prepareSearch().setSize(0).setPreference(pref).get();
-            assertThat(RestStatus.OK, equalTo(searchResponse.status()));
-            assertThat(pref, searchResponse.getFailedShards(), greaterThanOrEqualTo(0));
-            searchResponse = prepareSearch().setPreference(pref).get();
-            assertThat(RestStatus.OK, equalTo(searchResponse.status()));
-            assertThat(pref, searchResponse.getFailedShards(), greaterThanOrEqualTo(0));
+            assertResponse(prepareSearch().setSize(0).setPreference(pref), response -> {
+                assertThat(RestStatus.OK, equalTo(response.status()));
+                assertThat(pref, response.getFailedShards(), greaterThanOrEqualTo(0));
+            });
+            assertResponse(prepareSearch().setPreference(pref), response -> {
+                assertThat(RestStatus.OK, equalTo(response.status()));
+                assertThat(pref, response.getFailedShards(), greaterThanOrEqualTo(0));
+            });
         }
 
         // _only_local is a stricter preference, we need to send the request to a data node
-        SearchResponse searchResponse = dataNodeClient().prepareSearch().setSize(0).setPreference("_only_local").get();
-        assertThat(RestStatus.OK, equalTo(searchResponse.status()));
-        assertThat("_only_local", searchResponse.getFailedShards(), greaterThanOrEqualTo(0));
-        searchResponse = dataNodeClient().prepareSearch().setPreference("_only_local").get();
-        assertThat(RestStatus.OK, equalTo(searchResponse.status()));
-        assertThat("_only_local", searchResponse.getFailedShards(), greaterThanOrEqualTo(0));
+        assertResponse(dataNodeClient().prepareSearch().setSize(0).setPreference("_only_local"), response -> {
+            assertThat(RestStatus.OK, equalTo(response.status()));
+            assertThat("_only_local", response.getFailedShards(), greaterThanOrEqualTo(0));
+        });
+        assertResponse(dataNodeClient().prepareSearch().setPreference("_only_local"), response -> {
+            assertThat(RestStatus.OK, equalTo(response.status()));
+            assertThat("_only_local", response.getFailedShards(), greaterThanOrEqualTo(0));
+        });
     }
 
     public void testNoPreferenceRandom() {
@@ -93,33 +97,43 @@ public void testNoPreferenceRandom() {
         );
         ensureGreen();
 
-        client().prepareIndex("test").setSource("field1", "value1").get();
+        prepareIndex("test").setSource("field1", "value1").get();
         refresh();
 
         final Client client = internalCluster().smartClient();
-        SearchResponse searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).get();
-        String firstNodeId = searchResponse.getHits().getAt(0).getShard().getNodeId();
-        searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).get();
-        String secondNodeId = searchResponse.getHits().getAt(0).getShard().getNodeId();
-
-        assertThat(firstNodeId, not(equalTo(secondNodeId)));
+        assertResponse(
+            client.prepareSearch("test").setQuery(matchAllQuery()),
+            first -> assertResponse(
+                client.prepareSearch("test").setQuery(matchAllQuery()),
+                second -> assertThat(
+                    first.getHits().getAt(0).getShard().getNodeId(),
+                    not(equalTo(second.getHits().getAt(0).getShard().getNodeId()))
+                )
+            )
+        );
     }
 
     public void testSimplePreference() {
         indicesAdmin().prepareCreate("test").setSettings("{\"number_of_replicas\": 1}", XContentType.JSON).get();
         ensureGreen();
 
-        client().prepareIndex("test").setSource("field1", "value1").get();
+        prepareIndex("test").setSource("field1", "value1").get();
         refresh();
 
-        SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).get();
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L));
+        assertResponse(
+            prepareSearch().setQuery(matchAllQuery()),
+            response -> assertThat(response.getHits().getTotalHits().value, equalTo(1L))
+        );
 
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setPreference("_local").get();
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L));
+        assertResponse(
+            prepareSearch().setQuery(matchAllQuery()).setPreference("_local"),
+            response -> assertThat(response.getHits().getTotalHits().value, equalTo(1L))
+        );
 
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setPreference("1234").get();
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L));
+        assertResponse(
+            prepareSearch().setQuery(matchAllQuery()).setPreference("1234"),
+            response -> assertThat(response.getHits().getTotalHits().value, equalTo(1L))
+        );
     }
 
     public void testThatSpecifyingNonExistingNodesReturnsUsefulError() {
@@ -142,7 +156,7 @@ public void testNodesOnlyRandom() {
             )
         );
         ensureGreen();
-        client().prepareIndex("test").setSource("field1", "value1").get();
+        prepareIndex("test").setSource("field1", "value1").get();
         refresh();
 
         final Client client = internalCluster().smartClient();
@@ -188,9 +202,10 @@ public void testNodesOnlyRandom() {
     private void assertSearchOnRandomNodes(SearchRequestBuilder request) {
         Set<String> hitNodes = new HashSet<>();
         for (int i = 0; i < 2; i++) {
-            SearchResponse searchResponse = request.get();
-            assertThat(searchResponse.getHits().getHits().length, greaterThan(0));
-            hitNodes.add(searchResponse.getHits().getAt(0).getShard().getNodeId());
+            assertResponse(request, response -> {
+                assertThat(response.getHits().getHits().length, greaterThan(0));
+                hitNodes.add(response.getHits().getAt(0).getShard().getNodeId());
+            });
         }
         assertThat(hitNodes.size(), greaterThan(1));
     }
@@ -212,7 +227,7 @@ public void testCustomPreferenceUnaffectedByOtherShardMovements() {
             )
         );
         ensureGreen();
-        client().prepareIndex("test").setSource("field1", "value1").get();
+        prepareIndex("test").setSource("field1", "value1").get();
         refresh();
 
         final String customPreference = randomAlphaOfLength(10);
@@ -259,8 +274,9 @@ public void testCustomPreferenceUnaffectedByOtherShardMovements() {
     }
 
     private static void assertSearchesSpecificNode(String index, String customPreference, String nodeId) {
-        final SearchResponse searchResponse = prepareSearch(index).setQuery(matchAllQuery()).setPreference(customPreference).get();
-        assertThat(searchResponse.getHits().getHits().length, equalTo(1));
-        assertThat(searchResponse.getHits().getAt(0).getShard().getNodeId(), equalTo(nodeId));
+        assertResponse(prepareSearch(index).setQuery(matchAllQuery()).setPreference(customPreference), response -> {
+            assertThat(response.getHits().getHits().length, equalTo(1));
+            assertThat(response.getHits().getAt(0).getShard().getNodeId(), equalTo(nodeId));
+        });
     }
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java
index 35ea9614d182a..1362b0166a709 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java
@@ -11,7 +11,6 @@
 import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.OperationRouting;
@@ -23,6 +22,7 @@
 import java.util.Set;
 
 import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 
@@ -49,18 +49,18 @@ public void testNodeSelection() {
         // Before we've gathered stats for all nodes, we should try each node once.
Set<String> nodeIds = new HashSet<>(); - SearchResponse searchResponse = client.prepareSearch().setQuery(matchAllQuery()).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - nodeIds.add(searchResponse.getHits().getAt(0).getShard().getNodeId()); - - searchResponse = client.prepareSearch().setQuery(matchAllQuery()).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - nodeIds.add(searchResponse.getHits().getAt(0).getShard().getNodeId()); - - searchResponse = client.prepareSearch().setQuery(matchAllQuery()).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - nodeIds.add(searchResponse.getHits().getAt(0).getShard().getNodeId()); - + assertResponse(client.prepareSearch().setQuery(matchAllQuery()), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + nodeIds.add(response.getHits().getAt(0).getShard().getNodeId()); + }); + assertResponse(client.prepareSearch().setQuery(matchAllQuery()), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + nodeIds.add(response.getHits().getAt(0).getShard().getNodeId()); + }); + assertResponse(client.prepareSearch().setQuery(matchAllQuery()), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + nodeIds.add(response.getHits().getAt(0).getShard().getNodeId()); + }); assertEquals(3, nodeIds.size()); // Now after more searches, we should select a node with the lowest ARS rank. @@ -78,13 +78,14 @@ public void testNodeSelection() { assertNotNull(nodeStats); assertEquals(3, nodeStats.getAdaptiveSelectionStats().getComputedStats().size()); - searchResponse = client.prepareSearch().setQuery(matchAllQuery()).get(); - String selectedNodeId = searchResponse.getHits().getAt(0).getShard().getNodeId(); - double selectedRank = nodeStats.getAdaptiveSelectionStats().getRanks().get(selectedNodeId); + assertResponse(client.prepareSearch().setQuery(matchAllQuery()), response -> { + String selectedNodeId = response.getHits().getAt(0).getShard().getNodeId(); + double selectedRank = nodeStats.getAdaptiveSelectionStats().getRanks().get(selectedNodeId); - for (Map.Entry<String, Double> entry : nodeStats.getAdaptiveSelectionStats().getRanks().entrySet()) { - double rank = entry.getValue(); - assertThat(rank, greaterThanOrEqualTo(selectedRank)); - } + for (Map.Entry<String, Double> entry : nodeStats.getAdaptiveSelectionStats().getRanks().entrySet()) { + double rank = entry.getValue(); + assertThat(rank, greaterThanOrEqualTo(selectedRank)); + } + }); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java index dc460468db605..4c99becad055e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.scriptfilter; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.fielddata.ScriptDocValues; @@ -37,6 +36,7 @@ import static org.elasticsearch.index.query.QueryBuilders.scriptQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; @@ -102,31 +102,30 @@ public void testCustomScriptBinaryField() throws Exception { final byte[] randomBytesDoc2 = getRandomBytes(16); assertAcked(indicesAdmin().prepareCreate("my-index").setMapping(createMappingSource("binary")).setSettings(indexSettings())); - client().prepareIndex("my-index") - .setId("1") + prepareIndex("my-index").setId("1") .setSource(jsonBuilder().startObject().field("binaryData", Base64.getEncoder().encodeToString(randomBytesDoc1)).endObject()) .get(); flush(); - client().prepareIndex("my-index") - .setId("2") + prepareIndex("my-index").setId("2") .setSource(jsonBuilder().startObject().field("binaryData", Base64.getEncoder().encodeToString(randomBytesDoc2)).endObject()) .get(); flush(); refresh(); - SearchResponse response = prepareSearch().setQuery( - scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['binaryData'].get(0).length > 15", emptyMap())) - ) - .addScriptField( - "sbinaryData", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['binaryData'].get(0).length", emptyMap()) + assertResponse( + prepareSearch().setQuery( + scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['binaryData'].get(0).length > 15", emptyMap())) ) - .get(); - - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getAt(0).getId(), equalTo("2")); - assertThat(response.getHits().getAt(0).getFields().get("sbinaryData").getValues().get(0), equalTo(16)); - + .addScriptField( + "sbinaryData", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['binaryData'].get(0).length", emptyMap()) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertThat(response.getHits().getAt(0).getFields().get("sbinaryData").getValues().get(0), equalTo(16)); + } + ); } private byte[] getRandomBytes(int len) { @@ -151,68 +150,78 @@ private XContentBuilder createMappingSource(String fieldType) throws IOException public void testCustomScriptBoost() throws Exception { createIndex("test"); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 1.0f).endObject()) .get(); flush(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 2.0f).endObject()) .get(); flush(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 3.0f).endObject()) .get(); refresh(); logger.info("running doc['num1'].value > 1"); - SearchResponse response = prepareSearch().setQuery( - scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > 1", Collections.emptyMap())) - ) - .addSort("num1", SortOrder.ASC) - .addScriptField("sNum1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap())) - .get(); - - assertThat(response.getHits().getTotalHits().value, equalTo(2L)); - assertThat(response.getHits().getAt(0).getId(), equalTo("2")); - 
assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); - assertThat(response.getHits().getAt(1).getId(), equalTo("3")); - assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); - + assertResponse( + prepareSearch().setQuery( + scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > 1", Collections.emptyMap())) + ) + .addSort("num1", SortOrder.ASC) + .addScriptField( + "sNum1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap()) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); + assertThat(response.getHits().getAt(1).getId(), equalTo("3")); + assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); + } + ); Map<String, Object> params = new HashMap<>(); params.put("param1", 2); logger.info("running doc['num1'].value > param1"); - response = prepareSearch().setQuery( - scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > param1", params)) - ) - .addSort("num1", SortOrder.ASC) - .addScriptField("sNum1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap())) - .get(); - - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getAt(0).getId(), equalTo("3")); - assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); - + assertResponse( + prepareSearch().setQuery( + scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > param1", params)) + ) + .addSort("num1", SortOrder.ASC) + .addScriptField( + "sNum1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap()) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("3")); + assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); + } + ); params = new HashMap<>(); params.put("param1", -1); logger.info("running doc['num1'].value > param1"); - response = prepareSearch().setQuery( - scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > param1", params)) - ) - .addSort("num1", SortOrder.ASC) - .addScriptField("sNum1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap())) - .get(); - - assertThat(response.getHits().getTotalHits().value, equalTo(3L)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(1.0)); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); - assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); + assertResponse( + prepareSearch().setQuery( + scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > param1", params)) + ) + .addSort("num1", SortOrder.ASC) + .addScriptField( + "sNum1", + new Script(ScriptType.INLINE, 
CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap()) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(1.0)); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); + } + ); } public void testDisallowExpensiveQueries() { @@ -220,7 +229,7 @@ public void testDisallowExpensiveQueries() { assertAcked(prepareCreate("test-index").setMapping("num1", "type=double")); int docCount = 10; for (int i = 1; i <= docCount; i++) { - client().prepareIndex("test-index").setId("" + i).setSource("num1", i).get(); + prepareIndex("test-index").setId("" + i).setSource("num1", i).get(); } refresh(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/DuelScrollIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/DuelScrollIT.java index c63aa19beb42e..e89e51a60fa23 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/DuelScrollIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/DuelScrollIT.java @@ -130,7 +130,7 @@ private TestContext create(SearchType... searchTypes) throws Exception { } for (int i = 1; i <= numDocs; i++) { - IndexRequestBuilder indexRequestBuilder = client().prepareIndex("index").setId(String.valueOf(i)); + IndexRequestBuilder indexRequestBuilder = prepareIndex("index").setId(String.valueOf(i)); if (missingDocs.contains(i)) { indexRequestBuilder.setSource("x", "y"); } else { @@ -205,7 +205,7 @@ private int createIndex(boolean singleShard) throws Exception { IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; ++i) { - builders[i] = client().prepareIndex("test").setId(Integer.toString(i)).setSource("foo", random().nextBoolean()); + builders[i] = prepareIndex("test").setId(Integer.toString(i)).setSource("foo", random().nextBoolean()); } indexRandom(true, builders); return numDocs; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java index f94e59cbe1ab4..e8b3cfdb1768a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java @@ -73,10 +73,7 @@ public void testSimpleScrollQueryThenFetch() throws Exception { clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); for (int i = 0; i < 100; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) - .setSource(jsonBuilder().startObject().field("field", i).endObject()) - .get(); + prepareIndex("test").setId(Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).get(); } indicesAdmin().prepareRefresh().get(); @@ -128,7 +125,7 @@ public void testSimpleScrollQueryThenFetchSmallSizeUnevenDistribution() throws E } else if (i > 60) { routing = "2"; } - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", i).setRouting(routing).get(); + 
prepareIndex("test").setId(Integer.toString(i)).setSource("field", i).setRouting(routing).get(); } indicesAdmin().prepareRefresh().get(); @@ -186,8 +183,7 @@ public void testScrollAndUpdateIndex() throws Exception { clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); for (int i = 0; i < 500; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setSource( jsonBuilder().startObject() .field("user", "kimchy") @@ -216,7 +212,7 @@ public void testScrollAndUpdateIndex() throws Exception { for (SearchHit searchHit : searchResponse.getHits().getHits()) { Map map = searchHit.getSourceAsMap(); map.put("message", "update"); - client().prepareIndex("test").setId(searchHit.getId()).setSource(map).get(); + prepareIndex("test").setId(searchHit.getId()).setSource(map).get(); } searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)).get(); } while (searchResponse.getHits().getHits().length > 0); @@ -245,10 +241,7 @@ public void testSimpleScrollQueryThenFetch_clearScrollIds() throws Exception { clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); for (int i = 0; i < 100; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) - .setSource(jsonBuilder().startObject().field("field", i).endObject()) - .get(); + prepareIndex("test").setId(Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).get(); } indicesAdmin().prepareRefresh().get(); @@ -363,10 +356,7 @@ public void testSimpleScrollQueryThenFetchClearAllScrollIds() throws Exception { clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); for (int i = 0; i < 100; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) - .setSource(jsonBuilder().startObject().field("field", i).endObject()) - .get(); + prepareIndex("test").setId(Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).get(); } indicesAdmin().prepareRefresh().get(); @@ -436,7 +426,7 @@ public void testSimpleScrollQueryThenFetchClearAllScrollIds() throws Exception { * Tests that we use an optimization shrinking the batch to the size of the shard. Thus the Integer.MAX_VALUE window doesn't OOM us. */ public void testDeepScrollingDoesNotBlowUp() throws Exception { - client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get(); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get(); /* * Disable the max result window setting for this test because it'll reject the search's unreasonable batch size. We want * unreasonable batch sizes to just OOM. 
@@ -462,7 +452,7 @@ public void testDeepScrollingDoesNotBlowUp() throws Exception { } public void testThatNonExistingScrollIdReturnsCorrectException() throws Exception { - client().prepareIndex("index").setId("1").setSource("field", "value").execute().get(); + prepareIndex("index").setId("1").setSource("field", "value").execute().get(); refresh(); SearchResponse searchResponse = prepareSearch("index").setSize(1).setScroll("1m").get(); @@ -478,7 +468,7 @@ public void testStringSortMissingAscTerminates() throws Exception { assertAcked( prepareCreate("test").setSettings(indexSettings(1, 0)).setMapping("no_field", "type=keyword", "some_field", "type=keyword") ); - client().prepareIndex("test").setId("1").setSource("some_field", "test").get(); + prepareIndex("test").setId("1").setSource("some_field", "test").get(); refresh(); SearchResponse response = prepareSearch("test") @@ -510,7 +500,7 @@ public void testStringSortMissingAscTerminates() throws Exception { public void testCloseAndReopenOrDeleteWithActiveScroll() { createIndex("test"); for (int i = 0; i < 100; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", i).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", i).get(); } refresh(); SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) @@ -566,10 +556,7 @@ public void testScrollInvalidDefaultKeepAlive() throws IOException { public void testInvalidScrollKeepAlive() throws IOException { createIndex("test"); for (int i = 0; i < 2; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) - .setSource(jsonBuilder().startObject().field("field", i).endObject()) - .get(); + prepareIndex("test").setId(Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).get(); } refresh(); updateClusterSettings(Settings.builder().put("search.default_keep_alive", "5m").put("search.max_keep_alive", "5m")); @@ -612,9 +599,9 @@ public void testScrollRewrittenToMatchNoDocs() { {"properties":{"created_date":{"type": "date", "format": "yyyy-MM-dd"}}} """) ); - client().prepareIndex("test").setId("1").setSource("created_date", "2020-01-01").get(); - client().prepareIndex("test").setId("2").setSource("created_date", "2020-01-02").get(); - client().prepareIndex("test").setId("3").setSource("created_date", "2020-01-03").get(); + prepareIndex("test").setId("1").setSource("created_date", "2020-01-01").get(); + prepareIndex("test").setId("2").setSource("created_date", "2020-01-02").get(); + prepareIndex("test").setId("3").setSource("created_date", "2020-01-03").get(); indicesAdmin().prepareRefresh("test").get(); SearchResponse resp = null; try { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollWithFailingNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollWithFailingNodesIT.java index 96c007e05e414..23a38c0608490 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollWithFailingNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollWithFailingNodesIT.java @@ -53,7 +53,7 @@ public void testScanScrollWithShardExceptions() throws Exception { List<IndexRequestBuilder> writes = new ArrayList<>(); for (int i = 0; i < 100; i++) { - writes.add(client().prepareIndex("test").setSource(jsonBuilder().startObject().field("field", i).endObject())); + writes.add(prepareIndex("test").setSource(jsonBuilder().startObject().field("field", i).endObject())); } indexRandom(false, 
writes); refresh(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java index 3ac8b103ce910..6219c1b72253a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java @@ -12,15 +12,15 @@ import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.ClosePointInTimeAction; import org.elasticsearch.action.search.ClosePointInTimeRequest; -import org.elasticsearch.action.search.OpenPointInTimeAction; import org.elasticsearch.action.search.OpenPointInTimeRequest; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.search.TransportClosePointInTimeAction; +import org.elasticsearch.action.search.TransportOpenPointInTimeAction; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Randomness; @@ -63,7 +63,7 @@ public class SearchAfterIT extends ESIntegTestCase { public void testsShouldFail() throws Exception { assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=long", "field2", "type=keyword").get()); ensureGreen(); - indexRandom(true, client().prepareIndex("test").setId("0").setSource("field1", 0, "field2", "toto")); + indexRandom(true, prepareIndex("test").setId("0").setSource("field1", 0, "field2", "toto")); { SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, @@ -154,8 +154,8 @@ public void testWithNullStrings() throws InterruptedException { ensureGreen(); indexRandom( true, - client().prepareIndex("test").setId("0").setSource("field1", 0), - client().prepareIndex("test").setId("1").setSource("field1", 100, "field2", "toto") + prepareIndex("test").setId("0").setSource("field1", 0), + prepareIndex("test").setId("1").setSource("field1", 100, "field2", "toto") ); SearchResponse searchResponse = prepareSearch("test").addSort("field1", SortOrder.ASC) .addSort("field2", SortOrder.ASC) @@ -314,7 +314,7 @@ private void assertSearchFromWithSortValues(String indexName, List<List<Object>> builder.field("field" + Integer.toString(j), documents.get(i).get(j)); } builder.endObject(); - requests.add(client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource(builder)); + requests.add(prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource(builder)); } indexRandom(true, requests); } @@ -456,7 +456,7 @@ public void testScrollAndSearchAfterWithBigIndex() { String pitID; { OpenPointInTimeRequest openPITRequest = new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueMinutes(5)); - pitID = client().execute(OpenPointInTimeAction.INSTANCE, openPITRequest).actionGet().getPointInTimeId(); + pitID = client().execute(TransportOpenPointInTimeAction.TYPE, openPITRequest).actionGet().getPointInTimeId(); SearchRequest searchRequest = new SearchRequest("test").source( new SearchSourceBuilder().pointInTimeBuilder(new 
PointInTimeBuilder(pitID).setKeepAlive(TimeValue.timeValueMinutes(5))) .sort("timestamp") @@ -483,14 +483,14 @@ public void testScrollAndSearchAfterWithBigIndex() { } while (resp.getHits().getHits().length > 0); assertThat(foundHits, equalTo(timestamps.size())); } finally { - client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(pitID)).actionGet(); + client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitID)).actionGet(); } } // search_after without sort with point in time { OpenPointInTimeRequest openPITRequest = new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueMinutes(5)); - pitID = client().execute(OpenPointInTimeAction.INSTANCE, openPITRequest).actionGet().getPointInTimeId(); + pitID = client().execute(TransportOpenPointInTimeAction.TYPE, openPITRequest).actionGet().getPointInTimeId(); SearchRequest searchRequest = new SearchRequest("test").source( new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(pitID).setKeepAlive(TimeValue.timeValueMinutes(5))) .sort(SortBuilders.pitTiebreaker()) @@ -517,7 +517,7 @@ public void testScrollAndSearchAfterWithBigIndex() { Collections.sort(foundSeqNos); assertThat(foundSeqNos, equalTo(timestamps)); } finally { - client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(pitID)).actionGet(); + client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitID)).actionGet(); } } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java index 61490cac43e45..f47303b83b6e3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; @@ -47,7 +46,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -76,12 +76,12 @@ public void testSearchRandomPreference() throws InterruptedException, ExecutionE createIndex("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("field", "value"), - client().prepareIndex("test").setId("2").setSource("field", "value"), - client().prepareIndex("test").setId("3").setSource("field", "value"), - client().prepareIndex("test").setId("4").setSource("field", "value"), - client().prepareIndex("test").setId("5").setSource("field", "value"), - 
client().prepareIndex("test").setId("6").setSource("field", "value") + prepareIndex("test").setId("1").setSource("field", "value"), + prepareIndex("test").setId("2").setSource("field", "value"), + prepareIndex("test").setId("3").setSource("field", "value"), + prepareIndex("test").setId("4").setSource("field", "value"), + prepareIndex("test").setId("5").setSource("field", "value"), + prepareIndex("test").setId("6").setSource("field", "value") ); int iters = scaledRandomIntBetween(10, 20); @@ -117,7 +117,7 @@ public void testSimpleIp() throws Exception { ) .get(); - client().prepareIndex("test").setId("1").setSource("from", "192.168.0.5", "to", "192.168.0.10").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("from", "192.168.0.5", "to", "192.168.0.10").setRefreshPolicy(IMMEDIATE).get(); assertHitCount( prepareSearch().setQuery(boolQuery().must(rangeQuery("from").lte("192.168.0.7")).must(rangeQuery("to").gte("192.168.0.7"))), 1L @@ -143,11 +143,11 @@ public void testIpCidr() throws Exception { .get(); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("ip", "192.168.0.1").get(); - client().prepareIndex("test").setId("2").setSource("ip", "192.168.0.2").get(); - client().prepareIndex("test").setId("3").setSource("ip", "192.168.0.3").get(); - client().prepareIndex("test").setId("4").setSource("ip", "192.168.1.4").get(); - client().prepareIndex("test").setId("5").setSource("ip", "2001:db8::ff00:42:8329").get(); + prepareIndex("test").setId("1").setSource("ip", "192.168.0.1").get(); + prepareIndex("test").setId("2").setSource("ip", "192.168.0.2").get(); + prepareIndex("test").setId("3").setSource("ip", "192.168.0.3").get(); + prepareIndex("test").setId("4").setSource("ip", "192.168.1.4").get(); + prepareIndex("test").setId("5").setSource("ip", "2001:db8::ff00:42:8329").get(); refresh(); assertHitCount(prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1"))), 1L); @@ -171,7 +171,7 @@ public void testIpCidr() throws Exception { public void testSimpleId() { createIndex("test"); - client().prepareIndex("test").setId("XXX1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("XXX1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); // id is not indexed, but lets see that we automatically convert to assertHitCount(prepareSearch().setQuery(QueryBuilders.termQuery("_id", "XXX1")), 1L); assertHitCount(prepareSearch().setQuery(QueryBuilders.queryStringQuery("_id:XXX1")), 1L); @@ -179,9 +179,9 @@ public void testSimpleId() { public void testSimpleDateRange() throws Exception { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("field", "2010-01-05T02:00").get(); - client().prepareIndex("test").setId("2").setSource("field", "2010-01-06T02:00").get(); - client().prepareIndex("test").setId("3").setSource("field", "1967-01-01T00:00").get(); + prepareIndex("test").setId("1").setSource("field", "2010-01-05T02:00").get(); + prepareIndex("test").setId("2").setSource("field", "2010-01-06T02:00").get(); + prepareIndex("test").setId("3").setSource("field", "1967-01-01T00:00").get(); ensureGreen(); refresh(); assertHitCountAndNoFailures( @@ -210,12 +210,12 @@ public void testSimpleDateRange() throws Exception { assertHitCountAndNoFailures(prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gt("1000")), 3L); // a numeric value of 1000 should be parsed as 1000 millis since epoch and return only docs after 1970 - SearchResponse searchResponse = 
prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gt(1000)).get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 2L); - String[] expectedIds = new String[] { "1", "2" }; - assertThat(searchResponse.getHits().getHits()[0].getId(), is(oneOf(expectedIds))); - assertThat(searchResponse.getHits().getHits()[1].getId(), is(oneOf(expectedIds))); + assertNoFailuresAndResponse(prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gt(1000)), response -> { + assertHitCount(response, 2L); + String[] expectedIds = new String[] { "1", "2" }; + assertThat(response.getHits().getHits()[0].getId(), is(oneOf(expectedIds))); + assertThat(response.getHits().getHits()[1].getId(), is(oneOf(expectedIds))); + }); } public void testRangeQueryKeyword() throws Exception { @@ -223,10 +223,10 @@ public void testRangeQueryKeyword() throws Exception { indicesAdmin().preparePutMapping("test").setSource("field", "type=keyword").get(); - client().prepareIndex("test").setId("0").setSource("field", "").get(); - client().prepareIndex("test").setId("1").setSource("field", "A").get(); - client().prepareIndex("test").setId("2").setSource("field", "B").get(); - client().prepareIndex("test").setId("3").setSource("field", "C").get(); + prepareIndex("test").setId("0").setSource("field", "").get(); + prepareIndex("test").setId("1").setSource("field", "A").get(); + prepareIndex("test").setId("2").setSource("field", "B").get(); + prepareIndex("test").setId("3").setSource("field", "C").get(); ensureGreen(); refresh(); @@ -248,24 +248,30 @@ public void testSimpleTerminateAfterCount() throws Exception { for (int i = 1; i <= max; i++) { String id = String.valueOf(i); - docbuilders.add(client().prepareIndex("test").setId(id).setSource("field", i)); + docbuilders.add(prepareIndex("test").setId(id).setSource("field", i)); } indexRandom(true, docbuilders); ensureGreen(); refresh(); - SearchResponse searchResponse; for (int i = 1; i < max; i++) { - searchResponse = prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max)).setTerminateAfter(i).get(); - assertHitCount(searchResponse, i); - assertTrue(searchResponse.isTerminatedEarly()); + final int finalI = i; + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max)).setTerminateAfter(i), + response -> { + assertHitCount(response, finalI); + assertTrue(response.isTerminatedEarly()); + } + ); } - - searchResponse = prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max)).setTerminateAfter(2 * max).get(); - - assertHitCount(searchResponse, max); - assertFalse(searchResponse.isTerminatedEarly()); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max)).setTerminateAfter(2 * max), + response -> { + assertHitCount(response, max); + assertFalse(response.isTerminatedEarly()); + } + ); } public void testSimpleIndexSortEarlyTerminate() throws Exception { @@ -276,30 +282,30 @@ public void testSimpleIndexSortEarlyTerminate() throws Exception { for (int i = max - 1; i >= 0; i--) { String id = String.valueOf(i); - docbuilders.add(client().prepareIndex("test").setId(id).setSource("rank", i)); + docbuilders.add(prepareIndex("test").setId(id).setSource("rank", i)); } indexRandom(true, docbuilders); ensureGreen(); refresh(); - SearchResponse searchResponse; for (int i = 1; i < max; i++) { - searchResponse = prepareSearch("test").addDocValueField("rank") - .setTrackTotalHits(false) - .addSort("rank", SortOrder.ASC) - .setSize(i) - 
.get(); - assertNull(searchResponse.getHits().getTotalHits()); - for (int j = 0; j < i; j++) { - assertThat(searchResponse.getHits().getAt(j).field("rank").getValue(), equalTo((long) j)); - } + final int finalI = i; + assertResponse( + prepareSearch("test").addDocValueField("rank").setTrackTotalHits(false).addSort("rank", SortOrder.ASC).setSize(i), + response -> { + assertNull(response.getHits().getTotalHits()); + for (int j = 0; j < finalI; j++) { + assertThat(response.getHits().getAt(j).field("rank").getValue(), equalTo((long) j)); + } + } + ); } } public void testInsaneFromAndSize() throws Exception { createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); assertWindowFails(prepareSearch("idx").setFrom(Integer.MAX_VALUE)); assertWindowFails(prepareSearch("idx").setSize(Integer.MAX_VALUE)); @@ -307,7 +313,7 @@ public void testInsaneFromAndSize() throws Exception { public void testTooLargeFromAndSize() throws Exception { createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); assertWindowFails(prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY))); assertWindowFails(prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1)); @@ -319,7 +325,7 @@ public void testTooLargeFromAndSize() throws Exception { public void testLargeFromAndSizeSucceeds() throws Exception { createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); assertHitCount(prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) - 10), 1); assertHitCount(prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)), 1); @@ -335,7 +341,7 @@ public void testTooLargeFromAndSizeOkBySetting() throws Exception { Settings.builder() .put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2) ).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); assertHitCount(prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)), 1); assertHitCount(prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1), 1); @@ -353,7 +359,7 @@ public void testTooLargeFromAndSizeOkByDynamicSetting() throws Exception { .put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2), "idx" ); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); assertHitCount(prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)), 1); assertHitCount(prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1), 1); @@ -366,7 +372,7 @@ public void testTooLargeFromAndSizeOkByDynamicSetting() throws Exception { public void testTooLargeFromAndSizeBackwardsCompatibilityRecommendation() throws Exception { prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), 
Integer.MAX_VALUE)).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); assertHitCount(prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10), 1); assertHitCount(prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10), 1); @@ -379,7 +385,7 @@ public void testTooLargeFromAndSizeBackwardsCompatibilityRecommendation() throws public void testTooLargeRescoreWindow() throws Exception { createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); assertRescoreWindowFails(Integer.MAX_VALUE); assertRescoreWindowFails(IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY) + 1); @@ -389,7 +395,7 @@ public void testTooLargeRescoreOkBySetting() throws Exception { int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)) .get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); assertHitCount(prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)), 1); } @@ -403,7 +409,7 @@ public void testTooLargeRescoreOkByResultWindowSetting() throws Exception { defaultMaxWindow * 2 ) ).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); assertHitCount(prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)), 1); } @@ -412,7 +418,7 @@ public void testTooLargeRescoreOkByDynamicSetting() throws Exception { int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); createIndex("idx"); updateIndexSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2), "idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); assertHitCount(prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)), 1); } @@ -425,7 +431,7 @@ public void testTooLargeRescoreOkByDynamicResultWindowSetting() throws Exception Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), defaultMaxWindow * 2), "idx" ); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); assertHitCount(prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)), 1); } @@ -446,8 +452,7 @@ public void testTermQueryBigInt() throws Exception { prepareCreate("idx").setMapping("field", "type=keyword").get(); ensureGreen("idx"); - client().prepareIndex("idx") - .setId("1") + prepareIndex("idx").setId("1") .setSource("{\"field\" : 80315953321748200608 }", XContentType.JSON) .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .get(); @@ -461,7 +466,7 @@ public void testTermQueryBigInt() throws Exception { public void testTooLongRegexInRegexpQuery() throws Exception { createIndex("idx"); - 
indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); int defaultMaxRegexLength = IndexSettings.MAX_REGEX_LENGTH_SETTING.get(Settings.EMPTY); StringBuilder regexp = new StringBuilder(defaultMaxRegexLength); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java index 948b7261ded1c..527d8bed8bc68 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java @@ -10,14 +10,14 @@ import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.ClosePointInTimeAction; import org.elasticsearch.action.search.ClosePointInTimeRequest; -import org.elasticsearch.action.search.OpenPointInTimeAction; import org.elasticsearch.action.search.OpenPointInTimeRequest; import org.elasticsearch.action.search.OpenPointInTimeResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportClosePointInTimeAction; +import org.elasticsearch.action.search.TransportOpenPointInTimeAction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; @@ -79,7 +79,7 @@ private void setupIndex(int numDocs, int numberOfShards) throws IOException, Exe .field("static_int", 0) .field("invalid_random_int", randomInt()) .endObject(); - requests.add(client().prepareIndex("test").setSource(builder)); + requests.add(prepareIndex("test").setSource(builder)); } indexRandom(true, requests); } @@ -197,7 +197,7 @@ public void testPointInTime() throws Exception { for (String field : new String[] { null, "random_int", "static_int" }) { // Open point-in-time reader OpenPointInTimeRequest request = new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueSeconds(10)); - OpenPointInTimeResponse response = client().execute(OpenPointInTimeAction.INSTANCE, request).actionGet(); + OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); String pointInTimeId = response.getPointInTimeId(); // Test sort on document IDs @@ -206,7 +206,7 @@ public void testPointInTime() throws Exception { assertSearchSlicesWithPointInTime(field, "random_int", pointInTimeId, max, numDocs); // Close point-in-time reader - client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(pointInTimeId)).actionGet(); + client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pointInTimeId)).actionGet(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java index 2926d36becb4a..2cd68398e211f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java @@ -63,6 +63,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; @@ -115,35 +117,37 @@ public void testIssue8226() { assertAcked(prepareCreate("test_" + i).addAlias(new Alias("test"))); } if (i > 0) { - client().prepareIndex("test_" + i).setId("" + i).setSource("{\"entry\": " + i + "}", XContentType.JSON).get(); + prepareIndex("test_" + i).setId("" + i).setSource("{\"entry\": " + i + "}", XContentType.JSON).get(); } } refresh(); // sort DESC - SearchResponse searchResponse = prepareSearch().addSort( - new FieldSortBuilder("entry").order(SortOrder.DESC).unmappedType(useMapping ? null : "long") - ).setSize(10).get(); - logClusterState(); - assertNoFailures(searchResponse); - - for (int j = 1; j < searchResponse.getHits().getHits().length; j++) { - Number current = (Number) searchResponse.getHits().getHits()[j].getSourceAsMap().get("entry"); - Number previous = (Number) searchResponse.getHits().getHits()[j - 1].getSourceAsMap().get("entry"); - assertThat(searchResponse.toString(), current.intValue(), lessThan(previous.intValue())); - } + assertNoFailuresAndResponse( + prepareSearch().addSort(new FieldSortBuilder("entry").order(SortOrder.DESC).unmappedType(useMapping ? null : "long")) + .setSize(10), + response -> { + logClusterState(); + for (int j = 1; j < response.getHits().getHits().length; j++) { + Number current = (Number) response.getHits().getHits()[j].getSourceAsMap().get("entry"); + Number previous = (Number) response.getHits().getHits()[j - 1].getSourceAsMap().get("entry"); + assertThat(response.toString(), current.intValue(), lessThan(previous.intValue())); + } + } + ); // sort ASC - searchResponse = prepareSearch().addSort( - new FieldSortBuilder("entry").order(SortOrder.ASC).unmappedType(useMapping ? null : "long") - ).setSize(10).get(); - logClusterState(); - assertNoFailures(searchResponse); - - for (int j = 1; j < searchResponse.getHits().getHits().length; j++) { - Number current = (Number) searchResponse.getHits().getHits()[j].getSourceAsMap().get("entry"); - Number previous = (Number) searchResponse.getHits().getHits()[j - 1].getSourceAsMap().get("entry"); - assertThat(searchResponse.toString(), current.intValue(), greaterThan(previous.intValue())); - } + assertNoFailuresAndResponse( + prepareSearch().addSort(new FieldSortBuilder("entry").order(SortOrder.ASC).unmappedType(useMapping ? 
null : "long")) + .setSize(10), + response -> { + logClusterState(); + for (int j = 1; j < response.getHits().getHits().length; j++) { + Number current = (Number) response.getHits().getHits()[j].getSourceAsMap().get("entry"); + Number previous = (Number) response.getHits().getHits()[j - 1].getSourceAsMap().get("entry"); + assertThat(response.toString(), current.intValue(), greaterThan(previous.intValue())); + } + } + ); } public void testIssue6614() throws ExecutionException, InterruptedException { @@ -159,46 +163,52 @@ public void testIssue6614() throws ExecutionException, InterruptedException { final int numDocs = randomIntBetween(1, 23); // hour of the day for (int j = 0; j < numDocs; j++) { builders.add( - client().prepareIndex(indexId) - .setSource( - "foo", - "bar", - "timeUpdated", - "2014/07/" + Strings.format("%02d", i + 1) + " " + Strings.format("%02d", j + 1) + ":00:00" - ) + prepareIndex(indexId).setSource( + "foo", + "bar", + "timeUpdated", + "2014/07/" + Strings.format("%02d", i + 1) + " " + Strings.format("%02d", j + 1) + ":00:00" + ) ); } indexRandom(true, builders); docs += builders.size(); builders.clear(); } - SearchResponse allDocsResponse = prepareSearch().setQuery( - QueryBuilders.boolQuery() - .must(QueryBuilders.termQuery("foo", "bar")) - .must(QueryBuilders.rangeQuery("timeUpdated").gte("2014/0" + randomIntBetween(1, 7) + "/01")) - ).addSort(new FieldSortBuilder("timeUpdated").order(SortOrder.ASC).unmappedType("date")).setSize(docs).get(); - assertNoFailures(allDocsResponse); - - final int numiters = randomIntBetween(1, 20); - for (int i = 0; i < numiters; i++) { - SearchResponse searchResponse = prepareSearch().setQuery( + final int finalDocs = docs; + assertNoFailuresAndResponse( + prepareSearch().setQuery( QueryBuilders.boolQuery() .must(QueryBuilders.termQuery("foo", "bar")) - .must(QueryBuilders.rangeQuery("timeUpdated").gte("2014/" + Strings.format("%02d", randomIntBetween(1, 7)) + "/01")) - ) - .addSort(new FieldSortBuilder("timeUpdated").order(SortOrder.ASC).unmappedType("date")) - .setSize(scaledRandomIntBetween(1, docs)) - .get(); - assertNoFailures(searchResponse); - for (int j = 0; j < searchResponse.getHits().getHits().length; j++) { - assertThat( - searchResponse.toString() + "\n vs. \n" + allDocsResponse.toString(), - searchResponse.getHits().getHits()[j].getId(), - equalTo(allDocsResponse.getHits().getHits()[j].getId()) - ); + .must(QueryBuilders.rangeQuery("timeUpdated").gte("2014/0" + randomIntBetween(1, 7) + "/01")) + ).addSort(new FieldSortBuilder("timeUpdated").order(SortOrder.ASC).unmappedType("date")).setSize(docs), + allDocsResponse -> { + final int numiters = randomIntBetween(1, 20); + for (int i = 0; i < numiters; i++) { + assertNoFailuresAndResponse( + prepareSearch().setQuery( + QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery("foo", "bar")) + .must( + QueryBuilders.rangeQuery("timeUpdated") + .gte("2014/" + Strings.format("%02d", randomIntBetween(1, 7)) + "/01") + ) + ) + .addSort(new FieldSortBuilder("timeUpdated").order(SortOrder.ASC).unmappedType("date")) + .setSize(scaledRandomIntBetween(1, finalDocs)), + response -> { + for (int j = 0; j < response.getHits().getHits().length; j++) { + assertThat( + response.toString() + "\n vs. 
\n" + allDocsResponse.toString(), + response.getHits().getHits()[j].getId(), + equalTo(allDocsResponse.getHits().getHits()[j].getId()) + ); + } + } + ); + } } - } - + ); } public void testTrackScores() throws Exception { @@ -214,20 +224,19 @@ public void testTrackScores() throws Exception { ); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).addSort("svalue", SortOrder.ASC).get(); - - assertThat(searchResponse.getHits().getMaxScore(), equalTo(Float.NaN)); - for (SearchHit hit : searchResponse.getHits()) { - assertThat(hit.getScore(), equalTo(Float.NaN)); - } - + assertResponse(prepareSearch().setQuery(matchAllQuery()).addSort("svalue", SortOrder.ASC), response -> { + assertThat(response.getHits().getMaxScore(), equalTo(Float.NaN)); + for (SearchHit hit : response.getHits()) { + assertThat(hit.getScore(), equalTo(Float.NaN)); + } + }); // now check with score tracking - searchResponse = prepareSearch().setQuery(matchAllQuery()).addSort("svalue", SortOrder.ASC).setTrackScores(true).get(); - - assertThat(searchResponse.getHits().getMaxScore(), not(equalTo(Float.NaN))); - for (SearchHit hit : searchResponse.getHits()) { - assertThat(hit.getScore(), not(equalTo(Float.NaN))); - } + assertResponse(prepareSearch().setQuery(matchAllQuery()).addSort("svalue", SortOrder.ASC).setTrackScores(true), response -> { + assertThat(response.getHits().getMaxScore(), not(equalTo(Float.NaN))); + for (SearchHit hit : response.getHits()) { + assertThat(hit.getScore(), not(equalTo(Float.NaN))); + } + }); } public void testRandomSorting() throws IOException, InterruptedException, ExecutionException { @@ -268,45 +277,47 @@ public void testRandomSorting() throws IOException, InterruptedException, Execut sparseBytes.put(ref, docId); } src.endObject(); - builders[i] = client().prepareIndex("test").setId(docId).setSource(src); + builders[i] = prepareIndex("test").setId(docId).setSource(src); } indexRandom(true, builders); { int size = between(1, denseBytes.size()); - SearchResponse searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .setSize(size) - .addSort("dense_bytes", SortOrder.ASC) - .get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) numDocs)); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - Set> entrySet = denseBytes.entrySet(); - Iterator> iterator = entrySet.iterator(); - for (int i = 0; i < size; i++) { - assertThat(iterator.hasNext(), equalTo(true)); - Entry next = iterator.next(); - assertThat("pos: " + i, searchResponse.getHits().getAt(i).getId(), equalTo(next.getValue())); - assertThat(searchResponse.getHits().getAt(i).getSortValues()[0].toString(), equalTo(next.getKey().utf8ToString())); - } + assertNoFailuresAndResponse( + prepareSearch("test").setQuery(matchAllQuery()).setSize(size).addSort("dense_bytes", SortOrder.ASC), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo((long) numDocs)); + assertThat(response.getHits().getHits().length, equalTo(size)); + Set> entrySet = denseBytes.entrySet(); + Iterator> iterator = entrySet.iterator(); + for (int i = 0; i < size; i++) { + assertThat(iterator.hasNext(), equalTo(true)); + Entry next = iterator.next(); + assertThat("pos: " + i, response.getHits().getAt(i).getId(), equalTo(next.getValue())); + assertThat(response.getHits().getAt(i).getSortValues()[0].toString(), equalTo(next.getKey().utf8ToString())); + } + } + ); } if (sparseBytes.isEmpty() == false) { int size = between(1, 
sparseBytes.size()); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .setPostFilter(QueryBuilders.existsQuery("sparse_bytes")) - .setSize(size) - .addSort("sparse_bytes", SortOrder.ASC) - .get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) sparseBytes.size())); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - Set<Entry<BytesRef, String>> entrySet = sparseBytes.entrySet(); - Iterator<Entry<BytesRef, String>> iterator = entrySet.iterator(); - for (int i = 0; i < size; i++) { - assertThat(iterator.hasNext(), equalTo(true)); - Entry<BytesRef, String> next = iterator.next(); - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(next.getValue())); - assertThat(searchResponse.getHits().getAt(i).getSortValues()[0].toString(), equalTo(next.getKey().utf8ToString())); - } + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .setPostFilter(QueryBuilders.existsQuery("sparse_bytes")) + .setSize(size) + .addSort("sparse_bytes", SortOrder.ASC), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo((long) sparseBytes.size())); + assertThat(response.getHits().getHits().length, equalTo(size)); + Set<Entry<BytesRef, String>> entrySet = sparseBytes.entrySet(); + Iterator<Entry<BytesRef, String>> iterator = entrySet.iterator(); + for (int i = 0; i < size; i++) { + assertThat(iterator.hasNext(), equalTo(true)); + Entry<BytesRef, String> next = iterator.next(); + assertThat(response.getHits().getAt(i).getId(), equalTo(next.getValue())); + assertThat(response.getHits().getAt(i).getSortValues()[0].toString(), equalTo(next.getKey().utf8ToString())); + } + } + ); } } @@ -315,142 +326,161 @@ public void test3078() { ensureGreen(); for (int i = 1; i < 101; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", Integer.toString(i)).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", Integer.toString(i)).get(); } refresh(); - SearchResponse searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)) - .get(); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); - + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)), + response -> { + assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); + assertThat(response.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); + } + ); // reindex and refresh - client().prepareIndex("test").setId(Integer.toString(1)).setSource("field", Integer.toString(1)).get(); + prepareIndex("test").setId(Integer.toString(1)).setSource("field", Integer.toString(1)).get(); refresh(); - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)) - .get(); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); - + assertResponse( + 
prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)), + response -> { + assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); + assertThat(response.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); + } + ); // reindex - no refresh - client().prepareIndex("test").setId(Integer.toString(1)).setSource("field", Integer.toString(1)).get(); - - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)) - .get(); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); - + prepareIndex("test").setId(Integer.toString(1)).setSource("field", Integer.toString(1)).get(); + + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)), + response -> { + assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); + assertThat(response.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); + } + ); // force merge forceMerge(); refresh(); - client().prepareIndex("test").setId(Integer.toString(1)).setSource("field", Integer.toString(1)).get(); - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)) - .get(); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); - + prepareIndex("test").setId(Integer.toString(1)).setSource("field", Integer.toString(1)).get(); + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)), + response -> { + assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); + assertThat(response.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); + } + ); refresh(); - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)) - .get(); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)), + response -> { + assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); + assertThat(response.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); + } + ); } public void testScoreSortDirection() throws Exception { createIndex("test"); ensureGreen(); - 
client().prepareIndex("test").setId("1").setSource("field", 2).get(); - client().prepareIndex("test").setId("2").setSource("field", 1).get(); - client().prepareIndex("test").setId("3").setSource("field", 0).get(); + prepareIndex("test").setId("1").setSource("field", 2).get(); + prepareIndex("test").setId("2").setSource("field", 1).get(); + prepareIndex("test").setId("3").setSource("field", 0).get(); refresh(); - SearchResponse searchResponse = prepareSearch("test").setQuery( - QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field")) - ).get(); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(0).getScore())); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(2).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(1).getScore())); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); - - searchResponse = prepareSearch("test").setQuery( - QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field")) - ).addSort("_score", SortOrder.DESC).get(); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(0).getScore())); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(2).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(1).getScore())); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); - - searchResponse = prepareSearch("test").setQuery( - QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field")) - ).addSort("_score", SortOrder.DESC).get(); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); + assertResponse( + prepareSearch("test").setQuery( + QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field")) + ), + response -> { + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getScore(), Matchers.lessThan(response.getHits().getAt(0).getScore())); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(2).getScore(), Matchers.lessThan(response.getHits().getAt(1).getScore())); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field")) + ).addSort("_score", SortOrder.DESC), + response -> { + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getScore(), Matchers.lessThan(response.getHits().getAt(0).getScore())); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(2).getScore(), Matchers.lessThan(response.getHits().getAt(1).getScore())); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + QueryBuilders.functionScoreQuery(matchAllQuery(), 
ScoreFunctionBuilders.fieldValueFactorFunction("field")) + ).addSort("_score", SortOrder.DESC), + response -> { + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + } + ); } public void testScoreSortDirectionWithFunctionScore() throws Exception { createIndex("test"); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("field", 2).get(); - client().prepareIndex("test").setId("2").setSource("field", 1).get(); - client().prepareIndex("test").setId("3").setSource("field", 0).get(); + prepareIndex("test").setId("1").setSource("field", 2).get(); + prepareIndex("test").setId("2").setSource("field", 1).get(); + prepareIndex("test").setId("3").setSource("field", 0).get(); refresh(); - SearchResponse searchResponse = prepareSearch("test").setQuery( - functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("field")) - ).get(); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(0).getScore())); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(2).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(1).getScore())); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); - - searchResponse = prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("field"))) - .addSort("_score", SortOrder.DESC) - .get(); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(0).getScore())); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(2).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(1).getScore())); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); - - searchResponse = prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("field"))) - .addSort("_score", SortOrder.DESC) - .get(); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); + assertResponse(prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("field"))), response -> { + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getScore(), Matchers.lessThan(response.getHits().getAt(0).getScore())); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(2).getScore(), Matchers.lessThan(response.getHits().getAt(1).getScore())); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + }); + assertResponse( + prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("field"))) + .addSort("_score", SortOrder.DESC), + response -> { + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getScore(), Matchers.lessThan(response.getHits().getAt(0).getScore())); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(2).getScore(), 
Matchers.lessThan(response.getHits().getAt(1).getScore())); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + } + ); + assertResponse( + prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("field"))) + .addSort("_score", SortOrder.DESC), + response -> { + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + } + ); } public void testIssue2986() { assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=keyword").get()); - client().prepareIndex("test").setId("1").setSource("{\"field1\":\"value1\"}", XContentType.JSON).get(); - client().prepareIndex("test").setId("2").setSource("{\"field1\":\"value2\"}", XContentType.JSON).get(); - client().prepareIndex("test").setId("3").setSource("{\"field1\":\"value3\"}", XContentType.JSON).get(); + prepareIndex("test").setId("1").setSource("{\"field1\":\"value1\"}", XContentType.JSON).get(); + prepareIndex("test").setId("2").setSource("{\"field1\":\"value2\"}", XContentType.JSON).get(); + prepareIndex("test").setId("3").setSource("{\"field1\":\"value3\"}", XContentType.JSON).get(); refresh(); - SearchResponse result = prepareSearch("test").setQuery(matchAllQuery()).setTrackScores(true).addSort("field1", SortOrder.ASC).get(); - - for (SearchHit hit : result.getHits()) { - assertFalse(Float.isNaN(hit.getScore())); - } + assertResponse(prepareSearch("test").setQuery(matchAllQuery()).setTrackScores(true).addSort("field1", SortOrder.ASC), response -> { + for (SearchHit hit : response.getHits()) { + assertFalse(Float.isNaN(hit.getScore())); + } + }); } public void testIssue2991() { @@ -462,35 +492,36 @@ public void testIssue2991() { } assertAcked(indicesAdmin().prepareCreate("test").setMapping("tag", "type=keyword").get()); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("tag", "alpha").get(); + prepareIndex("test").setId("1").setSource("tag", "alpha").get(); refresh(); - client().prepareIndex("test").setId("3").setSource("tag", "gamma").get(); + prepareIndex("test").setId("3").setSource("tag", "gamma").get(); refresh(); - client().prepareIndex("test").setId("4").setSource("tag", "delta").get(); + prepareIndex("test").setId("4").setSource("tag", "delta").get(); refresh(); - client().prepareIndex("test").setId("2").setSource("tag", "beta").get(); + prepareIndex("test").setId("2").setSource("tag", "beta").get(); refresh(); - SearchResponse resp = prepareSearch("test").setSize(2) - .setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("tag").order(SortOrder.ASC)) - .get(); - assertHitCount(resp, 4); - assertThat(resp.getHits().getHits().length, equalTo(2)); - assertFirstHit(resp, hasId("1")); - assertSecondHit(resp, hasId("2")); - - resp = prepareSearch("test").setSize(2) - .setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("tag").order(SortOrder.DESC)) - .get(); - assertHitCount(resp, 4); - assertThat(resp.getHits().getHits().length, equalTo(2)); - assertFirstHit(resp, hasId("3")); - assertSecondHit(resp, hasId("4")); + assertResponse( + prepareSearch("test").setSize(2).setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("tag").order(SortOrder.ASC)), + response -> { + assertHitCount(response, 4); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("2")); + } + ); + assertResponse( + 
prepareSearch("test").setSize(2).setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("tag").order(SortOrder.DESC)), + response -> { + assertHitCount(response, 4); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertFirstHit(response, hasId("3")); + assertSecondHit(response, hasId("4")); + } + ); } } @@ -534,8 +565,7 @@ public void testSimpleSorts() throws Exception { ensureGreen(); List<IndexRequestBuilder> builders = new ArrayList<>(); for (int i = 0; i < 10; i++) { - IndexRequestBuilder builder = client().prepareIndex("test") - .setId(Integer.toString(i)) + IndexRequestBuilder builder = prepareIndex("test").setId(Integer.toString(i)) .setSource( jsonBuilder().startObject() .field("str_value", new String(new char[] { (char) (97 + i), (char) (97 + i) })) @@ -565,172 +595,190 @@ public void testSimpleSorts() throws Exception { refresh(); // STRING - int size = 1 + random.nextInt(10); - - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("str_value", SortOrder.ASC).get(); - assertHitCount(searchResponse, 10); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); - assertThat( - searchResponse.getHits().getAt(i).getSortValues()[0].toString(), - equalTo(new String(new char[] { (char) (97 + i), (char) (97 + i) })) - ); + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("str_value", SortOrder.ASC), response -> { + assertHitCount(response, 10); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + assertThat( + response.getHits().getAt(i).getSortValues()[0].toString(), + equalTo(new String(new char[] { (char) (97 + i), (char) (97 + i) })) + ); + } + }); } - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("str_value", SortOrder.DESC).get(); - - assertHitCount(searchResponse, 10); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); - assertThat( - searchResponse.getHits().getAt(i).getSortValues()[0].toString(), - equalTo(new String(new char[] { (char) (97 + (9 - i)), (char) (97 + (9 - i)) })) - ); + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("str_value", SortOrder.DESC), response -> { + assertHitCount(response, 10); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); + assertThat( + response.getHits().getAt(i).getSortValues()[0].toString(), + equalTo(new String(new char[] { (char) (97 + (9 - i)), (char) (97 + (9 - i)) })) + ); + } + assertThat(response.toString(), not(containsString("error"))); + }); } - - assertThat(searchResponse.toString(), not(containsString("error"))); - // BYTE - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("byte_value", SortOrder.ASC).get(); - - assertHitCount(searchResponse, 10); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { -
assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).byteValue(), equalTo((byte) i)); + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("byte_value", SortOrder.ASC), response -> { + assertHitCount(response, 10); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).byteValue(), equalTo((byte) i)); + } + }); } - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("byte_value", SortOrder.DESC).get(); - - assertHitCount(searchResponse, 10); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).byteValue(), equalTo((byte) (9 - i))); + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("byte_value", SortOrder.DESC), response -> { + assertHitCount(response, 10); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); + assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).byteValue(), equalTo((byte) (9 - i))); + } + assertThat(response.toString(), not(containsString("error"))); + }); } - - assertThat(searchResponse.toString(), not(containsString("error"))); - // SHORT - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("short_value", SortOrder.ASC).get(); - - assertHitCount(searchResponse, 10); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).shortValue(), equalTo((short) i)); + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("short_value", SortOrder.ASC), response -> { + assertHitCount(response, 10); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).shortValue(), equalTo((short) i)); + } + }); } - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("short_value", SortOrder.DESC).get(); - - assertHitCount(searchResponse, 10); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).shortValue(), equalTo((short) (9 - i))); + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("short_value", SortOrder.DESC), response -> { + 
assertHitCount(response, 10); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); + assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).shortValue(), equalTo((short) (9 - i))); + } + assertThat(response.toString(), not(containsString("error"))); + }); } - - assertThat(searchResponse.toString(), not(containsString("error"))); - // INTEGER - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("integer_value", SortOrder.ASC).get(); - - assertHitCount(searchResponse, 10); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).intValue(), equalTo(i)); + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("integer_value", SortOrder.ASC), response -> { + assertHitCount(response, 10); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).intValue(), equalTo(i)); + } + assertThat(response.toString(), not(containsString("error"))); + }); } + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("integer_value", SortOrder.DESC), response -> { + assertHitCount(response, 10); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); + assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).intValue(), equalTo((9 - i))); + } - assertThat(searchResponse.toString(), not(containsString("error"))); - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("integer_value", SortOrder.DESC).get(); - - assertHitCount(searchResponse, 10); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).intValue(), equalTo((9 - i))); + assertThat(response.toString(), not(containsString("error"))); + }); } - - assertThat(searchResponse.toString(), not(containsString("error"))); - // LONG - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("long_value", SortOrder.ASC).get(); - - assertHitCount(searchResponse, 10); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).longValue(), equalTo((long) i)); - } + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("long_value", SortOrder.ASC), response -> { + assertHitCount(response, 10); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; 
i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).longValue(), equalTo((long) i)); + } - assertThat(searchResponse.toString(), not(containsString("error"))); - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("long_value", SortOrder.DESC).get(); - assertHitCount(searchResponse, 10L); - assertHitCount(searchResponse, 10); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).longValue(), equalTo((long) (9 - i))); + assertThat(response.toString(), not(containsString("error"))); + }); + } + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("long_value", SortOrder.DESC), response -> { + assertHitCount(response, 10L); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); + assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).longValue(), equalTo((long) (9 - i))); + } + assertThat(response.toString(), not(containsString("error"))); + }); } - - assertThat(searchResponse.toString(), not(containsString("error"))); - // FLOAT - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("float_value", SortOrder.ASC).get(); - - assertHitCount(searchResponse, 10L); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).doubleValue(), closeTo(0.1d * i, 0.000001d)); + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("float_value", SortOrder.ASC), response -> { + assertHitCount(response, 10L); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).doubleValue(), closeTo(0.1d * i, 0.000001d)); + } + assertThat(response.toString(), not(containsString("error"))); + }); } - - assertThat(searchResponse.toString(), not(containsString("error"))); - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("float_value", SortOrder.DESC).get(); - - assertHitCount(searchResponse, 10); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).doubleValue(), closeTo(0.1d * (9 - i), 0.000001d)); + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("float_value", SortOrder.DESC), response -> { + assertHitCount(response, 10); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + 
assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); + assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).doubleValue(), closeTo(0.1d * (9 - i), 0.000001d)); + } + assertThat(response.toString(), not(containsString("error"))); + }); } - - assertThat(searchResponse.toString(), not(containsString("error"))); - // DOUBLE - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.ASC).get(); - - assertHitCount(searchResponse, 10L); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).doubleValue(), closeTo(0.1d * i, 0.000001d)); + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.ASC), response -> { + assertHitCount(response, 10L); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).doubleValue(), closeTo(0.1d * i, 0.000001d)); + } + assertThat(response.toString(), not(containsString("error"))); + }); } - - assertThat(searchResponse.toString(), not(containsString("error"))); - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.DESC).get(); - - assertHitCount(searchResponse, 10L); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).doubleValue(), closeTo(0.1d * (9 - i), 0.000001d)); + { + int size = 1 + random.nextInt(10); + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.DESC), + response -> { + assertHitCount(response, 10L); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); + assertThat( + ((Number) response.getHits().getAt(i).getSortValues()[0]).doubleValue(), + closeTo(0.1d * (9 - i), 0.000001d) + ); + } + } + ); } - - assertNoFailures(searchResponse); } public void testSortMissingNumbers() throws Exception { @@ -752,15 +800,13 @@ public void testSortMissingNumbers() throws Exception { ) ); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("id", "1").field("i_value", -1).field("d_value", -1.1).endObject()) .get(); - client().prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("id", "2").endObject()).get(); + prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("id", "2").endObject()).get(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource(jsonBuilder().startObject().field("id", "1").field("i_value", 2).field("d_value", 2.2).endObject()) .get(); @@ -768,37 +814,35 @@ public void testSortMissingNumbers() throws Exception { refresh(); logger.info("--> sort with no missing 
(same as missing _last)"); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC)) - .get(); - assertNoFailures(searchResponse); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("3")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("2")); - + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC)), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getId(), equalTo("2")); + } + ); logger.info("--> sort with missing _last"); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing("_last")) - .get(); - assertNoFailures(searchResponse); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("3")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("2")); - + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing("_last")), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getId(), equalTo("2")); + } + ); logger.info("--> sort with missing _first"); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing("_first")) - .get(); - assertNoFailures(searchResponse); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing("_first")), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertThat(response.getHits().getAt(1).getId(), equalTo("1")); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + } + ); } public void testSortMissingStrings() throws IOException { @@ -817,17 +861,11 @@ public void testSortMissingStrings() throws IOException { ) ); ensureGreen(); - client().prepareIndex("test") - .setId("1") - .setSource(jsonBuilder().startObject().field("id", "1").field("value", "a").endObject()) - .get(); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("id", "1").field("value", "a").endObject()).get(); - client().prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("id", "2").endObject()).get(); + prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("id", "2").endObject()).get(); - 
client().prepareIndex("test") - .setId("3") - .setSource(jsonBuilder().startObject().field("id", "1").field("value", "c").endObject()) - .get(); + prepareIndex("test").setId("3").setSource(jsonBuilder().startObject().field("id", "1").field("value", "c").endObject()).get(); flush(); refresh(); @@ -840,48 +878,53 @@ public void testSortMissingStrings() throws IOException { } logger.info("--> sort with no missing (same as missing _last)"); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC)) - .get(); - assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0)); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("3")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("2")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC)), + response -> { + assertThat(Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0)); + + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getId(), equalTo("2")); + } + ); logger.info("--> sort with missing _last"); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("_last")) - .get(); - assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0)); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("3")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("2")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("_last")), + response -> { + assertThat(Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0)); + + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getId(), equalTo("2")); + } + ); logger.info("--> sort with missing _first"); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("_first")) - .get(); - assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0)); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("_first")), + response -> { + assertThat(Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0)); + + 
assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertThat(response.getHits().getAt(1).getId(), equalTo("1")); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + } + ); logger.info("--> sort with missing b"); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("b")) - .get(); - assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0)); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("b")), + response -> { + assertThat(Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0)); + + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + } + ); } public void testSortMissingDates() throws IOException { @@ -902,9 +945,9 @@ public void testSortMissingDates() throws IOException { ) ); ensureGreen(); - client().prepareIndex(index).setId("1").setSource("mydate", "2021-01-01").get(); - client().prepareIndex(index).setId("2").setSource("mydate", "2021-02-01").get(); - client().prepareIndex(index).setId("3").setSource("other_field", "value").get(); + prepareIndex(index).setId("1").setSource("mydate", "2021-01-01").get(); + prepareIndex(index).setId("2").setSource("mydate", "2021-02-01").get(); + prepareIndex(index).setId("3").setSource("other_field", "value").get(); refresh(); @@ -914,24 +957,27 @@ public void testSortMissingDates() throws IOException { format = type.equals("date") ? 
"strict_date_optional_time" : "strict_date_optional_time_nanos"; } - SearchResponse searchResponse = prepareSearch(index).addSort( - SortBuilders.fieldSort("mydate").order(SortOrder.ASC).setFormat(format) - ).get(); - assertHitsInOrder(searchResponse, new String[] { "1", "2", "3" }); + assertResponse( + prepareSearch(index).addSort(SortBuilders.fieldSort("mydate").order(SortOrder.ASC).setFormat(format)), + response -> assertHitsInOrder(response, new String[] { "1", "2", "3" }) + ); - searchResponse = prepareSearch(index).addSort( - SortBuilders.fieldSort("mydate").order(SortOrder.ASC).missing("_first").setFormat(format) - ).get(); - assertHitsInOrder(searchResponse, new String[] { "3", "1", "2" }); + assertResponse( + prepareSearch(index).addSort(SortBuilders.fieldSort("mydate").order(SortOrder.ASC).missing("_first").setFormat(format)), + response -> assertHitsInOrder(response, new String[] { "3", "1", "2" }) + ); - searchResponse = prepareSearch(index).addSort(SortBuilders.fieldSort("mydate").order(SortOrder.DESC).setFormat(format)) - .get(); - assertHitsInOrder(searchResponse, new String[] { "2", "1", "3" }); + assertResponse( + prepareSearch(index).addSort(SortBuilders.fieldSort("mydate").order(SortOrder.DESC).setFormat(format)), + response -> assertHitsInOrder(response, new String[] { "2", "1", "3" }) + ); - searchResponse = prepareSearch(index).addSort( - SortBuilders.fieldSort("mydate").order(SortOrder.DESC).missing("_first").setFormat(format) - ).get(); - assertHitsInOrder(searchResponse, new String[] { "3", "2", "1" }); + assertResponse( + prepareSearch(index).addSort( + SortBuilders.fieldSort("mydate").order(SortOrder.DESC).missing("_first").setFormat(format) + ), + response -> assertHitsInOrder(response, new String[] { "3", "2", "1" }) + ); } } } @@ -960,12 +1006,12 @@ public void testSortMissingDatesMixedTypes() throws IOException { } ensureGreen(); - client().prepareIndex("test_date").setId("1").setSource("mydate", "2021-01-01").get(); - client().prepareIndex("test_date").setId("2").setSource("mydate", "2021-02-01").get(); - client().prepareIndex("test_date").setId("3").setSource("other_field", 1).get(); - client().prepareIndex("test_date_nanos").setId("4").setSource("mydate", "2021-03-01").get(); - client().prepareIndex("test_date_nanos").setId("5").setSource("mydate", "2021-04-01").get(); - client().prepareIndex("test_date_nanos").setId("6").setSource("other_field", 2).get(); + prepareIndex("test_date").setId("1").setSource("mydate", "2021-01-01").get(); + prepareIndex("test_date").setId("2").setSource("mydate", "2021-02-01").get(); + prepareIndex("test_date").setId("3").setSource("other_field", 1).get(); + prepareIndex("test_date_nanos").setId("4").setSource("mydate", "2021-03-01").get(); + prepareIndex("test_date_nanos").setId("5").setSource("mydate", "2021-04-01").get(); + prepareIndex("test_date_nanos").setId("6").setSource("other_field", 2).get(); refresh(); for (boolean withFormat : List.of(true, false)) { @@ -975,25 +1021,33 @@ public void testSortMissingDatesMixedTypes() throws IOException { } String index = "test*"; - SearchResponse searchResponse = prepareSearch(index).addSort( - SortBuilders.fieldSort("mydate").order(SortOrder.ASC).setFormat(format).setNumericType("date_nanos") - ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)).get(); - assertHitsInOrder(searchResponse, new String[] { "1", "2", "4", "5", "3", "6" }); - - searchResponse = prepareSearch(index).addSort( - 
SortBuilders.fieldSort("mydate").order(SortOrder.ASC).missing("_first").setFormat(format).setNumericType("date_nanos") - ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)).get(); - assertHitsInOrder(searchResponse, new String[] { "3", "6", "1", "2", "4", "5" }); - - searchResponse = prepareSearch(index).addSort( - SortBuilders.fieldSort("mydate").order(SortOrder.DESC).setFormat(format).setNumericType("date_nanos") - ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)).get(); - assertHitsInOrder(searchResponse, new String[] { "5", "4", "2", "1", "3", "6" }); - - searchResponse = prepareSearch(index).addSort( - SortBuilders.fieldSort("mydate").order(SortOrder.DESC).missing("_first").setFormat(format).setNumericType("date_nanos") - ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)).get(); - assertHitsInOrder(searchResponse, new String[] { "3", "6", "5", "4", "2", "1" }); + assertResponse( + prepareSearch(index).addSort( + SortBuilders.fieldSort("mydate").order(SortOrder.ASC).setFormat(format).setNumericType("date_nanos") + ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)), + response -> assertHitsInOrder(response, new String[] { "1", "2", "4", "5", "3", "6" }) + ); + + assertResponse( + prepareSearch(index).addSort( + SortBuilders.fieldSort("mydate").order(SortOrder.ASC).missing("_first").setFormat(format).setNumericType("date_nanos") + ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)), + response -> assertHitsInOrder(response, new String[] { "3", "6", "1", "2", "4", "5" }) + ); + + assertResponse( + prepareSearch(index).addSort( + SortBuilders.fieldSort("mydate").order(SortOrder.DESC).setFormat(format).setNumericType("date_nanos") + ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)), + response -> assertHitsInOrder(response, new String[] { "5", "4", "2", "1", "3", "6" }) + ); + + assertResponse( + prepareSearch(index).addSort( + SortBuilders.fieldSort("mydate").order(SortOrder.DESC).missing("_first").setFormat(format).setNumericType("date_nanos") + ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)), + response -> assertHitsInOrder(response, new String[] { "3", "6", "5", "4", "2", "1" }) + ); } } @@ -1010,15 +1064,16 @@ private void assertHitsInOrder(SearchResponse response, String[] expectedIds) { public void testIgnoreUnmapped() throws Exception { createIndex("test"); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("id", "1").field("i_value", -1).field("d_value", -1.1).endObject()) .get(); logger.info("--> sort with an unmapped field, verify it fails"); try { - SearchResponse result = prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("kkk")).get(); - assertThat("Expected exception but returned with", result, nullValue()); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("kkk")), + response -> assertThat("Expected exception but returned with", response, nullValue()) + ); } catch (SearchPhaseExecutionException e) { // we check that it's a parse failure rather than a different shard failure for (ShardSearchFailure shardSearchFailure : e.shardFailures()) { @@ -1084,8 +1139,7 @@ public void testSortMVField() throws Exception { ); ensureGreen(); - client().prepareIndex("test") - .setId(Integer.toString(1)) + prepareIndex("test").setId(Integer.toString(1)) .setSource( jsonBuilder().startObject() .array("long_values", 1L, 5L, 
10L, 8L) @@ -1098,8 +1152,7 @@ public void testSortMVField() throws Exception { .endObject() ) .get(); - client().prepareIndex("test") - .setId(Integer.toString(2)) + prepareIndex("test").setId(Integer.toString(2)) .setSource( jsonBuilder().startObject() .array("long_values", 11L, 15L, 20L, 7L) @@ -1112,8 +1165,7 @@ public void testSortMVField() throws Exception { .endObject() ) .get(); - client().prepareIndex("test") - .setId(Integer.toString(3)) + prepareIndex("test").setId(Integer.toString(3)) .setSource( jsonBuilder().startObject() .array("long_values", 2L, 1L, 3L, -4L) @@ -1129,252 +1181,243 @@ public void testSortMVField() throws Exception { refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("long_values", SortOrder.ASC).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(-4L)); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("long_values", SortOrder.ASC), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(1L)); + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(-4L)); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(7L)); + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(1L)); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("long_values", SortOrder.DESC).get(); + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(7L)); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("long_values", SortOrder.DESC), response -> { - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(20L)); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(10L)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(3L)); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .setSize(10) - .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode(SortMode.SUM)) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - 
assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(53L)); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(24L)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(2L)); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .setSize(10) - .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode(SortMode.AVG)) - .get(); + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(20L)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(13L)); + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(10L)); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(6L)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(1L)); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .setSize(10) - .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode(SortMode.MEDIAN)) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(13L)); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(7L)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(2L)); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("int_values", SortOrder.ASC).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(-4)); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) 
searchResponse.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(1)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(7)); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("int_values", SortOrder.DESC).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(20)); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(10)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(3)); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("short_values", SortOrder.ASC).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(-4)); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(1)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(7)); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("short_values", SortOrder.DESC).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(20)); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(10)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(3)); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("byte_values", SortOrder.ASC).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(-4)); - - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(1)); - - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) 
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("byte_values", SortOrder.DESC).get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(20));
-
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(10));
-
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(3));
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("float_values", SortOrder.ASC).get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3)));
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).floatValue(), equalTo(-4f));
-
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).floatValue(), equalTo(1f));
-
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2)));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(7f));
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("float_values", SortOrder.DESC).get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).floatValue(), equalTo(20f));
-
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).floatValue(), equalTo(10f));
-
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(3f));
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("double_values", SortOrder.ASC).get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3)));
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(-4d));
-
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(1d));
-
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2)));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), equalTo(7d));
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("double_values", SortOrder.DESC).get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(20d));
-
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(10d));
-
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), equalTo(3d));
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("string_values", SortOrder.ASC).get();
+            assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
+            assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(3L));
+        });
+        assertResponse(
+            prepareSearch().setQuery(matchAllQuery())
+                .setSize(10)
+                .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode(SortMode.SUM)),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+                assertThat(response.getHits().getHits().length, equalTo(3));
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+                assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
+                assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(53L));
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3)));
-        assertThat(searchResponse.getHits().getAt(0).getSortValues()[0], equalTo("!4"));
+                assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+                assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(24L));
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(searchResponse.getHits().getAt(1).getSortValues()[0], equalTo("01"));
+                assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
+                assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(2L));
+            }
+        );
+        assertResponse(
+            prepareSearch().setQuery(matchAllQuery())
+                .setSize(10)
+                .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode(SortMode.AVG)),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+                assertThat(response.getHits().getHits().length, equalTo(3));
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2)));
-        assertThat(searchResponse.getHits().getAt(2).getSortValues()[0], equalTo("07"));
+                assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
+                assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(13L));
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("string_values", SortOrder.DESC).get();
+                assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+                assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(6L));
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+                assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
+                assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(1L));
+            }
+        );
+        assertResponse(
+            prepareSearch().setQuery(matchAllQuery())
+                .setSize(10)
+                .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode(SortMode.MEDIAN)),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+                assertThat(response.getHits().getHits().length, equalTo(3));
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
-        assertThat(searchResponse.getHits().getAt(0).getSortValues()[0], equalTo("20"));
+                assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
+                assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(13L));
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(searchResponse.getHits().getAt(1).getSortValues()[0], equalTo("10"));
+                assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+                assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(7L));
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
-        assertThat(searchResponse.getHits().getAt(2).getSortValues()[0], equalTo("03"));
+                assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
+                assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(2L));
+            }
+        );
+        assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("int_values", SortOrder.ASC), response -> {
+            assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+            assertThat(response.getHits().getHits().length, equalTo(3));
+
+            assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3)));
+            assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(-4));
+
+            assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+            assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(1));
+
+            assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2)));
+            assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(7));
+        });
+        assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("int_values", SortOrder.DESC), response -> {
+            assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+            assertThat(response.getHits().getHits().length, equalTo(3));
+
+            assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
+            assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(20));
+
+            assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+            assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(10));
+
+            assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
+            assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(3));
+        });
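+        // short_values and byte_values repeat the int_values expectations at narrower integral widths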
+        assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("short_values", SortOrder.ASC), response -> {
+            assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+            assertThat(response.getHits().getHits().length, equalTo(3));
+
+            assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3)));
+            assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(-4));
+
+            assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+            assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(1));
+
+            assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2)));
+            assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(7));
+        });
+        assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("short_values", SortOrder.DESC), response -> {
+            assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+            assertThat(response.getHits().getHits().length, equalTo(3));
+
+            assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
+            assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(20));
+
+            assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+            assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(10));
+
+            assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
+            assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(3));
+        });
+        assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("byte_values", SortOrder.ASC), response -> {
+            assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+            assertThat(response.getHits().getHits().length, equalTo(3));
+
+            assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3)));
+            assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(-4));
+
+            assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+            assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(1));
+
+            assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2)));
+            assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(7));
+        });
+        assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("byte_values", SortOrder.DESC), response -> {
+            assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+            assertThat(response.getHits().getHits().length, equalTo(3));
+
+            assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
+            assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(20));
+
+            assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+            assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(10));
+
+            assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
+            assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(3));
+        });
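+        // float_values and double_values assert the same ordering with floating-point sort values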
+        assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("float_values", SortOrder.ASC), response -> {
+            assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+            assertThat(response.getHits().getHits().length, equalTo(3));
+
+            assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3)));
+            assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).floatValue(), equalTo(-4f));
+
+            assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+            assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).floatValue(), equalTo(1f));
+
+            assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2)));
+            assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(7f));
+        });
+        assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("float_values", SortOrder.DESC), response -> {
+            assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+            assertThat(response.getHits().getHits().length, equalTo(3));
+
+            assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
+            assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).floatValue(), equalTo(20f));
+
+            assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+            assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).floatValue(), equalTo(10f));
+
+            assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
+            assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(3f));
+        });
+        assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("double_values", SortOrder.ASC), response -> {
+            assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+            assertThat(response.getHits().getHits().length, equalTo(3));
+
+            assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3)));
+            assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(-4d));
+
+            assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+            assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(1d));
+
+            assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2)));
+            assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), equalTo(7d));
+        });
+        assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("double_values", SortOrder.DESC), response -> {
+            assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+            assertThat(response.getHits().getHits().length, equalTo(3));
+
+            assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
+            assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(20d));
+
+            assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+            assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(10d));
+
+            assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
+            assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), equalTo(3d));
+        });
+        assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("string_values", SortOrder.ASC), response -> {
+            assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+            assertThat(response.getHits().getHits().length, equalTo(3));
+
+            assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3)));
+            assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo("!4"));
+
+            assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+            assertThat(response.getHits().getAt(1).getSortValues()[0], equalTo("01"));
+
+            assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2)));
+            assertThat(response.getHits().getAt(2).getSortValues()[0], equalTo("07"));
+        });
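+        // DESC on a multi-valued string field compares each document's max term: "20" > "10" > "03"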
+        assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("string_values", SortOrder.DESC), response -> {
+            assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+            assertThat(response.getHits().getHits().length, equalTo(3));
+
+            assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
+            assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo("20"));
+
+            assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+            assertThat(response.getHits().getAt(1).getSortValues()[0], equalTo("10"));
+
+            assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
+            assertThat(response.getHits().getAt(2).getSortValues()[0], equalTo("03"));
+        });
     }
 
     public void testSortOnRareField() throws IOException {
@@ -1393,86 +1436,80 @@ public void testSortOnRareField() throws IOException {
             )
         );
         ensureGreen();
-        client().prepareIndex("test")
-            .setId(Integer.toString(1))
+        prepareIndex("test").setId(Integer.toString(1))
             .setSource(jsonBuilder().startObject().array("string_values", "01", "05", "10", "08").endObject())
             .get();
 
         refresh();
-        SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(3).addSort("string_values", SortOrder.DESC).get();
-
-        assertThat(searchResponse.getHits().getHits().length, equalTo(1));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(1)));
-        assertThat(searchResponse.getHits().getAt(0).getSortValues()[0], equalTo("10"));
+        assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(3).addSort("string_values", SortOrder.DESC), response -> {
+            assertThat(response.getHits().getHits().length, equalTo(1));
-        client().prepareIndex("test")
-            .setId(Integer.toString(2))
+            assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(1)));
+            assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo("10"));
+        });
+        prepareIndex("test").setId(Integer.toString(2))
             .setSource(jsonBuilder().startObject().array("string_values", "11", "15", "20", "07").endObject())
             .get();
 
         for (int i = 0; i < 15; i++) {
-            client().prepareIndex("test")
-                .setId(Integer.toString(300 + i))
prepareIndex("test").setId(Integer.toString(300 + i)) .setSource(jsonBuilder().startObject().array("some_other_field", "foobar").endObject()) .get(); } refresh(); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(3).addSort("string_values", SortOrder.DESC).get(); - - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(3).addSort("string_values", SortOrder.DESC), response -> { - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0], equalTo("20")); + assertThat(response.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0], equalTo("10")); + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo("20")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0], equalTo("03")); + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(response.getHits().getAt(1).getSortValues()[0], equalTo("10")); + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(response.getHits().getAt(2).getSortValues()[0], equalTo("03")); + }); for (int i = 0; i < 15; i++) { - client().prepareIndex("test") - .setId(Integer.toString(300 + i)) + prepareIndex("test").setId(Integer.toString(300 + i)) .setSource(jsonBuilder().startObject().array("some_other_field", "foobar").endObject()) .get(); refresh(); } - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(3).addSort("string_values", SortOrder.DESC).get(); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(3).addSort("string_values", SortOrder.DESC), response -> { - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0], equalTo("20")); + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo("20")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0], equalTo("10")); + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(response.getHits().getAt(1).getSortValues()[0], equalTo("10")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0], equalTo("03")); + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(response.getHits().getAt(2).getSortValues()[0], equalTo("03")); + }); } public void testSortMetaField() throws Exception { @@ -1483,25 +1520,25 @@ public void testSortMetaField() throws Exception { final int numDocs = randomIntBetween(10, 20); IndexRequestBuilder[] indexReqs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; ++i) { - indexReqs[i] = client().prepareIndex("test").setId(Integer.toString(i)).setSource(); + indexReqs[i] = 
prepareIndex("test").setId(Integer.toString(i)).setSource(); } indexRandom(true, indexReqs); SortOrder order = randomFrom(SortOrder.values()); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .setSize(randomIntBetween(1, numDocs + 5)) - .addSort("_id", order) - .get(); - assertNoFailures(searchResponse); - SearchHit[] hits = searchResponse.getHits().getHits(); - BytesRef previous = order == SortOrder.ASC ? new BytesRef() : UnicodeUtil.BIG_TERM; - for (int i = 0; i < hits.length; ++i) { - String idString = hits[i].getId(); - final BytesRef id = new BytesRef(idString); - assertEquals(idString, hits[i].getSortValues()[0]); - assertThat(previous, order == SortOrder.ASC ? lessThan(id) : greaterThan(id)); - previous = id; - } + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()).setSize(randomIntBetween(1, numDocs + 5)).addSort("_id", order), + response -> { + SearchHit[] hits = response.getHits().getHits(); + BytesRef previous = order == SortOrder.ASC ? new BytesRef() : UnicodeUtil.BIG_TERM; + for (int i = 0; i < hits.length; ++i) { + String idString = hits[i].getId(); + final BytesRef id = new BytesRef(idString); + assertEquals(idString, hits[i].getSortValues()[0]); + assertThat(previous, order == SortOrder.ASC ? lessThan(id) : greaterThan(id)); + previous = id; + } + } + ); // assertWarnings(ID_FIELD_DATA_DEPRECATION_MESSAGE); } finally { // unset cluster setting @@ -1554,8 +1591,7 @@ public void testNestedSort() throws IOException, InterruptedException, Execution ); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .startArray("nested") @@ -1569,8 +1605,7 @@ public void testNestedSort() throws IOException, InterruptedException, Execution .endObject() ) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .startArray("nested") @@ -1588,59 +1623,64 @@ public void testNestedSort() throws IOException, InterruptedException, Execution // We sort on nested field - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("nested.foo").setNestedSort(new NestedSortBuilder("nested")).order(SortOrder.DESC)) - .get(); - assertNoFailures(searchResponse); - SearchHit[] hits = searchResponse.getHits().getHits(); - assertThat(hits.length, is(2)); - assertThat(hits[0].getSortValues().length, is(1)); - assertThat(hits[1].getSortValues().length, is(1)); - assertThat(hits[0].getSortValues()[0], is("cba")); - assertThat(hits[1].getSortValues()[0], is("bar")); - + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("nested.foo").setNestedSort(new NestedSortBuilder("nested")).order(SortOrder.DESC)), + response -> { + SearchHit[] hits = response.getHits().getHits(); + assertThat(hits.length, is(2)); + assertThat(hits[0].getSortValues().length, is(1)); + assertThat(hits[1].getSortValues().length, is(1)); + assertThat(hits[0].getSortValues()[0], is("cba")); + assertThat(hits[1].getSortValues()[0], is("bar")); + } + ); // We sort on nested fields with max_children limit - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("nested.foo").setNestedSort(new NestedSortBuilder("nested").setMaxChildren(1)).order(SortOrder.DESC) - ) - .get(); - assertNoFailures(searchResponse); - hits = searchResponse.getHits().getHits(); - assertThat(hits.length, is(2)); - 
-        assertThat(hits[0].getSortValues().length, is(1));
-        assertThat(hits[1].getSortValues().length, is(1));
-        assertThat(hits[0].getSortValues()[0], is("bar"));
-        assertThat(hits[1].getSortValues()[0], is("abc"));
-
-        {
-            SearchPhaseExecutionException exc = expectThrows(
-                SearchPhaseExecutionException.class,
-                () -> prepareSearch().setQuery(matchAllQuery())
-                    .addSort(
-                        SortBuilders.fieldSort("nested.bar.foo")
-                            .setNestedSort(
-                                new NestedSortBuilder("nested").setNestedSort(new NestedSortBuilder("nested.bar").setMaxChildren(1))
+        assertNoFailuresAndResponse(
+            prepareSearch().setQuery(matchAllQuery())
+                .addSort(
+                    SortBuilders.fieldSort("nested.foo")
+                        .setNestedSort(new NestedSortBuilder("nested").setMaxChildren(1))
+                        .order(SortOrder.DESC)
+                ),
+            response -> {
+                SearchHit[] hits = response.getHits().getHits();
+                assertThat(hits.length, is(2));
+                assertThat(hits[0].getSortValues().length, is(1));
+                assertThat(hits[1].getSortValues().length, is(1));
+                assertThat(hits[0].getSortValues()[0], is("bar"));
+                assertThat(hits[1].getSortValues()[0], is("abc"));
+
+                {
+                    SearchPhaseExecutionException exc = expectThrows(
+                        SearchPhaseExecutionException.class,
+                        () -> prepareSearch().setQuery(matchAllQuery())
+                            .addSort(
+                                SortBuilders.fieldSort("nested.bar.foo")
+                                    .setNestedSort(
+                                        new NestedSortBuilder("nested").setNestedSort(new NestedSortBuilder("nested.bar").setMaxChildren(1))
+                                    )
+                                    .order(SortOrder.DESC)
                             )
-                            .order(SortOrder.DESC)
-                    )
-                    .get()
-            );
-            assertThat(exc.toString(), containsString("max_children is only supported on top level of nested sort"));
-        }
-
+                            .get()
+                    );
+                    assertThat(exc.toString(), containsString("max_children is only supported on top level of nested sort"));
+                }
+            }
+        );
         // We sort on nested sub field
-        searchResponse = prepareSearch().setQuery(matchAllQuery())
-            .addSort(SortBuilders.fieldSort("nested.foo.sub").setNestedSort(new NestedSortBuilder("nested")).order(SortOrder.DESC))
-            .get();
-        assertNoFailures(searchResponse);
-        hits = searchResponse.getHits().getHits();
-        assertThat(hits.length, is(2));
-        assertThat(hits[0].getSortValues().length, is(1));
-        assertThat(hits[1].getSortValues().length, is(1));
-        assertThat(hits[0].getSortValues()[0], is("cba bca"));
-        assertThat(hits[1].getSortValues()[0], is("bar bar"));
-
+        assertNoFailuresAndResponse(
+            prepareSearch().setQuery(matchAllQuery())
+                .addSort(SortBuilders.fieldSort("nested.foo.sub").setNestedSort(new NestedSortBuilder("nested")).order(SortOrder.DESC)),
+            response -> {
+                SearchHit[] hits = response.getHits().getHits();
+                assertThat(hits.length, is(2));
+                assertThat(hits[0].getSortValues().length, is(1));
+                assertThat(hits[1].getSortValues().length, is(1));
+                assertThat(hits[0].getSortValues()[0], is("cba bca"));
+                assertThat(hits[1].getSortValues()[0], is("bar bar"));
+            }
+        );
         // missing nested path
         SearchPhaseExecutionException exc = expectThrows(
             SearchPhaseExecutionException.class,
@@ -1664,7 +1704,7 @@ public void testSortDuelBetweenSingleShardAndMultiShardIndex() throws Exception
         for (String index : new String[] { "test1", "test2" }) {
             List<IndexRequestBuilder> docs = new ArrayList<>();
             for (int i = 0; i < 256; i++) {
-                docs.add(client().prepareIndex(index).setId(Integer.toString(i)).setSource(sortField, i));
+                docs.add(prepareIndex(index).setId(Integer.toString(i)).setSource(sortField, i));
             }
             indexRandom(true, docs);
         }
@@ -1673,20 +1713,26 @@ public void testSortDuelBetweenSingleShardAndMultiShardIndex() throws Exception
            SortOrder order = randomBoolean() ? SortOrder.ASC : SortOrder.DESC;
            int from = between(0, 256);
            int size = between(0, 256);
-            SearchResponse multiShardResponse = prepareSearch("test1").setFrom(from).setSize(size).addSort(sortField, order).get();
-            assertNoFailures(multiShardResponse);
-            SearchResponse singleShardResponse = prepareSearch("test2").setFrom(from).setSize(size).addSort(sortField, order).get();
-            assertNoFailures(singleShardResponse);
-
-            assertThat(multiShardResponse.getHits().getTotalHits().value, equalTo(singleShardResponse.getHits().getTotalHits().value));
-            assertThat(multiShardResponse.getHits().getHits().length, equalTo(singleShardResponse.getHits().getHits().length));
-            for (int i = 0; i < multiShardResponse.getHits().getHits().length; i++) {
-                assertThat(
-                    multiShardResponse.getHits().getAt(i).getSortValues()[0],
-                    equalTo(singleShardResponse.getHits().getAt(i).getSortValues()[0])
-                );
-                assertThat(multiShardResponse.getHits().getAt(i).getId(), equalTo(singleShardResponse.getHits().getAt(i).getId()));
-            }
+            assertNoFailuresAndResponse(
+                prepareSearch("test1").setFrom(from).setSize(size).addSort(sortField, order),
+                multiShardResponse -> assertNoFailuresAndResponse(
+                    prepareSearch("test2").setFrom(from).setSize(size).addSort(sortField, order),
+                    singleShardResponse -> {
+                        assertThat(
+                            multiShardResponse.getHits().getTotalHits().value,
+                            equalTo(singleShardResponse.getHits().getTotalHits().value)
+                        );
+                        assertThat(multiShardResponse.getHits().getHits().length, equalTo(singleShardResponse.getHits().getHits().length));
+                        for (int i = 0; i < multiShardResponse.getHits().getHits().length; i++) {
+                            assertThat(
+                                multiShardResponse.getHits().getAt(i).getSortValues()[0],
+                                equalTo(singleShardResponse.getHits().getAt(i).getSortValues()[0])
+                            );
+                            assertThat(multiShardResponse.getHits().getAt(i).getId(), equalTo(singleShardResponse.getHits().getAt(i).getId()));
+                        }
+                    }
+                )
+            );
     }
 
     public void testCustomFormat() throws Exception {
@@ -1696,21 +1742,23 @@ public void testCustomFormat() throws Exception {
         assertAcked(prepareCreate("test").setMapping("ip", "type=ip"));
         indexRandom(
             true,
-            client().prepareIndex("test").setId("1").setSource("ip", "192.168.1.7"),
-            client().prepareIndex("test").setId("2").setSource("ip", "2001:db8::ff00:42:8329")
+            prepareIndex("test").setId("1").setSource("ip", "192.168.1.7"),
+            prepareIndex("test").setId("2").setSource("ip", "2001:db8::ff00:42:8329")
         );
 
-        SearchResponse response = prepareSearch("test").addSort(SortBuilders.fieldSort("ip")).get();
-        assertNoFailures(response);
-        assertEquals(2, response.getHits().getTotalHits().value);
-        assertArrayEquals(new String[] { "192.168.1.7" }, response.getHits().getAt(0).getSortValues());
-        assertArrayEquals(new String[] { "2001:db8::ff00:42:8329" }, response.getHits().getAt(1).getSortValues());
-
-        response = prepareSearch("test").addSort(SortBuilders.fieldSort("ip")).searchAfter(new Object[] { "192.168.1.7" }).get();
-        assertNoFailures(response);
-        assertEquals(2, response.getHits().getTotalHits().value);
-        assertEquals(1, response.getHits().getHits().length);
-        assertArrayEquals(new String[] { "2001:db8::ff00:42:8329" }, response.getHits().getAt(0).getSortValues());
+        assertNoFailuresAndResponse(prepareSearch("test").addSort(SortBuilders.fieldSort("ip")), response -> {
+            assertEquals(2, response.getHits().getTotalHits().value);
+            assertArrayEquals(new String[] { "192.168.1.7" }, response.getHits().getAt(0).getSortValues());
+            assertArrayEquals(new String[] { "2001:db8::ff00:42:8329" }, response.getHits().getAt(1).getSortValues());
+        });
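+        // search_after resumes after the given sort value, so only the second address remains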
+        assertNoFailuresAndResponse(
+            prepareSearch("test").addSort(SortBuilders.fieldSort("ip")).searchAfter(new Object[] { "192.168.1.7" }),
+            response -> {
+                assertEquals(2, response.getHits().getTotalHits().value);
+                assertEquals(1, response.getHits().getHits().length);
+                assertArrayEquals(new String[] { "2001:db8::ff00:42:8329" }, response.getHits().getAt(0).getSortValues());
+            }
+        );
     }
 
     public void testScriptFieldSort() throws Exception {
@@ -1720,7 +1768,7 @@ public void testScriptFieldSort() throws Exception {
         IndexRequestBuilder[] indexReqs = new IndexRequestBuilder[numDocs];
         List<String> keywords = new ArrayList<>();
         for (int i = 0; i < numDocs; ++i) {
-            indexReqs[i] = client().prepareIndex("test").setSource("number", i, "keyword", Integer.toString(i));
+            indexReqs[i] = prepareIndex("test").setSource("number", i, "keyword", Integer.toString(i));
             keywords.add(Integer.toString(i));
         }
         Collections.sort(keywords);
@@ -1728,34 +1776,38 @@ public void testScriptFieldSort() throws Exception {
         {
             Script script = new Script(ScriptType.INLINE, NAME, "doc['number'].value", Collections.emptyMap());
-            SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery())
-                .setSize(randomIntBetween(1, numDocs + 5))
-                .addSort(SortBuilders.scriptSort(script, ScriptSortBuilder.ScriptSortType.NUMBER))
-                .addSort(SortBuilders.scoreSort())
-                .get();
-
-            double expectedValue = 0;
-            for (SearchHit hit : searchResponse.getHits()) {
-                assertThat(hit.getSortValues().length, equalTo(2));
-                assertThat(hit.getSortValues()[0], equalTo(expectedValue++));
-                assertThat(hit.getSortValues()[1], equalTo(1f));
-            }
+            assertResponse(
+                prepareSearch().setQuery(matchAllQuery())
+                    .setSize(randomIntBetween(1, numDocs + 5))
+                    .addSort(SortBuilders.scriptSort(script, ScriptSortBuilder.ScriptSortType.NUMBER))
+                    .addSort(SortBuilders.scoreSort()),
+                response -> {
+                    double expectedValue = 0;
+                    for (SearchHit hit : response.getHits()) {
+                        assertThat(hit.getSortValues().length, equalTo(2));
+                        assertThat(hit.getSortValues()[0], equalTo(expectedValue++));
+                        assertThat(hit.getSortValues()[1], equalTo(1f));
                    }
+                }
+            );
         }
         {
             Script script = new Script(ScriptType.INLINE, NAME, "doc['keyword'].value", Collections.emptyMap());
-            SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery())
-                .setSize(randomIntBetween(1, numDocs + 5))
-                .addSort(SortBuilders.scriptSort(script, ScriptSortBuilder.ScriptSortType.STRING))
-                .addSort(SortBuilders.scoreSort())
-                .get();
-
-            int expectedValue = 0;
-            for (SearchHit hit : searchResponse.getHits()) {
-                assertThat(hit.getSortValues().length, equalTo(2));
-                assertThat(hit.getSortValues()[0], equalTo(keywords.get(expectedValue++)));
-                assertThat(hit.getSortValues()[1], equalTo(1f));
-            }
+            assertResponse(
+                prepareSearch().setQuery(matchAllQuery())
+                    .setSize(randomIntBetween(1, numDocs + 5))
+                    .addSort(SortBuilders.scriptSort(script, ScriptSortBuilder.ScriptSortType.STRING))
+                    .addSort(SortBuilders.scoreSort()),
+                response -> {
+                    int expectedValue = 0;
+                    for (SearchHit hit : response.getHits()) {
+                        assertThat(hit.getSortValues().length, equalTo(2));
+                        assertThat(hit.getSortValues()[0], equalTo(keywords.get(expectedValue++)));
+                        assertThat(hit.getSortValues()[1], equalTo(1f));
                    }
+                }
+            );
         }
     }
 
@@ -1767,21 +1819,22 @@ public void testFieldAlias() throws Exception {
         ensureGreen("old_index", "new_index");
 
         List<IndexRequestBuilder> builders = new ArrayList<>();
-        builders.add(client().prepareIndex("old_index").setSource("distance", 42.0));
-        builders.add(client().prepareIndex("old_index").setSource("distance", 50.5));
-        builders.add(client().prepareIndex("new_index").setSource("route_length_miles", 100.2));
+        builders.add(prepareIndex("old_index").setSource("distance", 42.0));
+        builders.add(prepareIndex("old_index").setSource("distance", 50.5));
+        builders.add(prepareIndex("new_index").setSource("route_length_miles", 100.2));
         indexRandom(true, true, builders);
 
-        SearchResponse response = prepareSearch().setQuery(matchAllQuery())
-            .setSize(builders.size())
-            .addSort(SortBuilders.fieldSort("route_length_miles"))
-            .get();
-        SearchHits hits = response.getHits();
+        assertResponse(
+            prepareSearch().setQuery(matchAllQuery()).setSize(builders.size()).addSort(SortBuilders.fieldSort("route_length_miles")),
+            response -> {
+                SearchHits hits = response.getHits();
-        assertEquals(3, hits.getHits().length);
-        assertEquals(42.0, hits.getAt(0).getSortValues()[0]);
-        assertEquals(50.5, hits.getAt(1).getSortValues()[0]);
-        assertEquals(100.2, hits.getAt(2).getSortValues()[0]);
+                assertEquals(3, hits.getHits().length);
+                assertEquals(42.0, hits.getAt(0).getSortValues()[0]);
+                assertEquals(50.5, hits.getAt(1).getSortValues()[0]);
+                assertEquals(100.2, hits.getAt(2).getSortValues()[0]);
+            }
+        );
     }
 
     public void testFieldAliasesWithMissingValues() throws Exception {
@@ -1792,21 +1845,24 @@ public void testFieldAliasesWithMissingValues() throws Exception {
         ensureGreen("old_index", "new_index");
 
         List<IndexRequestBuilder> builders = new ArrayList<>();
-        builders.add(client().prepareIndex("old_index").setSource("distance", 42.0));
-        builders.add(client().prepareIndex("old_index").setSource(Collections.emptyMap()));
-        builders.add(client().prepareIndex("new_index").setSource("route_length_miles", 100.2));
+        builders.add(prepareIndex("old_index").setSource("distance", 42.0));
+        builders.add(prepareIndex("old_index").setSource(Collections.emptyMap()));
+        builders.add(prepareIndex("new_index").setSource("route_length_miles", 100.2));
         indexRandom(true, true, builders);
 
-        SearchResponse response = prepareSearch().setQuery(matchAllQuery())
-            .setSize(builders.size())
-            .addSort(SortBuilders.fieldSort("route_length_miles").missing(120.3))
-            .get();
-        SearchHits hits = response.getHits();
-
-        assertEquals(3, hits.getHits().length);
-        assertEquals(42.0, hits.getAt(0).getSortValues()[0]);
-        assertEquals(100.2, hits.getAt(1).getSortValues()[0]);
-        assertEquals(120.3, hits.getAt(2).getSortValues()[0]);
+        assertResponse(
+            prepareSearch().setQuery(matchAllQuery())
+                .setSize(builders.size())
+                .addSort(SortBuilders.fieldSort("route_length_miles").missing(120.3)),
+            response -> {
+                SearchHits hits = response.getHits();
+
+                assertEquals(3, hits.getHits().length);
+                assertEquals(42.0, hits.getAt(0).getSortValues()[0]);
+                assertEquals(100.2, hits.getAt(1).getSortValues()[0]);
+                assertEquals(120.3, hits.getAt(2).getSortValues()[0]);
+            }
+        );
     }
 
     public void testCastNumericType() throws Exception {
@@ -1816,40 +1872,46 @@ public void testCastNumericType() throws Exception {
         ensureGreen("index_double", "index_long", "index_float");
 
         List<IndexRequestBuilder> builders = new ArrayList<>();
-        builders.add(client().prepareIndex("index_double").setSource("field", 12.6));
-        builders.add(client().prepareIndex("index_long").setSource("field", 12));
-        builders.add(client().prepareIndex("index_float").setSource("field", 12.1));
+        builders.add(prepareIndex("index_double").setSource("field", 12.6));
+        builders.add(prepareIndex("index_long").setSource("field", 12));
+        builders.add(prepareIndex("index_float").setSource("field", 12.1));
         indexRandom(true, true, builders);
         {
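+            // with the sort cast to "long", all three docs (12.6, 12, 12.1) produce the sort value 12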
-            SearchResponse response = prepareSearch().setQuery(matchAllQuery())
-                .setSize(builders.size())
-                .addSort(SortBuilders.fieldSort("field").setNumericType("long"))
-                .get();
-            SearchHits hits = response.getHits();
-
-            assertEquals(3, hits.getHits().length);
-            for (int i = 0; i < 3; i++) {
-                assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Long.class));
-            }
-            assertEquals(12L, hits.getAt(0).getSortValues()[0]);
-            assertEquals(12L, hits.getAt(1).getSortValues()[0]);
-            assertEquals(12L, hits.getAt(2).getSortValues()[0]);
+            assertResponse(
+                prepareSearch().setQuery(matchAllQuery())
+                    .setSize(builders.size())
+                    .addSort(SortBuilders.fieldSort("field").setNumericType("long")),
+                response -> {
+                    SearchHits hits = response.getHits();
+
+                    assertEquals(3, hits.getHits().length);
+                    for (int i = 0; i < 3; i++) {
+                        assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Long.class));
                    }
+                    assertEquals(12L, hits.getAt(0).getSortValues()[0]);
+                    assertEquals(12L, hits.getAt(1).getSortValues()[0]);
+                    assertEquals(12L, hits.getAt(2).getSortValues()[0]);
+                }
+            );
         }
         {
-            SearchResponse response = prepareSearch().setQuery(matchAllQuery())
-                .setSize(builders.size())
-                .addSort(SortBuilders.fieldSort("field").setNumericType("double"))
-                .get();
-            SearchHits hits = response.getHits();
-            assertEquals(3, hits.getHits().length);
-            for (int i = 0; i < 3; i++) {
-                assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Double.class));
-            }
-            assertEquals(12D, hits.getAt(0).getSortValues()[0]);
-            assertEquals(12.1D, (double) hits.getAt(1).getSortValues()[0], 0.001f);
-            assertEquals(12.6D, hits.getAt(2).getSortValues()[0]);
+            assertResponse(
+                prepareSearch().setQuery(matchAllQuery())
+                    .setSize(builders.size())
+                    .addSort(SortBuilders.fieldSort("field").setNumericType("double")),
+                response -> {
+                    SearchHits hits = response.getHits();
+                    assertEquals(3, hits.getHits().length);
+                    for (int i = 0; i < 3; i++) {
+                        assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Double.class));
                    }
+                    assertEquals(12D, hits.getAt(0).getSortValues()[0]);
+                    assertEquals(12.1D, (double) hits.getAt(1).getSortValues()[0], 0.001f);
+                    assertEquals(12.6D, hits.getAt(2).getSortValues()[0]);
+                }
+            );
         }
     }
 
@@ -1859,105 +1921,119 @@ public void testCastDate() throws Exception {
         ensureGreen("index_date", "index_date_nanos");
 
         List<IndexRequestBuilder> builders = new ArrayList<>();
-        builders.add(client().prepareIndex("index_date").setSource("field", "2024-04-11T23:47:17"));
-        builders.add(client().prepareIndex("index_date_nanos").setSource("field", "2024-04-11T23:47:16.854775807Z"));
+        builders.add(prepareIndex("index_date").setSource("field", "2024-04-11T23:47:17"));
+        builders.add(prepareIndex("index_date_nanos").setSource("field", "2024-04-11T23:47:16.854775807Z"));
         indexRandom(true, true, builders);
         {
-            SearchResponse response = prepareSearch().setQuery(matchAllQuery())
-                .setSize(2)
-                .addSort(SortBuilders.fieldSort("field").setNumericType("date"))
-                .get();
-            SearchHits hits = response.getHits();
-
-            assertEquals(2, hits.getHits().length);
-            for (int i = 0; i < 2; i++) {
-                assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Long.class));
-            }
-            assertEquals(1712879236854L, hits.getAt(0).getSortValues()[0]);
-            assertEquals(1712879237000L, hits.getAt(1).getSortValues()[0]);
-
-            response = prepareSearch().setMaxConcurrentShardRequests(1)
-                .setQuery(matchAllQuery())
-                .setSize(1)
-                .addSort(SortBuilders.fieldSort("field").setNumericType("date"))
-                .get();
-            hits = response.getHits();
-
-            assertEquals(1, hits.getHits().length);
-            assertThat(hits.getAt(0).getSortValues()[0].getClass(), equalTo(Long.class));
-            assertEquals(1712879236854L, hits.getAt(0).getSortValues()[0]);
-
-            response = prepareSearch().setMaxConcurrentShardRequests(1)
-                .setQuery(matchAllQuery())
-                .setSize(1)
-                .addSort(SortBuilders.fieldSort("field").order(SortOrder.DESC).setNumericType("date"))
-                .get();
-            hits = response.getHits();
-
-            assertEquals(1, hits.getHits().length);
-            assertThat(hits.getAt(0).getSortValues()[0].getClass(), equalTo(Long.class));
-            assertEquals(1712879237000L, hits.getAt(0).getSortValues()[0]);
+            assertResponse(
+                prepareSearch().setQuery(matchAllQuery()).setSize(2).addSort(SortBuilders.fieldSort("field").setNumericType("date")),
+                response -> {
+                    SearchHits hits = response.getHits();
+
+                    assertEquals(2, hits.getHits().length);
+                    for (int i = 0; i < 2; i++) {
+                        assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Long.class));
                    }
+                    assertEquals(1712879236854L, hits.getAt(0).getSortValues()[0]);
+                    assertEquals(1712879237000L, hits.getAt(1).getSortValues()[0]);
+                }
+            );
+            assertResponse(
+                prepareSearch().setMaxConcurrentShardRequests(1)
+                    .setQuery(matchAllQuery())
+                    .setSize(1)
+                    .addSort(SortBuilders.fieldSort("field").setNumericType("date")),
+                response -> {
+                    SearchHits hits = response.getHits();
+
+                    assertEquals(1, hits.getHits().length);
+                    assertThat(hits.getAt(0).getSortValues()[0].getClass(), equalTo(Long.class));
+                    assertEquals(1712879236854L, hits.getAt(0).getSortValues()[0]);
+                }
+            );
+            assertResponse(
+                prepareSearch().setMaxConcurrentShardRequests(1)
+                    .setQuery(matchAllQuery())
+                    .setSize(1)
+                    .addSort(SortBuilders.fieldSort("field").order(SortOrder.DESC).setNumericType("date")),
+                response -> {
+                    SearchHits hits = response.getHits();
+
+                    assertEquals(1, hits.getHits().length);
+                    assertThat(hits.getAt(0).getSortValues()[0].getClass(), equalTo(Long.class));
+                    assertEquals(1712879237000L, hits.getAt(0).getSortValues()[0]);
+                }
+            );
        }
        {
+            assertResponse(
+                prepareSearch().setQuery(matchAllQuery()).setSize(2).addSort(SortBuilders.fieldSort("field").setNumericType("date_nanos")),
+                response -> {
+                    SearchHits hits = response.getHits();
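+                    // "date_nanos" keeps nanosecond resolution, so the nanos doc sorts with its full ...854775807 value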
+                    assertEquals(2, hits.getHits().length);
+                    for (int i = 0; i < 2; i++) {
+                        assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Long.class));
                    }
+                    assertEquals(1712879236854775807L, hits.getAt(0).getSortValues()[0]);
+                    assertEquals(1712879237000000000L, hits.getAt(1).getSortValues()[0]);
+                }
+            );
+            assertResponse(
+                prepareSearch().setMaxConcurrentShardRequests(1)
+                    .setQuery(matchAllQuery())
+                    .setSize(1)
+                    .addSort(SortBuilders.fieldSort("field").setNumericType("date_nanos")),
+                response -> {
+                    SearchHits hits = response.getHits();
+                    assertEquals(1, hits.getHits().length);
+                    assertThat(hits.getAt(0).getSortValues()[0].getClass(), equalTo(Long.class));
+                    assertEquals(1712879236854775807L, hits.getAt(0).getSortValues()[0]);
+                }
+            );
+            assertResponse(
+                prepareSearch().setMaxConcurrentShardRequests(1)
+                    .setQuery(matchAllQuery())
+                    .setSize(1)
+                    .addSort(SortBuilders.fieldSort("field").order(SortOrder.DESC).setNumericType("date_nanos")),
+                response -> {
+                    SearchHits hits = response.getHits();
+                    assertEquals(1, hits.getHits().length);
+                    assertThat(hits.getAt(0).getSortValues()[0].getClass(), equalTo(Long.class));
+                    assertEquals(1712879237000000000L, hits.getAt(0).getSortValues()[0]);
+                }
+            );
        }
        {
            builders.clear();
-            builders.add(client().prepareIndex("index_date").setSource("field", "1905-04-11T23:47:17"));
+            builders.add(prepareIndex("index_date").setSource("field", "1905-04-11T23:47:17"));
            indexRandom(true, true, builders);
-            SearchResponse response = prepareSearch().setQuery(matchAllQuery())
-                .setSize(1)
-                .addSort(SortBuilders.fieldSort("field").setNumericType("date_nanos"))
-                .get();
-            assertNotNull(response.getShardFailures());
-            assertThat(response.getShardFailures().length, equalTo(1));
-            assertThat(response.getShardFailures()[0].toString(), containsString("are before the epoch in 1970"));
+            assertResponse(
+                prepareSearch().setQuery(matchAllQuery()).setSize(1).addSort(SortBuilders.fieldSort("field").setNumericType("date_nanos")),
+                response -> {
+                    assertNotNull(response.getShardFailures());
+                    assertThat(response.getShardFailures().length, equalTo(1));
+                    assertThat(response.getShardFailures()[0].toString(), containsString("are before the epoch in 1970"));
+                }
+            );
        }
        {
            builders.clear();
-            builders.add(client().prepareIndex("index_date").setSource("field", "2346-04-11T23:47:17"));
+            builders.add(prepareIndex("index_date").setSource("field", "2346-04-11T23:47:17"));
            indexRandom(true, true, builders);
-            SearchResponse response = prepareSearch().setQuery(QueryBuilders.rangeQuery("field").gt("1970-01-01"))
-                .setSize(10)
-                .addSort(SortBuilders.fieldSort("field").setNumericType("date_nanos"))
-                .get();
-            assertNotNull(response.getShardFailures());
-            assertThat(response.getShardFailures().length, equalTo(1));
-            assertThat(response.getShardFailures()[0].toString(), containsString("are after 2262"));
+            assertResponse(
+                prepareSearch().setQuery(QueryBuilders.rangeQuery("field").gt("1970-01-01"))
+                    .setSize(10)
+                    .addSort(SortBuilders.fieldSort("field").setNumericType("date_nanos")),
+                response -> {
+                    assertNotNull(response.getShardFailures());
+                    assertThat(response.getShardFailures().length, equalTo(1));
+                    assertThat(response.getShardFailures()[0].toString(), containsString("are after 2262"));
+                }
+            );
        }
    }
 
@@ -1991,33 +2067,39 @@ public void testLongSortOptimizationCorrectResults() {
                bulkBuilder = client().prepareBulk();
            }
            String source = "{\"long_field\":" + randomLong() + "}";
-            bulkBuilder.add(client().prepareIndex("test1").setId(Integer.toString(i)).setSource(source, XContentType.JSON));
+            bulkBuilder.add(prepareIndex("test1").setId(Integer.toString(i)).setSource(source, XContentType.JSON));
        }
        refresh();
 
        // *** 1. sort DESC on long_field
-        SearchResponse searchResponse = prepareSearch().addSort(new FieldSortBuilder("long_field").order(SortOrder.DESC)).setSize(10).get();
-        assertNoFailures(searchResponse);
-        long previousLong = Long.MAX_VALUE;
-        for (int i = 0; i < searchResponse.getHits().getHits().length; i++) {
-            // check the correct sort order
-            SearchHit hit = searchResponse.getHits().getHits()[i];
-            long currentLong = (long) hit.getSortValues()[0];
-            assertThat("sort order is incorrect", currentLong, lessThanOrEqualTo(previousLong));
-            previousLong = currentLong;
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch().addSort(new FieldSortBuilder("long_field").order(SortOrder.DESC)).setSize(10),
+            response -> {
+                long previousLong = Long.MAX_VALUE;
+                for (int i = 0; i < response.getHits().getHits().length; i++) {
+                    // check the correct sort order
+                    SearchHit hit = response.getHits().getHits()[i];
+                    long currentLong = (long) hit.getSortValues()[0];
+                    assertThat("sort order is incorrect", currentLong, lessThanOrEqualTo(previousLong));
+                    previousLong = currentLong;
                }
+            }
+        );
        // *** 2. sort ASC on long_field
-        searchResponse = prepareSearch().addSort(new FieldSortBuilder("long_field").order(SortOrder.ASC)).setSize(10).get();
-        assertNoFailures(searchResponse);
-        previousLong = Long.MIN_VALUE;
-        for (int i = 0; i < searchResponse.getHits().getHits().length; i++) {
-            // check the correct sort order
-            SearchHit hit = searchResponse.getHits().getHits()[i];
-            long currentLong = (long) hit.getSortValues()[0];
-            assertThat("sort order is incorrect", currentLong, greaterThanOrEqualTo(previousLong));
-            previousLong = currentLong;
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch().addSort(new FieldSortBuilder("long_field").order(SortOrder.ASC)).setSize(10),
+            response -> {
+                long previousLong = Long.MIN_VALUE;
+                for (int i = 0; i < response.getHits().getHits().length; i++) {
+                    // check the correct sort order
+                    SearchHit hit = response.getHits().getHits()[i];
+                    long currentLong = (long) hit.getSortValues()[0];
+                    assertThat("sort order is incorrect", currentLong, greaterThanOrEqualTo(previousLong));
+                    previousLong = currentLong;
                }
+            }
+        );
    }
 
    public void testSortMixedFieldTypes() {
@@ -2026,17 +2108,14 @@ public void testSortMixedFieldTypes() {
        assertAcked(prepareCreate("index_double").setMapping("foo", "type=double").get());
        assertAcked(prepareCreate("index_keyword").setMapping("foo", "type=keyword").get());
 
-        client().prepareIndex("index_long").setId("1").setSource("foo", "123").get();
-        client().prepareIndex("index_integer").setId("1").setSource("foo", "123").get();
-        client().prepareIndex("index_double").setId("1").setSource("foo", "123").get();
-        client().prepareIndex("index_keyword").setId("1").setSource("foo", "123").get();
+        prepareIndex("index_long").setId("1").setSource("foo", "123").get();
+        prepareIndex("index_integer").setId("1").setSource("foo", "123").get();
+        prepareIndex("index_double").setId("1").setSource("foo", "123").get();
+        prepareIndex("index_keyword").setId("1").setSource("foo", "123").get();
        refresh();
 
        {
            // mixing long and integer types is ok, as we convert integer sort to long sort
-            SearchResponse searchResponse = prepareSearch("index_long", "index_integer").addSort(new FieldSortBuilder("foo"))
-                .setSize(10)
-                .get();
-            assertNoFailures(searchResponse);
+            assertNoFailures(prepareSearch("index_long", "index_integer").addSort(new FieldSortBuilder("foo")).setSize(10));
        }
 
        String errMsg = "Can't sort on field [foo]; the field has incompatible sort types";
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java
index 777db15b596ec..2d0fbb42a42e2 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java
@@ -8,7 +8,6 @@
 
 package org.elasticsearch.search.sort;
 
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.common.geo.GeoDistance;
 import org.elasticsearch.common.settings.Settings;
@@ -29,6 +28,7 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
@@ -56,8 +56,7 @@ public void testDistanceSortingMVFields() throws Exception {
        assertAcked(prepareCreate("test").setSettings(settings).setMapping(xContentBuilder));
        ensureGreen();
 
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
            .setSource(
                jsonBuilder().startObject()
                    .field("names", "New York")
@@ -69,8 +68,7 @@ public void testDistanceSortingMVFields() throws Exception {
            )
            .get();
 
-        client().prepareIndex("test")
-            .setId("2")
+        prepareIndex("test").setId("2")
            .setSource(
                jsonBuilder().startObject()
                    .field("names", "New York 2")
@@ -82,8 +80,7 @@ public void testDistanceSortingMVFields() throws Exception {
            )
            .get();
 
-        client().prepareIndex("test")
-            .setId("3")
+        prepareIndex("test").setId("3")
            .setSource(
                jsonBuilder().startObject()
                    .array("names", "Times Square", "Tribeca")
@@ -103,8 +100,7 @@ public void testDistanceSortingMVFields() throws Exception {
            )
            .get();
 
-        client().prepareIndex("test")
-            .setId("4")
+        prepareIndex("test").setId("4")
            .setSource(
                jsonBuilder().startObject()
                    .array("names", "Wall Street", "Soho")
@@ -124,8 +120,7 @@ public void testDistanceSortingMVFields() throws Exception {
            )
            .get();
 
-        client().prepareIndex("test")
-            .setId("5")
+        prepareIndex("test").setId("5")
            .setSource(
                jsonBuilder().startObject()
                    .array("names", "Greenwich Village", "Brooklyn")
@@ -148,81 +143,87 @@ public void testDistanceSortingMVFields() throws Exception {
        indicesAdmin().prepareRefresh().get();
 
        // Order: Asc
-        SearchResponse searchResponse = prepareSearch("test").setQuery(matchAllQuery())
-            .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC))
-            .get();
-
-        assertHitCount(searchResponse, 5);
-        assertOrderedSearchHits(searchResponse, "1", "2", "3", "4", "5");
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
-
+        assertResponse(
+            prepareSearch("test").setQuery(matchAllQuery())
+                .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC)),
+            response -> {
+                assertHitCount(response, 5);
+                assertOrderedSearchHits(response, "1", "2", "3", "4", "5");
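+                // sort values are the distances in meters (the default unit) from the query point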
searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d)); - + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC)), + response -> { + assertHitCount(response, 5); + assertOrderedSearchHits(response, "1", "2", "3", "4", "5"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d)); + assertThat(((Number) response.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d)); + } + ); // Order: Asc, Mode: max - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC).sortMode(SortMode.MAX)) - .get(); - - assertHitCount(searchResponse, 5); - assertOrderedSearchHits(searchResponse, "1", "2", "4", "3", "5"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); - + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC).sortMode(SortMode.MAX)), + response -> { + assertHitCount(response, 5); + assertOrderedSearchHits(response, "1", "2", "4", "3", "5"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d)); + assertThat(((Number) response.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); + } + ); // Order: Desc - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)) - .get(); - - assertHitCount(searchResponse, 5); - assertOrderedSearchHits(searchResponse, "5", "3", "4", "2", "1"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), 
closeTo(421.2d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)), + response -> { + assertHitCount(response, 5); + assertOrderedSearchHits(response, "5", "3", "4", "2", "1"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d)); + assertThat(((Number) response.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + } + ); // Order: Desc, Mode: min - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC).sortMode(SortMode.MIN)) - .get(); - - assertHitCount(searchResponse, 5); - assertOrderedSearchHits(searchResponse, "5", "4", "3", "2", "1"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode(SortMode.AVG).order(SortOrder.ASC)) - .get(); - - assertHitCount(searchResponse, 5); - assertOrderedSearchHits(searchResponse, "1", "2", "4", "3", "5"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1157d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(2874d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(5301d, 10d)); - - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode(SortMode.AVG).order(SortOrder.DESC)) - .get(); - - assertHitCount(searchResponse, 5); - assertOrderedSearchHits(searchResponse, "5", "3", "4", "2", "1"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(5301.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(2874.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1157.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d)); - assertThat(((Number) 
searchResponse.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC).sortMode(SortMode.MIN)), + response -> { + assertHitCount(response, 5); + assertOrderedSearchHits(response, "5", "4", "3", "2", "1"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d)); + assertThat(((Number) response.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + } + ); + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode(SortMode.AVG).order(SortOrder.ASC)), + response -> { + assertHitCount(response, 5); + assertOrderedSearchHits(response, "1", "2", "4", "3", "5"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1157d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(2874d, 10d)); + assertThat(((Number) response.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(5301d, 10d)); + } + ); + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode(SortMode.AVG).order(SortOrder.DESC)), + response -> { + assertHitCount(response, 5); + assertOrderedSearchHits(response, "5", "3", "4", "2", "1"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(5301.0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(2874.0d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1157.0d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d)); + assertThat(((Number) response.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + } + ); try { prepareSearch("test").setQuery(matchAllQuery()) .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode(SortMode.SUM)); @@ -247,8 +248,7 @@ public void testDistanceSortingWithMissingGeoPoint() throws Exception { assertAcked(prepareCreate("test").setSettings(settings).setMapping(xContentBuilder)); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .array("names", "Times Square", "Tribeca") @@ -268,33 +268,33 @@ public void testDistanceSortingWithMissingGeoPoint() throws Exception { ) .get(); - client().prepareIndex("test") - .setId("2") - .setSource(jsonBuilder().startObject().array("names", "Wall Street", "Soho").endObject()) - .get(); + prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().array("names", "Wall Street", "Soho").endObject()).get(); refresh(); // 
Order: Asc - SearchResponse searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC)) - .get(); - - assertHitCount(searchResponse, 2); - assertOrderedSearchHits(searchResponse, "1", "2"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC)), + response -> { + assertHitCount(response, 2); + assertOrderedSearchHits(response, "1", "2"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + } + ); // Order: Desc - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)) - .get(); - - // Doc with missing geo point is first, is consistent with 0.20.x - assertHitCount(searchResponse, 2); - assertOrderedSearchHits(searchResponse, "2", "1"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286d, 10d)); + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)), + response -> { + // Doc with missing geo point is first, is consistent with 0.20.x + assertHitCount(response, 2); + assertOrderedSearchHits(response, "2", "1"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286d, 10d)); + } + ); } public void testDistanceSortingNestedFields() throws Exception { @@ -322,8 +322,7 @@ public void testDistanceSortingNestedFields() throws Exception { indexRandom( true, - client().prepareIndex("companies") - .setId("1") + prepareIndex("companies").setId("1") .setSource( jsonBuilder().startObject() .field("name", "company 1") @@ -338,8 +337,7 @@ public void testDistanceSortingNestedFields() throws Exception { .endArray() .endObject() ), - client().prepareIndex("companies") - .setId("2") + prepareIndex("companies").setId("2") .setSource( jsonBuilder().startObject() .field("name", "company 2") @@ -363,8 +361,7 @@ public void testDistanceSortingNestedFields() throws Exception { .endArray() .endObject() ), - client().prepareIndex("companies") - .setId("3") + prepareIndex("companies").setId("3") .setSource( jsonBuilder().startObject() .field("name", "company 3") @@ -387,8 +384,7 @@ public void testDistanceSortingNestedFields() throws Exception { .endArray() .endObject() ), - client().prepareIndex("companies") - .setId("4") + prepareIndex("companies").setId("4") .setSource( jsonBuilder().startObject() .field("name", "company 4") @@ -416,119 +412,127 @@ public void testDistanceSortingNestedFields() throws Exception { ); // Order: Asc - SearchResponse searchResponse = prepareSearch("companies").setQuery(matchAllQuery()) - 
.addSort( - SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) - .order(SortOrder.ASC) - .setNestedSort(new NestedSortBuilder("branches")) - ) - .get(); - - assertHitCount(searchResponse, 4); - assertOrderedSearchHits(searchResponse, "1", "2", "3", "4"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d)); - + assertResponse( + prepareSearch("companies").setQuery(matchAllQuery()) + .addSort( + SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) + .order(SortOrder.ASC) + .setNestedSort(new NestedSortBuilder("branches")) + ), + response -> { + assertHitCount(response, 4); + assertOrderedSearchHits(response, "1", "2", "3", "4"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d)); + } + ); // Order: Asc, Mode: max - searchResponse = prepareSearch("companies").setQuery(matchAllQuery()) - .addSort( - SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) - .order(SortOrder.ASC) - .sortMode(SortMode.MAX) - .setNestedSort(new NestedSortBuilder("branches")) - ) - .get(); - - assertHitCount(searchResponse, 4); - assertOrderedSearchHits(searchResponse, "1", "3", "2", "4"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); - + assertResponse( + prepareSearch("companies").setQuery(matchAllQuery()) + .addSort( + SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) + .order(SortOrder.ASC) + .sortMode(SortMode.MAX) + .setNestedSort(new NestedSortBuilder("branches")) + ), + response -> { + assertHitCount(response, 4); + assertOrderedSearchHits(response, "1", "3", "2", "4"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); + } + ); // Order: Desc - searchResponse = prepareSearch("companies").setQuery(matchAllQuery()) - .addSort( - SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) - .order(SortOrder.DESC) - .setNestedSort(new NestedSortBuilder("branches")) - ) - .get(); - - assertHitCount(searchResponse, 4); - 
assertOrderedSearchHits(searchResponse, "4", "2", "3", "1"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - + assertResponse( + prepareSearch("companies").setQuery(matchAllQuery()) + .addSort( + SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) + .order(SortOrder.DESC) + .setNestedSort(new NestedSortBuilder("branches")) + ), + response -> { + assertHitCount(response, 4); + assertOrderedSearchHits(response, "4", "2", "3", "1"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + } + ); // Order: Desc, Mode: min - searchResponse = prepareSearch("companies").setQuery(matchAllQuery()) - .addSort( - SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) - .order(SortOrder.DESC) - .sortMode(SortMode.MIN) - .setNestedSort(new NestedSortBuilder("branches")) - ) - .get(); - - assertHitCount(searchResponse, 4); - assertOrderedSearchHits(searchResponse, "4", "3", "2", "1"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - - searchResponse = prepareSearch("companies").setQuery(matchAllQuery()) - .addSort( - SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) - .sortMode(SortMode.AVG) - .order(SortOrder.ASC) - .setNestedSort(new NestedSortBuilder("branches")) - ) - .get(); - - assertHitCount(searchResponse, 4); - assertOrderedSearchHits(searchResponse, "1", "3", "2", "4"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1157.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(2874.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(5301.0d, 10d)); - - searchResponse = prepareSearch("companies").setQuery(matchAllQuery()) - .addSort( - SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) - .setNestedSort(new NestedSortBuilder("branches")) - .sortMode(SortMode.AVG) - .order(SortOrder.DESC) - ) - .get(); - - assertHitCount(searchResponse, 4); - assertOrderedSearchHits(searchResponse, "4", "2", "3", "1"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(5301.0d, 10d)); - assertThat(((Number) 
searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(2874.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1157.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - - searchResponse = prepareSearch("companies").setQuery(matchAllQuery()) - .addSort( - SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) - .setNestedSort(new NestedSortBuilder("branches").setFilter(termQuery("branches.name", "brooklyn"))) - .sortMode(SortMode.AVG) - .order(SortOrder.ASC) - ) - .get(); - assertHitCount(searchResponse, 4); - assertFirstHit(searchResponse, hasId("4")); - assertSearchHits(searchResponse, "1", "2", "3", "4"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - + assertResponse( + prepareSearch("companies").setQuery(matchAllQuery()) + .addSort( + SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) + .order(SortOrder.DESC) + .sortMode(SortMode.MIN) + .setNestedSort(new NestedSortBuilder("branches")) + ), + response -> { + assertHitCount(response, 4); + assertOrderedSearchHits(response, "4", "3", "2", "1"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + } + ); + assertResponse( + prepareSearch("companies").setQuery(matchAllQuery()) + .addSort( + SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) + .sortMode(SortMode.AVG) + .order(SortOrder.ASC) + .setNestedSort(new NestedSortBuilder("branches")) + ), + response -> { + assertHitCount(response, 4); + assertOrderedSearchHits(response, "1", "3", "2", "4"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1157.0d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(2874.0d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(5301.0d, 10d)); + } + ); + assertResponse( + prepareSearch("companies").setQuery(matchAllQuery()) + .addSort( + SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) + .setNestedSort(new NestedSortBuilder("branches")) + .sortMode(SortMode.AVG) + .order(SortOrder.DESC) + ), + response -> { + assertHitCount(response, 4); + assertOrderedSearchHits(response, "4", "2", "3", "1"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(5301.0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(2874.0d, 10d)); + assertThat(((Number) 
response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1157.0d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + } + ); + assertResponse( + prepareSearch("companies").setQuery(matchAllQuery()) + .addSort( + SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) + .setNestedSort(new NestedSortBuilder("branches").setFilter(termQuery("branches.name", "brooklyn"))) + .sortMode(SortMode.AVG) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 4); + assertFirstHit(response, hasId("4")); + assertSearchHits(response, "1", "2", "3", "4"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + } + ); try { prepareSearch("companies").setQuery(matchAllQuery()) .addSort( @@ -562,7 +566,7 @@ public void testGeoDistanceFilter() throws IOException { XContentBuilder source = JsonXContent.contentBuilder().startObject().field("pin", Geohash.stringEncode(lon, lat)).endObject(); assertAcked(prepareCreate("locations").setSettings(settings).setMapping(mapping)); - client().prepareIndex("locations").setId("1").setCreate(true).setSource(source).get(); + prepareIndex("locations").setId("1").setCreate(true).setSource(source).get(); refresh(); client().prepareGet("locations", "1").get(); @@ -585,8 +589,7 @@ public void testDistanceSortingWithUnmappedField() throws Exception { assertAcked(prepareCreate("test2")); ensureGreen(); - client().prepareIndex("test1") - .setId("1") + prepareIndex("test1").setId("1") .setSource( jsonBuilder().startObject() .array("names", "Times Square", "Tribeca") @@ -606,40 +609,41 @@ public void testDistanceSortingWithUnmappedField() throws Exception { ) .get(); - client().prepareIndex("test2") - .setId("2") - .setSource(jsonBuilder().startObject().array("names", "Wall Street", "Soho").endObject()) - .get(); + prepareIndex("test2").setId("2").setSource(jsonBuilder().startObject().array("names", "Wall Street", "Soho").endObject()).get(); refresh(); // Order: Asc - SearchResponse searchResponse = prepareSearch("test1", "test2").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).ignoreUnmapped(true).order(SortOrder.ASC)) - .get(); - - assertHitCount(searchResponse, 2); - assertOrderedSearchHits(searchResponse, "1", "2"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - + assertResponse( + prepareSearch("test1", "test2").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).ignoreUnmapped(true).order(SortOrder.ASC)), + response -> { + assertHitCount(response, 2); + assertOrderedSearchHits(response, "1", "2"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + } + ); // Order: Desc - 
searchResponse = prepareSearch("test1", "test2").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).ignoreUnmapped(true).order(SortOrder.DESC)) - .get(); - - // Doc with missing geo point is first, is consistent with 0.20.x - assertHitCount(searchResponse, 2); - assertOrderedSearchHits(searchResponse, "2", "1"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286d, 10d)); - + assertResponse( + prepareSearch("test1", "test2").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).ignoreUnmapped(true).order(SortOrder.DESC)), + response -> { + // Doc with missing geo point is first, is consistent with 0.20.x + assertHitCount(response, 2); + assertOrderedSearchHits(response, "2", "1"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286d, 10d)); + } + ); // Make sure that by default the unmapped fields continue to fail - searchResponse = prepareSearch("test1", "test2").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)) - .get(); - assertThat(searchResponse.getFailedShards(), greaterThan(0)); - assertHitCount(searchResponse, 1); + assertResponse( + prepareSearch("test1", "test2").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)), + response -> { + assertThat(response.getFailedShards(), greaterThan(0)); + assertHitCount(response, 1); + } + ); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java index 54d730cec2bc3..67426caf2faab 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java @@ -32,6 +32,7 @@ import static org.elasticsearch.search.sort.SortBuilders.fieldSort; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSortValues; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.closeTo; @@ -70,11 +71,7 @@ public void testManyToManyGeoPoints() throws ExecutionException, InterruptedExce logger.info("d1: {}", d1Builder); logger.info("d2: {}", d2Builder); - indexRandom( - true, - client().prepareIndex("index").setId("d1").setSource(d1Builder), - client().prepareIndex("index").setId("d2").setSource(d2Builder) - ); + indexRandom(true, prepareIndex("index").setId("d1").setSource(d1Builder), prepareIndex("index").setId("d2").setSource(d2Builder)); GeoPoint[] q = new GeoPoint[2]; if (randomBoolean()) { q[0] = new GeoPoint(2, 1); @@ -84,56 +81,65 @@ public void testManyToManyGeoPoints() throws ExecutionException, InterruptedExce q[0] = new GeoPoint(2, 1); } - 
SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MIN).order(SortOrder.ASC)) - .get(); - assertOrderedSearchHits(searchResponse, "d1", "d2"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 2, 3, 2, DistanceUnit.METERS), 10d) - ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 1, 5, 1, DistanceUnit.METERS), 10d) - ); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MIN).order(SortOrder.DESC)) - .get(); - assertOrderedSearchHits(searchResponse, "d2", "d1"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 1, 5, 1, DistanceUnit.METERS), 10d) - ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 2, 3, 2, DistanceUnit.METERS), 10d) - ); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MAX).order(SortOrder.ASC)) - .get(); - assertOrderedSearchHits(searchResponse, "d1", "d2"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 2, 4, 1, DistanceUnit.METERS), 10d) + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MIN).order(SortOrder.ASC)), + response -> { + assertOrderedSearchHits(response, "d1", "d2"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 2, 3, 2, DistanceUnit.METERS), 10d) + ); + assertThat( + (Double) response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 1, 5, 1, DistanceUnit.METERS), 10d) + ); + } ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 1, 6, 2, DistanceUnit.METERS), 10d) + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MIN).order(SortOrder.DESC)), + response -> { + assertOrderedSearchHits(response, "d2", "d1"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 1, 5, 1, DistanceUnit.METERS), 10d) + ); + assertThat( + (Double) response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 2, 3, 2, DistanceUnit.METERS), 10d) + ); + } ); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MAX).order(SortOrder.DESC)) - .get(); - assertOrderedSearchHits(searchResponse, "d2", "d1"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 1, 6, 2, DistanceUnit.METERS), 10d) + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MAX).order(SortOrder.ASC)), + response -> { + assertOrderedSearchHits(response, "d1", "d2"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 2, 4, 1, DistanceUnit.METERS), 10d) + ); + assertThat( + (Double) 
response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 1, 6, 2, DistanceUnit.METERS), 10d) + ); + } ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 2, 4, 1, DistanceUnit.METERS), 10d) + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MAX).order(SortOrder.DESC)), + response -> { + assertOrderedSearchHits(response, "d2", "d1"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 1, 6, 2, DistanceUnit.METERS), 10d) + ); + assertThat( + (Double) response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 2, 4, 1, DistanceUnit.METERS), 10d) + ); + } ); } @@ -157,37 +163,38 @@ public void testSingeToManyAvgMedian() throws ExecutionException, InterruptedExc logger.info("d1: {}", d1Builder); logger.info("d2: {}", d2Builder); - indexRandom( - true, - client().prepareIndex("index").setId("d1").setSource(d1Builder), - client().prepareIndex("index").setId("d2").setSource(d2Builder) - ); + indexRandom(true, prepareIndex("index").setId("d1").setSource(d1Builder), prepareIndex("index").setId("d2").setSource(d2Builder)); GeoPoint q = new GeoPoint(0, 0); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.AVG).order(SortOrder.ASC)) - .get(); - assertOrderedSearchHits(searchResponse, "d2", "d1"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(0, 0, 0, 4, DistanceUnit.METERS), 10d) - ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(0, 0, 0, 5, DistanceUnit.METERS), 10d) - ); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MEDIAN).order(SortOrder.ASC)) - .get(); - assertOrderedSearchHits(searchResponse, "d1", "d2"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(0, 0, 0, 4, DistanceUnit.METERS), 10d) + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.AVG).order(SortOrder.ASC)), + response -> { + assertOrderedSearchHits(response, "d2", "d1"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(0, 0, 0, 4, DistanceUnit.METERS), 10d) + ); + assertThat( + (Double) response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(0, 0, 0, 5, DistanceUnit.METERS), 10d) + ); + } ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(0, 0, 0, 5, DistanceUnit.METERS), 10d) + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MEDIAN).order(SortOrder.ASC)), + response -> { + assertOrderedSearchHits(response, "d1", "d2"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(0, 0, 0, 4, DistanceUnit.METERS), 10d) + ); + assertThat( + (Double) response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(0, 0, 0, 5, DistanceUnit.METERS), 10d) + ); + } ); } @@ -227,11 +234,7 @@ public 
void testManyToManyGeoPointsWithDifferentFormats() throws ExecutionExcept GeoPoint[] d2Points = { new GeoPoint(4.5, 1), new GeoPoint(4.75, 2), new GeoPoint(5, 3), new GeoPoint(5.25, 4) }; createShuffeldJSONArray(d2Builder, d2Points); - indexRandom( - true, - client().prepareIndex("index").setId("d1").setSource(d1Builder), - client().prepareIndex("index").setId("d2").setSource(d2Builder) - ); + indexRandom(true, prepareIndex("index").setId("d1").setSource(d1Builder), prepareIndex("index").setId("d2").setSource(d2Builder)); List qPoints = Arrays.asList(new GeoPoint(2, 1), new GeoPoint(2, 2), new GeoPoint(2, 3), new GeoPoint(2, 4)); Collections.shuffle(qPoints, random()); @@ -245,30 +248,33 @@ public void testManyToManyGeoPointsWithDifferentFormats() throws ExecutionExcept } } - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)) - .get(); - assertOrderedSearchHits(searchResponse, "d1", "d2"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2.5, 1, 2, 1, DistanceUnit.METERS), 1.e-1) - ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(4.5, 1, 2, 1, DistanceUnit.METERS), 1.e-1) - ); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(geoDistanceSortBuilder.sortMode(SortMode.MAX).order(SortOrder.ASC)) - .get(); - assertOrderedSearchHits(searchResponse, "d1", "d2"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(3.25, 4, 2, 1, DistanceUnit.METERS), 1.e-1) + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)), + response -> { + assertOrderedSearchHits(response, "d1", "d2"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2.5, 1, 2, 1, DistanceUnit.METERS), 1.e-1) + ); + assertThat( + (Double) response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(4.5, 1, 2, 1, DistanceUnit.METERS), 1.e-1) + ); + } ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(5.25, 4, 2, 1, DistanceUnit.METERS), 1.e-1) + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(geoDistanceSortBuilder.sortMode(SortMode.MAX).order(SortOrder.ASC)), + response -> { + assertOrderedSearchHits(response, "d1", "d2"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(3.25, 4, 2, 1, DistanceUnit.METERS), 1.e-1) + ); + assertThat( + (Double) response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(5.25, 4, 2, 1, DistanceUnit.METERS), 1.e-1) + ); + } ); } @@ -277,11 +283,9 @@ public void testSinglePointGeoDistanceSort() throws ExecutionException, Interrup assertAcked(prepareCreate("index").setMapping(LOCATION_FIELD, "type=geo_point")); indexRandom( true, - client().prepareIndex("index") - .setId("d1") + prepareIndex("index").setId("d1") .setSource(jsonBuilder().startObject().startObject(LOCATION_FIELD).field("lat", 1).field("lon", 1).endObject().endObject()), - client().prepareIndex("index") - .setId("d2") + prepareIndex("index").setId("d2") .setSource(jsonBuilder().startObject().startObject(LOCATION_FIELD).field("lat", 1).field("lon", 2).endObject().endObject()) ); @@ -289,42 +293,48 @@ 
public void testSinglePointGeoDistanceSort() throws ExecutionException, Interrup GeoDistanceSortBuilder geoDistanceSortBuilder = new GeoDistanceSortBuilder(LOCATION_FIELD, hashPoint); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)) - .get(); - checkCorrectSortOrderForGeoSort(searchResponse); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)), + response -> checkCorrectSortOrderForGeoSort(response) + ); geoDistanceSortBuilder = new GeoDistanceSortBuilder(LOCATION_FIELD, new GeoPoint(2, 2)); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)) - .get(); - checkCorrectSortOrderForGeoSort(searchResponse); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)), + response -> checkCorrectSortOrderForGeoSort(response) + ); geoDistanceSortBuilder = new GeoDistanceSortBuilder(LOCATION_FIELD, 2, 2); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)) - .get(); - checkCorrectSortOrderForGeoSort(searchResponse); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)), + response -> checkCorrectSortOrderForGeoSort(response) + ); - searchResponse = prepareSearch().setSource(new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0))) - .get(); - checkCorrectSortOrderForGeoSort(searchResponse); + assertResponse( + prepareSearch().setSource(new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0))), + response -> checkCorrectSortOrderForGeoSort(response) + ); - searchResponse = prepareSearch().setSource( - new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, "s037ms06g7h0")) - ).get(); - checkCorrectSortOrderForGeoSort(searchResponse); + assertResponse( + prepareSearch().setSource(new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, "s037ms06g7h0"))), + response -> checkCorrectSortOrderForGeoSort(response) + ); - searchResponse = prepareSearch().setSource(new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0))) - .get(); - checkCorrectSortOrderForGeoSort(searchResponse); + assertResponse( + prepareSearch().setSource(new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0))), + response -> checkCorrectSortOrderForGeoSort(response) + ); - searchResponse = prepareSearch().setSource( - new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0).validation(GeoValidationMethod.COERCE)) - ).get(); - checkCorrectSortOrderForGeoSort(searchResponse); + assertResponse( + prepareSearch().setSource( + new SearchSourceBuilder().sort( + SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0).validation(GeoValidationMethod.COERCE) + ) + ), + response -> checkCorrectSortOrderForGeoSort(response) + ); } private static void checkCorrectSortOrderForGeoSort(SearchResponse searchResponse) { @@ -347,8 +357,8 @@ public void testCrossIndexIgnoreUnmapped() throws Exception { indexRandom( true, - client().prepareIndex("test1").setSource("str_field", "bcd", "long_field", 3, "double_field", 0.65), - 
client().prepareIndex("test2").setSource() + prepareIndex("test1").setSource("str_field", "bcd", "long_field", 3, "double_field", 0.65), + prepareIndex("test2").setSource() ); assertSortValues( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/SimpleSortIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/SimpleSortIT.java index db06eb1b5de0b..c7b934df0394f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/SimpleSortIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/SimpleSortIT.java @@ -147,8 +147,7 @@ public void testSimpleSorts() throws Exception { List builders = new ArrayList<>(); for (int i = 0; i < 10; i++) { builders.add( - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setSource( jsonBuilder().startObject() .field("str_value", new String(new char[] { (char) (97 + i), (char) (97 + i) })) @@ -240,8 +239,7 @@ public void testSortMinValueScript() throws IOException { ensureGreen(); for (int i = 0; i < 10; i++) { - client().prepareIndex("test") - .setId("" + i) + prepareIndex("test").setId("" + i) .setSource( jsonBuilder().startObject() .field("ord", i) @@ -258,7 +256,7 @@ public void testSortMinValueScript() throws IOException { } for (int i = 10; i < 20; i++) { // add some docs that don't have values in those fields - client().prepareIndex("test").setId("" + i).setSource(jsonBuilder().startObject().field("ord", i).endObject()).get(); + prepareIndex("test").setId("" + i).setSource(jsonBuilder().startObject().field("ord", i).endObject()).get(); } indicesAdmin().prepareRefresh("test").get(); @@ -349,11 +347,11 @@ public void testDocumentsWithNullValue() throws Exception { assertAcked(prepareCreate("test").setMapping(mapping)); ensureGreen(); - client().prepareIndex("test").setSource(jsonBuilder().startObject().field("id", "1").field("svalue", "aaa").endObject()).get(); + prepareIndex("test").setSource(jsonBuilder().startObject().field("id", "1").field("svalue", "aaa").endObject()).get(); - client().prepareIndex("test").setSource(jsonBuilder().startObject().field("id", "2").nullField("svalue").endObject()).get(); + prepareIndex("test").setSource(jsonBuilder().startObject().field("id", "2").nullField("svalue").endObject()).get(); - client().prepareIndex("test").setSource(jsonBuilder().startObject().field("id", "3").field("svalue", "bbb").endObject()).get(); + prepareIndex("test").setSource(jsonBuilder().startObject().field("id", "3").field("svalue", "bbb").endObject()).get(); flush(); refresh(); @@ -432,10 +430,7 @@ public void test2920() throws IOException { ); ensureGreen(); for (int i = 0; i < 10; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) - .setSource(jsonBuilder().startObject().field("value", "" + i).endObject()) - .get(); + prepareIndex("test").setId(Integer.toString(i)).setSource(jsonBuilder().startObject().field("value", "" + i).endObject()).get(); } refresh(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java index 1860082c833ad..2967bdc454aed 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java @@ -10,7 +10,6 @@ import org.apache.lucene.search.join.ScoreMode; import 
org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.index.query.InnerHitBuilder;
 import org.elasticsearch.index.query.NestedQueryBuilder;
 import org.elasticsearch.index.query.TermQueryBuilder;
@@ -22,6 +21,7 @@
 import java.util.Collections;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;
@@ -31,59 +31,66 @@ public void testSimple() {
         assertAcked(prepareCreate("test"));
         ensureGreen();
 
-        client().prepareIndex("test").setId("1").setSource("field", "value").get();
+        prepareIndex("test").setId("1").setSource("field", "value").get();
         refresh();
 
-        SearchResponse response = prepareSearch("test").storedFields("_none_").setFetchSource(false).setVersion(true).get();
-        assertThat(response.getHits().getAt(0).getId(), nullValue());
-        assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
-        assertThat(response.getHits().getAt(0).getVersion(), notNullValue());
-
-        response = prepareSearch("test").storedFields("_none_").get();
-        assertThat(response.getHits().getAt(0).getId(), nullValue());
-        assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+        assertResponse(prepareSearch("test").storedFields("_none_").setFetchSource(false).setVersion(true), response -> {
+            assertThat(response.getHits().getAt(0).getId(), nullValue());
+            assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+            assertThat(response.getHits().getAt(0).getVersion(), notNullValue());
+        });
+
+        assertResponse(prepareSearch("test").storedFields("_none_"), response -> {
+            assertThat(response.getHits().getAt(0).getId(), nullValue());
+            assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+        });
     }
 
     public void testInnerHits() {
         assertAcked(prepareCreate("test").setMapping("nested", "type=nested"));
         ensureGreen();
-        client().prepareIndex("test").setId("1").setSource("field", "value", "nested", Collections.singletonMap("title", "foo")).get();
+        prepareIndex("test").setId("1").setSource("field", "value", "nested", Collections.singletonMap("title", "foo")).get();
         refresh();
 
-        SearchResponse response = prepareSearch("test").storedFields("_none_")
-            .setFetchSource(false)
-            .setQuery(
-                new NestedQueryBuilder("nested", new TermQueryBuilder("nested.title", "foo"), ScoreMode.Total).innerHit(
-                    new InnerHitBuilder().setStoredFieldNames(Collections.singletonList("_none_"))
-                        .setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE)
-                )
-            )
-            .get();
-        assertThat(response.getHits().getTotalHits().value, equalTo(1L));
-        assertThat(response.getHits().getAt(0).getId(), nullValue());
-        assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
-        assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
-        SearchHits hits = response.getHits().getAt(0).getInnerHits().get("nested");
-        assertThat(hits.getTotalHits().value, equalTo(1L));
-        assertThat(hits.getAt(0).getId(), nullValue());
-        assertThat(hits.getAt(0).getSourceAsString(), nullValue());
+        assertResponse(
+            prepareSearch("test").storedFields("_none_")
+                .setFetchSource(false)
+                .setQuery(
+                    new NestedQueryBuilder("nested", new TermQueryBuilder("nested.title", "foo"), ScoreMode.Total).innerHit(
+                        new InnerHitBuilder().setStoredFieldNames(Collections.singletonList("_none_"))
+                            .setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE)
+                    )
+                ),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(1L));
+                assertThat(response.getHits().getAt(0).getId(), nullValue());
+                assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+                assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
+                SearchHits hits = response.getHits().getAt(0).getInnerHits().get("nested");
+                assertThat(hits.getTotalHits().value, equalTo(1L));
+                assertThat(hits.getAt(0).getId(), nullValue());
+                assertThat(hits.getAt(0).getSourceAsString(), nullValue());
+            }
+        );
     }
 
     public void testWithRouting() {
         assertAcked(prepareCreate("test"));
         ensureGreen();
 
-        client().prepareIndex("test").setId("1").setSource("field", "value").setRouting("toto").get();
+        prepareIndex("test").setId("1").setSource("field", "value").setRouting("toto").get();
         refresh();
 
-        SearchResponse response = prepareSearch("test").storedFields("_none_").setFetchSource(false).get();
-        assertThat(response.getHits().getAt(0).getId(), nullValue());
-        assertThat(response.getHits().getAt(0).field("_routing"), nullValue());
-        assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+        assertResponse(prepareSearch("test").storedFields("_none_").setFetchSource(false), response -> {
+            assertThat(response.getHits().getAt(0).getId(), nullValue());
+            assertThat(response.getHits().getAt(0).field("_routing"), nullValue());
+            assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+        });
 
-        response = prepareSearch("test").storedFields("_none_").get();
-        assertThat(response.getHits().getAt(0).getId(), nullValue());
-        assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+        assertResponse(prepareSearch("test").storedFields("_none_"), response -> {
+            assertThat(response.getHits().getAt(0).getId(), nullValue());
+            assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+        });
     }
 
     public void testInvalid() {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/source/SourceFetchingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/source/SourceFetchingIT.java
index 3fcbc5cf4add6..81facfa8116c0 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/source/SourceFetchingIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/source/SourceFetchingIT.java
@@ -8,9 +8,9 @@
 
 package org.elasticsearch.search.source;
 
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.test.ESIntegTestCase;
 
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;
 import static org.hamcrest.core.IsEqual.equalTo;
@@ -23,14 +23,17 @@ public void testSourceDefaultBehavior() {
         indexDoc("test", "1", "field", "value");
         refresh();
 
-        SearchResponse response = prepareSearch("test").get();
-        assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+        assertResponse(prepareSearch("test"), response -> assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()));
 
-        response = prepareSearch("test").addStoredField("bla").get();
-        assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+        assertResponse(
+            prepareSearch("test").addStoredField("bla"),
+            response -> 
assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue())
+        );
-        response = prepareSearch("test").addStoredField("_source").get();
-        assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+        assertResponse(
+            prepareSearch("test").addStoredField("_source"),
+            response -> assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue())
+        );
     }
 
@@ -38,29 +41,33 @@ public void testSourceFiltering() {
         createIndex("test");
         ensureGreen();
 
-        client().prepareIndex("test").setId("1").setSource("field1", "value", "field2", "value2").get();
+        prepareIndex("test").setId("1").setSource("field1", "value", "field2", "value2").get();
         refresh();
 
-        SearchResponse response = prepareSearch("test").setFetchSource(false).get();
-        assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
-
-        response = prepareSearch("test").setFetchSource(true).get();
-        assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
-
-        response = prepareSearch("test").setFetchSource("field1", null).get();
-        assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
-        assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1));
-        assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field1"), equalTo("value"));
-
-        response = prepareSearch("test").setFetchSource("hello", null).get();
-        assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
-        assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(0));
-
-        response = prepareSearch("test").setFetchSource(new String[] { "*" }, new String[] { "field2" }).get();
-        assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
-        assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1));
-        assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field1"), equalTo("value"));
-
+        assertResponse(
+            prepareSearch("test").setFetchSource(false),
+            response -> assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue())
+        );
+
+        assertResponse(
+            prepareSearch("test").setFetchSource(true),
+            response -> assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue())
+        );
+
+        assertResponse(prepareSearch("test").setFetchSource("field1", null), response -> {
+            assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+            assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1));
+            assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field1"), equalTo("value"));
+        });
+        assertResponse(prepareSearch("test").setFetchSource("hello", null), response -> {
+            assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+            assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(0));
+        });
+        assertResponse(prepareSearch("test").setFetchSource(new String[] { "*" }, new String[] { "field2" }), response -> {
+            assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+            assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1));
+            assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field1"), equalTo("value"));
+        });
     }
 
     /**
@@ -71,17 +78,18 @@ public void testSourceWithWildcardFiltering() {
         createIndex("test");
         ensureGreen();
 
-        client().prepareIndex("test").setId("1").setSource("field", "value").get();
+        prepareIndex("test").setId("1").setSource("field", "value").get();
         refresh();
 
-        SearchResponse response = prepareSearch("test").setFetchSource(new String[] { "*.notexisting", "field" }, null).get();
-        assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
-        assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1));
-        assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value"));
-
-        response = prepareSearch("test").setFetchSource(new String[] { "field.notexisting.*", "field" }, null).get();
-        assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
-        assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1));
-        assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value"));
+        assertResponse(prepareSearch("test").setFetchSource(new String[] { "*.notexisting", "field" }, null), response -> {
+            assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+            assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1));
+            assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value"));
+        });
+        assertResponse(prepareSearch("test").setFetchSource(new String[] { "field.notexisting.*", "field" }, null), response -> {
+            assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+            assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1));
+            assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value"));
+        });
     }
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java
index 32f5e14b944a2..0d850a3708044 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java
@@ -11,7 +11,6 @@
 import org.elasticsearch.action.admin.indices.stats.FieldUsageShardResponse;
 import org.elasticsearch.action.admin.indices.stats.FieldUsageStatsAction;
 import org.elasticsearch.action.admin.indices.stats.FieldUsageStatsRequest;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.SearchType;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.query.QueryBuilders;
@@ -30,6 +29,7 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 
 public class FieldUsageStatsIT extends ESIntegTestCase {
 
@@ -55,8 +55,7 @@ public void testFieldUsageStats() throws ExecutionException, InterruptedExceptio
         LocalDate date = LocalDate.of(2015, 9, 1);
 
         for (int i = 0; i < 30; i++) {
-            client().prepareIndex("test")
-                .setId(Integer.toString(i))
+            prepareIndex("test").setId(Integer.toString(i))
                 .setSource("field", "value", "field2", "value2", "date_field", formatter.format(date.plusDays(i)))
                 .get();
         }
@@ -73,16 +72,18 @@ public void testFieldUsageStats() throws ExecutionException, InterruptedExceptio
         assertFalse(stats.hasField("field2"));
         assertFalse(stats.hasField("date_field"));
 
-        SearchResponse searchResponse = prepareSearch().setSearchType(SearchType.DEFAULT)
-            .setQuery(QueryBuilders.termQuery("field", "value"))
-            .addAggregation(AggregationBuilders.terms("agg1").field("field.keyword"))
-            .addAggregation(AggregationBuilders.filter("agg2", QueryBuilders.spanTermQuery("field2", "value2")))
-            .setSize(between(5, 100))
-            .setPreference("fixed")
-            .get();
-
-        assertHitCount(searchResponse, 30);
-        assertAllSuccessful(searchResponse);
+        assertResponse(
+            prepareSearch().setSearchType(SearchType.DEFAULT)
+                .setQuery(QueryBuilders.termQuery("field", "value"))
+                .addAggregation(AggregationBuilders.terms("agg1").field("field.keyword"))
+                .addAggregation(AggregationBuilders.filter("agg2", QueryBuilders.spanTermQuery("field2", "value2")))
+                .setSize(between(5, 100))
+                .setPreference("fixed"),
+            response -> {
+                assertHitCount(response, 30);
+                assertAllSuccessful(response);
+            }
+        );
 
         stats = aggregated(client().execute(FieldUsageStatsAction.INSTANCE, new FieldUsageStatsRequest()).get().getStats().get("test"));
         logger.info("Stats after first query: {}", stats);
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java
index 07e8c516eda41..23384d1b199f9 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java
@@ -11,7 +11,6 @@
 import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.routing.GroupShardsIterator;
 import org.elasticsearch.cluster.routing.ShardIterator;
@@ -39,7 +38,8 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
@@ -83,7 +83,7 @@ public void testSimpleStats() throws Exception {
         assertAcked(prepareCreate("test1").setSettings(indexSettings(shardsIdx1, 0)));
         int docsTest1 = scaledRandomIntBetween(3 * shardsIdx1, 5 * shardsIdx1);
         for (int i = 0; i < docsTest1; i++) {
-            client().prepareIndex("test1").setId(Integer.toString(i)).setSource("field", "value").get();
+            prepareIndex("test1").setId(Integer.toString(i)).setSource("field", "value").get();
             if (rarely()) {
                 refresh();
             }
@@ -91,7 +91,7 @@ public void testSimpleStats() throws Exception {
         assertAcked(prepareCreate("test2").setSettings(indexSettings(shardsIdx2, 0)));
         int docsTest2 = scaledRandomIntBetween(3 * shardsIdx2, 5 * shardsIdx2);
         for (int i = 0; i < docsTest2; i++) {
-            client().prepareIndex("test2").setId(Integer.toString(i)).setSource("field", "value").get();
+            prepareIndex("test2").setId(Integer.toString(i)).setSource("field", "value").get();
             if (rarely()) {
                 refresh();
             }
@@ -103,16 +103,22 @@ public void testSimpleStats() throws Exception {
         refresh();
         int iters = scaledRandomIntBetween(100, 150);
         for (int i = 0; i < iters; i++) {
-            SearchResponse searchResponse = internalCluster().coordOnlyNodeClient()
-                .prepareSearch()
-                .setQuery(QueryBuilders.termQuery("field", "value"))
-                .setStats("group1", "group2")
-                .highlighter(new HighlightBuilder().field("field"))
-                .addScriptField("script1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.field", Collections.emptyMap()))
-                .setSize(100)
-                .get();
-            assertHitCount(searchResponse, docsTest1 + docsTest2);
-            assertAllSuccessful(searchResponse);
+            assertResponse(
+                internalCluster().coordOnlyNodeClient()
+                    .prepareSearch()
+                    .setQuery(QueryBuilders.termQuery("field", "value"))
+                    .setStats("group1", "group2")
+                    .highlighter(new HighlightBuilder().field("field"))
+                    .addScriptField(
+                        "script1",
+                        new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.field", Collections.emptyMap())
+                    )
+                    .setSize(100),
+                response -> {
+                    assertHitCount(response, docsTest1 + docsTest2);
+                    assertAllSuccessful(response);
+                }
+            );
         }
 
         IndicesStatsResponse indicesStats = indicesAdmin().prepareStats().get();
@@ -175,11 +181,7 @@ public void testOpenContexts() {
         final int docs = scaledRandomIntBetween(20, 50);
         for (int s = 0; s < numAssignedShards(index); s++) {
             for (int i = 0; i < docs; i++) {
-                client().prepareIndex(index)
-                    .setId(Integer.toString(s * docs + i))
-                    .setSource("field", "value")
-                    .setRouting(Integer.toString(s))
-                    .get();
+                prepareIndex(index).setId(Integer.toString(s * docs + i)).setSource("field", "value").setRouting(Integer.toString(s)).get();
             }
         }
         indicesAdmin().prepareRefresh(index).get();
@@ -188,11 +190,15 @@
         assertThat(indicesStats.getTotal().getSearch().getOpenContexts(), equalTo(0L));
 
         int size = scaledRandomIntBetween(1, docs);
-        SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery())
-            .setSize(size)
-            .setScroll(TimeValue.timeValueMinutes(2))
-            .get();
-        assertNoFailures(searchResponse);
+        final String[] scroll = new String[1];
+        final int[] total = new int[1];
+        assertNoFailuresAndResponse(
+            prepareSearch().setQuery(matchAllQuery()).setSize(size).setScroll(TimeValue.timeValueMinutes(2)),
+            response -> {
+                scroll[0] = response.getScrollId();
+                total[0] = response.getHits().getHits().length;
+            }
+        );
 
         // refresh the stats now that scroll contexts are opened
         indicesStats = indicesAdmin().prepareStats(index).get();
@@ -202,11 +208,14 @@
 
         int hits = 0;
         while (true) {
-            if (searchResponse.getHits().getHits().length == 0) {
+            if (total[0] == 0) {
                 break;
            }
-            hits += searchResponse.getHits().getHits().length;
-            searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)).get();
+            hits += total[0];
+            assertResponse(client().prepareSearchScroll(scroll[0]).setScroll(TimeValue.timeValueMinutes(2)), response -> {
+                scroll[0] = response.getScrollId();
+                total[0] = response.getHits().getHits().length;
+            });
         }
         long expected = 0;
 
@@ -220,7 +229,7 @@ public void testOpenContexts() {
         assertEquals(hits, docs * numAssignedShards(index));
         assertThat(stats.getQueryCount(), greaterThanOrEqualTo(expected));
 
-        clearScroll(searchResponse.getScrollId());
+        clearScroll(scroll[0]);
         indicesStats = indicesAdmin().prepareStats().get();
         stats = indicesStats.getTotal().getSearch().getTotal();
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java
index 9592d3904a90d..b5f7468d1645c 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java
@@ -58,6 +58,7 @@
 import static org.elasticsearch.common.util.CollectionUtils.iterableAsArrayList;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasScore;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
@@ -94,8 +95,7 @@ public void testTieBreak() throws Exception {
             String value = "a" + randomAlphaOfLengthBetween(1, 10);
             entries[i] = value;
             indexRequestBuilders.add(
-                client().prepareIndex(INDEX)
-                    .setId("" + i)
+                prepareIndex(INDEX).setId("" + i)
                     .setSource(
                         jsonBuilder().startObject().startObject(FIELD).field("input", value).field("weight", 10).endObject().endObject()
                     )
@@ -117,8 +117,7 @@ public void testPrefix() throws Exception {
         List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>();
         for (int i = 1; i <= numDocs; i++) {
             indexRequestBuilders.add(
-                client().prepareIndex(INDEX)
-                    .setId("" + i)
+                prepareIndex(INDEX).setId("" + i)
                     .setSource(
                         jsonBuilder().startObject()
                             .startObject(FIELD)
@@ -145,8 +144,7 @@ public void testTextAndGlobalText() throws Exception {
         List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>();
         for (int i = 1; i <= numDocs; i++) {
             indexRequestBuilders.add(
-                client().prepareIndex(INDEX)
-                    .setId("" + i)
+                prepareIndex(INDEX).setId("" + i)
                     .setSource(
                         jsonBuilder().startObject()
                             .startObject(FIELD)
@@ -159,18 +157,22 @@
         }
         indexRandom(true, indexRequestBuilders);
         CompletionSuggestionBuilder noText = SuggestBuilders.completionSuggestion(FIELD);
-        SearchResponse searchResponse = prepareSearch(INDEX).suggest(
-            new SuggestBuilder().addSuggestion("foo", noText).setGlobalText("sugg")
-        ).get();
-        assertSuggestions(searchResponse, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6");
+        assertResponse(
+            prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", noText).setGlobalText("sugg")),
+            response -> assertSuggestions(response, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6")
+        );
 
         CompletionSuggestionBuilder withText = SuggestBuilders.completionSuggestion(FIELD).text("sugg");
-        searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", withText)).get();
-        assertSuggestions(searchResponse, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6");
+        assertResponse(
+            prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", withText)),
+            response -> assertSuggestions(response, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6")
+        );
 
         // test that suggestion text takes precedence over global text
-        searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", withText).setGlobalText("bogus")).get();
-        assertSuggestions(searchResponse, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6");
+        assertResponse(
+            prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", withText).setGlobalText("bogus")),
+            response -> assertSuggestions(response, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6")
+        );
     }
 
     public void testRegex() throws Exception {
@@ -180,8 +182,7 @@ public void testRegex() throws Exception {
         List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>();
         for (int i = 1; i <= numDocs; i++) {
             indexRequestBuilders.add(
-                client().prepareIndex(INDEX)
-                    .setId("" + i)
+                prepareIndex(INDEX).setId("" + i)
                     .setSource(
                         jsonBuilder().startObject()
                             .startObject(FIELD)
@@ -204,8 +205,7 @@ public void testFuzzy() throws Exception {
         List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>();
         for (int i = 1; i <= numDocs; i++) {
             indexRequestBuilders.add(
-                client().prepareIndex(INDEX)
-                    .setId("" + i)
+                prepareIndex(INDEX).setId("" + i)
                     .setSource(
                         jsonBuilder().startObject()
                             .startObject(FIELD)
@@ -228,8 +228,7 @@ public void testEarlyTermination() throws Exception {
         List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>();
         for (int i = 0; i < numDocs; i++) {
             indexRequestBuilders.add(
-                client().prepareIndex(INDEX)
-                    .setId("" + i)
+                prepareIndex(INDEX).setId("" + i)
                     .setSource(
                         jsonBuilder().startObject()
                             .startObject(FIELD)
@@ -263,8 +262,7 @@ public void testSuggestDocument() throws Exception {
         List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>();
         for (int i = 1; i <= numDocs; i++) {
             indexRequestBuilders.add(
-                client().prepareIndex(INDEX)
-                    .setId("" + i)
+                prepareIndex(INDEX).setId("" + i)
                     .setSource(
                         jsonBuilder().startObject()
                             .startObject(FIELD)
@@ -278,18 +276,19 @@ public void testSuggestDocument() throws Exception {
         indexRandom(true, indexRequestBuilders);
         CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg").size(numDocs);
 
-        SearchResponse searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", prefix)).get();
-        CompletionSuggestion completionSuggestion = searchResponse.getSuggest().getSuggestion("foo");
-        CompletionSuggestion.Entry options = completionSuggestion.getEntries().get(0);
-        assertThat(options.getOptions().size(), equalTo(numDocs));
-        int id = numDocs;
-        for (CompletionSuggestion.Entry.Option option : options) {
-            assertThat(option.getText().toString(), equalTo("suggestion" + id));
-            assertThat(option.getHit(), hasId("" + id));
-            assertThat(option.getHit(), hasScore((id)));
-            assertNotNull(option.getHit().getSourceAsMap());
-            id--;
-        }
+        assertResponse(prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", prefix)), response -> {
+            CompletionSuggestion completionSuggestion = response.getSuggest().getSuggestion("foo");
+            CompletionSuggestion.Entry options = completionSuggestion.getEntries().get(0);
+            assertThat(options.getOptions().size(), equalTo(numDocs));
+            int id = numDocs;
+            for (CompletionSuggestion.Entry.Option option : options) {
+                assertThat(option.getText().toString(), equalTo("suggestion" + id));
+                assertThat(option.getHit(), hasId("" + id));
+                assertThat(option.getHit(), hasScore((id)));
+                assertNotNull(option.getHit().getSourceAsMap());
+                id--;
+            }
+        });
     }
 
     public void testSuggestDocumentNoSource() throws Exception {
@@ -299,8 +298,7 @@ public void testSuggestDocumentNoSource() throws Exception {
         List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>();
         for (int i = 1; i <= numDocs; i++) {
             indexRequestBuilders.add(
-                client().prepareIndex(INDEX)
-                    .setId("" + i)
+                prepareIndex(INDEX).setId("" + i)
                     .setSource(
                         jsonBuilder().startObject()
                             .startObject(FIELD)
@@ -314,20 +312,19 @@ public void testSuggestDocumentNoSource() throws Exception {
         indexRandom(true, indexRequestBuilders);
         CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg").size(numDocs);
 
-        SearchResponse searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", prefix))
-            .setFetchSource(false)
-            .get();
-        CompletionSuggestion completionSuggestion = searchResponse.getSuggest().getSuggestion("foo");
-        CompletionSuggestion.Entry options = completionSuggestion.getEntries().get(0);
-        assertThat(options.getOptions().size(), equalTo(numDocs));
-        int id = numDocs;
-        for (CompletionSuggestion.Entry.Option option : options) {
-            assertThat(option.getText().toString(), equalTo("suggestion" + id));
-            assertThat(option.getHit(), hasId("" + id));
-            assertThat(option.getHit(), hasScore((id)));
-            assertNull(option.getHit().getSourceAsMap());
-            id--;
-        }
+        assertResponse(prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", prefix)).setFetchSource(false), response -> {
+            CompletionSuggestion completionSuggestion = response.getSuggest().getSuggestion("foo");
+            CompletionSuggestion.Entry options = completionSuggestion.getEntries().get(0);
+            assertThat(options.getOptions().size(), equalTo(numDocs));
+            int id = numDocs;
+            for (CompletionSuggestion.Entry.Option option : options) {
+                assertThat(option.getText().toString(), equalTo("suggestion" + id));
+                assertThat(option.getHit(), hasId("" + id));
+                assertThat(option.getHit(), hasScore((id)));
+                assertNull(option.getHit().getSourceAsMap());
+                id--;
+            }
+        });
     }
 
     public void testSuggestDocumentSourceFiltering() throws Exception {
@@ -337,8 +334,7 @@ public void testSuggestDocumentSourceFiltering() throws Exception {
         List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>();
         for (int i = 1; i <= numDocs; i++) {
             indexRequestBuilders.add(
-                client().prepareIndex(INDEX)
-                    .setId("" + i)
+                prepareIndex(INDEX).setId("" + i)
                     .setSource(
                         jsonBuilder().startObject()
                             .startObject(FIELD)
@@ -354,23 +350,25 @@ public void testSuggestDocumentSourceFiltering() throws Exception {
         indexRandom(true, indexRequestBuilders);
         CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg").size(numDocs);
 
-        SearchResponse searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", prefix))
-            .setFetchSource("a", "b")
-            .get();
-        CompletionSuggestion completionSuggestion = searchResponse.getSuggest().getSuggestion("foo");
-        CompletionSuggestion.Entry options = completionSuggestion.getEntries().get(0);
-        assertThat(options.getOptions().size(), equalTo(numDocs));
-        int id = numDocs;
-        for (CompletionSuggestion.Entry.Option option : options) {
-            assertThat(option.getText().toString(), equalTo("suggestion" + id));
-            assertThat(option.getHit(), hasId("" + id));
-            assertThat(option.getHit(), hasScore((id)));
-            assertNotNull(option.getHit().getSourceAsMap());
-            Set<String> sourceFields = option.getHit().getSourceAsMap().keySet();
-            assertThat(sourceFields, contains("a"));
-            assertThat(sourceFields, not(contains("b")));
-            id--;
-        }
+        assertResponse(
+            prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", prefix)).setFetchSource("a", "b"),
+            response -> {
+                CompletionSuggestion completionSuggestion = response.getSuggest().getSuggestion("foo");
+                CompletionSuggestion.Entry options = completionSuggestion.getEntries().get(0);
+                assertThat(options.getOptions().size(), equalTo(numDocs));
+                int id = numDocs;
+                for (CompletionSuggestion.Entry.Option option : options) {
+                    assertThat(option.getText().toString(), equalTo("suggestion" + id));
+                    assertThat(option.getHit(), hasId("" + id));
+                    assertThat(option.getHit(), hasScore((id)));
+                    assertNotNull(option.getHit().getSourceAsMap());
+                    Set<String> sourceFields = option.getHit().getSourceAsMap().keySet();
+                    assertThat(sourceFields, contains("a"));
+                    assertThat(sourceFields, not(contains("b")));
+                    id--;
+                }
+            }
+        );
     }
 
     /**
@@ -381,17 +379,19 @@ public void testSuggestEmptyIndex() throws IOException {
         createIndexAndMapping(mapping);
 
         CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("v");
-        SearchResponse searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", prefix))
-            .setFetchSource("a", "b")
-            .get();
-        Suggest suggest = searchResponse.getSuggest();
-        assertNotNull(suggest);
-        CompletionSuggestion completionSuggestion = suggest.getSuggestion("foo");
-        CompletionSuggestion.Entry options = completionSuggestion.getEntries().get(0);
-        assertEquals("v", options.getText().string());
-        assertEquals(1, options.getLength());
-        assertEquals(0, options.getOffset());
-        assertEquals(0, options.options.size());
+        assertResponse(
+            prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", prefix)).setFetchSource("a", "b"),
+            response -> {
+                Suggest suggest = response.getSuggest();
+                assertNotNull(suggest);
+                CompletionSuggestion completionSuggestion = suggest.getSuggestion("foo");
+                CompletionSuggestion.Entry options = completionSuggestion.getEntries().get(0);
+                assertEquals("v", options.getText().string());
+                assertEquals(1, options.getLength());
+                assertEquals(0, options.getOffset());
+                assertEquals(0, options.options.size());
+            }
+        );
     }
 
     public void testThatWeightsAreWorking() throws Exception {
@@ -400,8 +400,7 @@ public void testThatWeightsAreWorking() throws Exception {
         List<String> similarNames = Arrays.asList("the", "The Prodigy", "The Verve", "The the");
         // the weight is 1000 divided by string length, so the results are easy to to check
         for (String similarName : similarNames) {
-            client().prepareIndex(INDEX)
-                .setId(similarName)
+            prepareIndex(INDEX).setId(similarName)
                 .setSource(
                     jsonBuilder().startObject()
                         .startObject(FIELD)
@@ -425,8 +424,7 @@ public void testThatWeightMustBeAnInteger() throws Exception {
 
         Exception e = expectThrows(
             DocumentParsingException.class,
-            () -> client().prepareIndex(INDEX)
-                .setId("1")
+            () -> prepareIndex(INDEX).setId("1")
                 .setSource(
                     jsonBuilder().startObject()
                         .startObject(FIELD)
@@ -445,8 +443,7 @@ public void testThatWeightMustBeAnInteger() throws Exception {
     public void testThatWeightCanBeAString() throws Exception {
         createIndexAndMapping(completionMappingBuilder);
 
-        client().prepareIndex(INDEX)
-            .setId("1")
+        prepareIndex(INDEX).setId("1")
             .setSource(
                 jsonBuilder().startObject()
                     .startObject(FIELD)
@@ -461,22 +458,25 @@ public void testThatWeightCanBeAString() throws Exception {
 
         refresh();
 
-        SearchResponse searchResponse = prepareSearch(INDEX).suggest(
-            new SuggestBuilder().addSuggestion("testSuggestions", new CompletionSuggestionBuilder(FIELD).text("test").size(10))
-        ).get();
-
-        assertSuggestions(searchResponse, "testSuggestions", "testing");
-        Suggest.Suggestion.Entry.Option option = searchResponse.getSuggest()
-            .getSuggestion("testSuggestions")
-            .getEntries()
-            .get(0)
-            .getOptions()
-            .get(0);
-        assertThat(option, is(instanceOf(CompletionSuggestion.Entry.Option.class)));
-        CompletionSuggestion.Entry.Option prefixOption = (CompletionSuggestion.Entry.Option) option;
-
-        assertThat(prefixOption.getText().string(), equalTo("testing"));
-        assertThat((long) prefixOption.getScore(), equalTo(10L));
+        assertResponse(
+            prepareSearch(INDEX).suggest(
+                new SuggestBuilder().addSuggestion("testSuggestions", new CompletionSuggestionBuilder(FIELD).text("test").size(10))
+            ),
+            response -> {
+                assertSuggestions(response, "testSuggestions", "testing");
+                Suggest.Suggestion.Entry.Option option = response.getSuggest()
+                    .getSuggestion("testSuggestions")
+                    .getEntries()
+                    .get(0)
+                    .getOptions()
+                    .get(0);
+                assertThat(option, is(instanceOf(CompletionSuggestion.Entry.Option.class)));
+                CompletionSuggestion.Entry.Option prefixOption = (CompletionSuggestion.Entry.Option) option;
+
+                assertThat(prefixOption.getText().string(), equalTo("testing"));
+                assertThat((long) prefixOption.getScore(), equalTo(10L));
+            }
+        );
     }
 
    public void testThatWeightMustNotBeANonNumberString() throws Exception {
@@ -484,8 +484,7 @@ public void testThatWeightMustNotBeANonNumberString() throws Exception {
 
         Exception e = expectThrows(
             DocumentParsingException.class,
-            () -> client().prepareIndex(INDEX)
-                .setId("1")
+            () -> prepareIndex(INDEX).setId("1")
                 .setSource(
                     jsonBuilder().startObject()
                         .startObject(FIELD)
@@ -508,8 +507,7 @@ public void testThatWeightAsStringMustBeInt() throws Exception {
 
         Exception e = expectThrows(
             DocumentParsingException.class,
-            () -> client().prepareIndex(INDEX)
-                .setId("1")
+            () -> prepareIndex(INDEX).setId("1")
                 .setSource(
                     jsonBuilder().startObject()
                         .startObject(FIELD)
@@ -528,8 +526,7 @@ public void testThatWeightAsStringMustBeInt() throws Exception {
     public void testThatInputCanBeAStringInsteadOfAnArray() throws Exception {
         createIndexAndMapping(completionMappingBuilder);
 
-        client().prepareIndex(INDEX)
-            .setId("1")
+        prepareIndex(INDEX).setId("1")
             .setSource(jsonBuilder().startObject().startObject(FIELD).field("input", "Foo Fighters").endObject().endObject())
             .get();
 
@@ -542,8 +539,7 @@ public void testDisabledPreserveSeparators() throws Exception {
         completionMappingBuilder.preserveSeparators(false);
         createIndexAndMapping(completionMappingBuilder);
 
-        client().prepareIndex(INDEX)
-            .setId("1")
+        prepareIndex(INDEX).setId("1")
             .setSource(
                 jsonBuilder().startObject()
                     .startObject(FIELD)
@@ -556,8 +552,7 @@ public void testDisabledPreserveSeparators() throws Exception {
             )
             .get();
 
-        client().prepareIndex(INDEX)
-            .setId("2")
+        prepareIndex(INDEX).setId("2")
             .setSource(
                 jsonBuilder().startObject()
                     .startObject(FIELD)
@@ -579,15 +574,13 @@ public void testEnabledPreserveSeparators() throws Exception {
         completionMappingBuilder.preserveSeparators(true);
         createIndexAndMapping(completionMappingBuilder);
 
-        client().prepareIndex(INDEX)
-            .setId("1")
+        prepareIndex(INDEX).setId("1")
             .setSource(
                 jsonBuilder().startObject().startObject(FIELD).startArray("input").value("Foo Fighters").endArray().endObject().endObject()
             )
             .get();
 
-        client().prepareIndex(INDEX)
-            .setId("2")
+        prepareIndex(INDEX).setId("2")
             .setSource(jsonBuilder().startObject().startObject(FIELD).startArray("input").value("Foof").endArray().endObject().endObject())
             .get();
 
@@ -599,8 +592,7 @@ public void testEnabledPreserveSeparators() throws Exception {
     public void testThatMultipleInputsAreSupported() throws Exception {
         createIndexAndMapping(completionMappingBuilder);
 
-        client().prepareIndex(INDEX)
-            .setId("1")
+        prepareIndex(INDEX).setId("1")
             .setSource(
                 jsonBuilder().startObject()
                     .startObject(FIELD)
@@ -622,8 +614,7 @@ public void testThatMultipleInputsAreSupported() throws Exception {
     public void testThatShortSyntaxIsWorking() throws Exception {
         createIndexAndMapping(completionMappingBuilder);
 
-        client().prepareIndex(INDEX)
-            .setId("1")
+        prepareIndex(INDEX).setId("1")
             .setSource(
                 jsonBuilder().startObject().startArray(FIELD).value("The Prodigy Firestarter").value("Firestarter").endArray().endObject()
             )
@@ -640,8 +631,7 @@ public void testThatDisablingPositionIncrementsWorkForStopwords() throws Excepti
         completionMappingBuilder.searchAnalyzer("classic").indexAnalyzer("classic").preservePositionIncrements(false);
         createIndexAndMapping(completionMappingBuilder);
 
-        client().prepareIndex(INDEX)
-            .setId("1")
+        prepareIndex(INDEX).setId("1")
             .setSource(
                 jsonBuilder().startObject().startObject(FIELD).startArray("input").value("The Beatles").endArray().endObject().endObject()
             )
@@ -663,8 +653,7 @@ public void testThatUpgradeToMultiFieldsWorks() throws Exception {
             .endObject()
             .endObject();
         assertAcked(prepareCreate(INDEX).setMapping(mapping));
-        client().prepareIndex(INDEX)
-            .setId("1")
+        prepareIndex(INDEX).setId("1")
             .setRefreshPolicy(IMMEDIATE)
             .setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject())
             .get();
@@ -691,29 +680,31 @@ public void testThatUpgradeToMultiFieldsWorks() throws Exception {
             .get();
         assertThat(putMappingResponse.isAcknowledged(), is(true));
 
-        SearchResponse searchResponse = prepareSearch(INDEX).suggest(
-            new SuggestBuilder().addSuggestion("suggs", SuggestBuilders.completionSuggestion(FIELD + ".suggest").text("f").size(10))
-        ).get();
-        assertSuggestions(searchResponse, "suggs");
+        assertResponse(
+            prepareSearch(INDEX).suggest(
+                new SuggestBuilder().addSuggestion("suggs", SuggestBuilders.completionSuggestion(FIELD + ".suggest").text("f").size(10))
+            ),
+            response -> assertSuggestions(response, "suggs")
+        );
 
-        client().prepareIndex(INDEX)
-            .setId("1")
+        prepareIndex(INDEX).setId("1")
             .setRefreshPolicy(IMMEDIATE)
             .setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject())
             .get();
         ensureGreen(INDEX);
 
-        SearchResponse afterReindexingResponse = prepareSearch(INDEX).suggest(
-            new SuggestBuilder().addSuggestion("suggs", SuggestBuilders.completionSuggestion(FIELD + ".suggest").text("f").size(10))
-        ).get();
-        assertSuggestions(afterReindexingResponse, "suggs", "Foo Fighters");
+        assertResponse(
+            prepareSearch(INDEX).suggest(
+                new SuggestBuilder().addSuggestion("suggs", SuggestBuilders.completionSuggestion(FIELD + ".suggest").text("f").size(10))
+            ),
+            response -> assertSuggestions(response, "suggs", "Foo Fighters")
+        );
     }
 
     public void testThatFuzzySuggesterWorks() throws Exception {
         createIndexAndMapping(completionMappingBuilder);
 
-        client().prepareIndex(INDEX)
-            .setId("1")
+        prepareIndex(INDEX).setId("1")
             .setSource(
                 jsonBuilder().startObject().startObject(FIELD).startArray("input").value("Nirvana").endArray().endObject().endObject()
             )
@@ -721,22 +712,28 @@ public void testThatFuzzySuggesterWorks() throws Exception {
 
         refresh();
 
-        SearchResponse searchResponse = prepareSearch(INDEX).suggest(
-            new SuggestBuilder().addSuggestion("foo", SuggestBuilders.completionSuggestion(FIELD).prefix("Nirv").size(10))
-        ).get();
-        assertSuggestions(searchResponse, false, "foo", "Nirvana");
+        assertResponse(
+            prepareSearch(INDEX).suggest(
+                new SuggestBuilder().addSuggestion("foo", SuggestBuilders.completionSuggestion(FIELD).prefix("Nirv").size(10))
+            ),
+            response -> assertSuggestions(response, false, "foo", "Nirvana")
+        );
 
-        searchResponse = prepareSearch(INDEX).suggest(
-            new SuggestBuilder().addSuggestion("foo", SuggestBuilders.completionSuggestion(FIELD).prefix("Nirw", Fuzziness.ONE).size(10))
-        ).get();
-        assertSuggestions(searchResponse, false, "foo", "Nirvana");
+        assertResponse(
+            prepareSearch(INDEX).suggest(
+                new SuggestBuilder().addSuggestion(
+                    "foo",
+                    SuggestBuilders.completionSuggestion(FIELD).prefix("Nirw", Fuzziness.ONE).size(10)
+                )
+            ),
+            response -> assertSuggestions(response, false, "foo", "Nirvana")
+        );
     }
 
     public void testThatFuzzySuggesterSupportsEditDistances() throws Exception {
         createIndexAndMapping(completionMappingBuilder);
 
-        client().prepareIndex(INDEX)
-            .setId("1")
+        prepareIndex(INDEX).setId("1")
             .setSource(
                 jsonBuilder().startObject().startObject(FIELD).startArray("input").value("Nirvana").endArray().endObject().endObject()
             )
@@ -745,23 +742,32 @@ public void testThatFuzzySuggesterSupportsEditDistances() throws Exception {
         refresh();
 
         // edit distance 1
-        SearchResponse searchResponse = prepareSearch(INDEX).suggest(
-            new SuggestBuilder().addSuggestion("foo", SuggestBuilders.completionSuggestion(FIELD).prefix("Norw", Fuzziness.ONE).size(10))
-        ).get();
-        assertSuggestions(searchResponse, false, "foo");
+        assertResponse(
+            prepareSearch(INDEX).suggest(
+                new SuggestBuilder().addSuggestion(
+                    "foo",
+                    SuggestBuilders.completionSuggestion(FIELD).prefix("Norw", Fuzziness.ONE).size(10)
+                )
+            ),
+            response -> assertSuggestions(response, false, "foo")
+        );
 
         // edit distance 2
-        searchResponse = prepareSearch(INDEX).suggest(
-            new SuggestBuilder().addSuggestion("foo", SuggestBuilders.completionSuggestion(FIELD).prefix("Norw", Fuzziness.TWO).size(10))
-        ).get();
-        assertSuggestions(searchResponse, false, "foo", "Nirvana");
+        assertResponse(
+            prepareSearch(INDEX).suggest(
+                new SuggestBuilder().addSuggestion(
+                    "foo",
+                    SuggestBuilders.completionSuggestion(FIELD).prefix("Norw", Fuzziness.TWO).size(10)
+                )
+            ),
+            response -> assertSuggestions(response, false, "foo", "Nirvana")
+        );
     }
 
     public void testThatFuzzySuggesterSupportsTranspositions() throws Exception {
         createIndexAndMapping(completionMappingBuilder);
 
-        client().prepareIndex(INDEX)
-            .setId("1")
+        prepareIndex(INDEX).setId("1")
             .setSource(
                 jsonBuilder().startObject().startObject(FIELD).startArray("input").value("Nirvana").endArray().endObject().endObject()
             )
@@ -769,25 +775,33 @@ public void testThatFuzzySuggesterSupportsTranspositions() throws Exception {
 
         refresh();
 
-        SearchResponse searchResponse = prepareSearch(INDEX).suggest(
-            new SuggestBuilder().addSuggestion(
-                "foo",
-                SuggestBuilders.completionSuggestion(FIELD).prefix("Nriv", FuzzyOptions.builder().setTranspositions(false).build()).size(10)
-            )
-        ).get();
-        assertSuggestions(searchResponse, false, "foo");
+        assertResponse(
+            prepareSearch(INDEX).suggest(
+                new SuggestBuilder().addSuggestion(
+                    "foo",
+                    SuggestBuilders.completionSuggestion(FIELD)
+                        .prefix("Nriv", FuzzyOptions.builder().setTranspositions(false).build())
+                        .size(10)
+                )
+            ),
+            response -> assertSuggestions(response, false, "foo")
+        );
 
-        searchResponse = prepareSearch(INDEX).suggest(
-            new SuggestBuilder().addSuggestion("foo", SuggestBuilders.completionSuggestion(FIELD).prefix("Nriv", Fuzziness.ONE).size(10))
-        ).get();
-        assertSuggestions(searchResponse, false, "foo", "Nirvana");
+        assertResponse(
+            prepareSearch(INDEX).suggest(
+                new SuggestBuilder().addSuggestion(
+                    "foo",
+                    SuggestBuilders.completionSuggestion(FIELD).prefix("Nriv", Fuzziness.ONE).size(10)
+                )
+            ),
+            response -> assertSuggestions(response, false, "foo", "Nirvana")
+        );
     }
 
     public void testThatFuzzySuggesterSupportsMinPrefixLength() throws Exception {
         createIndexAndMapping(completionMappingBuilder);
 
-        client().prepareIndex(INDEX)
-            .setId("1")
+        prepareIndex(INDEX).setId("1")
             .setSource(
                 jsonBuilder().startObject().startObject(FIELD).startArray("input").value("Nirvana").endArray().endObject().endObject()
             )
@@ -795,28 +809,35 @@ public void testThatFuzzySuggesterSupportsMinPrefixLength() throws Exception {
 
         refresh();
 
-        SearchResponse searchResponse = prepareSearch(INDEX).suggest(
-            new SuggestBuilder().addSuggestion(
-                "foo",
-                SuggestBuilders.completionSuggestion(FIELD).prefix("Nriva", FuzzyOptions.builder().setFuzzyMinLength(6).build()).size(10)
-            )
-        ).get();
-        assertSuggestions(searchResponse, false, "foo");
+        assertResponse(
+            prepareSearch(INDEX).suggest(
+                new SuggestBuilder().addSuggestion(
+                    "foo",
+                    SuggestBuilders.completionSuggestion(FIELD)
+                        .prefix("Nriva", FuzzyOptions.builder().setFuzzyMinLength(6).build())
+                        .size(10)
+                )
+            ),
+            response -> assertSuggestions(response, false, "foo")
+        );
 
-        searchResponse = prepareSearch(INDEX).suggest(
-            new SuggestBuilder().addSuggestion(
-                "foo",
-                SuggestBuilders.completionSuggestion(FIELD).prefix("Nrivan", FuzzyOptions.builder().setFuzzyMinLength(6).build()).size(10)
-            )
-        ).get();
-        assertSuggestions(searchResponse, false, "foo", "Nirvana");
+        assertResponse(
+            prepareSearch(INDEX).suggest(
+                new SuggestBuilder().addSuggestion(
+                    "foo",
+                    SuggestBuilders.completionSuggestion(FIELD)
+                        .prefix("Nrivan", FuzzyOptions.builder().setFuzzyMinLength(6).build())
+                        .size(10)
+                )
+            ),
+            response -> assertSuggestions(response, false, "foo", "Nirvana")
+        );
     }
 
     public void testThatFuzzySuggesterSupportsNonPrefixLength() throws Exception {
         createIndexAndMapping(completionMappingBuilder);
 
-        client().prepareIndex(INDEX)
-            .setId("1")
+        prepareIndex(INDEX).setId("1")
             .setSource(
                 jsonBuilder().startObject().startObject(FIELD).startArray("input").value("Nirvana").endArray().endObject().endObject()
             )
@@ -824,28 +845,35 @@ public void testThatFuzzySuggesterSupportsNonPrefixLength() throws Exception {
 
         refresh();
 
-        SearchResponse searchResponse = prepareSearch(INDEX).suggest(
-            new SuggestBuilder().addSuggestion(
-                "foo",
-                SuggestBuilders.completionSuggestion(FIELD).prefix("Nirw", FuzzyOptions.builder().setFuzzyPrefixLength(4).build()).size(10)
-            )
-        ).get();
-        assertSuggestions(searchResponse, false, "foo");
+        assertResponse(
+            prepareSearch(INDEX).suggest(
+                new SuggestBuilder().addSuggestion(
+                    "foo",
+                    SuggestBuilders.completionSuggestion(FIELD)
+                        .prefix("Nirw", FuzzyOptions.builder().setFuzzyPrefixLength(4).build())
+                        .size(10)
+                )
+            ),
+            response -> assertSuggestions(response, false, "foo")
+        );
 
-        searchResponse = prepareSearch(INDEX).suggest(
-            new SuggestBuilder().addSuggestion(
-                "foo",
-                SuggestBuilders.completionSuggestion(FIELD).prefix("Nirvo", FuzzyOptions.builder().setFuzzyPrefixLength(4).build()).size(10)
-            )
-        ).get();
-        assertSuggestions(searchResponse, false, "foo", "Nirvana");
+        assertResponse(
+            prepareSearch(INDEX).suggest(
+                new SuggestBuilder().addSuggestion(
+                    "foo",
+                    SuggestBuilders.completionSuggestion(FIELD)
+                        .prefix("Nirvo", FuzzyOptions.builder().setFuzzyPrefixLength(4).build())
+                        .size(10)
+                )
+            ),
+            response -> assertSuggestions(response, false, "foo", "Nirvana")
+        );
     }
 
     public void testThatFuzzySuggesterIsUnicodeAware() throws Exception {
         createIndexAndMapping(completionMappingBuilder);
 
-        client().prepareIndex(INDEX)
-            .setId("1")
+        prepareIndex(INDEX).setId("1")
             .setSource(jsonBuilder().startObject().startObject(FIELD).startArray("input").value("ööööö").endArray().endObject().endObject())
             .get();
 
@@ -857,23 +885,28 @@ public void testThatFuzzySuggesterIsUnicodeAware() throws Exception {
             .prefix("öööи", FuzzyOptions.builder().setUnicodeAware(true).build())
             .size(10);
 
-        SearchResponse searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", completionSuggestionBuilder))
-            .get();
-        assertSuggestions(searchResponse, false, "foo", "ööööö");
+        assertResponse(
+            prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", completionSuggestionBuilder)),
+            response -> assertSuggestions(response, false, "foo", "ööööö")
+        );
 
         // removing unicode awareness leads to no result
         completionSuggestionBuilder = SuggestBuilders.completionSuggestion(FIELD)
             .prefix("öööи", FuzzyOptions.builder().setUnicodeAware(false).build())
            .size(10);
-        searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", completionSuggestionBuilder)).get();
-        assertSuggestions(searchResponse, false, "foo");
+        assertResponse(
+            prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", completionSuggestionBuilder)),
+            response -> assertSuggestions(response, false, "foo")
+        );
 
         // increasing edit distance instead of unicode awareness works again, as this is only a single character
         completionSuggestionBuilder = SuggestBuilders.completionSuggestion(FIELD)
             .prefix("öööи", FuzzyOptions.builder().setUnicodeAware(false).setFuzziness(Fuzziness.TWO).build())
             .size(10);
-        searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", completionSuggestionBuilder)).get();
-        assertSuggestions(searchResponse, false, "foo", "ööööö");
+        assertResponse(
+            prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", completionSuggestionBuilder)),
+            response -> assertSuggestions(response, false, "foo", "ööööö")
+        );
     }
 
     public void testThatStatsAreWorking() throws Exception {
@@ -901,12 +934,10 @@ public void testThatStatsAreWorking() throws Exception {
         assertThat(putMappingResponse.isAcknowledged(), is(true));
 
         // Index two entities
-        client().prepareIndex(INDEX)
-            .setId("1")
+        prepareIndex(INDEX).setId("1")
             .setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").field(otherField, "WHATEVER").endObject())
             .get();
-        client().prepareIndex(INDEX)
-            .setId("2")
+        prepareIndex(INDEX).setId("2")
             .setSource(jsonBuilder().startObject().field(FIELD, "Bar Fighters").field(otherField, "WHATEVER2").endObject())
             .get();
 
@@ -954,8 +985,7 @@ public void testThatStatsAreWorking() throws Exception {
     public void testThatSortingOnCompletionFieldReturnsUsefulException() throws Exception {
         createIndexAndMapping(completionMappingBuilder);
 
-        client().prepareIndex(INDEX)
-            .setId("1")
+        prepareIndex(INDEX).setId("1")
             .setSource(
                 jsonBuilder().startObject().startObject(FIELD).startArray("input").value("Nirvana").endArray().endObject().endObject()
             )
@@ -984,8 +1014,7 @@ public void testThatSuggestStopFilterWorks() throws Exception {
         builder.indexAnalyzer("simple");
         createIndexAndMappingAndSettings(settingsBuilder.build(), builder);
 
-        client().prepareIndex(INDEX)
-            .setId("1")
+        prepareIndex(INDEX).setId("1")
             .setSource(
                 jsonBuilder().startObject()
                     .startObject(FIELD)
@@ -999,8 +1028,7 @@ public void testThatSuggestStopFilterWorks() throws Exception {
             .get();
 
         // Higher weight so it's ranked first:
-        client().prepareIndex(INDEX)
-            .setId("2")
+        prepareIndex(INDEX).setId("2")
             .setSource(
                 jsonBuilder().startObject()
                     .startObject(FIELD)
@@ -1032,8 +1060,7 @@ public void testThatIndexingInvalidFieldsInCompletionFieldResultsInException() t
         createIndexAndMapping(builder);
 
         try {
-            client().prepareIndex(INDEX)
-                .setId("1")
+            prepareIndex(INDEX).setId("1")
                .setSource(
                    jsonBuilder().startObject()
                        .startObject(FIELD)
@@ -1065,15 +1092,14 @@ public void testSkipDuplicates() throws Exception {
             weights[id] = Math.max(weight, weights[id]);
             String suggestion = "suggestion-" + String.format(Locale.ENGLISH, "%03d", id);
             indexRequestBuilders.add(
-                client().prepareIndex(INDEX)
-                    .setSource(
-                        jsonBuilder().startObject()
-                            .startObject(FIELD)
-                            .field("input", suggestion)
-                            .field("weight", weight)
-                            .endObject()
-                            .endObject()
-                    )
+                prepareIndex(INDEX).setSource(
+                    jsonBuilder().startObject()
+                        .startObject(FIELD)
+                        .field("input", suggestion)
+                        .field("weight", weight)
+                        .endObject()
+                        .endObject()
+                )
             );
         }
         indexRandom(true, indexRequestBuilders);
@@ -1088,16 +1114,17 @@
             .skipDuplicates(true)
             .size(numUnique);
 
-        SearchResponse searchResponse = prepareSearch(INDEX).suggest(
-            new SuggestBuilder().addSuggestion("suggestions", completionSuggestionBuilder)
-        ).get();
-        assertSuggestions(searchResponse, true, "suggestions", expected);
+        assertResponse(
+            prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("suggestions", completionSuggestionBuilder)),
+            response -> assertSuggestions(response, true, "suggestions", expected)
+        );
     }
 
     public void assertSuggestions(String suggestionName, SuggestionBuilder<?> suggestBuilder, String... suggestions) {
-        SearchResponse searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion(suggestionName, suggestBuilder))
-            .get();
-        assertSuggestions(searchResponse, suggestionName, suggestions);
+        assertResponse(
+            prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion(suggestionName, suggestBuilder)),
+            response -> assertSuggestions(response, suggestionName, suggestions)
+        );
     }
 
     public void assertSuggestions(String suggestion, String... suggestions) {
@@ -1108,11 +1135,12 @@ public void assertSuggestions(String suggestion, String... suggestions) {
 
     public void assertSuggestionsNotInOrder(String suggestString, String... suggestions) {
         String suggestionName = RandomStrings.randomAsciiLettersOfLength(random(), 10);
-        SearchResponse searchResponse = prepareSearch(INDEX).suggest(
-            new SuggestBuilder().addSuggestion(suggestionName, SuggestBuilders.completionSuggestion(FIELD).text(suggestString).size(10))
-        ).get();
-
-        assertSuggestions(searchResponse, false, suggestionName, suggestions);
+        assertResponse(
+            prepareSearch(INDEX).suggest(
+                new SuggestBuilder().addSuggestion(suggestionName, SuggestBuilders.completionSuggestion(FIELD).text(suggestString).size(10))
+            ),
+            response -> assertSuggestions(response, false, suggestionName, suggestions)
+        );
     }
 
     static void assertSuggestions(SearchResponse searchResponse, String name, String... suggestions) {
@@ -1233,19 +1261,18 @@ private void createIndexAndMapping(CompletionMappingBuilder builder) throws IOEx
     public void testPrunedSegments() throws IOException {
         createIndexAndMappingAndSettings(indexSettings(1, 0).build(), completionMappingBuilder);
 
-        client().prepareIndex(INDEX)
-            .setId("1")
+        prepareIndex(INDEX).setId("1")
             .setSource(
                 jsonBuilder().startObject().startObject(FIELD).startArray("input").value("The Beatles").endArray().endObject().endObject()
             )
             .get();
         // we have 2 docs in a segment...
- client().prepareIndex(INDEX).setId("2").setSource(jsonBuilder().startObject().field("somefield", "somevalue").endObject()).get(); + prepareIndex(INDEX).setId("2").setSource(jsonBuilder().startObject().field("somefield", "somevalue").endObject()).get(); ForceMergeResponse actionGet = indicesAdmin().prepareForceMerge().setFlush(true).setMaxNumSegments(1).get(); assertAllSuccessful(actionGet); refresh(); // update the first one and then merge.. the target segment will have no value in FIELD - client().prepareIndex(INDEX).setId("1").setSource(jsonBuilder().startObject().field("somefield", "somevalue").endObject()).get(); + prepareIndex(INDEX).setId("1").setSource(jsonBuilder().startObject().field("somefield", "somevalue").endObject()).get(); actionGet = indicesAdmin().prepareForceMerge().setFlush(true).setMaxNumSegments(1).get(); assertAllSuccessful(actionGet); refresh(); @@ -1278,8 +1305,7 @@ public void testVeryLongInput() throws IOException { ); // can cause stack overflow without the default max_input_length String longString = replaceReservedChars(randomRealisticUnicodeOfLength(randomIntBetween(5000, 10000)), (char) 0x01); - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject().startObject(FIELD).startArray("input").value(longString).endArray().endObject().endObject() ) @@ -1308,8 +1334,7 @@ public void testReservedChars() throws IOException { String string = "foo" + (char) 0x00 + "bar"; Exception e = expectThrows( DocumentParsingException.class, - () -> client().prepareIndex(INDEX) - .setId("1") + () -> prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -1342,8 +1367,7 @@ public void testIssue5930() throws IOException { ) ); String string = "foo bar"; - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") .setSource(jsonBuilder().startObject().field(FIELD, string).endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -1364,8 +1388,7 @@ public void testMultiDocSuggestions() throws Exception { List indexRequestBuilders = new ArrayList<>(); for (int i = 1; i <= numDocs; i++) { indexRequestBuilders.add( - client().prepareIndex(INDEX) - .setId("" + i) + prepareIndex(INDEX).setId("" + i) .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -1399,9 +1422,9 @@ public void testSuggestWithFieldAlias() throws Exception { assertAcked(prepareCreate(INDEX).setMapping(mapping)); List builders = new ArrayList<>(); - builders.add(client().prepareIndex(INDEX).setSource(FIELD, "apple")); - builders.add(client().prepareIndex(INDEX).setSource(FIELD, "mango")); - builders.add(client().prepareIndex(INDEX).setSource(FIELD, "papaya")); + builders.add(prepareIndex(INDEX).setSource(FIELD, "apple")); + builders.add(prepareIndex(INDEX).setSource(FIELD, "mango")); + builders.add(prepareIndex(INDEX).setSource(FIELD, "papaya")); indexRandom(true, false, builders); CompletionSuggestionBuilder suggestionBuilder = SuggestBuilders.completionSuggestion("alias").text("app"); @@ -1415,8 +1438,7 @@ public void testSuggestOnlyExplain() throws Exception { List indexRequestBuilders = new ArrayList<>(); for (int i = 1; i <= numDocs; i++) { indexRequestBuilders.add( - client().prepareIndex(INDEX) - .setId("" + i) + prepareIndex(INDEX).setId("" + i) .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -1429,10 +1451,10 @@ public void testSuggestOnlyExplain() throws Exception { } indexRandom(true, indexRequestBuilders); CompletionSuggestionBuilder prefix = 
SuggestBuilders.completionSuggestion(FIELD).prefix("sugg"); - SearchResponse searchResponse = prepareSearch(INDEX).setExplain(true) - .suggest(new SuggestBuilder().addSuggestion("foo", prefix)) - .get(); - assertSuggestions(searchResponse, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6"); + assertResponse( + prepareSearch(INDEX).setExplain(true).suggest(new SuggestBuilder().addSuggestion("foo", prefix)), + response -> assertSuggestions(response, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6") + ); } public void testCompletionWithCollapse() throws Exception { @@ -1459,26 +1481,29 @@ public void testCompletionWithCollapse() throws Exception { XContentBuilder builder = jsonBuilder().startObject(); builder.startObject(suggestField).field("input", "suggestion" + i).field("weight", i).endObject(); builder.field("collapse_field", "collapse me").endObject(); // all docs the same value for collapsing - client().prepareIndex(index).setId("" + i).setSource(builder).get(); + prepareIndex(index).setId("" + i).setSource(builder).get(); } indicesAdmin().prepareRefresh(index).get(); CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(suggestField).prefix("sug").size(1); - SearchResponse searchResponse = prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) - .setFrom(1) - .setSize(1) - .setCollapse(new CollapseBuilder("collapse_field")) - .suggest(new SuggestBuilder().addSuggestion("the_suggestion", prefix)) - .get(); - assertAllSuccessful(searchResponse); - - assertThat(searchResponse.getSuggest().getSuggestion("the_suggestion"), is(notNullValue())); - Suggest.Suggestion> suggestion = searchResponse.getSuggest() - .getSuggestion("the_suggestion"); - - List suggestionList = getNames(suggestion.getEntries().get(0)); - assertThat(suggestionList, contains("suggestion" + (numDocs - 1))); - assertEquals(0, searchResponse.getHits().getHits().length); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) + .setFrom(1) + .setSize(1) + .setCollapse(new CollapseBuilder("collapse_field")) + .suggest(new SuggestBuilder().addSuggestion("the_suggestion", prefix)), + response -> { + assertAllSuccessful(response); + + assertThat(response.getSuggest().getSuggestion("the_suggestion"), is(notNullValue())); + Suggest.Suggestion> suggestion = response.getSuggest() + .getSuggestion("the_suggestion"); + + List suggestionList = getNames(suggestion.getEntries().get(0)); + assertThat(suggestionList, contains("suggestion" + (numDocs - 1))); + assertEquals(0, response.getHits().getHits().length); + } + ); } public static boolean isReservedChar(char c) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java index a526781bcc3db..22d48c2f282d5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java @@ -12,7 +12,6 @@ import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.unit.Fuzziness; @@ -41,6 +40,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.core.IsEqual.equalTo; @@ -77,7 +77,7 @@ public void testContextPrefix() throws Exception { source.field("type", "type" + i % 3); } source.endObject(); - indexRequestBuilders.add(client().prepareIndex(INDEX).setId("" + i).setSource(source)); + indexRequestBuilders.add(prepareIndex(INDEX).setId("" + i).setSource(source)); } indexRandom(true, indexRequestBuilders); CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD) @@ -113,7 +113,7 @@ public void testContextRegex() throws Exception { source.field("type", "type" + i % 3); } source.endObject(); - indexRequestBuilders.add(client().prepareIndex(INDEX).setId("" + i).setSource(source)); + indexRequestBuilders.add(prepareIndex(INDEX).setId("" + i).setSource(source)); } indexRandom(true, indexRequestBuilders); CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD) @@ -149,7 +149,7 @@ public void testContextFuzzy() throws Exception { source.field("type", "type" + i % 3); } source.endObject(); - indexRequestBuilders.add(client().prepareIndex(INDEX).setId("" + i).setSource(source)); + indexRequestBuilders.add(prepareIndex(INDEX).setId("" + i).setSource(source)); } indexRandom(true, indexRequestBuilders); CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD) @@ -168,8 +168,7 @@ public void testContextFilteringWorksWithUTF8Categories() throws Exception { LinkedHashMap> map = new LinkedHashMap<>(Collections.singletonMap("cat", contextMapping)); final CompletionMappingBuilder mapping = new CompletionMappingBuilder().context(map); createIndexAndMapping(mapping); - DocWriteResponse indexResponse = client().prepareIndex(INDEX) - .setId("1") + DocWriteResponse indexResponse = prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -198,8 +197,7 @@ public void testSingleContextFiltering() throws Exception { List indexRequestBuilders = new ArrayList<>(); for (int i = 0; i < numDocs; i++) { indexRequestBuilders.add( - client().prepareIndex(INDEX) - .setId("" + i) + prepareIndex(INDEX).setId("" + i) .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -230,8 +228,7 @@ public void testSingleContextBoosting() throws Exception { List indexRequestBuilders = new ArrayList<>(); for (int i = 0; i < numDocs; i++) { indexRequestBuilders.add( - client().prepareIndex(INDEX) - .setId("" + i) + prepareIndex(INDEX).setId("" + i) .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -275,7 +272,7 @@ public void testMultiContextFiltering() throws Exception { .field("cat", "cat" + i % 2) .field("type", "type" + i % 4) .endObject(); - indexRequestBuilders.add(client().prepareIndex(INDEX).setId("" + i).setSource(source)); + indexRequestBuilders.add(prepareIndex(INDEX).setId("" + i).setSource(source)); } indexRandom(true, indexRequestBuilders); @@ -317,7 +314,7 @@ public void testMultiContextBoosting() throws Exception { .field("cat", "cat" + i % 2) .field("type", "type" + i % 4) .endObject(); - indexRequestBuilders.add(client().prepareIndex(INDEX).setId("" + i).setSource(source)); + indexRequestBuilders.add(prepareIndex(INDEX).setId("" + 
i).setSource(source)); } indexRandom(true, indexRequestBuilders); @@ -390,7 +387,7 @@ public void testSeveralContexts() throws Exception { source.field("type" + c, "type" + c + i % 4); } source.endObject(); - indexRequestBuilders.add(client().prepareIndex(INDEX).setId("" + i).setSource(source)); + indexRequestBuilders.add(prepareIndex(INDEX).setId("" + i).setSource(source)); } indexRandom(true, indexRequestBuilders); @@ -423,7 +420,7 @@ public void testGeoFiltering() throws Exception { .endObject() .endObject() .endObject(); - indexRequestBuilders.add(client().prepareIndex(INDEX).setId("" + i).setSource(source)); + indexRequestBuilders.add(prepareIndex(INDEX).setId("" + i).setSource(source)); } indexRandom(true, indexRequestBuilders); @@ -457,7 +454,7 @@ public void testGeoBoosting() throws Exception { .endObject() .endObject() .endObject(); - indexRequestBuilders.add(client().prepareIndex(INDEX).setId("" + i).setSource(source)); + indexRequestBuilders.add(prepareIndex(INDEX).setId("" + i).setSource(source)); } indexRandom(true, indexRequestBuilders); @@ -490,7 +487,7 @@ public void testGeoPointContext() throws Exception { .endObject() .endObject() .endObject(); - indexRequestBuilders.add(client().prepareIndex(INDEX).setId("" + i).setSource(source)); + indexRequestBuilders.add(prepareIndex(INDEX).setId("" + i).setSource(source)); } indexRandom(true, indexRequestBuilders); CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD) @@ -532,7 +529,7 @@ public void testGeoNeighbours() throws Exception { .endObject() .endObject() .endObject(); - indexRequestBuilders.add(client().prepareIndex(INDEX).setId("" + i).setSource(source)); + indexRequestBuilders.add(prepareIndex(INDEX).setId("" + i).setSource(source)); } indexRandom(true, indexRequestBuilders); @@ -595,7 +592,7 @@ public void testGeoField() throws Exception { .array("input", "Hotel Amsterdam in Berlin") .endObject() .endObject(); - client().prepareIndex(INDEX).setId("1").setSource(source1).get(); + prepareIndex(INDEX).setId("1").setSource(source1).get(); XContentBuilder source2 = jsonBuilder().startObject() .startObject("location") @@ -605,7 +602,7 @@ public void testGeoField() throws Exception { .array("input", "Hotel Berlin in Amsterdam") .endObject() .endObject(); - client().prepareIndex(INDEX).setId("2").setSource(source2).get(); + prepareIndex(INDEX).setId("2").setSource(source2).get(); refresh(); @@ -619,13 +616,13 @@ public void testGeoField() throws Exception { Collections.singletonList(GeoQueryContext.builder().setGeoPoint(new GeoPoint(52.52, 13.4)).build()) ) ); - SearchResponse searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion(suggestionName, context)).get(); - - assertEquals(searchResponse.getSuggest().size(), 1); - assertEquals( - "Hotel Amsterdam in Berlin", - searchResponse.getSuggest().getSuggestion(suggestionName).iterator().next().getOptions().iterator().next().getText().string() - ); + assertResponse(prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion(suggestionName, context)), response -> { + assertEquals(response.getSuggest().size(), 1); + assertEquals( + "Hotel Amsterdam in Berlin", + response.getSuggest().getSuggestion(suggestionName).iterator().next().getOptions().iterator().next().getText().string() + ); + }); } public void testSkipDuplicatesWithContexts() throws Exception { @@ -647,7 +644,7 @@ public void testSkipDuplicatesWithContexts() throws Exception { .field("cat", "cat" + id % 2) .field("type", "type" + id) .endObject(); - 
indexRequestBuilders.add(client().prepareIndex(INDEX).setId("" + i).setSource(source)); + indexRequestBuilders.add(prepareIndex(INDEX).setId("" + i).setSource(source)); } String[] expected = new String[numUnique]; for (int i = 0; i < numUnique; i++) { @@ -669,9 +666,10 @@ } public void assertSuggestions(String suggestionName, SuggestionBuilder<?> suggestBuilder, String... suggestions) { - SearchResponse searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion(suggestionName, suggestBuilder)) - .get(); - CompletionSuggestSearchIT.assertSuggestions(searchResponse, suggestionName, suggestions); + assertResponse( + prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion(suggestionName, suggestBuilder)), + response -> CompletionSuggestSearchIT.assertSuggestions(response, suggestionName, suggestions) + ); } private void createIndexAndMapping(CompletionMappingBuilder completionMappingBuilder) throws IOException { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/SuggestSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/SuggestSearchIT.java index 95eb0f055b830..bade23e193e75 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/SuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/SuggestSearchIT.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; @@ -53,7 +52,9 @@ import static org.elasticsearch.search.suggest.SuggestBuilders.termSuggestion; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSuggestion; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSuggestionPhraseCollateMatchExists; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSuggestionSize; @@ -245,8 +246,10 @@ public void testSizeOneShard() throws Exception { } refresh(); - SearchResponse search = prepareSearch().setQuery(matchQuery("text", "spellchecker")).get(); - assertThat("didn't ask for suggestions but got some", search.getSuggest(), nullValue()); + assertResponse( + prepareSearch().setQuery(matchQuery("text", "spellchecker")), + response -> assertThat("didn't ask for suggestions but got some", response.getSuggest(), nullValue()) + ); TermSuggestionBuilder termSuggestion = termSuggestion("text").suggestMode(SuggestMode.ALWAYS) // Always, otherwise the results can // vary between requests.
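The recurring change in these suggest-test hunks is one pattern: rather than assigning the SearchResponse returned by get() to a local (which leaks the response whenever a later assertion throws), the assertions are handed to assertResponse as a lambda, and the helper releases the response on both the passing and failing paths. A minimal sketch of that shape — hypothetical names, not the actual ElasticsearchAssertions implementation, which operates on SearchRequestBuilder and Elasticsearch's ref-counted SearchResponse:

import java.util.function.Consumer;
import java.util.function.Supplier;

final class AssertResponseSketch {
    // Stand-in for a ref-counted response that must be released exactly once.
    interface Releasable {
        void release();
    }

    static <R extends Releasable> void assertResponse(Supplier<R> request, Consumer<R> assertions) {
        R response = request.get();      // execute the request and obtain the response
        try {
            assertions.accept(response); // run the test's assertions against it
        } finally {
            response.release();          // release whether the assertions passed or threw
        }
    }
}

Where a test still needs a value from the response after the lambda returns — as searchSuggest does further down — the patch captures it through a one-element array, since local variables referenced from a lambda must be effectively final.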
@@ -292,9 +295,9 @@ public void testUnmappedField() throws IOException, InterruptedException, Execut indexRandom( true, - client().prepareIndex("test").setSource("name", "I like iced tea"), - client().prepareIndex("test").setSource("name", "I like tea."), - client().prepareIndex("test").setSource("name", "I like ice cream.") + prepareIndex("test").setSource("name", "I like iced tea"), + prepareIndex("test").setSource("name", "I like tea."), + prepareIndex("test").setSource("name", "I like ice cream.") ); refresh(); @@ -329,8 +332,10 @@ public void testSimple() throws Exception { indexDoc("test", "4", "text", "abcc"); refresh(); - SearchResponse search = prepareSearch().setQuery(matchQuery("text", "spellcecker")).get(); - assertThat("didn't ask for suggestions but got some", search.getSuggest(), nullValue()); + assertResponse( + prepareSearch().setQuery(matchQuery("text", "spellcecker")), + response -> assertThat("didn't ask for suggestions but got some", response.getSuggest(), nullValue()) + ); TermSuggestionBuilder termSuggest = termSuggestion("text").suggestMode(SuggestMode.ALWAYS) // Always, otherwise the results can vary // between requests. @@ -776,9 +781,9 @@ public void testDifferentShardSize() throws Exception { ensureGreen(); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("field1", "foobar1").setRouting("1"), - client().prepareIndex("test").setId("2").setSource("field1", "foobar2").setRouting("2"), - client().prepareIndex("test").setId("3").setSource("field1", "foobar3").setRouting("3") + prepareIndex("test").setId("1").setSource("field1", "foobar1").setRouting("1"), + prepareIndex("test").setId("2").setSource("field1", "foobar2").setRouting("2"), + prepareIndex("test").setId("3").setSource("field1", "foobar3").setRouting("3") ); Suggest suggest = searchSuggest( @@ -836,14 +841,14 @@ public void testShardFailures() throws IOException, InterruptedException { assertRequestBuilderThrows(request, SearchPhaseExecutionException.class); // When searching on a shard which does not hold yet any document of an existing type, we should not fail - SearchResponse searchResponse = prepareSearch().setSize(0) - .suggest( - new SuggestBuilder().setGlobalText("tetsting sugestion") - .addSuggestion("did_you_mean", phraseSuggestion("name").maxErrors(5.0f)) - ) - .get(); - ElasticsearchAssertions.assertNoFailures(searchResponse); - ElasticsearchAssertions.assertSuggestion(searchResponse.getSuggest(), 0, 0, "did_you_mean", "testing suggestions"); + assertNoFailuresAndResponse( + prepareSearch().setSize(0) + .suggest( + new SuggestBuilder().setGlobalText("tetsting sugestion") + .addSuggestion("did_you_mean", phraseSuggestion("name").maxErrors(5.0f)) + ), + response -> assertSuggestion(response.getSuggest(), 0, 0, "did_you_mean", "testing suggestions") + ); } // see #3469 @@ -876,17 +881,19 @@ public void testEmptyShards() throws IOException, InterruptedException { ensureGreen(); // test phrase suggestion on completely empty index - SearchResponse searchResponse = prepareSearch().setSize(0) - .suggest( - new SuggestBuilder().setGlobalText("tetsting sugestion") - .addSuggestion("did_you_mean", phraseSuggestion("name").maxErrors(5.0f)) - ) - .get(); - - assertNoFailures(searchResponse); - Suggest suggest = searchResponse.getSuggest(); - assertSuggestionSize(suggest, 0, 0, "did_you_mean"); - assertThat(suggest.getSuggestion("did_you_mean").getEntries().get(0).getText().string(), equalTo("tetsting sugestion")); + assertNoFailuresAndResponse( + prepareSearch().setSize(0) + .suggest( + 
new SuggestBuilder().setGlobalText("tetsting sugestion") + .addSuggestion("did_you_mean", phraseSuggestion("name").maxErrors(5.0f)) + ), + response -> { + assertNoFailures(response); + Suggest suggest = response.getSuggest(); + assertSuggestionSize(suggest, 0, 0, "did_you_mean"); + assertThat(suggest.getSuggestion("did_you_mean").getEntries().get(0).getText().string(), equalTo("tetsting sugestion")); + } + ); indexDoc("test", "11", "foo", "bar"); indexDoc("test", "12", "foo", "bar"); @@ -894,33 +901,34 @@ refresh(); // test phrase suggestion but nothing matches - searchResponse = prepareSearch().setSize(0) - .suggest( - new SuggestBuilder().setGlobalText("tetsting sugestion") - .addSuggestion("did_you_mean", phraseSuggestion("name").maxErrors(5.0f)) - ) - .get(); - - assertNoFailures(searchResponse); - suggest = searchResponse.getSuggest(); - assertSuggestionSize(suggest, 0, 0, "did_you_mean"); - assertThat(suggest.getSuggestion("did_you_mean").getEntries().get(0).getText().string(), equalTo("tetsting sugestion")); - + assertNoFailuresAndResponse( + prepareSearch().setSize(0) + .suggest( + new SuggestBuilder().setGlobalText("tetsting sugestion") + .addSuggestion("did_you_mean", phraseSuggestion("name").maxErrors(5.0f)) + ), + response -> { + Suggest suggest = response.getSuggest(); + assertSuggestionSize(suggest, 0, 0, "did_you_mean"); + assertThat(suggest.getSuggestion("did_you_mean").getEntries().get(0).getText().string(), equalTo("tetsting sugestion")); + } + ); // finally indexing a document that will produce some meaningful suggestion indexDoc("test", "1", "name", "Just testing the suggestions api"); refresh(); - searchResponse = prepareSearch().setSize(0) - .suggest( - new SuggestBuilder().setGlobalText("tetsting sugestion") - .addSuggestion("did_you_mean", phraseSuggestion("name").maxErrors(5.0f)) - ) - .get(); - - assertNoFailures(searchResponse); - suggest = searchResponse.getSuggest(); - assertSuggestionSize(suggest, 0, 3, "did_you_mean"); - assertSuggestion(suggest, 0, 0, "did_you_mean", "testing suggestions"); + assertNoFailuresAndResponse( + prepareSearch().setSize(0) + .suggest( + new SuggestBuilder().setGlobalText("tetsting sugestion") + .addSuggestion("did_you_mean", phraseSuggestion("name").maxErrors(5.0f)) + ), + response -> { + Suggest suggest = response.getSuggest(); + assertSuggestionSize(suggest, 0, 3, "did_you_mean"); + assertSuggestion(suggest, 0, 0, "did_you_mean", "testing suggestions"); + } + ); } /** @@ -1110,7 +1118,7 @@ public void testSuggestWithManyCandidates() throws InterruptedException, Executi List<IndexRequestBuilder> builders = new ArrayList<>(); for (String title : titles) { - builders.add(client().prepareIndex("test").setSource("title", title)); + builders.add(prepareIndex("test").setSource("title", title)); } indexRandom(true, builders); @@ -1148,9 +1156,9 @@ assertAcked(prepareCreate("test").setMapping(mapping)); List<IndexRequestBuilder> builders = new ArrayList<>(); - builders.add(client().prepareIndex("test").setSource("text", "apple")); - builders.add(client().prepareIndex("test").setSource("text", "mango")); - builders.add(client().prepareIndex("test").setSource("text", "papaya")); + builders.add(prepareIndex("test").setSource("text", "apple")); + builders.add(prepareIndex("test").setSource("text", "mango")); + builders.add(prepareIndex("test").setSource("text", "papaya")); indexRandom(true, false, builders); TermSuggestionBuilder termSuggest =
termSuggestion("alias").text("appple"); @@ -1173,10 +1181,10 @@ public void testPhraseSuggestMinDocFreq() throws Exception { assertAcked(prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).build()).setMapping(mapping)); List builders = new ArrayList<>(); - builders.add(client().prepareIndex("test").setSource("text", "apple")); - builders.add(client().prepareIndex("test").setSource("text", "apple")); - builders.add(client().prepareIndex("test").setSource("text", "apple")); - builders.add(client().prepareIndex("test").setSource("text", "appfle")); + builders.add(prepareIndex("test").setSource("text", "apple")); + builders.add(prepareIndex("test").setSource("text", "apple")); + builders.add(prepareIndex("test").setSource("text", "apple")); + builders.add(prepareIndex("test").setSource("text", "appfle")); indexRandom(true, false, builders); PhraseSuggestionBuilder phraseSuggest = phraseSuggestion("text").text("appple") @@ -1286,7 +1294,7 @@ public void testPhraseSuggesterCollate() throws InterruptedException, ExecutionE List builders = new ArrayList<>(); for (String title : titles) { - builders.add(client().prepareIndex("test").setSource("title", title)); + builders.add(prepareIndex("test").setSource("title", title)); } indexRandom(true, builders); @@ -1420,8 +1428,11 @@ protected Suggest searchSuggest(String suggestText, int expectShardsFailed, Map< suggestBuilder.addSuggestion(suggestion.getKey(), suggestion.getValue()); } builder.suggest(suggestBuilder); - SearchResponse actionGet = builder.get(); - assertThat(Arrays.toString(actionGet.getShardFailures()), actionGet.getFailedShards(), equalTo(expectShardsFailed)); - return actionGet.getSuggest(); + Suggest[] suggest = new Suggest[1]; + assertResponse(builder, response -> { + assertThat(Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(expectShardsFailed)); + suggest[0] = response.getSuggest(); + }); + return suggest[0]; } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/similarity/SimilarityIT.java b/server/src/internalClusterTest/java/org/elasticsearch/similarity/SimilarityIT.java index 5c1f925bddc49..55dca7810f845 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/similarity/SimilarityIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/similarity/SimilarityIT.java @@ -8,11 +8,11 @@ package org.elasticsearch.similarity; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.test.ESIntegTestCase; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -20,7 +20,7 @@ public class SimilarityIT extends ESIntegTestCase { public void testCustomBM25Similarity() throws Exception { try { - indicesAdmin().prepareDelete("test").execute().actionGet(); + indicesAdmin().prepareDelete("test").get(); } catch (Exception e) { // ignore } @@ -45,24 +45,21 @@ public void testCustomBM25Similarity() throws Exception { .setSettings( indexSettings(1, 0).put("similarity.custom.type", "BM25").put("similarity.custom.k1", 2.0f).put("similarity.custom.b", 0.5f) ) - .execute() - .actionGet(); + .get(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") 
.setSource("field1", "the quick brown fox jumped over the lazy dog", "field2", "the quick brown fox jumped over the lazy dog") .setRefreshPolicy(IMMEDIATE) - .execute() - .actionGet(); + .get(); - SearchResponse bm25SearchResponse = prepareSearch().setQuery(matchQuery("field1", "quick brown fox")).execute().actionGet(); - assertThat(bm25SearchResponse.getHits().getTotalHits().value, equalTo(1L)); - float bm25Score = bm25SearchResponse.getHits().getHits()[0].getScore(); - - SearchResponse booleanSearchResponse = prepareSearch().setQuery(matchQuery("field2", "quick brown fox")).execute().actionGet(); - assertThat(booleanSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - float defaultScore = booleanSearchResponse.getHits().getHits()[0].getScore(); - - assertThat(bm25Score, not(equalTo(defaultScore))); + assertResponse(prepareSearch().setQuery(matchQuery("field1", "quick brown fox")), bm25SearchResponse -> { + assertThat(bm25SearchResponse.getHits().getTotalHits().value, equalTo(1L)); + float bm25Score = bm25SearchResponse.getHits().getHits()[0].getScore(); + assertResponse(prepareSearch().setQuery(matchQuery("field2", "quick brown fox")), booleanSearchResponse -> { + assertThat(booleanSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + float defaultScore = booleanSearchResponse.getHits().getHits()[0].getScore(); + assertThat(bm25Score, not(equalTo(defaultScore))); + }); + }); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java index ca522064e3d04..42c19a903b452 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java @@ -39,6 +39,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.RemoteTransportException; import java.io.IOException; import java.nio.file.Files; @@ -768,7 +769,18 @@ public void testQueuedOperationsAndBrokenRepoOnMasterFailOver() throws Exception ensureStableCluster(3); awaitNoMoreRunningOperations(); - expectThrows(RepositoryException.class, deleteFuture::actionGet); + var innerException = expectThrows(ExecutionException.class, RuntimeException.class, deleteFuture::get); + + // There may be many layers of RTE to unwrap here, see https://github.com/elastic/elasticsearch/issues/102351. 
+ // ExceptionsHelper#unwrapCause gives up at 10 layers of wrapping so we must unwrap more tenaciously by hand here: + while (true) { + if (innerException instanceof RemoteTransportException remoteTransportException) { + innerException = asInstanceOf(RuntimeException.class, remoteTransportException.getCause()); + } else { + assertThat(innerException, instanceOf(RepositoryException.class)); + break; + } + } } public void testQueuedSnapshotOperationsAndBrokenRepoOnMasterFailOver() throws Exception { @@ -948,7 +960,7 @@ public void testQueuedSnapshotsWaitingForShardReady() throws Exception { logger.info("--> wait for relocations to start"); assertBusy( - () -> assertThat(clusterAdmin().prepareHealth(testIndex).execute().actionGet().getRelocatingShards(), greaterThan(0)), + () -> assertThat(clusterAdmin().prepareHealth(testIndex).get().getRelocatingShards(), greaterThan(0)), 1L, TimeUnit.MINUTES ); @@ -1304,7 +1316,7 @@ public void testConcurrentOperationsLimit() throws Exception { final ConcurrentSnapshotExecutionException cse = expectThrows( ConcurrentSnapshotExecutionException.class, - () -> clusterAdmin().prepareCreateSnapshot(repoName, "expected-to-fail").execute().actionGet() + () -> clusterAdmin().prepareCreateSnapshot(repoName, "expected-to-fail").get() ); assertThat( cse.getMessage(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java index abb72286f971f..e7bc6f13383d1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -65,7 +65,7 @@ public void testRecreateCorruptedRepositoryUnblocksIt() throws Exception { createIndex("test-idx-1"); logger.info("--> indexing some data"); - indexRandom(true, client().prepareIndex("test-idx-1").setSource("foo", "bar")); + indexRandom(true, prepareIndex("test-idx-1").setSource("foo", "bar")); final String snapshot = "test-snap"; @@ -111,11 +111,7 @@ public void testConcurrentlyChangeRepositoryContents() throws Exception { createIndex("test-idx-1", "test-idx-2"); logger.info("--> indexing some data"); - indexRandom( - true, - client().prepareIndex("test-idx-1").setSource("foo", "bar"), - client().prepareIndex("test-idx-2").setSource("foo", "bar") - ); + indexRandom(true, prepareIndex("test-idx-1").setSource("foo", "bar"), prepareIndex("test-idx-2").setSource("foo", "bar")); final String snapshot = "test-snap"; @@ -186,11 +182,7 @@ public void testFindDanglingLatestGeneration() throws Exception { createIndex("test-idx-1", "test-idx-2"); logger.info("--> indexing some data"); - indexRandom( - true, - client().prepareIndex("test-idx-1").setSource("foo", "bar"), - client().prepareIndex("test-idx-2").setSource("foo", "bar") - ); + indexRandom(true, prepareIndex("test-idx-1").setSource("foo", "bar"), prepareIndex("test-idx-2").setSource("foo", "bar")); final String snapshot = "test-snap"; @@ -555,11 +547,7 @@ public void testDeleteSnapshotWithMissingIndexAndShardMetadata() throws Exceptio final String[] indices = { "test-idx-1", "test-idx-2" }; createIndex(indices); logger.info("--> indexing some data"); - indexRandom( - true, - client().prepareIndex("test-idx-1").setSource("foo", "bar"), - client().prepareIndex("test-idx-2").setSource("foo", "bar") - ); + indexRandom(true, prepareIndex("test-idx-1").setSource("foo", "bar"), 
prepareIndex("test-idx-2").setSource("foo", "bar")); logger.info("--> creating snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() @@ -608,11 +596,7 @@ public void testDeleteSnapshotWithMissingMetadata() throws Exception { createIndex("test-idx-1", "test-idx-2"); logger.info("--> indexing some data"); - indexRandom( - true, - client().prepareIndex("test-idx-1").setSource("foo", "bar"), - client().prepareIndex("test-idx-2").setSource("foo", "bar") - ); + indexRandom(true, prepareIndex("test-idx-1").setSource("foo", "bar"), prepareIndex("test-idx-2").setSource("foo", "bar")); logger.info("--> creating snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() @@ -655,11 +639,7 @@ public void testDeleteSnapshotWithCorruptedSnapshotFile() throws Exception { createIndex("test-idx-1", "test-idx-2"); logger.info("--> indexing some data"); - indexRandom( - true, - client().prepareIndex("test-idx-1").setSource("foo", "bar"), - client().prepareIndex("test-idx-2").setSource("foo", "bar") - ); + indexRandom(true, prepareIndex("test-idx-1").setSource("foo", "bar"), prepareIndex("test-idx-2").setSource("foo", "bar")); logger.info("--> creating snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() @@ -714,9 +694,9 @@ public void testDeleteSnapshotWithCorruptedGlobalState() throws Exception { createIndex("test-idx-1", "test-idx-2"); indexRandom( true, - client().prepareIndex("test-idx-1").setSource("foo", "bar"), - client().prepareIndex("test-idx-2").setSource("foo", "bar"), - client().prepareIndex("test-idx-2").setSource("foo", "bar") + prepareIndex("test-idx-1").setSource("foo", "bar"), + prepareIndex("test-idx-2").setSource("foo", "bar"), + prepareIndex("test-idx-2").setSource("foo", "bar") ); flushAndRefresh("test-idx-1", "test-idx-2"); @@ -760,11 +740,7 @@ public void testSnapshotWithMissingShardLevelIndexFile() throws Exception { createIndex("test-idx-1", "test-idx-2"); logger.info("--> indexing some data"); - indexRandom( - true, - client().prepareIndex("test-idx-1").setSource("foo", "bar"), - client().prepareIndex("test-idx-2").setSource("foo", "bar") - ); + indexRandom(true, prepareIndex("test-idx-1").setSource("foo", "bar"), prepareIndex("test-idx-2").setSource("foo", "bar")); logger.info("--> creating snapshot"); clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("test-idx-*").get(); @@ -821,14 +797,14 @@ private void assertRepositoryBlocked(String repo, String existingSnapshot) { logger.info("--> try to delete snapshot"); final RepositoryException ex = expectThrows( RepositoryException.class, - () -> clusterAdmin().prepareDeleteSnapshot(repo, existingSnapshot).execute().actionGet() + () -> clusterAdmin().prepareDeleteSnapshot(repo, existingSnapshot).get() ); assertThat(ex.getMessage(), containsString("concurrent modification of the index-N file")); logger.info("--> try to create snapshot"); final RepositoryException ex2 = expectThrows( RepositoryException.class, - () -> clusterAdmin().prepareCreateSnapshot(repo, existingSnapshot).execute().actionGet() + () -> clusterAdmin().prepareCreateSnapshot(repo, existingSnapshot).get() ); assertThat(ex2.getMessage(), containsString("The repository has been disabled to prevent data corruption")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java index 030ea42d53f13..3a72ab792f571 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java @@ -62,8 +62,7 @@ public void testShouldNotRestoreRepositoryMetadata() { .setRestoreGlobalState(true) .setIndices("-*") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); logger.info("make sure old repository wasn't restored"); assertRequestBuilderThrows(clusterAdmin().prepareGetRepositories("test-repo-1"), RepositoryMissingException.class); @@ -104,8 +103,7 @@ public void testShouldRestoreOnlySnapshotMetadata() throws Exception { .setRestoreGlobalState(true) .setIndices("-*") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); var metadata = clusterAdmin().prepareState().get().getState().getMetadata(); logger.info("check that custom persistent metadata [{}] is correctly restored", metadata); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index e188c11125c42..59fc54347d1d5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -92,6 +92,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFutureThrows; import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.empty; @@ -104,6 +105,7 @@ import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.startsWith; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCase { @@ -194,18 +196,30 @@ public void testSnapshotWithStuckNode() throws Exception { } logger.info("--> making sure that snapshot no longer exists"); - expectThrows( - SnapshotMissingException.class, - () -> clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap").execute().actionGet() - ); + expectThrows(SnapshotMissingException.class, () -> clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get()); - logger.info("--> Go through a loop of creating and deleting a snapshot to trigger repository cleanup"); + logger.info("--> trigger repository cleanup"); clusterAdmin().prepareCleanupRepository("test-repo").get(); - // Expect two files to remain in the repository: - // (1) index-(N+1) - // (2) index-latest - assertFileCount(repo, 2); + // Expect two or three files to remain in the repository: + // (1) index-latest + // (2) index-(N+1) + // (3) index-N (maybe: a fully successful deletion removes this, but cleanup does not, see #100718) + + final var blobPaths = getAllFilesInDirectoryAndDescendants(repo); + final var blobPathsString = blobPaths.toString(); + assertTrue(blobPathsString, blobPaths.remove(repo.resolve(BlobStoreRepository.INDEX_LATEST_BLOB))); + assertThat(blobPathsString, blobPaths, anyOf(hasSize(1), hasSize(2))); + final var repoGenerations = blobPaths.stream().mapToLong(blobPath -> { + final var blobName = 
repo.relativize(blobPath).toString(); + assertThat(blobPathsString, blobName, startsWith(BlobStoreRepository.INDEX_FILE_PREFIX)); + return Long.parseLong(blobName.substring(BlobStoreRepository.INDEX_FILE_PREFIX.length())); + }).toArray(); + + if (repoGenerations.length == 2) { + assertEquals(blobPathsString, 1, Math.abs(repoGenerations[0] - repoGenerations[1])); + } + logger.info("--> done"); } @@ -223,13 +237,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { logger.info("--> shutdown one of the nodes"); internalCluster().stopRandomDataNode(); assertThat( - clusterAdmin().prepareHealth() - .setWaitForEvents(Priority.LANGUID) - .setTimeout("1m") - .setWaitForNodes("<2") - .execute() - .actionGet() - .isTimedOut(), + clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForNodes("<2").get().isTimedOut(), equalTo(false) ); @@ -258,8 +266,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { () -> clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-1") .setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed") .setWaitForCompletion(true) - .execute() - .actionGet() + .get() ); assertThat(sne.getMessage(), containsString("Indices don't have primary shards")); @@ -269,8 +276,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { .setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed") .setWaitForCompletion(false) .setPartial(true) - .execute() - .actionGet(); + .get(); assertBusy(() -> { SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo") .setSnapshots("test-snap-2") @@ -304,8 +310,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { .setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed") .setWaitForCompletion(true) .setPartial(true) - .execute() - .actionGet(); + .get(); logger.info( "State: [{}], Reason: [{}]", createSnapshotResponse.getSnapshotInfo().state(), @@ -333,8 +338,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { .setRestoreGlobalState(false) .setIndices("test-idx-all") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo(), notNullValue()); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(6)); assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(6)); @@ -374,8 +378,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { .setRestoreGlobalState(false) .setIndices("test-idx-closed") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo(), notNullValue()); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(4)); assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(4)); @@ -413,13 +416,7 @@ public boolean clearData(String nodeName) { }); assertThat( - clusterAdmin().prepareHealth() - .setWaitForEvents(Priority.LANGUID) - .setTimeout("1m") - .setWaitForNodes("2") - .execute() - .actionGet() - .isTimedOut(), + clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForNodes("2").get().isTimedOut(), equalTo(false) ); @@ -704,7 +701,7 @@ public void testSnapshotTotalAndIncrementalSizes() throws Exception { int docs = between(10, 100); for (int i = 0; i < docs; i++) { - client().prepareIndex(indexName).setSource("test", "init").execute().actionGet(); + 
prepareIndex(indexName).setSource("test", "init").get(); } final Path repoPath = randomRepoPath(); @@ -737,7 +734,7 @@ public void testSnapshotTotalAndIncrementalSizes() throws Exception { // add few docs - less than initially docs = between(1, 5); for (int i = 0; i < docs; i++) { - client().prepareIndex(indexName).setSource("test", "test" + i).execute().actionGet(); + prepareIndex(indexName).setSource("test", "test" + i).get(); } // create another snapshot @@ -789,7 +786,7 @@ public void testDeduplicateIndexMetadata() throws Exception { int docs = between(10, 100); for (int i = 0; i < docs; i++) { - client().prepareIndex(indexName).setSource("test", "init").execute().actionGet(); + prepareIndex(indexName).setSource("test", "init").get(); } final Path repoPath = randomRepoPath(); @@ -801,7 +798,7 @@ public void testDeduplicateIndexMetadata() throws Exception { docs = between(1, 5); for (int i = 0; i < docs; i++) { - client().prepareIndex(indexName).setSource("test", "test" + i).execute().actionGet(); + prepareIndex(indexName).setSource("test", "test" + i).get(); } logger.info("--> restart random data node and add new data node to change index allocation"); @@ -818,7 +815,7 @@ public void testDeduplicateIndexMetadata() throws Exception { // index to some other field to trigger a change in index metadata for (int i = 0; i < docs; i++) { - client().prepareIndex(indexName).setSource("new_field", "test" + i).execute().actionGet(); + prepareIndex(indexName).setSource("new_field", "test" + i).get(); } createFullSnapshot(repositoryName, snapshot2); @@ -961,7 +958,7 @@ public void testRetentionLeasesClearedOnRestore() throws Exception { logger.debug("--> indexing {} docs into {}", snapshotDocCount, indexName); IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[snapshotDocCount]; for (int i = 0; i < snapshotDocCount; i++) { - indexRequestBuilders[i] = client().prepareIndex(indexName).setSource("field", "value"); + indexRequestBuilders[i] = prepareIndex(indexName).setSource("field", "value"); } indexRandom(true, indexRequestBuilders); assertDocCount(indexName, snapshotDocCount); @@ -986,7 +983,7 @@ public void testRetentionLeasesClearedOnRestore() throws Exception { logger.debug("--> indexing {} extra docs into {}", extraDocCount, indexName); indexRequestBuilders = new IndexRequestBuilder[extraDocCount]; for (int i = 0; i < extraDocCount; i++) { - indexRequestBuilders[i] = client().prepareIndex(indexName).setSource("field", "value"); + indexRequestBuilders[i] = prepareIndex(indexName).setSource("field", "value"); } indexRandom(true, indexRequestBuilders); } @@ -1108,7 +1105,7 @@ public void testSnapshotDeleteRelocatingPrimaryIndex() throws Exception { logger.info("--> wait for relocations to start"); assertBusy( - () -> assertThat(clusterAdmin().prepareHealth(indexName).execute().actionGet().getRelocatingShards(), greaterThan(0)), + () -> assertThat(clusterAdmin().prepareHealth(indexName).get().getRelocatingShards(), greaterThan(0)), 1L, TimeUnit.MINUTES ); @@ -1166,7 +1163,7 @@ public void testDeleteIndexDuringSnapshot() throws Exception { final int concurrentLoops = randomIntBetween(2, 5); final List<PlainActionFuture<Void>> futures = new ArrayList<>(concurrentLoops); for (int i = 0; i < concurrentLoops; i++) { - final PlainActionFuture<Void> future = PlainActionFuture.newFuture(); + final PlainActionFuture<Void> future = new PlainActionFuture<>(); futures.add(future); startSnapshotDeleteLoop(repoName, indexName, "test-snap-" + i, future); } diff --git
a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java index d171dd2c89c78..3b129455d4eef 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java @@ -56,11 +56,11 @@ public void testWhenMetadataAreLoaded() throws Exception { createIndex("docs"); indexRandom( true, - client().prepareIndex("docs").setId("1").setSource("rank", 1), - client().prepareIndex("docs").setId("2").setSource("rank", 2), - client().prepareIndex("docs").setId("3").setSource("rank", 3), - client().prepareIndex("others").setSource("rank", 4), - client().prepareIndex("others").setSource("rank", 5) + prepareIndex("docs").setId("1").setSource("rank", 1), + prepareIndex("docs").setId("2").setSource("rank", 2), + prepareIndex("docs").setId("3").setSource("rank", 3), + prepareIndex("others").setSource("rank", 4), + prepareIndex("others").setSource("rank", 5) ); createRepository("repository", CountingMockRepositoryPlugin.TYPE); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java index 0a2f00b6e0949..e6bae861e1d04 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java @@ -82,7 +82,7 @@ public Path nodeConfigPath(int nodeOrdinal) { @After public void stopSecondCluster() throws IOException { - IOUtils.close(secondCluster); + IOUtils.close(secondCluster::close); } @Override @@ -127,7 +127,7 @@ public void testConcurrentDeleteFromOtherCluster() throws InterruptedException { final SnapshotException sne = expectThrows( SnapshotException.class, - () -> clusterAdmin().prepareCreateSnapshot(repoNameOnFirstCluster, "snap-4").setWaitForCompletion(true).execute().actionGet() + () -> clusterAdmin().prepareCreateSnapshot(repoNameOnFirstCluster, "snap-4").setWaitForCompletion(true).get() ); assertThat(sne.getMessage(), containsString("failed to update snapshot in repository")); final RepositoryException cause = (RepositoryException) sne.getCause(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java index 429c5c1a136c2..0f0858982b4ad 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java @@ -61,8 +61,7 @@ public void testRepositoryThrottlingStats() throws Exception { .setRenamePattern("test-") .setRenameReplacement("test2-") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertDocCount("test-idx", 100); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java index cd34f68471156..20313767c0677 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java @@ -309,8 +309,7 @@ public void testRestoreWithDifferentMappingsAndSettings() throws Exception { logger.info("--> restore all indices from the snapshot"); RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); logger.info("--> assert that old mapping is restored"); @@ -319,7 +318,7 @@ public void testRestoreWithDifferentMappingsAndSettings() throws Exception { assertThat(mappings.sourceAsMap().toString(), not(containsString("foo"))); logger.info("--> assert that old settings are restored"); - GetSettingsResponse getSettingsResponse = indicesAdmin().prepareGetSettings("test-idx").execute().actionGet(); + GetSettingsResponse getSettingsResponse = indicesAdmin().prepareGetSettings("test-idx").get(); assertThat(getSettingsResponse.getSetting("test-idx", "index.refresh_interval"), equalTo("10s")); } @@ -352,8 +351,7 @@ public void testRestoreAliases() throws Exception { RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) .setRestoreGlobalState(true) - .execute() - .actionGet(); + .get(); // We don't restore any indices here assertThat( restoreSnapshotResponse.getRestoreInfo().successfulShards(), @@ -379,8 +377,7 @@ public void testRestoreAliases() throws Exception { .setWaitForCompletion(true) .setRestoreGlobalState(true) .setIncludeAliases(false) - .execute() - .actionGet(); + .get(); // We don't restore any indices here assertThat( restoreSnapshotResponse.getRestoreInfo().successfulShards(), @@ -431,8 +428,7 @@ public void testRestoreTemplates() throws Exception { RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) .setRestoreGlobalState(true) - .execute() - .actionGet(); + .get(); // We don't restore any indices here assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); @@ -470,8 +466,7 @@ public void testRenameOnRestore() throws Exception { .setRenamePattern("(.+)") .setRenameReplacement("$1-copy") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertDocCount("test-idx-1-copy", 100L); @@ -487,8 +482,7 @@ public void testRenameOnRestore() throws Exception { .setRenamePattern("(.+)") .setRenameReplacement("$1-copy") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertDocCount("test-idx-1-copy", 100L); @@ -504,8 +498,7 @@ public void testRenameOnRestore() throws Exception { .setRenamePattern("(.+-2)") .setRenameReplacement("$1-copy") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); logger.info("--> delete indices"); @@ -519,8 +512,7 @@ public void testRenameOnRestore() throws Exception { .setRenamePattern("(.+)") .setRenameReplacement("same-name") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); fail("Shouldn't be here"); } catch (SnapshotRestoreException ex) { // Expected @@ -534,8 
+526,7 @@ public void testRenameOnRestore() throws Exception { .setRenamePattern("test-idx-2") .setRenameReplacement("test-idx-1") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); fail("Shouldn't be here"); } catch (SnapshotRestoreException ex) { // Expected @@ -550,8 +541,7 @@ public void testRenameOnRestore() throws Exception { .setRenamePattern(".+") .setRenameReplacement("__WRONG__") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); fail("Shouldn't be here"); } catch (InvalidIndexNameException ex) { // Expected @@ -566,8 +556,7 @@ public void testRenameOnRestore() throws Exception { .setRenamePattern(".+") .setRenameReplacement("alias-3") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); fail("Shouldn't be here"); } catch (InvalidIndexNameException ex) { // Expected @@ -582,8 +571,7 @@ public void testRenameOnRestore() throws Exception { .setRenamePattern("test-idx") .setRenameReplacement("alias") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); fail("Shouldn't be here"); } catch (SnapshotRestoreException ex) { // Expected @@ -598,8 +586,7 @@ public void testRenameOnRestore() throws Exception { .setRenamePattern("test-idx-1") .setRenameReplacement("alias-2") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); fail("Shouldn't be here"); } catch (SnapshotRestoreException ex) { // Expected @@ -614,8 +601,7 @@ public void testRenameOnRestore() throws Exception { .setRenameReplacement("alias") .setWaitForCompletion(true) .setIncludeAliases(false) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); } @@ -683,7 +669,7 @@ public void testChangeSettingsOnRestore() throws Exception { final int numdocs = randomIntBetween(10, 100); IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test-idx").setId(Integer.toString(i)).setSource("field1", "Foo bar " + i); + builders[i] = prepareIndex("test-idx").setId(Integer.toString(i)).setSource("field1", "Foo bar " + i); } indexRandom(true, builders); flushAndRefresh(); @@ -740,12 +726,11 @@ public void testChangeSettingsOnRestore() throws Exception { .setIgnoreIndexSettings("index.analysis.*") .setIndexSettings(newIndexSettings) .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); logger.info("--> assert that correct settings are restored"); - GetSettingsResponse getSettingsResponse = client.admin().indices().prepareGetSettings("test-idx").execute().actionGet(); + GetSettingsResponse getSettingsResponse = client.admin().indices().prepareGetSettings("test-idx").get(); assertThat(getSettingsResponse.getSetting("test-idx", INDEX_REFRESH_INTERVAL_SETTING.getKey()), equalTo("5s")); // Make sure that number of shards didn't change assertThat(getSettingsResponse.getSetting("test-idx", SETTING_NUMBER_OF_SHARDS), equalTo("" + numberOfShards)); @@ -764,12 +749,11 @@ public void testChangeSettingsOnRestore() throws Exception { .setIgnoreIndexSettings("*") // delete everything we can delete .setIndexSettings(newIndexSettings) .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); logger.info("--> assert that correct settings are restored and index is still functional"); - getSettingsResponse = 
client.admin().indices().prepareGetSettings("test-idx").execute().actionGet(); + getSettingsResponse = client.admin().indices().prepareGetSettings("test-idx").get(); assertThat(getSettingsResponse.getSetting("test-idx", INDEX_REFRESH_INTERVAL_SETTING.getKey()), equalTo("5s")); // Make sure that number of shards didn't change assertThat(getSettingsResponse.getSetting("test-idx", SETTING_NUMBER_OF_SHARDS), equalTo("" + numberOfShards)); @@ -839,8 +823,7 @@ public void testRecreateBlocksOnRestore() throws Exception { .prepareRestoreSnapshot("test-repo", "test-snap") .setIndexSettings(changedSettings) .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ClusterBlocks blocks = client.admin().cluster().prepareState().clear().setBlocks(true).get().getState().blocks(); @@ -906,7 +889,7 @@ public void testFailOnAncientVersion() throws Exception { final String oldSnapshot = initWithSnapshotVersion(repoName, repoPath, oldVersion); final SnapshotRestoreException snapshotRestoreException = expectThrows( SnapshotRestoreException.class, - () -> clusterAdmin().prepareRestoreSnapshot(repoName, oldSnapshot).execute().actionGet() + () -> clusterAdmin().prepareRestoreSnapshot(repoName, oldSnapshot).get() ); assertThat( snapshotRestoreException.getMessage(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 7f5cacdfc935a..abc2bf8fb7219 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -188,8 +188,7 @@ public void testBasicWorkFlow() throws Exception { logger.info("--> restore all indices from the snapshot"); RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); @@ -217,8 +216,7 @@ public void testBasicWorkFlow() throws Exception { restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) .setIndices("test-idx-*", "-test-idx-2") - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); @@ -296,8 +294,7 @@ public void testFreshIndexUUID() { logger.info("--> restore all indices from the snapshot"); RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); @@ -314,8 +311,7 @@ public void testFreshIndexUUID() { .setRenamePattern("(.+)") .setRenameReplacement("$1-copy") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); String copyRestoreUUID = indicesAdmin().prepareGetSettings("test-copy") @@ -545,8 +541,7 @@ public void testDataFileCorruptionDuringRestore() throws Exception { .cluster() .prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); 
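Most of the remaining hunks in these snapshot tests are a purely mechanical collapse of builder.execute().actionGet() into builder.get(). The substitution is safe because get() is shorthand for exactly that composition; the sketch below uses hypothetical names rather than Elasticsearch's actual ActionRequestBuilder hierarchy:

// get() merely chains execute() (send the request, obtain a future)
// with actionGet() (block until the response arrives).
interface ResponseFuture<R> {
    R actionGet();
}

abstract class RequestBuilderSketch<R> {
    abstract ResponseFuture<R> execute();

    final R get() {
        return execute().actionGet();
    }
}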
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat( restoreSnapshotResponse.getRestoreInfo().failedShards(), @@ -750,8 +745,7 @@ public void testDeletionOfFailingToRecoverIndexShouldStopRestore() throws Except .cluster() .prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); assertDocCount("test-idx", 100L); @@ -839,8 +833,7 @@ public void testDeleteSnapshot() throws Exception { .cluster() .prepareRestoreSnapshot("test-repo", lastSnapshot) .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertDocCount("test-idx", 10L * numberOfSnapshots); @@ -911,8 +904,7 @@ public void testMoveShardWhileSnapshotting() throws Exception { .cluster() .prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertDocCount("test-idx", 100L); } @@ -950,7 +942,7 @@ public void testDeleteRepositoryWhileSnapshotting() throws Exception { logger.info("--> execution was blocked on node [{}], trying to delete repository", blockedNode); try { - client.admin().cluster().prepareDeleteRepository(randomFrom("test-repo", "test-*", "*")).execute().actionGet(); + client.admin().cluster().prepareDeleteRepository(randomFrom("test-repo", "test-*", "*")).get(); fail("shouldn't be able to delete in-use repository"); } catch (Exception ex) { logger.info("--> in-use repository deletion failed"); @@ -1002,8 +994,7 @@ public void testDeleteRepositoryWhileSnapshotting() throws Exception { .cluster() .prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertDocCount("test-idx", 100); } @@ -1037,8 +1028,7 @@ public void testReadonlyRepository() throws Exception { .prepareRestoreSnapshot("readonly-repo", "test-snap") .setWaitForCompletion(true) .setIndices("test-idx") - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertDocCount("test-idx", 100L); @@ -1118,7 +1108,7 @@ public void testSnapshotStatus() throws Exception { }); logger.info("--> execution was blocked on node [{}], checking snapshot status with specified repository and snapshot", blockedNode); - SnapshotsStatusResponse response = client.admin().cluster().prepareSnapshotStatus("test-repo").execute().actionGet(); + SnapshotsStatusResponse response = client.admin().cluster().prepareSnapshotStatus("test-repo").get(); assertThat(response.getSnapshots().size(), equalTo(1)); SnapshotStatus snapshotStatus = response.getSnapshots().get(0); assertThat(snapshotStatus.getState(), equalTo(State.STARTED)); @@ -1133,7 +1123,7 @@ public void testSnapshotStatus() throws Exception { } logger.info("--> checking snapshot status for all currently running and snapshot with empty repository"); - response = client.admin().cluster().prepareSnapshotStatus().execute().actionGet(); + response = client.admin().cluster().prepareSnapshotStatus().get(); assertThat(response.getSnapshots().size(), equalTo(1)); snapshotStatus = response.getSnapshots().get(0); 
assertThat(snapshotStatus.getState(), equalTo(State.STARTED)); @@ -1148,12 +1138,7 @@ public void testSnapshotStatus() throws Exception { } logger.info("--> checking that _current returns the currently running snapshot"); - GetSnapshotsResponse getResponse = client.admin() - .cluster() - .prepareGetSnapshots("test-repo") - .setCurrentSnapshot() - .execute() - .actionGet(); + GetSnapshotsResponse getResponse = client.admin().cluster().prepareGetSnapshots("test-repo").setCurrentSnapshot().get(); assertThat(getResponse.getSnapshots().size(), equalTo(1)); SnapshotInfo snapshotInfo = getResponse.getSnapshots().get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.IN_PROGRESS)); @@ -1170,7 +1155,7 @@ public void testSnapshotStatus() throws Exception { logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size()); logger.info("--> checking snapshot status again after snapshot is done"); - response = client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap").execute().actionGet(); + response = client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap").get(); snapshotStatus = response.getSnapshots().get(0); assertThat(snapshotStatus.getIndices().size(), equalTo(1)); assertThat(snapshotStatus.includeGlobalState(), equalTo(false)); @@ -1183,19 +1168,12 @@ public void testSnapshotStatus() throws Exception { assertThat(indexStatus.getShards().size(), equalTo(snapshotInfo.totalShards())); logger.info("--> checking snapshot status after it is done with empty repository"); - response = client.admin().cluster().prepareSnapshotStatus().execute().actionGet(); + response = client.admin().cluster().prepareSnapshotStatus().get(); assertThat(response.getSnapshots().size(), equalTo(0)); logger.info("--> checking that _current no longer returns the snapshot"); assertThat( - client.admin() - .cluster() - .prepareGetSnapshots("test-repo") - .addSnapshots("_current") - .execute() - .actionGet() - .getSnapshots() - .isEmpty(), + client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("_current").get().getSnapshots().isEmpty(), equalTo(true) ); @@ -1241,7 +1219,7 @@ public void testSnapshotRelocatingPrimary() throws Exception { logger.info("--> wait for relocations to start"); assertBusy( - () -> assertThat(clusterAdmin().prepareHealth("test-idx").execute().actionGet().getRelocatingShards(), greaterThan(0)), + () -> assertThat(clusterAdmin().prepareHealth("test-idx").get().getRelocatingShards(), greaterThan(0)), 1L, TimeUnit.MINUTES ); @@ -1532,9 +1510,9 @@ public void testListCorruptedSnapshot() throws Exception { logger.info("--> indexing some data"); indexRandom( true, - client().prepareIndex("test-idx-1").setSource("foo", "bar"), - client().prepareIndex("test-idx-2").setSource("foo", "bar"), - client().prepareIndex("test-idx-3").setSource("foo", "bar") + prepareIndex("test-idx-1").setSource("foo", "bar"), + prepareIndex("test-idx-2").setSource("foo", "bar"), + prepareIndex("test-idx-3").setSource("foo", "bar") ); createSnapshot("test-repo", "test-snap-1", Collections.singletonList("test-idx-*")); @@ -1575,9 +1553,9 @@ public void testRestoreSnapshotWithCorruptedGlobalState() throws Exception { createIndex("test-idx-1", "test-idx-2"); indexRandom( true, - client().prepareIndex("test-idx-1").setSource("foo", "bar"), - client().prepareIndex("test-idx-2").setSource("foo", "bar"), - client().prepareIndex("test-idx-2").setSource("foo", "bar") + prepareIndex("test-idx-1").setSource("foo", "bar"), + 
prepareIndex("test-idx-2").setSource("foo", "bar"), + prepareIndex("test-idx-2").setSource("foo", "bar") ); flushAndRefresh("test-idx-1", "test-idx-2"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java index bba6a2ae1a6b6..df2cf31e37470 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java @@ -38,8 +38,7 @@ public void testExceptionWhenRestoringPersistentSettings() { .cluster() .prepareUpdateSettings() .setPersistentSettings(Settings.builder().put(BrokenSettingPlugin.BROKEN_SETTING.getKey(), value)) - .execute() - .actionGet(); + .get(); Consumer assertSettingValue = value -> assertThat( client.admin() @@ -47,8 +46,7 @@ public void testExceptionWhenRestoringPersistentSettings() { .prepareState() .setRoutingTable(false) .setNodes(false) - .execute() - .actionGet() + .get() .getState() .getMetadata() .persistentSettings() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java index c307990b1a244..05888fd776641 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java @@ -157,8 +157,7 @@ public void testIncludeGlobalState() throws Exception { RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state") .setWaitForCompletion(true) .setRestoreGlobalState(false) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); logger.info("--> check that template wasn't restored"); @@ -169,8 +168,7 @@ public void testIncludeGlobalState() throws Exception { restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-with-global-state") .setWaitForCompletion(true) .setRestoreGlobalState(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); if (testTemplate) { @@ -226,8 +224,7 @@ public void testIncludeGlobalState() throws Exception { restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state-with-index") .setWaitForCompletion(true) .setRestoreGlobalState(false) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java index f8e2ed03a3e39..cc7c7709075c0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java @@ -138,10 +138,7 @@ public void testExceptionOnMissingSnapBlob() throws IOException { logger.info("--> delete snap-${uuid}.dat file for this snapshot to simulate concurrent delete"); IOUtils.rm(repoPath.resolve(BlobStoreRepository.SNAPSHOT_PREFIX + 
snapshotInfo.snapshotId().getUUID() + ".dat")); - expectThrows( - SnapshotMissingException.class, - () -> clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap").execute().actionGet() - ); + expectThrows(SnapshotMissingException.class, () -> clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get()); } public void testExceptionOnMissingShardLevelSnapBlob() throws IOException { @@ -172,7 +169,7 @@ public void testExceptionOnMissingShardLevelSnapBlob() throws IOException { expectThrows( SnapshotMissingException.class, - () -> clusterAdmin().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").execute().actionGet() + () -> clusterAdmin().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").get() ); } @@ -459,11 +456,7 @@ public void testGetSnapshotsWithSnapshotInProgress() throws Exception { expectThrows( SnapshotMissingException.class, - () -> clusterAdmin().prepareGetSnapshots("test-repo") - .setSnapshots(notExistedSnapshotName) - .setIgnoreUnavailable(false) - .execute() - .actionGet() + () -> clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots(notExistedSnapshotName).setIgnoreUnavailable(false).get() ); logger.info("--> unblock all data nodes"); @@ -677,8 +670,9 @@ public void testConcurrentCreateAndStatusAPICalls() throws Exception { }, 60, TimeUnit.SECONDS); for (ActionFuture status : statuses) { - assertThat(status.get().getSnapshots(), hasSize(snapshots)); - for (SnapshotStatus snapshot : status.get().getSnapshots()) { + var statusResponse = status.get(); + assertThat(statusResponse.getSnapshots(), hasSize(snapshots)); + for (SnapshotStatus snapshot : statusResponse.getSnapshots()) { assertThat(snapshot.getState(), allOf(not(SnapshotsInProgress.State.FAILED), not(SnapshotsInProgress.State.ABORTED))); for (final var shard : snapshot.getShards()) { if (shard.getStage() == SnapshotIndexShardStage.DONE) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java b/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java index d6dad537afaea..841f77ea7efab 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java @@ -48,17 +48,16 @@ public void testThreadNames() throws Exception { int numDocs = randomIntBetween(2, 100); IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; ++i) { - builders[i] = client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field("str_value", "s" + i) - .array("str_values", new String[] { "s" + (i * 2), "s" + (i * 2 + 1) }) - .field("l_value", i) - .array("l_values", new int[] { i * 2, i * 2 + 1 }) - .field("d_value", i) - .array("d_values", new double[] { i * 2, i * 2 + 1 }) - .endObject() - ); + builders[i] = prepareIndex("idx").setSource( + jsonBuilder().startObject() + .field("str_value", "s" + i) + .array("str_values", new String[] { "s" + (i * 2), "s" + (i * 2 + 1) }) + .field("l_value", i) + .array("l_values", new int[] { i * 2, i * 2 + 1 }) + .field("d_value", i) + .array("d_values", new double[] { i * 2, i * 2 + 1 }) + .endObject() + ); } indexRandom(true, builders); int numSearches = randomIntBetween(2, 100); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesDimensionsLimitIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesDimensionsLimitIT.java index b3cb2e5f178ca..a62560588bdb2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesDimensionsLimitIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesDimensionsLimitIT.java @@ -46,18 +46,16 @@ public void testDimensionFieldNameLimit() throws IOException { ); final Exception ex = expectThrows( DocumentParsingException.class, - () -> client().prepareIndex("test") - .setSource( - "routing_field", - randomAlphaOfLength(10), - dimensionFieldName, - randomAlphaOfLength(1024), - "gauge", - randomIntBetween(10, 20), - "@timestamp", - Instant.now().toEpochMilli() - ) - .get() + () -> prepareIndex("test").setSource( + "routing_field", + randomAlphaOfLength(10), + dimensionFieldName, + randomAlphaOfLength(1024), + "gauge", + randomIntBetween(10, 20), + "@timestamp", + Instant.now().toEpochMilli() + ).get() ); assertThat( ex.getCause().getMessage(), @@ -76,14 +74,18 @@ public void testDimensionFieldValueLimit() throws IOException { dimensionFieldLimit ); long startTime = Instant.now().toEpochMilli(); - client().prepareIndex("test") - .setSource("field", randomAlphaOfLength(1024), "gauge", randomIntBetween(10, 20), "@timestamp", startTime) + prepareIndex("test").setSource("field", randomAlphaOfLength(1024), "gauge", randomIntBetween(10, 20), "@timestamp", startTime) .get(); final Exception ex = expectThrows( DocumentParsingException.class, - () -> client().prepareIndex("test") - .setSource("field", randomAlphaOfLength(1025), "gauge", randomIntBetween(10, 20), "@timestamp", startTime + 1) - .get() + () -> prepareIndex("test").setSource( + "field", + randomAlphaOfLength(1025), + "gauge", + randomIntBetween(10, 20), + "@timestamp", + startTime + 1 + ).get() ); assertThat(ex.getCause().getMessage(), equalTo("Dimension fields must be less than [1024] bytes but was [1025].")); } @@ -141,7 +143,7 @@ public void testTotalDimensionFieldsSizeLuceneLimit() throws IOException { for (int i = 0; i < dimensionFieldLimit; i++) { source.put(dimensionFieldNames.get(i), randomAlphaOfLength(1024)); } - final DocWriteResponse indexResponse = client().prepareIndex("test").setSource(source).get(); + final DocWriteResponse indexResponse = prepareIndex("test").setSource(source).get(); assertEquals(RestStatus.CREATED.getStatus(), indexResponse.status().getStatus()); } @@ -167,7 +169,7 @@ public void testTotalDimensionFieldsSizeLuceneLimitPlusOne() throws IOException for (int i = 0; i < dimensionFieldLimit; i++) { source.put(dimensionFieldNames.get(i), randomAlphaOfLength(1024)); } - final Exception ex = expectThrows(DocumentParsingException.class, () -> client().prepareIndex("test").setSource(source).get()); + final Exception ex = expectThrows(DocumentParsingException.class, () -> prepareIndex("test").setSource(source).get()); assertEquals("_tsid longer than [32766] bytes [33903].", ex.getCause().getMessage()); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java index 4e97560284c67..813ff8b4227bc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java @@ -151,26 +151,24 @@ public void testUpsert() throws Exception { UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "1") 
.setUpsert(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject()) .setScript(fieldIncScript) - .execute() - .actionGet(); + .get(); assertEquals(DocWriteResponse.Result.CREATED, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("1")); } updateResponse = client().prepareUpdate(indexOrAlias(), "1") .setUpsert(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject()) .setScript(fieldIncScript) - .execute() - .actionGet(); + .get(); assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("2")); } } @@ -194,13 +192,12 @@ public void testScriptedUpsert() throws Exception { .setUpsert(XContentFactory.jsonBuilder().startObject().field("balance", openingBalance).endObject()) .setScriptedUpsert(true) .setScript(new Script(ScriptType.INLINE, UPDATE_SCRIPTS, UPSERT_SCRIPT, params)) - .execute() - .actionGet(); + .get(); assertEquals(DocWriteResponse.Result.CREATED, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.getSourceAsMap().get("balance").toString(), equalTo("9")); } @@ -209,13 +206,12 @@ public void testScriptedUpsert() throws Exception { .setUpsert(XContentFactory.jsonBuilder().startObject().field("balance", openingBalance).endObject()) .setScriptedUpsert(true) .setScript(new Script(ScriptType.INLINE, UPDATE_SCRIPTS, UPSERT_SCRIPT, params)) - .execute() - .actionGet(); + .get(); assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.getSourceAsMap().get("balance").toString(), equalTo("7")); } } @@ -228,8 +224,7 @@ public void testUpsertDoc() throws Exception { .setDoc(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject()) .setDocAsUpsert(true) .setFetchSource(true) - .execute() - .actionGet(); + .get(); assertThat(updateResponse.getIndex(), equalTo("test")); assertThat(updateResponse.getGetResult(), notNullValue()); assertThat(updateResponse.getGetResult().getIndex(), equalTo("test")); @@ -259,8 +254,7 @@ public void testUpsertFields() throws Exception { .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject()) .setScript(new Script(ScriptType.INLINE, UPDATE_SCRIPTS, PUT_VALUES_SCRIPT, Collections.singletonMap("extra", "foo"))) .setFetchSource(true) - .execute() - .actionGet(); + .get(); assertThat(updateResponse.getIndex(), equalTo("test")); assertThat(updateResponse.getGetResult(), notNullValue()); @@ -272,8 +266,7 @@ public void testUpsertFields() throws Exception { 
.setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject()) .setScript(new Script(ScriptType.INLINE, UPDATE_SCRIPTS, PUT_VALUES_SCRIPT, Collections.singletonMap("extra", "foo"))) .setFetchSource(true) - .execute() - .actionGet(); + .get(); assertThat(updateResponse.getIndex(), equalTo("test")); assertThat(updateResponse.getGetResult(), notNullValue()); @@ -287,8 +280,7 @@ public void testIndexAutoCreation() throws Exception { .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject()) .setScript(new Script(ScriptType.INLINE, UPDATE_SCRIPTS, PUT_VALUES_SCRIPT, Collections.singletonMap("extra", "foo"))) .setFetchSource(true) - .execute() - .actionGet(); + .get(); assertThat(updateResponse.getIndex(), equalTo("test")); assertThat(updateResponse.getGetResult(), notNullValue()); @@ -305,19 +297,19 @@ public void testUpdate() throws Exception { Script fieldIncScript = new Script(ScriptType.INLINE, UPDATE_SCRIPTS, FIELD_INC_SCRIPT, Collections.singletonMap("field", "field")); DocumentMissingException ex = expectThrows( DocumentMissingException.class, - () -> client().prepareUpdate(indexOrAlias(), "1").setScript(fieldIncScript).execute().actionGet() + () -> client().prepareUpdate(indexOrAlias(), "1").setScript(fieldIncScript).get() ); assertEquals("[1]: document missing", ex.getMessage()); - client().prepareIndex("test").setId("1").setSource("field", 1).execute().actionGet(); + prepareIndex("test").setId("1").setSource("field", 1).get(); - UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "1").setScript(fieldIncScript).execute().actionGet(); + UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "1").setScript(fieldIncScript).get(); assertThat(updateResponse.getVersion(), equalTo(2L)); assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("2")); } @@ -326,14 +318,13 @@ public void testUpdate() throws Exception { params.put("field", "field"); updateResponse = client().prepareUpdate(indexOrAlias(), "1") .setScript(new Script(ScriptType.INLINE, UPDATE_SCRIPTS, FIELD_INC_SCRIPT, params)) - .execute() - .actionGet(); + .get(); assertThat(updateResponse.getVersion(), equalTo(3L)); assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("5")); } @@ -347,14 +338,13 @@ public void testUpdate() throws Exception { Collections.singletonMap("_ctx", Collections.singletonMap("op", "none")) ) ) - .execute() - .actionGet(); + .get(); assertThat(updateResponse.getVersion(), equalTo(3L)); assertEquals(DocWriteResponse.Result.NOOP, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("5")); } 
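// [editor's note] The .execute().actionGet() -> .get() rewrites throughout these hunks are
// behaviour-preserving: on the request builders, get() is shorthand for blocking on the
// future that execute() returns. A minimal self-contained sketch of that contract, with
// hypothetical stand-in types rather than the verbatim Elasticsearch classes:
interface BlockingFutureSketch<T> {
    T actionGet(); // block until the response is available, unwrapping failures
}

abstract class ActionRequestBuilderSketch<Response> {
    // Send the request without blocking.
    abstract BlockingFutureSketch<Response> execute();

    // get() simply delegates: execute the request, then block for the result,
    // which is why collapsing the two-call chain into one call changes nothing.
    final Response get() {
        return execute().actionGet();
    }
}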
@@ -368,19 +358,18 @@ public void testUpdate() throws Exception { Collections.singletonMap("_ctx", Collections.singletonMap("op", "delete")) ) ) - .execute() - .actionGet(); + .get(); assertThat(updateResponse.getVersion(), equalTo(4L)); assertEquals(DocWriteResponse.Result.DELETED, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.isExists(), equalTo(false)); } // check _source parameter - client().prepareIndex("test").setId("1").setSource("field1", 1, "field2", 2).execute().actionGet(); + prepareIndex("test").setId("1").setSource("field1", 1, "field2", 2).get(); updateResponse = client().prepareUpdate(indexOrAlias(), "1") .setScript(new Script(ScriptType.INLINE, UPDATE_SCRIPTS, FIELD_INC_SCRIPT, Collections.singletonMap("field", "field1"))) .setFetchSource("field1", "field2") @@ -395,24 +384,20 @@ public void testUpdate() throws Exception { // check updates without script // add new field - client().prepareIndex("test").setId("1").setSource("field", 1).execute().actionGet(); + prepareIndex("test").setId("1").setSource("field", 1).get(); client().prepareUpdate(indexOrAlias(), "1") .setDoc(XContentFactory.jsonBuilder().startObject().field("field2", 2).endObject()) - .execute() - .actionGet(); + .get(); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("1")); assertThat(getResponse.getSourceAsMap().get("field2").toString(), equalTo("2")); } // change existing field - client().prepareUpdate(indexOrAlias(), "1") - .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 3).endObject()) - .execute() - .actionGet(); + client().prepareUpdate(indexOrAlias(), "1").setDoc(XContentFactory.jsonBuilder().startObject().field("field", 3).endObject()).get(); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("3")); assertThat(getResponse.getSourceAsMap().get("field2").toString(), equalTo("2")); } @@ -427,13 +412,12 @@ public void testUpdate() throws Exception { testMap.put("commonkey", testMap2); testMap.put("map1", 8); - client().prepareIndex("test").setId("1").setSource("map", testMap).execute().actionGet(); + prepareIndex("test").setId("1").setSource("map", testMap).get(); client().prepareUpdate(indexOrAlias(), "1") .setDoc(XContentFactory.jsonBuilder().startObject().field("map", testMap3).endObject()) - .execute() - .actionGet(); + .get(); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); Map map1 = get(getResponse.getSourceAsMap(), "map"); assertThat(map1.size(), equalTo(3)); assertThat(map1.containsKey("map1"), equalTo(true)); @@ -451,7 +435,7 @@ public void testUpdateWithIfSeqNo() throws Exception { createTestIndex(); ensureGreen(); - DocWriteResponse result = client().prepareIndex("test").setId("1").setSource("field", 1).get(); + DocWriteResponse result = prepareIndex("test").setId("1").setSource("field", 1).get(); 
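// [editor's note] The client().prepareIndex("test") -> prepareIndex("test") changes, like the
// pair just above, lean on a convenience method on the integration-test base class. A hedged
// sketch of what such a forwarding helper plausibly looks like (names assumed for
// illustration, not quoted from ESIntegTestCase):
abstract class IntegTestCaseSketch {
    // Stand-ins for the real client and builder types.
    interface IndexRequestBuilderSketch {}
    interface ClientSketch {
        IndexRequestBuilderSketch prepareIndex(String index);
    }

    abstract ClientSketch client();

    // prepareIndex(index) forwards to client().prepareIndex(index), so the
    // shortened call sites talk to the same client as before.
    IndexRequestBuilderSketch prepareIndex(String index) {
        return client().prepareIndex(index);
    }
}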
expectThrows( VersionConflictEngineException.class, () -> client().prepareUpdate(indexOrAlias(), "1") @@ -498,8 +482,7 @@ public void testUpdateRequestWithBothScriptAndDoc() throws Exception { client().prepareUpdate(indexOrAlias(), "1") .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject()) .setScript(fieldIncScript) - .execute() - .actionGet(); + .get(); fail("Should have thrown ActionRequestValidationException"); } catch (ActionRequestValidationException e) { assertThat(e.validationErrors().size(), equalTo(1)); @@ -513,7 +496,7 @@ public void testUpdateRequestWithScriptAndShouldUpsertDoc() throws Exception { ensureGreen(); Script fieldIncScript = new Script(ScriptType.INLINE, UPDATE_SCRIPTS, FIELD_INC_SCRIPT, Collections.singletonMap("field", "field")); try { - client().prepareUpdate(indexOrAlias(), "1").setScript(fieldIncScript).setDocAsUpsert(true).execute().actionGet(); + client().prepareUpdate(indexOrAlias(), "1").setScript(fieldIncScript).setDocAsUpsert(true).get(); fail("Should have thrown ActionRequestValidationException"); } catch (ActionRequestValidationException e) { assertThat(e.validationErrors().size(), equalTo(1)); @@ -527,26 +510,18 @@ public void testContextVariables() throws Exception { ensureGreen(); // Index some documents - client().prepareIndex() - .setIndex("test") - .setId("id1") - .setRouting("routing1") - .setSource("field1", 1, "content", "foo") - .execute() - .actionGet(); - - client().prepareIndex().setIndex("test").setId("id2").setSource("field1", 0, "content", "bar").execute().actionGet(); + prepareIndex("test").setId("id1").setRouting("routing1").setSource("field1", 1, "content", "foo").get(); + prepareIndex("test").setId("id2").setSource("field1", 0, "content", "bar").get(); // Update the first object and note context variables values UpdateResponse updateResponse = client().prepareUpdate("test", "id1") .setRouting("routing1") .setScript(new Script(ScriptType.INLINE, UPDATE_SCRIPTS, EXTRACT_CTX_SCRIPT, Collections.emptyMap())) - .execute() - .actionGet(); + .get(); assertEquals(2, updateResponse.getVersion()); - GetResponse getResponse = client().prepareGet("test", "id1").setRouting("routing1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "id1").setRouting("routing1").get(); Map updateContext = get(getResponse.getSourceAsMap(), "update_context"); assertEquals("test", updateContext.get("_index")); assertEquals("id1", updateContext.get("_id")); @@ -556,12 +531,11 @@ public void testContextVariables() throws Exception { // Idem with the second object updateResponse = client().prepareUpdate("test", "id2") .setScript(new Script(ScriptType.INLINE, UPDATE_SCRIPTS, EXTRACT_CTX_SCRIPT, Collections.emptyMap())) - .execute() - .actionGet(); + .get(); assertEquals(2, updateResponse.getVersion()); - getResponse = client().prepareGet("test", "id2").execute().actionGet(); + getResponse = client().prepareGet("test", "id2").get(); updateContext = get(getResponse.getSourceAsMap(), "update_context"); assertEquals("test", updateContext.get("_index")); assertEquals("id2", updateContext.get("_id")); @@ -602,14 +576,13 @@ public void run() { .setScript(fieldIncScript) .setRetryOnConflict(Integer.MAX_VALUE) .setUpsert(jsonBuilder().startObject().field("field", 1).endObject()); - client().prepareBulk().add(updateRequestBuilder).execute().actionGet(); + client().prepareBulk().add(updateRequestBuilder).get(); } else { client().prepareUpdate(indexOrAlias(), Integer.toString(i)) .setScript(fieldIncScript) 
.setRetryOnConflict(Integer.MAX_VALUE) .setUpsert(jsonBuilder().startObject().field("field", 1).endObject()) - .execute() - .actionGet(); + .get(); } } logger.info("Client [{}] issued all [{}] requests.", Thread.currentThread().getName(), numberOfUpdatesPerThread); @@ -640,7 +613,7 @@ public void run() { } assertThat(failures.size(), equalTo(0)); for (int i = 0; i < numberOfUpdatesPerThread; i++) { - GetResponse response = client().prepareGet("test", Integer.toString(i)).execute().actionGet(); + GetResponse response = client().prepareGet("test", Integer.toString(i)).get(); assertThat(response.getId(), equalTo(Integer.toString(i))); assertThat(response.isExists(), equalTo(true)); assertThat(response.getVersion(), equalTo((long) numberOfThreads)); @@ -849,15 +822,14 @@ private void waitForOutstandingRequests(TimeValue timeOut, Semaphore requestsOut .setScript(fieldIncScript) .setRetryOnConflict(Integer.MAX_VALUE) .setUpsert(jsonBuilder().startObject().field("field", 1).endObject()) - .execute() - .actionGet(); + .get(); } refresh(); for (int i = 0; i < numberOfIdsPerThread; ++i) { int totalFailures = 0; - GetResponse response = client().prepareGet("test", Integer.toString(i)).execute().actionGet(); + GetResponse response = client().prepareGet("test", Integer.toString(i)).get(); if (response.isExists()) { assertThat(response.getId(), equalTo(Integer.toString(i))); int expectedVersion = (numberOfThreads * numberOfUpdatesPerId * 2) + 1; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/validate/SimpleValidateQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/validate/SimpleValidateQueryIT.java index afb86bd175973..603bfbeaa3dfe 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/validate/SimpleValidateQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/validate/SimpleValidateQueryIT.java @@ -65,56 +65,42 @@ public void testSimpleValidateQuery() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); refresh(); assertThat( indicesAdmin().prepareValidateQuery("test") .setQuery(QueryBuilders.wrapperQuery("foo".getBytes(StandardCharsets.UTF_8))) - .execute() - .actionGet() + .get() .isValid(), equalTo(false) ); assertThat( - indicesAdmin().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("_id:1")).execute().actionGet().isValid(), + indicesAdmin().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("_id:1")).get().isValid(), equalTo(true) ); assertThat( - indicesAdmin().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("_i:d:1")).execute().actionGet().isValid(), + indicesAdmin().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("_i:d:1")).get().isValid(), equalTo(false) ); assertThat( - indicesAdmin().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("foo:1")).execute().actionGet().isValid(), + indicesAdmin().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("foo:1")).get().isValid(), equalTo(true) ); assertThat( - indicesAdmin().prepareValidateQuery("test") - .setQuery(QueryBuilders.queryStringQuery("bar:hey").lenient(false)) - .execute() - .actionGet() - .isValid(), + indicesAdmin().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("bar:hey").lenient(false)).get().isValid(), equalTo(false) ); assertThat( - indicesAdmin().prepareValidateQuery("test") - .setQuery(QueryBuilders.queryStringQuery("nonexistent:hello")) - .execute() - .actionGet() - .isValid(), + 
indicesAdmin().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("nonexistent:hello")).get().isValid(), equalTo(true) ); assertThat( - indicesAdmin().prepareValidateQuery("test") - .setQuery(QueryBuilders.queryStringQuery("foo:1 AND")) - .execute() - .actionGet() - .isValid(), + indicesAdmin().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("foo:1 AND")).get().isValid(), equalTo(false) ); } @@ -149,15 +135,10 @@ public void testExplainValidateQueryTwoNodes() throws IOException { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); for (int i = 0; i < 10; i++) { - client().prepareIndex("test") - .setSource("foo", "text", "bar", i, "baz", "blort") - .setId(Integer.toString(i)) - .execute() - .actionGet(); + prepareIndex("test").setSource("foo", "text", "bar", i, "baz", "blort").setId(Integer.toString(i)).get(); } refresh(); @@ -167,8 +148,7 @@ public void testExplainValidateQueryTwoNodes() throws IOException { .prepareValidateQuery("test") .setQuery(QueryBuilders.wrapperQuery("foo".getBytes(StandardCharsets.UTF_8))) .setExplain(true) - .execute() - .actionGet(); + .get(); assertThat(response.isValid(), equalTo(false)); assertThat(response.getQueryExplanation().size(), equalTo(1)); assertThat(response.getQueryExplanation().get(0).getError(), containsString("Failed to derive xcontent")); @@ -182,8 +162,7 @@ public void testExplainValidateQueryTwoNodes() throws IOException { .prepareValidateQuery("test") .setQuery(QueryBuilders.queryStringQuery("foo")) .setExplain(true) - .execute() - .actionGet(); + .get(); assertThat(response.isValid(), equalTo(true)); assertThat(response.getQueryExplanation().size(), equalTo(1)); assertThat( @@ -204,7 +183,7 @@ public void testExplainDateRangeInQueryString() { String aMonthAgo = DateTimeFormatter.ISO_LOCAL_DATE.format(now.plus(1, ChronoUnit.MONTHS)); String aMonthFromNow = DateTimeFormatter.ISO_LOCAL_DATE.format(now.minus(1, ChronoUnit.MONTHS)); - client().prepareIndex("test").setId("1").setSource("past", aMonthAgo, "future", aMonthFromNow).get(); + prepareIndex("test").setId("1").setSource("past", aMonthAgo, "future", aMonthFromNow).get(); refresh(); @@ -265,10 +244,10 @@ public void testExplainWithRewriteValidateQuery() { .setMapping("field", "type=text,analyzer=whitespace") .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1)) .get(); - client().prepareIndex("test").setId("1").setSource("field", "quick lazy huge brown pidgin").get(); - client().prepareIndex("test").setId("2").setSource("field", "the quick brown fox").get(); - client().prepareIndex("test").setId("3").setSource("field", "the quick lazy huge brown fox jumps over the tree").get(); - client().prepareIndex("test").setId("4").setSource("field", "the lazy dog quacks like a duck").get(); + prepareIndex("test").setId("1").setSource("field", "quick lazy huge brown pidgin").get(); + prepareIndex("test").setId("2").setSource("field", "the quick brown fox").get(); + prepareIndex("test").setId("3").setSource("field", "the quick lazy huge brown fox jumps over the tree").get(); + prepareIndex("test").setId("4").setSource("field", "the lazy dog quacks like a duck").get(); refresh(); // prefix queries @@ -311,10 +290,10 @@ public void testExplainWithRewriteValidateQueryAllShards() { .get(); // We are relying on specific routing behaviors for the result to be right, so // we cannot randomize the number of shards or change ids here. 
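// [editor's note] The comment above is why this test pins ids and the shard count: the target
// shard is a deterministic function of the routing value (by default the document id). A
// hedged sketch of that shape only; real Elasticsearch hashes with Murmur3 and applies a
// routing-shard factor, so hashCode() below is purely illustrative:
final class RoutingSketch {
    static int shardFor(String routing, int numberOfShards) {
        // floorMod keeps the result non-negative even for negative hash values
        return Math.floorMod(routing.hashCode(), numberOfShards);
    }
}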
- client().prepareIndex("test").setId("1").setSource("field", "quick lazy huge brown pidgin").get(); - client().prepareIndex("test").setId("2").setSource("field", "the quick brown fox").get(); - client().prepareIndex("test").setId("3").setSource("field", "the quick lazy huge brown fox jumps over the tree").get(); - client().prepareIndex("test").setId("4").setSource("field", "the lazy dog quacks like a duck").get(); + prepareIndex("test").setId("1").setSource("field", "quick lazy huge brown pidgin").get(); + prepareIndex("test").setId("2").setSource("field", "the quick brown fox").get(); + prepareIndex("test").setId("3").setSource("field", "the quick lazy huge brown fox jumps over the tree").get(); + prepareIndex("test").setId("4").setSource("field", "the lazy dog quacks like a duck").get(); refresh(); // prefix queries @@ -357,8 +336,7 @@ private static void assertExplanation(QueryBuilder queryBuilder, Matcher .setQuery(queryBuilder) .setExplain(true) .setRewrite(withRewrite) - .execute() - .actionGet(); + .get(); assertThat(response.getQueryExplanation().size(), equalTo(1)); assertThat(response.getQueryExplanation().get(0).getError(), nullValue()); assertThat(response.getQueryExplanation().get(0).getExplanation(), matcher); @@ -376,8 +354,7 @@ private static void assertExplanations( .setExplain(true) .setRewrite(withRewrite) .setAllShards(allShards) - .execute() - .actionGet(); + .get(); assertThat(response.getQueryExplanation().size(), equalTo(matchers.size())); for (int i = 0; i < matchers.size(); i++) { assertThat(response.getQueryExplanation().get(i).getError(), nullValue()); @@ -391,15 +368,11 @@ public void testExplainTermsQueryWithLookup() { .setMapping("user", "type=integer", "followers", "type=integer") .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 2).put("index.number_of_routing_shards", 2)) .get(); - client().prepareIndex("twitter").setId("1").setSource("followers", new int[] { 1, 2, 3 }).get(); + prepareIndex("twitter").setId("1").setSource("followers", new int[] { 1, 2, 3 }).get(); refresh(); TermsQueryBuilder termsLookupQuery = QueryBuilders.termsLookupQuery("user", new TermsLookup("twitter", "1", "followers")); - ValidateQueryResponse response = indicesAdmin().prepareValidateQuery("twitter") - .setQuery(termsLookupQuery) - .setExplain(true) - .execute() - .actionGet(); + ValidateQueryResponse response = indicesAdmin().prepareValidateQuery("twitter").setQuery(termsLookupQuery).setExplain(true).get(); assertThat(response.isValid(), is(true)); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java index b191eb0cf4fe3..69c10edf89809 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java @@ -31,7 +31,7 @@ public void testConcurrentOperationOnSameDoc() throws Exception { final AtomicReference failure = new AtomicReference<>(); final CountDownLatch latch = new CountDownLatch(numberOfUpdates); for (int i = 0; i < numberOfUpdates; i++) { - client().prepareIndex("test").setId("1").setSource("field1", i).execute(new ActionListener<>() { + prepareIndex("test").setId("1").setSource("field1", i).execute(new ActionListener<>() { @Override public void onResponse(DocWriteResponse response) { latch.countDown(); @@ -50,12 +50,12 @@ public void 
onFailure(Exception e) { assertThat(failure.get(), nullValue()); - indicesAdmin().prepareRefresh().execute().actionGet(); + indicesAdmin().prepareRefresh().get(); logger.info("done indexing, check all have the same field value"); - Map masterSource = client().prepareGet("test", "1").execute().actionGet().getSourceAsMap(); + Map masterSource = client().prepareGet("test", "1").get().getSourceAsMap(); for (int i = 0; i < (cluster().size() * 5); i++) { - assertThat(client().prepareGet("test", "1").execute().actionGet().getSourceAsMap(), equalTo(masterSource)); + assertThat(client().prepareGet("test", "1").get().getSourceAsMap(), equalTo(masterSource)); } } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java b/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java index a68d56e05cb48..2b804293cd506 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java @@ -7,20 +7,22 @@ */ package org.elasticsearch.versioning; +import org.apache.logging.log4j.Level; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.coordination.LinearizabilityChecker; import org.elasticsearch.cluster.coordination.LinearizabilityChecker.LinearizabilityCheckAborted; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.ChunkedLoggingStream; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.discovery.AbstractDisruptionTestCase; @@ -30,7 +32,9 @@ import java.io.FileInputStream; import java.io.IOException; -import java.util.ArrayList; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Base64; import java.util.List; @@ -132,7 +136,7 @@ public void testSeqNoCASLinearizability() { logger.info("--> Indexing initial doc for {} keys", numberOfKeys); List partitions = IntStream.range(0, numberOfKeys) - .mapToObj(i -> client().prepareIndex("test").setId("ID:" + i).setSource("value", -1).get()) + .mapToObj(i -> prepareIndex("test").setId("ID:" + i).setSource("value", -1).get()) .map(response -> new Partition(response.getId(), new Version(response.getPrimaryTerm(), response.getSeqNo()))) .toList(); @@ -429,25 +433,55 @@ public void assertLinearizable() { history ); LinearizabilityChecker.SequentialSpec spec = new CASSequentialSpec(initialVersion); - boolean linearizable = false; + Boolean linearizable = null; try { linearizable = LinearizabilityChecker.isLinearizable(spec, history, missingResponseGenerator()); } catch (LinearizabilityCheckAborted 
e) { - logger.warn("linearizability check check was aborted", e); + logger.warn("linearizability check was aborted, assuming linearizable", e); } finally { - // implicitly test that we can serialize all histories. - String serializedHistory = base64Serialize(history); - if (linearizable == false) { - // we dump base64 encoded data, since the nature of this test is that it does not reproduce even with same seed. - logger.error( - "Linearizability check failed. Spec: {}, initial version: {}, serialized history: {}", - spec, - initialVersion, - serializedHistory - ); + try { + if (Boolean.TRUE.equals(linearizable)) { + // ensure that we can serialize all histories. + writeHistory(new OutputStreamStreamOutput(OutputStream.nullOutputStream()), history); + } else { + final var outcome = linearizable == null ? "inconclusive" : "unlinearizable"; + + logger.error( + "Linearizability check did not succeed. Spec: {}, initial version: {}, outcome: {}", + spec, + initialVersion, + outcome + ); + // we dump base64 encoded data, since the nature of this test is that it does not reproduce even with same seed. + try ( + var chunkedLoggingStream = ChunkedLoggingStream.create( + logger, + Level.ERROR, + "raw " + outcome + " history in partition " + id, + ReferenceDocs.LOGGING // any old docs link will do + ); + var output = new OutputStreamStreamOutput(chunkedLoggingStream) + ) { + writeHistory(output, history); + } + try ( + var chunkedLoggingStream = ChunkedLoggingStream.create( + logger, + Level.ERROR, + "visualisation of " + outcome + " history in partition " + id, + ReferenceDocs.LOGGING // any old docs link will do + ); + var writer = new OutputStreamWriter(chunkedLoggingStream, StandardCharsets.UTF_8) + ) { + LinearizabilityChecker.writeVisualisation(spec, history, missingResponseGenerator(), writer); + } + assertNull("Must not be unlinearizable", linearizable); + } + } catch (IOException e) { + logger.error("failure writing out history", e); + fail(e); } } - assertTrue("Must be linearizable", linearizable); } } @@ -623,31 +657,15 @@ private static Function missingResponseGenerator() { return input -> new FailureHistoryOutput(); } - private String base64Serialize(LinearizabilityChecker.History history) { - BytesStreamOutput output = new BytesStreamOutput(); - try { - List events = history.copyEvents(); - output.writeInt(events.size()); - for (LinearizabilityChecker.Event event : events) { - writeEvent(event, output); - } - output.close(); - return Base64.getEncoder().encodeToString(BytesReference.toBytes(output.bytes())); - } catch (IOException e) { - throw new RuntimeException(e); - } + private static void writeHistory(StreamOutput output, LinearizabilityChecker.History history) throws IOException { + output.writeCollection(history.copyEvents(), ConcurrentSeqNoVersioningIT::writeEvent); } private static LinearizabilityChecker.History readHistory(StreamInput input) throws IOException { - int size = input.readInt(); - List events = new ArrayList<>(size); - for (int i = 0; i < size; ++i) { - events.add(readEvent(input)); - } - return new LinearizabilityChecker.History(events); + return new LinearizabilityChecker.History(input.readCollectionAsList(ConcurrentSeqNoVersioningIT::readEvent)); } - private static void writeEvent(LinearizabilityChecker.Event event, BytesStreamOutput output) throws IOException { + private static void writeEvent(StreamOutput output, LinearizabilityChecker.Event event) throws IOException { output.writeEnum(event.type()); output.writeNamedWriteable((NamedWriteable) event.value()); 
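// [editor's note] writeHistory/readHistory above now round-trip through the stream helpers
// writeCollection/readCollectionAsList instead of a hand-rolled size-prefixed loop. A
// self-contained sketch of the same symmetry over plain java.io streams (the real
// StreamOutput writes variable-length ints; fixed-width writeInt is a simplification):
final class CollectionStreamSketch {
    interface ElementWriter<T> { void write(java.io.DataOutputStream out, T value) throws java.io.IOException; }
    interface ElementReader<T> { T read(java.io.DataInputStream in) throws java.io.IOException; }

    // Writer half: length prefix first, then each element.
    static <T> void writeCollection(java.io.DataOutputStream out, java.util.Collection<T> values, ElementWriter<T> writer)
        throws java.io.IOException {
        out.writeInt(values.size());
        for (T value : values) {
            writer.write(out, value);
        }
    }

    // Reader half, the mirror image: read the prefix back, then that many elements.
    static <T> java.util.List<T> readCollectionAsList(java.io.DataInputStream in, ElementReader<T> reader)
        throws java.io.IOException {
        int size = in.readInt();
        java.util.List<T> values = new java.util.ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            values.add(reader.read(in));
        }
        return values;
    }
}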
output.writeInt(event.id()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java b/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java index 5a1c09098f21f..e7877dd862ded 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; @@ -36,6 +35,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFutureThrows; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -47,55 +47,41 @@ public void testExternalVersioningInitialDelete() throws Exception { // Note - external version doesn't throw version conflicts on deletes of non existent records. // This is different from internal versioning - DeleteResponse deleteResponse = client().prepareDelete("test", "1") - .setVersion(17) - .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + DeleteResponse deleteResponse = client().prepareDelete("test", "1").setVersion(17).setVersionType(VersionType.EXTERNAL).get(); assertEquals(DocWriteResponse.Result.NOT_FOUND, deleteResponse.getResult()); // this should conflict with the delete command transaction which told us that the object was deleted at version 17. 
assertFutureThrows( - client().prepareIndex("test") - .setId("1") - .setSource("field1", "value1_1") - .setVersion(13) - .setVersionType(VersionType.EXTERNAL) - .execute(), + prepareIndex("test").setId("1").setSource("field1", "value1_1").setVersion(13).setVersionType(VersionType.EXTERNAL).execute(), VersionConflictEngineException.class ); - DocWriteResponse indexResponse = client().prepareIndex("test") - .setId("1") + DocWriteResponse indexResponse = prepareIndex("test").setId("1") .setSource("field1", "value1_1") .setVersion(18) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); assertThat(indexResponse.getVersion(), equalTo(18L)); } public void testExternalGTE() throws Exception { createIndex("test"); - DocWriteResponse indexResponse = client().prepareIndex("test") - .setId("1") + DocWriteResponse indexResponse = prepareIndex("test").setId("1") .setSource("field1", "value1_1") .setVersion(12) .setVersionType(VersionType.EXTERNAL_GTE) .get(); assertThat(indexResponse.getVersion(), equalTo(12L)); - indexResponse = client().prepareIndex("test") - .setId("1") + indexResponse = prepareIndex("test").setId("1") .setSource("field1", "value1_2") .setVersion(12) .setVersionType(VersionType.EXTERNAL_GTE) .get(); assertThat(indexResponse.getVersion(), equalTo(12L)); - indexResponse = client().prepareIndex("test") - .setId("1") + indexResponse = prepareIndex("test").setId("1") .setSource("field1", "value1_2") .setVersion(14) .setVersionType(VersionType.EXTERNAL_GTE) @@ -103,15 +89,11 @@ public void testExternalGTE() throws Exception { assertThat(indexResponse.getVersion(), equalTo(14L)); assertRequestBuilderThrows( - client().prepareIndex("test") - .setId("1") - .setSource("field1", "value1_1") - .setVersion(13) - .setVersionType(VersionType.EXTERNAL_GTE), + prepareIndex("test").setId("1").setSource("field1", "value1_1").setVersion(13).setVersionType(VersionType.EXTERNAL_GTE), VersionConflictEngineException.class ); - client().admin().indices().prepareRefresh().execute().actionGet(); + client().admin().indices().prepareRefresh().get(); if (randomBoolean()) { refresh(); } @@ -127,11 +109,7 @@ public void testExternalGTE() throws Exception { // Delete with a higher or equal version deletes all versions up to the given one. long v = randomIntBetween(14, 17); - DeleteResponse deleteResponse = client().prepareDelete("test", "1") - .setVersion(v) - .setVersionType(VersionType.EXTERNAL_GTE) - .execute() - .actionGet(); + DeleteResponse deleteResponse = client().prepareDelete("test", "1").setVersion(v).setVersionType(VersionType.EXTERNAL_GTE).get(); assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); assertThat(deleteResponse.getVersion(), equalTo(v)); @@ -142,7 +120,7 @@ public void testExternalGTE() throws Exception { ); // But delete with a higher version is OK. 
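// [editor's note] A hedged sketch of the version rules these EXTERNAL/EXTERNAL_GTE
// assertions encode (simplified; the real checks live in the engine's version resolution):
final class ExternalVersionRulesSketch {
    // EXTERNAL accepts only strictly newer versions...
    static boolean externalWriteAccepted(long currentVersion, long requestedVersion) {
        return requestedVersion > currentVersion;
    }

    // ...while EXTERNAL_GTE also accepts a re-write of the same version, which is why
    // indexing at version 12 twice succeeds above while 13-after-14 still conflicts.
    static boolean externalGteWriteAccepted(long currentVersion, long requestedVersion) {
        return requestedVersion >= currentVersion;
    }
}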
- deleteResponse = client().prepareDelete("test", "1").setVersion(18).setVersionType(VersionType.EXTERNAL_GTE).execute().actionGet(); + deleteResponse = client().prepareDelete("test", "1").setVersion(18).setVersionType(VersionType.EXTERNAL_GTE).get(); assertEquals(DocWriteResponse.Result.NOT_FOUND, deleteResponse.getResult()); assertThat(deleteResponse.getVersion(), equalTo(18L)); } @@ -151,31 +129,22 @@ public void testExternalVersioning() throws Exception { createIndex("test"); ensureGreen(); - DocWriteResponse indexResponse = client().prepareIndex("test") - .setId("1") + DocWriteResponse indexResponse = prepareIndex("test").setId("1") .setSource("field1", "value1_1") .setVersion(12) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); assertThat(indexResponse.getVersion(), equalTo(12L)); - indexResponse = client().prepareIndex("test") - .setId("1") + indexResponse = prepareIndex("test").setId("1") .setSource("field1", "value1_1") .setVersion(14) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); assertThat(indexResponse.getVersion(), equalTo(14L)); assertFutureThrows( - client().prepareIndex("test") - .setId("1") - .setSource("field1", "value1_1") - .setVersion(13) - .setVersionType(VersionType.EXTERNAL) - .execute(), + prepareIndex("test").setId("1").setSource("field1", "value1_1").setVersion(13).setVersionType(VersionType.EXTERNAL).execute(), VersionConflictEngineException.class ); @@ -183,7 +152,7 @@ public void testExternalVersioning() throws Exception { refresh(); } for (int i = 0; i < 10; i++) { - assertThat(client().prepareGet("test", "1").execute().actionGet().getVersion(), equalTo(14L)); + assertThat(client().prepareGet("test", "1").get().getVersion(), equalTo(14L)); } // deleting with a lower version fails. @@ -193,11 +162,7 @@ public void testExternalVersioning() throws Exception { ); // Delete with a higher version deletes all versions up to the given one. - DeleteResponse deleteResponse = client().prepareDelete("test", "1") - .setVersion(17) - .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + DeleteResponse deleteResponse = client().prepareDelete("test", "1").setVersion(17).setVersionType(VersionType.EXTERNAL).get(); assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); assertThat(deleteResponse.getVersion(), equalTo(17L)); @@ -208,22 +173,20 @@ public void testExternalVersioning() throws Exception { ); // But delete with a higher version is OK. - deleteResponse = client().prepareDelete("test", "1").setVersion(18).setVersionType(VersionType.EXTERNAL).execute().actionGet(); + deleteResponse = client().prepareDelete("test", "1").setVersion(18).setVersionType(VersionType.EXTERNAL).get(); assertEquals(DocWriteResponse.Result.NOT_FOUND, deleteResponse.getResult()); assertThat(deleteResponse.getVersion(), equalTo(18L)); // TODO: This behavior breaks rest api returning http status 201 // good news is that this is only the case until deletes GC kicks in. 
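// [editor's note] The assertResponse(...) conversions in the search assertions a little
// further down follow this shape: run the request, hand the response to the assertion
// block, and release it afterwards. A sketch with hypothetical stand-in types; the
// release-on-completion rationale is an assumption about why the temporary
// SearchResponse locals were removed, not a quote of the real helper:
final class AssertResponseSketch {
    interface RefCounted { void decRef(); }
    interface RequestBuilder<R extends RefCounted> { R get(); }

    static <R extends RefCounted> void assertResponse(RequestBuilder<R> builder, java.util.function.Consumer<R> assertions) {
        R response = builder.get();
        try {
            assertions.accept(response);    // run the test's assertions on the live response
        } finally {
            response.decRef();              // always release, even if an assertion throws
        }
    }
}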
- indexResponse = client().prepareIndex("test") - .setId("1") + indexResponse = prepareIndex("test").setId("1") .setSource("field1", "value1_1") .setVersion(19) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); assertThat(indexResponse.getVersion(), equalTo(19L)); - deleteResponse = client().prepareDelete("test", "1").setVersion(20).setVersionType(VersionType.EXTERNAL).execute().actionGet(); + deleteResponse = client().prepareDelete("test", "1").setVersion(20).setVersionType(VersionType.EXTERNAL).get(); assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); assertThat(deleteResponse.getVersion(), equalTo(20L)); @@ -232,13 +195,11 @@ public void testExternalVersioning() throws Exception { Thread.sleep(300); // gc works based on estimated sampled time. Give it a chance... // And now we have previous version return -1 - indexResponse = client().prepareIndex("test") - .setId("1") + indexResponse = prepareIndex("test").setId("1") .setSource("field1", "value1_1") .setVersion(20) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); assertThat(indexResponse.getVersion(), equalTo(20L)); } @@ -248,7 +209,7 @@ public void testRequireUnitsOnUpdateSettings() throws Exception { HashMap newSettings = new HashMap<>(); newSettings.put("index.gc_deletes", "42"); try { - client().admin().indices().prepareUpdateSettings("test").setSettings(newSettings).execute().actionGet(); + client().admin().indices().prepareUpdateSettings("test").setSettings(newSettings).get(); fail("did not hit expected exception"); } catch (IllegalArgumentException iae) { // expected @@ -268,12 +229,7 @@ public void testCompareAndSetInitialDelete() throws Exception { VersionConflictEngineException.class ); - DocWriteResponse indexResponse = client().prepareIndex("test") - .setId("1") - .setSource("field1", "value1_1") - .setCreate(true) - .execute() - .actionGet(); + DocWriteResponse indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_1").setCreate(true).get(); assertThat(indexResponse.getVersion(), equalTo(1L)); } @@ -281,26 +237,26 @@ public void testCompareAndSet() { createIndex("test"); ensureGreen(); - DocWriteResponse indexResponse = client().prepareIndex("test").setId("1").setSource("field1", "value1_1").execute().actionGet(); + DocWriteResponse indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_1").get(); assertThat(indexResponse.getSeqNo(), equalTo(0L)); assertThat(indexResponse.getPrimaryTerm(), equalTo(1L)); - indexResponse = client().prepareIndex("test").setId("1").setSource("field1", "value1_2").setIfSeqNo(0L).setIfPrimaryTerm(1).get(); + indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_2").setIfSeqNo(0L).setIfPrimaryTerm(1).get(); assertThat(indexResponse.getSeqNo(), equalTo(1L)); assertThat(indexResponse.getPrimaryTerm(), equalTo(1L)); assertFutureThrows( - client().prepareIndex("test").setId("1").setSource("field1", "value1_1").setIfSeqNo(10).setIfPrimaryTerm(1).execute(), + prepareIndex("test").setId("1").setSource("field1", "value1_1").setIfSeqNo(10).setIfPrimaryTerm(1).execute(), VersionConflictEngineException.class ); assertFutureThrows( - client().prepareIndex("test").setId("1").setSource("field1", "value1_1").setIfSeqNo(10).setIfPrimaryTerm(2).execute(), + prepareIndex("test").setId("1").setSource("field1", "value1_1").setIfSeqNo(10).setIfPrimaryTerm(2).execute(), VersionConflictEngineException.class ); assertFutureThrows( - 
client().prepareIndex("test").setId("1").setSource("field1", "value1_1").setIfSeqNo(1).setIfPrimaryTerm(2).execute(), + prepareIndex("test").setId("1").setSource("field1", "value1_1").setIfSeqNo(1).setIfPrimaryTerm(2).execute(), VersionConflictEngineException.class ); @@ -317,7 +273,7 @@ public void testCompareAndSet() { VersionConflictEngineException.class ); - client().admin().indices().prepareRefresh().execute().actionGet(); + client().admin().indices().prepareRefresh().get(); for (int i = 0; i < 10; i++) { final GetResponse response = client().prepareGet("test", "1").get(); assertThat(response.getSeqNo(), equalTo(1L)); @@ -327,14 +283,18 @@ public void testCompareAndSet() { // search with versioning for (int i = 0; i < 10; i++) { // TODO: ADD SEQ NO! - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).setVersion(true).execute().actionGet(); - assertThat(searchResponse.getHits().getAt(0).getVersion(), equalTo(2L)); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).setVersion(true), + response -> assertThat(response.getHits().getAt(0).getVersion(), equalTo(2L)) + ); } // search without versioning for (int i = 0; i < 10; i++) { - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).execute().actionGet(); - assertThat(searchResponse.getHits().getAt(0).getVersion(), equalTo(Versions.NOT_FOUND)); + assertResponse( + prepareSearch().setQuery(matchAllQuery()), + response -> assertThat(response.getHits().getAt(0).getVersion(), equalTo(Versions.NOT_FOUND)) + ); } DeleteResponse deleteResponse = client().prepareDelete("test", "1").setIfSeqNo(1).setIfPrimaryTerm(1).get(); @@ -366,21 +326,21 @@ public void testSimpleVersioningWithFlush() throws Exception { createIndex("test"); ensureGreen(); - DocWriteResponse indexResponse = client().prepareIndex("test").setId("1").setSource("field1", "value1_1").get(); + DocWriteResponse indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_1").get(); assertThat(indexResponse.getSeqNo(), equalTo(0L)); - client().admin().indices().prepareFlush().execute().actionGet(); - indexResponse = client().prepareIndex("test").setId("1").setSource("field1", "value1_2").setIfSeqNo(0).setIfPrimaryTerm(1).get(); + client().admin().indices().prepareFlush().get(); + indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_2").setIfSeqNo(0).setIfPrimaryTerm(1).get(); assertThat(indexResponse.getSeqNo(), equalTo(1L)); - client().admin().indices().prepareFlush().execute().actionGet(); + client().admin().indices().prepareFlush().get(); assertRequestBuilderThrows( - client().prepareIndex("test").setId("1").setSource("field1", "value1_1").setIfSeqNo(0).setIfPrimaryTerm(1), + prepareIndex("test").setId("1").setSource("field1", "value1_1").setIfSeqNo(0).setIfPrimaryTerm(1), VersionConflictEngineException.class ); assertRequestBuilderThrows( - client().prepareIndex("test").setId("1").setCreate(true).setSource("field1", "value1_1"), + prepareIndex("test").setId("1").setCreate(true).setSource("field1", "value1_1"), VersionConflictEngineException.class ); @@ -390,20 +350,17 @@ public void testSimpleVersioningWithFlush() throws Exception { ); for (int i = 0; i < 10; i++) { - assertThat(client().prepareGet("test", "1").execute().actionGet().getVersion(), equalTo(2L)); + assertThat(client().prepareGet("test", "1").get().getVersion(), equalTo(2L)); } - client().admin().indices().prepareRefresh().execute().actionGet(); + client().admin().indices().prepareRefresh().get(); for (int i = 0; i < 10; i++) { 
- SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .setVersion(true) - .seqNoAndPrimaryTerm(true) - .execute() - .actionGet(); - assertHitCount(searchResponse, 1); - assertThat(searchResponse.getHits().getAt(0).getVersion(), equalTo(2L)); - assertThat(searchResponse.getHits().getAt(0).getSeqNo(), equalTo(1L)); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setVersion(true).seqNoAndPrimaryTerm(true), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getVersion(), equalTo(2L)); + assertThat(response.getHits().getAt(0).getSeqNo(), equalTo(1L)); + }); } } @@ -411,10 +368,7 @@ public void testVersioningWithBulk() { createIndex("test"); ensureGreen(); - BulkResponse bulkResponse = client().prepareBulk() - .add(client().prepareIndex("test").setId("1").setSource("field1", "value1_1")) - .execute() - .actionGet(); + BulkResponse bulkResponse = client().prepareBulk().add(prepareIndex("test").setId("1").setSource("field1", "value1_1")).get(); assertThat(bulkResponse.hasFailures(), equalTo(false)); assertThat(bulkResponse.getItems().length, equalTo(1)); IndexResponse indexResponse = bulkResponse.getItems()[0].getResponse(); @@ -686,8 +640,7 @@ public void run() { idVersion.response = client().prepareDelete("test", id) .setVersion(version) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); } catch (VersionConflictEngineException vcee) { // OK: our version is too old assertThat(version, lessThanOrEqualTo(truth.get(id).version)); @@ -695,8 +648,7 @@ public void run() { } } else { try { - idVersion.response = client().prepareIndex("test") - .setId(id) + idVersion.response = prepareIndex("test").setId(id) .setSource("foo", "bar") .setVersion(version) .setVersionType(VersionType.EXTERNAL) @@ -744,7 +696,7 @@ public void run() { } else { expected = -1; } - long actualVersion = client().prepareGet("test", id).execute().actionGet().getVersion(); + long actualVersion = client().prepareGet("test", id).get().getVersion(); if (actualVersion != expected) { logger.error("--> FAILED: idVersion={} actualVersion= {}", idVersion, actualVersion); failed = true; @@ -764,21 +716,19 @@ public void run() { public void testDeleteNotLost() throws Exception { // We require only one shard for this test, so that the 2nd delete provokes pruning the deletes map: - indicesAdmin().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1)).execute().actionGet(); + indicesAdmin().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1)).get(); ensureGreen(); updateIndexSettings(Settings.builder().put("index.gc_deletes", "10ms").put("index.refresh_interval", "-1"), "test"); // Index a doc: - client().prepareIndex("test") - .setId("id") + prepareIndex("test").setId("id") .setSource("foo", "bar") .setOpType(DocWriteRequest.OpType.INDEX) .setVersion(10) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); if (randomBoolean()) { // Force refresh so the add is sometimes visible in the searcher: @@ -786,20 +736,20 @@ public void testDeleteNotLost() throws Exception { } // Delete it - client().prepareDelete("test", "id").setVersion(11).setVersionType(VersionType.EXTERNAL).execute().actionGet(); + client().prepareDelete("test", "id").setVersion(11).setVersionType(VersionType.EXTERNAL).get(); // Real-time get should reflect delete: - assertThat("doc should have been deleted", client().prepareGet("test", "id").execute().actionGet().getVersion(), equalTo(-1L)); + 
assertThat("doc should have been deleted", client().prepareGet("test", "id").get().getVersion(), equalTo(-1L)); // ThreadPool.relativeTimeInMillis has default granularity of 200 msec, so we must sleep at least that long; sleep much longer in // case system is busy: Thread.sleep(1000); // Delete an unrelated doc (provokes pruning deletes from versionMap) - client().prepareDelete("test", "id2").setVersion(11).setVersionType(VersionType.EXTERNAL).execute().actionGet(); + client().prepareDelete("test", "id2").setVersion(11).setVersionType(VersionType.EXTERNAL).get(); // Real-time get should still reflect delete: - assertThat("doc should have been deleted", client().prepareGet("test", "id").execute().actionGet().getVersion(), equalTo(-1L)); + assertThat("doc should have been deleted", client().prepareGet("test", "id").get().getVersion(), equalTo(-1L)); } public void testGCDeletesZero() throws Exception { @@ -809,14 +759,12 @@ public void testGCDeletesZero() throws Exception { // We test deletes, but can't rely on wall-clock delete GC: updateIndexSettings(Settings.builder().put("index.gc_deletes", "0ms"), "test"); // Index a doc: - client().prepareIndex("test") - .setId("id") + prepareIndex("test").setId("id") .setSource("foo", "bar") .setOpType(DocWriteRequest.OpType.INDEX) .setVersion(10) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); if (randomBoolean()) { // Force refresh so the add is sometimes visible in the searcher: @@ -824,47 +772,39 @@ public void testGCDeletesZero() throws Exception { } // Delete it - client().prepareDelete("test", "id").setVersion(11).setVersionType(VersionType.EXTERNAL).execute().actionGet(); + client().prepareDelete("test", "id").setVersion(11).setVersionType(VersionType.EXTERNAL).get(); // Real-time get should reflect delete even though index.gc_deletes is 0: - assertThat("doc should have been deleted", client().prepareGet("test", "id").execute().actionGet().getVersion(), equalTo(-1L)); + assertThat("doc should have been deleted", client().prepareGet("test", "id").get().getVersion(), equalTo(-1L)); } public void testSpecialVersioning() { internalCluster().ensureAtLeastNumDataNodes(2); createIndex("test", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()); - DocWriteResponse doc1 = client().prepareIndex("test") - .setId("1") + DocWriteResponse doc1 = prepareIndex("test").setId("1") .setSource("field", "value1") .setVersion(0) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); assertThat(doc1.getVersion(), equalTo(0L)); - DocWriteResponse doc2 = client().prepareIndex("test") - .setId("1") + DocWriteResponse doc2 = prepareIndex("test").setId("1") .setSource("field", "value2") .setVersion(Versions.MATCH_ANY) .setVersionType(VersionType.INTERNAL) - .execute() - .actionGet(); + .get(); assertThat(doc2.getVersion(), equalTo(1L)); client().prepareDelete("test", "1").get(); // v2 - DocWriteResponse doc3 = client().prepareIndex("test") - .setId("1") + DocWriteResponse doc3 = prepareIndex("test").setId("1") .setSource("field", "value3") .setVersion(Versions.MATCH_DELETED) .setVersionType(VersionType.INTERNAL) - .execute() - .actionGet(); + .get(); assertThat(doc3.getVersion(), equalTo(3L)); - DocWriteResponse doc4 = client().prepareIndex("test") - .setId("1") + DocWriteResponse doc4 = prepareIndex("test").setId("1") .setSource("field", "value4") .setVersion(4L) .setVersionType(VersionType.EXTERNAL_GTE) - .execute() - .actionGet(); + .get(); assertThat(doc4.getVersion(), equalTo(4L)); // Make 
sure that these versions are replicated correctly setReplicaCount(1, "test"); diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 98dd182900f88..613e6868b8e9f 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -407,8 +407,10 @@ with org.elasticsearch.features.FeatureInfrastructureFeatures, org.elasticsearch.health.HealthFeatures, + org.elasticsearch.cluster.service.TransportFeatures, org.elasticsearch.cluster.metadata.MetadataFeatures, - org.elasticsearch.rest.RestFeatures; + org.elasticsearch.rest.RestFeatures, + org.elasticsearch.indices.IndicesFeatures; uses org.elasticsearch.plugins.internal.SettingsExtension; uses RestExtension; @@ -420,5 +422,9 @@ org.elasticsearch.index.codec.bloomfilter.ES87BloomFilterPostingsFormat; provides org.apache.lucene.codecs.DocValuesFormat with ES87TSDBDocValuesFormat; - exports org.elasticsearch.cluster.routing.allocation.shards to org.elasticsearch.shardhealth, org.elasticsearch.serverless.shardhealth; + exports org.elasticsearch.cluster.routing.allocation.shards + to + org.elasticsearch.shardhealth, + org.elasticsearch.serverless.shardhealth, + org.elasticsearch.serverless.apifiltering; } diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java index d625da5df9cc7..b67b59aeee076 100644 --- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -264,8 +264,13 @@ public static void maybeDieOnAnotherThread(final Throwable throwable) { /** * Deduplicate the failures by exception message and index. + * @param failures array to deduplicate + * @return deduplicated array; if failures is null or empty, it will be returned without modification */ public static ShardOperationFailedException[] groupBy(ShardOperationFailedException[] failures) { + if (failures == null || failures.length == 0) { + return failures; + } List uniqueFailures = new ArrayList<>(); Set reasons = new HashSet<>(); for (ShardOperationFailedException failure : failures) { diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index edc4805c4d8ce..44f98305d2997 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -9,6 +9,7 @@ package org.elasticsearch; import org.elasticsearch.core.Assertions; +import org.elasticsearch.core.UpdateForV9; import java.lang.reflect.Field; import java.util.Collection; @@ -47,6 +48,7 @@ static TransportVersion def(int id) { return new TransportVersion(id); } + @UpdateForV9 // remove the transport versions with which v9 will not need to interact public static final TransportVersion ZERO = def(0); public static final TransportVersion V_7_0_0 = def(7_00_00_99); public static final TransportVersion V_7_0_1 = def(7_00_01_99); @@ -163,6 +165,22 @@ static TransportVersion def(int id) { public static final TransportVersion DEPRECATED_COMPONENT_TEMPLATES_ADDED = def(8_532_00_0); public static final TransportVersion UPDATE_NON_DYNAMIC_SETTINGS_ADDED = def(8_533_00_0); public static final TransportVersion REPO_ANALYSIS_REGISTER_OP_COUNT_ADDED = def(8_534_00_0); + public static final TransportVersion ML_TRAINED_MODEL_PREFIX_STRINGS_ADDED = def(8_535_00_0); + public static final TransportVersion COUNTED_KEYWORD_ADDED = def(8_536_00_0); + public static final 
TransportVersion SHAPE_VALUE_SERIALIZATION_ADDED = def(8_537_00_0); + public static final TransportVersion INFERENCE_MULTIPLE_INPUTS = def(8_538_00_0); + public static final TransportVersion ADDITIONAL_DESIRED_BALANCE_RECONCILIATION_STATS = def(8_539_00_0); + public static final TransportVersion ML_STATE_CHANGE_TIMESTAMPS = def(8_540_00_0); + public static final TransportVersion DATA_STREAM_FAILURE_STORE_ADDED = def(8_541_00_0); + public static final TransportVersion ML_INFERENCE_OPENAI_ADDED = def(8_542_00_0); + public static final TransportVersion SHUTDOWN_MIGRATION_STATUS_INCLUDE_COUNTS = def(8_543_00_0); + public static final TransportVersion TRANSFORM_GET_CHECKPOINT_QUERY_AND_CLUSTER_ADDED = def(8_544_00_0); + public static final TransportVersion GRANT_API_KEY_CLIENT_AUTHENTICATION_ADDED = def(8_545_00_0); + public static final TransportVersion PIT_WITH_INDEX_FILTER = def(8_546_00_0); + public static final TransportVersion NODE_INFO_VERSION_AS_STRING = def(8_547_00_0); + public static final TransportVersion GET_API_KEY_INVALIDATION_TIME_ADDED = def(8_548_00_0); + public static final TransportVersion ML_INFERENCE_GET_MULTIPLE_MODELS = def(8_549_00_0); + public static final TransportVersion INFERENCE_SERVICE_RESULTS_ADDED = def(8_550_00_0); /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 56a00e25022d4..5dd9a3a055043 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -115,6 +115,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_7_17_13 = new Version(7_17_13_99); public static final Version V_7_17_14 = new Version(7_17_14_99); public static final Version V_7_17_15 = new Version(7_17_15_99); + public static final Version V_7_17_16 = new Version(7_17_16_99); public static final Version V_8_0_0 = new Version(8_00_00_99); public static final Version V_8_0_1 = new Version(8_00_01_99); public static final Version V_8_1_0 = new Version(8_01_00_99); @@ -155,6 +156,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_10_4 = new Version(8_10_04_99); public static final Version V_8_11_0 = new Version(8_11_00_99); public static final Version V_8_11_1 = new Version(8_11_01_99); + public static final Version V_8_11_2 = new Version(8_11_02_99); public static final Version V_8_12_0 = new Version(8_12_00_99); public static final Version CURRENT = V_8_12_0; diff --git a/server/src/main/java/org/elasticsearch/action/ActionListener.java b/server/src/main/java/org/elasticsearch/action/ActionListener.java index 30ad4fdeaf04f..b0e18d5ef9b55 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/ActionListener.java @@ -327,7 +327,12 @@ private void assertFirstRun() { @Override public void onResponse(Response response) { assertFirstRun(); - delegate.onResponse(response); + try { + delegate.onResponse(response); + } catch (Exception e) { + assert false : new AssertionError("listener [" + delegate + "] must handle its own exceptions", e); + throw e; + } } @Override diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index a855b6b8ee7e3..e0f01405bcf0f 100644 --- 
a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -202,11 +202,12 @@ import org.elasticsearch.action.admin.indices.validate.query.TransportValidateQueryAction; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction; import org.elasticsearch.action.bulk.BulkAction; +import org.elasticsearch.action.bulk.SimulateBulkAction; import org.elasticsearch.action.bulk.TransportBulkAction; import org.elasticsearch.action.bulk.TransportShardBulkAction; +import org.elasticsearch.action.bulk.TransportSimulateBulkAction; import org.elasticsearch.action.delete.DeleteAction; import org.elasticsearch.action.delete.TransportDeleteAction; -import org.elasticsearch.action.explain.ExplainAction; import org.elasticsearch.action.explain.TransportExplainAction; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction; import org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesAction; @@ -225,15 +226,8 @@ import org.elasticsearch.action.ingest.PutPipelineTransportAction; import org.elasticsearch.action.ingest.SimulatePipelineAction; import org.elasticsearch.action.ingest.SimulatePipelineTransportAction; -import org.elasticsearch.action.search.ClearScrollAction; -import org.elasticsearch.action.search.ClosePointInTimeAction; -import org.elasticsearch.action.search.MultiSearchAction; -import org.elasticsearch.action.search.OpenPointInTimeAction; import org.elasticsearch.action.search.RestClosePointInTimeAction; import org.elasticsearch.action.search.RestOpenPointInTimeAction; -import org.elasticsearch.action.search.SearchAction; -import org.elasticsearch.action.search.SearchScrollAction; -import org.elasticsearch.action.search.SearchShardsAction; import org.elasticsearch.action.search.TransportClearScrollAction; import org.elasticsearch.action.search.TransportClosePointInTimeAction; import org.elasticsearch.action.search.TransportMultiSearchAction; @@ -439,6 +433,7 @@ import org.elasticsearch.rest.action.ingest.RestDeletePipelineAction; import org.elasticsearch.rest.action.ingest.RestGetPipelineAction; import org.elasticsearch.rest.action.ingest.RestPutPipelineAction; +import org.elasticsearch.rest.action.ingest.RestSimulateIngestAction; import org.elasticsearch.rest.action.ingest.RestSimulatePipelineAction; import org.elasticsearch.rest.action.search.RestClearScrollAction; import org.elasticsearch.rest.action.search.RestCountAction; @@ -758,15 +753,16 @@ public void reg actions.register(MultiGetAction.INSTANCE, TransportMultiGetAction.class); actions.register(TransportShardMultiGetAction.TYPE, TransportShardMultiGetAction.class); actions.register(BulkAction.INSTANCE, TransportBulkAction.class); + actions.register(SimulateBulkAction.INSTANCE, TransportSimulateBulkAction.class); actions.register(TransportShardBulkAction.TYPE, TransportShardBulkAction.class); - actions.register(SearchAction.INSTANCE, TransportSearchAction.class); - actions.register(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class); - actions.register(OpenPointInTimeAction.INSTANCE, TransportOpenPointInTimeAction.class); - actions.register(ClosePointInTimeAction.INSTANCE, TransportClosePointInTimeAction.class); - actions.register(SearchShardsAction.INSTANCE, TransportSearchShardsAction.class); - actions.register(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class); - actions.register(ExplainAction.INSTANCE, TransportExplainAction.class); - actions.register(ClearScrollAction.INSTANCE, 
TransportClearScrollAction.class); + actions.register(TransportSearchAction.TYPE, TransportSearchAction.class); + actions.register(TransportSearchScrollAction.TYPE, TransportSearchScrollAction.class); + actions.register(TransportOpenPointInTimeAction.TYPE, TransportOpenPointInTimeAction.class); + actions.register(TransportClosePointInTimeAction.TYPE, TransportClosePointInTimeAction.class); + actions.register(TransportSearchShardsAction.TYPE, TransportSearchShardsAction.class); + actions.register(TransportMultiSearchAction.TYPE, TransportMultiSearchAction.class); + actions.register(TransportExplainAction.TYPE, TransportExplainAction.class); + actions.register(TransportClearScrollAction.TYPE, TransportClearScrollAction.class); actions.register(RecoveryAction.INSTANCE, TransportRecoveryAction.class); actions.register(NodesReloadSecureSettingsAction.INSTANCE, TransportNodesReloadSecureSettingsAction.class); actions.register(AutoCreateAction.INSTANCE, AutoCreateAction.TransportAction.class); @@ -944,6 +940,7 @@ public void initRestHandlers(Supplier<DiscoveryNodes> nodesInCluster, Predicate< registerHandler.accept(new RestGetComposableIndexTemplateAction()); registerHandler.accept(new RestDeleteComposableIndexTemplateAction()); registerHandler.accept(new RestSimulateIndexTemplateAction()); + registerHandler.accept(new RestSimulateIngestAction()); registerHandler.accept(new RestSimulateTemplateAction()); registerHandler.accept(new RestPutMappingAction()); diff --git a/server/src/main/java/org/elasticsearch/action/ActionRunnable.java b/server/src/main/java/org/elasticsearch/action/ActionRunnable.java index 3c59e3d66d4db..7feabf7e0241f 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionRunnable.java +++ b/server/src/main/java/org/elasticsearch/action/ActionRunnable.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedRunnable; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; /** @@ -58,6 +59,32 @@ public String toString() { }); } + /** + * Same as {@link #supply(ActionListener, CheckedSupplier)} but the supplier always returns an object of reference counted result type + * which will have its reference count decremented after invoking the listener. + */ + public static <T extends RefCounted> ActionRunnable<T> supplyAndDecRef( + ActionListener<T> listener, + CheckedSupplier<T, Exception> supplier + ) { + return wrap(listener, new CheckedConsumer<>() { + @Override + public void accept(ActionListener<T> l) throws Exception { + var res = supplier.get(); + try { + l.onResponse(res); + } finally { + res.decRef(); + } + } + + @Override + public String toString() { + return supplier.toString(); + } + }); + } + /** * Creates a {@link Runnable} that wraps the given listener and a consumer of it that is executed when the {@link Runnable} is run. * Invokes {@link ActionListener#onFailure(Exception)} on it if an exception is thrown on executing the consumer.
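As the new javadoc states, supplyAndDecRef targets suppliers of ref-counted results: the listener gets its chance to retain the object, after which the runnable drops its own reference. A hypothetical call site (threadPool, listener and computeRefCountedResult are placeholders, not names from this change):

    // Build a ref-counted result on the generic pool; once onResponse returns,
    // supplyAndDecRef calls res.decRef() to release the runnable's reference.
    threadPool.generic().execute(ActionRunnable.supplyAndDecRef(listener, () -> computeRefCountedResult()));

A listener that needs the result beyond onResponse must incRef it itself before returning.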
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java index ef04198c7374b..e3373ded94dc7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java @@ -36,7 +36,9 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.List; import java.util.Locale; +import java.util.function.Consumer; import static java.lang.String.format; @@ -44,7 +46,7 @@ public class TransportUpdateDesiredNodesAction extends TransportMasterNodeAction<UpdateDesiredNodesRequest, UpdateDesiredNodesResponse> private static final Logger logger = LogManager.getLogger(TransportUpdateDesiredNodesAction.class); private final FeatureService featureService; - private final DesiredNodesSettingsValidator settingsValidator; + private final Consumer<List<DesiredNode>> desiredNodesValidator; private final MasterServiceTaskQueue<UpdateDesiredNodesTask> taskQueue; @Inject @@ -55,7 +57,28 @@ public TransportUpdateDesiredNodesAction( ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - DesiredNodesSettingsValidator settingsValidator, + AllocationService allocationService + ) { + this( + transportService, + clusterService, + featureService, + threadPool, + actionFilters, + indexNameExpressionResolver, + new DesiredNodesSettingsValidator(), + allocationService + ); + } + + TransportUpdateDesiredNodesAction( + TransportService transportService, + ClusterService clusterService, + FeatureService featureService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + Consumer<List<DesiredNode>> desiredNodesValidator, AllocationService allocationService ) { super( @@ -71,7 +94,7 @@ public TransportUpdateDesiredNodesAction( EsExecutors.DIRECT_EXECUTOR_SERVICE ); this.featureService = featureService; - this.settingsValidator = settingsValidator; + this.desiredNodesValidator = desiredNodesValidator; this.taskQueue = clusterService.createTaskQueue( "update-desired-nodes", Priority.URGENT, @@ -92,7 +115,7 @@ protected void masterOperation( ActionListener<UpdateDesiredNodesResponse> responseListener ) throws Exception { ActionListener.run(responseListener, listener -> { - settingsValidator.validate(request.getNodes()); + desiredNodesValidator.accept(request.getNodes()); taskQueue.submitTask("update-desired-nodes", new UpdateDesiredNodesTask(request, listener), request.masterNodeTimeout()); }); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java index af8637cf1febc..63c2be9050ab0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java @@ -19,6 +19,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import
org.elasticsearch.indices.SystemIndices; @@ -54,6 +55,7 @@ public class TransportGetFeatureUpgradeStatusAction extends TransportMasterNodeA /** * Once all feature migrations for 8.x -> 9.x have been tested, we can bump this to Version.V_8_0_0 */ + @UpdateForV9 public static final Version NO_UPGRADE_REQUIRED_VERSION = Version.V_7_0_0; public static final IndexVersion NO_UPGRADE_REQUIRED_INDEX_VERSION = IndexVersions.V_7_0_0; @@ -61,6 +63,7 @@ public class TransportGetFeatureUpgradeStatusAction extends TransportMasterNodeA PersistentTasksService persistentTasksService; @Inject + @UpdateForV9 // Once we begin working on 9.x, we need to update our migration classes public TransportGetFeatureUpgradeStatusAction( TransportService transportService, ThreadPool threadPool, @@ -82,8 +85,6 @@ public TransportGetFeatureUpgradeStatusAction( EsExecutors.DIRECT_EXECUTOR_SERVICE ); - assert Version.CURRENT.major == 8 : "Once we begin working on 9.x, we need to update our migration classes"; - this.systemIndices = systemIndices; this.persistentTasksService = persistentTasksService; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java index 6c10a6a07cba6..6e700ca4aecc3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java @@ -40,7 +40,7 @@ */ public class NodeInfo extends BaseNodeResponse { - private final Version version; + private final String version; private final TransportVersion transportVersion; private final IndexVersion indexVersion; private final Map componentVersions; @@ -61,16 +61,23 @@ public class NodeInfo extends BaseNodeResponse { public NodeInfo(StreamInput in) throws IOException { super(in); - version = Version.readVersion(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.NODE_INFO_VERSION_AS_STRING)) { + version = in.readString(); transportVersion = TransportVersion.readVersion(in); - } else { - transportVersion = TransportVersion.fromId(version.id); - } - if (in.getTransportVersion().onOrAfter(TransportVersions.NODE_INFO_INDEX_VERSION_ADDED)) { indexVersion = IndexVersion.readVersion(in); } else { - indexVersion = IndexVersion.fromId(version.id); + Version legacyVersion = Version.readVersion(in); + version = legacyVersion.toString(); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { + transportVersion = TransportVersion.readVersion(in); + } else { + transportVersion = TransportVersion.fromId(legacyVersion.id); + } + if (in.getTransportVersion().onOrAfter(TransportVersions.NODE_INFO_INDEX_VERSION_ADDED)) { + indexVersion = IndexVersion.readVersion(in); + } else { + indexVersion = IndexVersion.fromId(legacyVersion.id); + } } if (in.getTransportVersion().onOrAfter(TransportVersions.NODE_INFO_COMPONENT_VERSIONS_ADDED)) { componentVersions = in.readImmutableMap(StreamInput::readString, StreamInput::readVInt); @@ -105,7 +112,7 @@ public NodeInfo(StreamInput in) throws IOException { } public NodeInfo( - Version version, + String version, TransportVersion transportVersion, IndexVersion indexVersion, Map componentVersions, @@ -156,7 +163,7 @@ public String getHostname() { * The current ES version */ public String getVersion() { - return version.toString(); + return version; } /** @@ -227,7 +234,11 @@ private void 
addInfoIfNonNull(Class clazz, @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - Version.writeVersion(version, out); + if (out.getTransportVersion().onOrAfter(TransportVersions.NODE_INFO_VERSION_AS_STRING)) { + out.writeString(version); + } else { + Version.writeVersion(Version.fromString(version), out); + } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { TransportVersion.writeVersion(transportVersion, out); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java index 9f9613e7834a0..c19ff7ea3e46e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -8,7 +8,6 @@ package org.elasticsearch.action.admin.cluster.node.stats; -import org.elasticsearch.TransportVersions; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.support.nodes.BaseNodesRequest; import org.elasticsearch.common.Strings; @@ -20,37 +19,27 @@ import java.io.IOException; import java.util.Arrays; -import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; -import java.util.stream.Collectors; /** * A request to get node (cluster) level stats. */ public class NodesStatsRequest extends BaseNodesRequest { - private CommonStatsFlags indices = new CommonStatsFlags(); - private final Set requestedMetrics = new HashSet<>(); - private boolean includeShardsStats = true; + private NodesStatsRequestParameters nodesStatsRequestParameters; public NodesStatsRequest() { super((String[]) null); + nodesStatsRequestParameters = new NodesStatsRequestParameters(); } public NodesStatsRequest(StreamInput in) throws IOException { super(in); - indices = new CommonStatsFlags(in); - requestedMetrics.clear(); - requestedMetrics.addAll(in.readStringCollectionAsList()); - if (in.getTransportVersion().onOrAfter(TransportVersions.INCLUDE_SHARDS_STATS_ADDED)) { - includeShardsStats = in.readBoolean(); - } else { - includeShardsStats = true; - } + nodesStatsRequestParameters = new NodesStatsRequestParameters(in); } /** @@ -59,14 +48,15 @@ public NodesStatsRequest(StreamInput in) throws IOException { */ public NodesStatsRequest(String... nodesIds) { super(nodesIds); + nodesStatsRequestParameters = new NodesStatsRequestParameters(); } /** * Sets all the request flags. */ public NodesStatsRequest all() { - this.indices.all(); - this.requestedMetrics.addAll(Metric.allMetrics()); + this.nodesStatsRequestParameters.indices().all(); + this.nodesStatsRequestParameters.requestedMetrics().addAll(NodesStatsRequestParameters.Metric.allMetrics()); return this; } @@ -74,28 +64,28 @@ public NodesStatsRequest all() { * Clears all the request flags. */ public NodesStatsRequest clear() { - this.indices.clear(); - this.requestedMetrics.clear(); + this.nodesStatsRequestParameters.indices().clear(); + this.nodesStatsRequestParameters.requestedMetrics().clear(); return this; } /** - * Get indices. Handles separately from other metrics because it may or + * Get nodesStatsMetrics.indices(). Handles separately from other metrics because it may or * may not have submetrics. 
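The accessors above are now thin delegates: NodesStatsRequest keeps its public API while the actual state (the CommonStatsFlags, the metric names, and includeShardsStats) moves into the NodesStatsRequestParameters class introduced later in this diff. Call sites are unaffected; a brief usage sketch, where the node id is a placeholder:

    NodesStatsRequest request = new NodesStatsRequest("node-1"); // hypothetical node id
    request.clear();                                             // start with no metrics
    request.addMetric(NodesStatsRequestParameters.Metric.JVM.metricName());
    request.indices(false);                                      // skip index-level stats

Because the parameters object writes the same fields in the same order the request used to write inline, the wire format is unchanged by the refactor.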
* @return flags indicating which indices stats to return */ public CommonStatsFlags indices() { - return indices; + return nodesStatsRequestParameters.indices(); } /** - * Set indices. Handles separately from other metrics because it may or + * Set nodesStatsMetrics.indices(). Handles separately from other metrics because it may or * may not involve submetrics. * @param indices flags indicating which indices stats to return * @return This object, for request chaining. */ public NodesStatsRequest indices(CommonStatsFlags indices) { - this.indices = indices; + nodesStatsRequestParameters.setIndices(indices); return this; } @@ -104,9 +94,9 @@ public NodesStatsRequest indices(CommonStatsFlags indices) { */ public NodesStatsRequest indices(boolean indices) { if (indices) { - this.indices.all(); + this.nodesStatsRequestParameters.indices().all(); } else { - this.indices.clear(); + this.nodesStatsRequestParameters.indices().clear(); } return this; } @@ -116,17 +106,17 @@ public NodesStatsRequest indices(boolean indices) { * handled separately. */ public Set requestedMetrics() { - return Set.copyOf(requestedMetrics); + return Set.copyOf(nodesStatsRequestParameters.requestedMetrics()); } /** * Add metric */ public NodesStatsRequest addMetric(String metric) { - if (Metric.allMetrics().contains(metric) == false) { + if (NodesStatsRequestParameters.Metric.allMetrics().contains(metric) == false) { throw new IllegalStateException("Used an illegal metric: " + metric); } - requestedMetrics.add(metric); + nodesStatsRequestParameters.requestedMetrics().add(metric); return this; } @@ -136,12 +126,12 @@ public NodesStatsRequest addMetric(String metric) { public NodesStatsRequest addMetrics(String... metrics) { // use sorted set for reliable ordering in error messages SortedSet metricsSet = new TreeSet<>(Set.of(metrics)); - if (Metric.allMetrics().containsAll(metricsSet) == false) { - metricsSet.removeAll(Metric.allMetrics()); + if (NodesStatsRequestParameters.Metric.allMetrics().containsAll(metricsSet) == false) { + metricsSet.removeAll(NodesStatsRequestParameters.Metric.allMetrics()); String plural = metricsSet.size() == 1 ? "" : "s"; throw new IllegalStateException("Used illegal metric" + plural + ": " + metricsSet); } - requestedMetrics.addAll(metricsSet); + nodesStatsRequestParameters.requestedMetrics().addAll(metricsSet); return this; } @@ -149,10 +139,10 @@ public NodesStatsRequest addMetrics(String... 
metrics) { * Remove metric */ public NodesStatsRequest removeMetric(String metric) { - if (Metric.allMetrics().contains(metric) == false) { + if (NodesStatsRequestParameters.Metric.allMetrics().contains(metric) == false) { throw new IllegalStateException("Used an illegal metric: " + metric); } - requestedMetrics.remove(metric); + nodesStatsRequestParameters.requestedMetrics().remove(metric); return this; } @@ -161,8 +151,8 @@ public String getDescription() { return Strings.format( "nodes=%s, metrics=%s, flags=%s", Arrays.toString(nodesIds()), - requestedMetrics.toString(), - Arrays.toString(indices.getFlags()) + nodesStatsRequestParameters.requestedMetrics().toString(), + Arrays.toString(nodesStatsRequestParameters.indices().getFlags()) ); } @@ -177,60 +167,17 @@ public String getDescription() { } public boolean includeShardsStats() { - return includeShardsStats; + return nodesStatsRequestParameters.includeShardsStats(); } public void setIncludeShardsStats(boolean includeShardsStats) { - this.includeShardsStats = includeShardsStats; + nodesStatsRequestParameters.setIncludeShardsStats(includeShardsStats); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - indices.writeTo(out); - out.writeStringCollection(requestedMetrics); - if (out.getTransportVersion().onOrAfter(TransportVersions.INCLUDE_SHARDS_STATS_ADDED)) { - out.writeBoolean(includeShardsStats); - } + nodesStatsRequestParameters.writeTo(out); } - /** - * An enumeration of the "core" sections of metrics that may be requested - * from the nodes stats endpoint. Eventually this list will be pluggable. - */ - public enum Metric { - OS("os"), - PROCESS("process"), - JVM("jvm"), - THREAD_POOL("thread_pool"), - FS("fs"), - TRANSPORT("transport"), - HTTP("http"), - BREAKER("breaker"), - SCRIPT("script"), - DISCOVERY("discovery"), - INGEST("ingest"), - ADAPTIVE_SELECTION("adaptive_selection"), - SCRIPT_CACHE("script_cache"), - INDEXING_PRESSURE("indexing_pressure"), - REPOSITORIES("repositories"); - - private String metricName; - - Metric(String name) { - this.metricName = name; - } - - public String metricName() { - return this.metricName; - } - - boolean containedIn(Set metricNames) { - return metricNames.contains(this.metricName()); - } - - static Set allMetrics() { - return Arrays.stream(values()).map(Metric::metricName).collect(Collectors.toSet()); - } - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java index 1cfa92d91e9f1..48f90ccc712fd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java @@ -47,12 +47,12 @@ public NodesStatsRequestBuilder setIndices(boolean indices) { } public NodesStatsRequestBuilder setBreaker(boolean breaker) { - addOrRemoveMetric(breaker, NodesStatsRequest.Metric.BREAKER); + addOrRemoveMetric(breaker, NodesStatsRequestParameters.Metric.BREAKER); return this; } public NodesStatsRequestBuilder setScript(boolean script) { - addOrRemoveMetric(script, NodesStatsRequest.Metric.SCRIPT); + addOrRemoveMetric(script, NodesStatsRequestParameters.Metric.SCRIPT); return this; } @@ -68,7 +68,7 @@ public NodesStatsRequestBuilder setIndices(CommonStatsFlags indices) { * Should the node OS stats be returned. 
*/ public NodesStatsRequestBuilder setOs(boolean os) { - addOrRemoveMetric(os, NodesStatsRequest.Metric.OS); + addOrRemoveMetric(os, NodesStatsRequestParameters.Metric.OS); return this; } @@ -76,7 +76,7 @@ public NodesStatsRequestBuilder setOs(boolean os) { * Should the node OS stats be returned. */ public NodesStatsRequestBuilder setProcess(boolean process) { - addOrRemoveMetric(process, NodesStatsRequest.Metric.PROCESS); + addOrRemoveMetric(process, NodesStatsRequestParameters.Metric.PROCESS); return this; } @@ -84,7 +84,7 @@ public NodesStatsRequestBuilder setProcess(boolean process) { * Should the node JVM stats be returned. */ public NodesStatsRequestBuilder setJvm(boolean jvm) { - addOrRemoveMetric(jvm, NodesStatsRequest.Metric.JVM); + addOrRemoveMetric(jvm, NodesStatsRequestParameters.Metric.JVM); return this; } @@ -92,7 +92,7 @@ public NodesStatsRequestBuilder setJvm(boolean jvm) { * Should the node thread pool stats be returned. */ public NodesStatsRequestBuilder setThreadPool(boolean threadPool) { - addOrRemoveMetric(threadPool, NodesStatsRequest.Metric.THREAD_POOL); + addOrRemoveMetric(threadPool, NodesStatsRequestParameters.Metric.THREAD_POOL); return this; } @@ -100,7 +100,7 @@ public NodesStatsRequestBuilder setThreadPool(boolean threadPool) { * Should the node file system stats be returned. */ public NodesStatsRequestBuilder setFs(boolean fs) { - addOrRemoveMetric(fs, NodesStatsRequest.Metric.FS); + addOrRemoveMetric(fs, NodesStatsRequestParameters.Metric.FS); return this; } @@ -108,7 +108,7 @@ public NodesStatsRequestBuilder setFs(boolean fs) { * Should the node Transport stats be returned. */ public NodesStatsRequestBuilder setTransport(boolean transport) { - addOrRemoveMetric(transport, NodesStatsRequest.Metric.TRANSPORT); + addOrRemoveMetric(transport, NodesStatsRequestParameters.Metric.TRANSPORT); return this; } @@ -116,7 +116,7 @@ public NodesStatsRequestBuilder setTransport(boolean transport) { * Should the node HTTP stats be returned. */ public NodesStatsRequestBuilder setHttp(boolean http) { - addOrRemoveMetric(http, NodesStatsRequest.Metric.HTTP); + addOrRemoveMetric(http, NodesStatsRequestParameters.Metric.HTTP); return this; } @@ -124,7 +124,7 @@ public NodesStatsRequestBuilder setHttp(boolean http) { * Should the discovery stats be returned. */ public NodesStatsRequestBuilder setDiscovery(boolean discovery) { - addOrRemoveMetric(discovery, NodesStatsRequest.Metric.DISCOVERY); + addOrRemoveMetric(discovery, NodesStatsRequestParameters.Metric.DISCOVERY); return this; } @@ -132,12 +132,12 @@ public NodesStatsRequestBuilder setDiscovery(boolean discovery) { * Should ingest statistics be returned. 
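Every setter in this builder reduces to the same toggle, addOrRemoveMetric, shown at the end of this file's diff: a true flag adds the metric's wire name to the request and false removes it. A brief usage sketch, assuming a client handle and a placeholder node id:

    NodesStatsResponse stats = client.admin()
        .cluster()
        .prepareNodesStats("node-1") // hypothetical node id
        .setJvm(true)                // adds Metric.JVM
        .setOs(false)                // removes Metric.OS if previously added
        .get();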
*/ public NodesStatsRequestBuilder setIngest(boolean ingest) { - addOrRemoveMetric(ingest, NodesStatsRequest.Metric.INGEST); + addOrRemoveMetric(ingest, NodesStatsRequestParameters.Metric.INGEST); return this; } public NodesStatsRequestBuilder setAdaptiveSelection(boolean adaptiveSelection) { - addOrRemoveMetric(adaptiveSelection, NodesStatsRequest.Metric.ADAPTIVE_SELECTION); + addOrRemoveMetric(adaptiveSelection, NodesStatsRequestParameters.Metric.ADAPTIVE_SELECTION); return this; } @@ -145,24 +145,24 @@ public NodesStatsRequestBuilder setAdaptiveSelection(boolean adaptiveSelection) * Should script context cache statistics be returned */ public NodesStatsRequestBuilder setScriptCache(boolean scriptCache) { - addOrRemoveMetric(scriptCache, NodesStatsRequest.Metric.SCRIPT_CACHE); + addOrRemoveMetric(scriptCache, NodesStatsRequestParameters.Metric.SCRIPT_CACHE); return this; } public NodesStatsRequestBuilder setIndexingPressure(boolean indexingPressure) { - addOrRemoveMetric(indexingPressure, NodesStatsRequest.Metric.INDEXING_PRESSURE); + addOrRemoveMetric(indexingPressure, NodesStatsRequestParameters.Metric.INDEXING_PRESSURE); return this; } public NodesStatsRequestBuilder setRepositoryStats(boolean repositoryStats) { - addOrRemoveMetric(repositoryStats, NodesStatsRequest.Metric.REPOSITORIES); + addOrRemoveMetric(repositoryStats, NodesStatsRequestParameters.Metric.REPOSITORIES); return this; } /** * Helper method for adding metrics to a request */ - private void addOrRemoveMetric(boolean includeMetric, NodesStatsRequest.Metric metric) { + private void addOrRemoveMetric(boolean includeMetric, NodesStatsRequestParameters.Metric metric) { if (includeMetric) { request.addMetric(metric.metricName()); } else { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java new file mode 100644 index 0000000000000..6c7ce472475cd --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.cluster.node.stats; + +import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * This class encapsulates the metrics and other information needed to define scope when we are requesting node stats. 
+ */ +public class NodesStatsRequestParameters implements Writeable { + private CommonStatsFlags indices = new CommonStatsFlags(); + private final Set<String> requestedMetrics = new HashSet<>(); + private boolean includeShardsStats = true; + + public NodesStatsRequestParameters() {} + + public NodesStatsRequestParameters(StreamInput in) throws IOException { + indices = new CommonStatsFlags(in); + requestedMetrics.clear(); + requestedMetrics.addAll(in.readStringCollectionAsList()); + if (in.getTransportVersion().onOrAfter(TransportVersions.INCLUDE_SHARDS_STATS_ADDED)) { + includeShardsStats = in.readBoolean(); + } else { + includeShardsStats = true; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + indices.writeTo(out); + out.writeStringCollection(requestedMetrics); + if (out.getTransportVersion().onOrAfter(TransportVersions.INCLUDE_SHARDS_STATS_ADDED)) { + out.writeBoolean(includeShardsStats); + } + } + + public CommonStatsFlags indices() { + return indices; + } + + public void setIndices(CommonStatsFlags indices) { + this.indices = indices; + } + + public Set<String> requestedMetrics() { + return requestedMetrics; + } + + public boolean includeShardsStats() { + return includeShardsStats; + } + + public void setIncludeShardsStats(boolean includeShardsStats) { + this.includeShardsStats = includeShardsStats; + } + + /** + * An enumeration of the "core" sections of metrics that may be requested + * from the nodes stats endpoint. Eventually this list will be pluggable. + */ + public enum Metric { + OS("os"), + PROCESS("process"), + JVM("jvm"), + THREAD_POOL("thread_pool"), + FS("fs"), + TRANSPORT("transport"), + HTTP("http"), + BREAKER("breaker"), + SCRIPT("script"), + DISCOVERY("discovery"), + INGEST("ingest"), + ADAPTIVE_SELECTION("adaptive_selection"), + SCRIPT_CACHE("script_cache"), + INDEXING_PRESSURE("indexing_pressure"), + REPOSITORIES("repositories"); + + private String metricName; + + Metric(String name) { + this.metricName = name; + } + + public String metricName() { + return this.metricName; + } + + boolean containedIn(Set<String> metricNames) { + return metricNames.contains(this.metricName()); + } + + static Set<String> allMetrics() { + return Arrays.stream(values()).map(Metric::metricName).collect(Collectors.toSet()); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index b9ab520c4da8d..96fc30f93c890 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -84,21 +84,21 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest, Task task) return nodeService.stats( request.indices(), request.includeShardsStats(), - NodesStatsRequest.Metric.OS.containedIn(metrics), - NodesStatsRequest.Metric.PROCESS.containedIn(metrics), - NodesStatsRequest.Metric.JVM.containedIn(metrics), - NodesStatsRequest.Metric.THREAD_POOL.containedIn(metrics), - NodesStatsRequest.Metric.FS.containedIn(metrics), - NodesStatsRequest.Metric.TRANSPORT.containedIn(metrics), - NodesStatsRequest.Metric.HTTP.containedIn(metrics), - NodesStatsRequest.Metric.BREAKER.containedIn(metrics), - NodesStatsRequest.Metric.SCRIPT.containedIn(metrics), - NodesStatsRequest.Metric.DISCOVERY.containedIn(metrics), - NodesStatsRequest.Metric.INGEST.containedIn(metrics), -
NodesStatsRequest.Metric.ADAPTIVE_SELECTION.containedIn(metrics), - NodesStatsRequest.Metric.SCRIPT_CACHE.containedIn(metrics), - NodesStatsRequest.Metric.INDEXING_PRESSURE.containedIn(metrics), - NodesStatsRequest.Metric.REPOSITORIES.containedIn(metrics) + NodesStatsRequestParameters.Metric.OS.containedIn(metrics), + NodesStatsRequestParameters.Metric.PROCESS.containedIn(metrics), + NodesStatsRequestParameters.Metric.JVM.containedIn(metrics), + NodesStatsRequestParameters.Metric.THREAD_POOL.containedIn(metrics), + NodesStatsRequestParameters.Metric.FS.containedIn(metrics), + NodesStatsRequestParameters.Metric.TRANSPORT.containedIn(metrics), + NodesStatsRequestParameters.Metric.HTTP.containedIn(metrics), + NodesStatsRequestParameters.Metric.BREAKER.containedIn(metrics), + NodesStatsRequestParameters.Metric.SCRIPT.containedIn(metrics), + NodesStatsRequestParameters.Metric.DISCOVERY.containedIn(metrics), + NodesStatsRequestParameters.Metric.INGEST.containedIn(metrics), + NodesStatsRequestParameters.Metric.ADAPTIVE_SELECTION.containedIn(metrics), + NodesStatsRequestParameters.Metric.SCRIPT_CACHE.containedIn(metrics), + NodesStatsRequestParameters.Metric.INDEXING_PRESSURE.containedIn(metrics), + NodesStatsRequestParameters.Metric.REPOSITORIES.containedIn(metrics) ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java deleted file mode 100644 index 361d4509ed95b..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.cluster.node.usage; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; -import org.elasticsearch.client.internal.ElasticsearchClient; - -public class NodesUsageRequestBuilder extends NodesOperationRequestBuilder< - NodesUsageRequest, - NodesUsageResponse, - NodesUsageRequestBuilder> { - - public NodesUsageRequestBuilder(ElasticsearchClient client, ActionType action) { - super(client, action, new NodesUsageRequest()); - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java index 498350b766448..412a34a6e1562 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java @@ -20,13 +20,6 @@ public class DeleteRepositoryRequestBuilder extends AcknowledgedRequestBuilder< AcknowledgedResponse, DeleteRepositoryRequestBuilder> { - /** - * Constructs unregister repository request builder - */ - public DeleteRepositoryRequestBuilder(ElasticsearchClient client, DeleteRepositoryAction action) { - super(client, action, new DeleteRepositoryRequest()); - } - /** * Constructs unregister repository request builder with specified repository name */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java index 9ef6b5ca8a3d5..6a0d4a5e126f1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java @@ -20,13 +20,6 @@ public class GetRepositoriesRequestBuilder extends MasterNodeReadOperationReques GetRepositoriesResponse, GetRepositoriesRequestBuilder> { - /** - * Creates new get repository request builder - */ - public GetRepositoriesRequestBuilder(ElasticsearchClient client, GetRepositoriesAction action) { - super(client, action, new GetRepositoriesRequest()); - } - /** * Creates new get repository request builder */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java index 0ef45712e5051..21401ba986674 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java @@ -24,13 +24,6 @@ public class PutRepositoryRequestBuilder extends AcknowledgedRequestBuilder< AcknowledgedResponse, PutRepositoryRequestBuilder> { - /** - * Constructs register repository request - */ - public PutRepositoryRequestBuilder(ElasticsearchClient client, PutRepositoryAction action) { - super(client, action, new PutRepositoryRequest()); - } - /** * Constructs register repository request for the repository with a given name */ diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java index 7c40030f14c00..dc6257b222ab2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java @@ -19,13 +19,6 @@ public class VerifyRepositoryRequestBuilder extends MasterNodeOperationRequestBu VerifyRepositoryResponse, VerifyRepositoryRequestBuilder> { - /** - * Constructs unregister repository request builder - */ - public VerifyRepositoryRequestBuilder(ElasticsearchClient client, VerifyRepositoryAction action) { - super(client, action, new VerifyRepositoryRequest()); - } - /** * Constructs unregister repository request builder with specified repository name */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java index 25d3c53521345..ae6ec9a5b3c49 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java @@ -23,13 +23,6 @@ public class CreateSnapshotRequestBuilder extends MasterNodeOperationRequestBuil CreateSnapshotResponse, CreateSnapshotRequestBuilder> { - /** - * Constructs a new create snapshot request builder - */ - public CreateSnapshotRequestBuilder(ElasticsearchClient client, CreateSnapshotAction action) { - super(client, action, new CreateSnapshotRequest()); - } - /** * Constructs a new create snapshot request builder with specified repository and snapshot names */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java index 3ceab6badcaa8..4046c0bc7dd03 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java @@ -20,13 +20,6 @@ public class DeleteSnapshotRequestBuilder extends MasterNodeOperationRequestBuil AcknowledgedResponse, DeleteSnapshotRequestBuilder> { - /** - * Constructs delete snapshot request builder - */ - public DeleteSnapshotRequestBuilder(ElasticsearchClient client, DeleteSnapshotAction action) { - super(client, action, new DeleteSnapshotRequest()); - } - /** * Constructs delete snapshot request builder with specified repository and snapshot names */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java index 04e67a86e4a7a..49cc5df049332 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java @@ -25,13 +25,6 @@ public class RestoreSnapshotRequestBuilder 
extends MasterNodeOperationRequestBui RestoreSnapshotResponse, RestoreSnapshotRequestBuilder> { - /** - * Constructs new restore snapshot request builder - */ - public RestoreSnapshotRequestBuilder(ElasticsearchClient client, RestoreSnapshotAction action) { - super(client, action, new RestoreSnapshotRequest()); - } - /** * Constructs new restore snapshot request builder with specified repository and snapshot names */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/DenseVectorFieldStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/DenseVectorFieldStats.java index e27972a60cbc9..099c299e0114e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/DenseVectorFieldStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/DenseVectorFieldStats.java @@ -19,6 +19,8 @@ * Holds enhanced stats about a dense vector mapped field. */ public final class DenseVectorFieldStats extends FieldStats { + static final int UNSET = -1; + int indexedVectorCount; // number of times vectors with index:true are used in mappings of this cluster int indexedVectorDimMin; // minimum dimension of indexed vectors in this cluster int indexedVectorDimMax; // maximum dimension of indexed vectors in this cluster @@ -26,8 +28,8 @@ public final class DenseVectorFieldStats extends FieldStats { DenseVectorFieldStats(String name) { super(name); indexedVectorCount = 0; - indexedVectorDimMin = 1024; - indexedVectorDimMax = 0; + indexedVectorDimMin = UNSET; + indexedVectorDimMax = UNSET; } DenseVectorFieldStats(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java index 197a5d839eecf..e2ade5060c476 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java @@ -87,13 +87,17 @@ public static MappingStats of(Metadata metadata, Runnable ensureNotCancelled) { stats = fieldTypes.computeIfAbsent(type, DenseVectorFieldStats::new); boolean indexed = fieldMapping.containsKey("index") ? 
(boolean) fieldMapping.get("index") : false; if (indexed) { - ((DenseVectorFieldStats) stats).indexedVectorCount += count; - int dims = (int) fieldMapping.get("dims"); - if (dims < ((DenseVectorFieldStats) stats).indexedVectorDimMin) { - ((DenseVectorFieldStats) stats).indexedVectorDimMin = dims; - } - if (dims > ((DenseVectorFieldStats) stats).indexedVectorDimMax) { - ((DenseVectorFieldStats) stats).indexedVectorDimMax = dims; + DenseVectorFieldStats vStats = (DenseVectorFieldStats) stats; + vStats.indexedVectorCount += count; + Object obj = fieldMapping.get("dims"); + if (obj != null) { + int dims = (int) obj; + if (vStats.indexedVectorDimMin == DenseVectorFieldStats.UNSET || dims < vStats.indexedVectorDimMin) { + vStats.indexedVectorDimMin = dims; + } + if (vStats.indexedVectorDimMax == DenseVectorFieldStats.UNSET || dims > vStats.indexedVectorDimMax) { + vStats.indexedVectorDimMax = dims; + } } } } else { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java index ee6797ca58fb9..9d10065c9c3e9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -22,6 +23,7 @@ import java.io.IOException; import java.util.Map; +@UpdateForV9 // make this class a regular ActionRequest rather than a MasterNodeReadRequest public class GetAliasesRequest extends MasterNodeReadRequest<GetAliasesRequest> implements AliasesRequest { public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandHidden(); @@ -40,9 +42,10 @@ public GetAliasesRequest() {} /** * NB prior to 8.12 get-aliases was a TransportMasterNodeReadAction so for BwC we must remain able to read these requests until we no - * longer need to support {@link org.elasticsearch.TransportVersions#CLUSTER_FEATURES_ADDED} and earlier. Once we remove this we can - * also make this class a regular ActionRequest instead of a MasterNodeReadRequest. + * longer need to support calling this action remotely. Once we remove this we can also make this class a regular ActionRequest instead + * of a MasterNodeReadRequest.
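The old seeds (min = 1024, max = 0) silently misreport any cluster whose indexed vectors all have more than 1024 dimensions, and the unguarded (int) fieldMapping.get("dims") cast could throw a NullPointerException when the mapping omits dims. A small worked example of the corrected sentinel logic:

    // Starting from UNSET (-1), the first observed value initialises both bounds.
    int min = DenseVectorFieldStats.UNSET;
    int max = DenseVectorFieldStats.UNSET;
    for (int dims : new int[] { 2048 }) {
        if (min == DenseVectorFieldStats.UNSET || dims < min) min = dims;
        if (max == DenseVectorFieldStats.UNSET || dims > max) max = dims;
    }
    // min == 2048 and max == 2048; with the old seeds this cluster would have
    // reported min == 1024, a dimension no field actually uses.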
*/ + @UpdateForV9 // remove this constructor public GetAliasesRequest(StreamInput in) throws IOException { super(in); indices = in.readStringArray(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java index c0e26b16585c4..edb05b0fcef75 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.DataStreamAlias; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.UpdateForV9; import java.io.IOException; import java.util.List; @@ -38,8 +39,9 @@ public Map> getDataStreamAliases() { /** * NB prior to 8.12 get-aliases was a TransportMasterNodeReadAction so for BwC we must remain able to write these responses until we no - * longer need to support {@link org.elasticsearch.TransportVersions#CLUSTER_FEATURES_ADDED} and earlier. + * longer need to support calling this action remotely. */ + @UpdateForV9 // replace this implementation with TransportAction.localOnly() @Override public void writeTo(StreamOutput out) throws IOException { out.writeMap(aliases, StreamOutput::writeCollection); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index e43d1a825c233..9b9fb49c1bbe0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.indices.SystemIndices.SystemIndexAccessLevel; import org.elasticsearch.tasks.CancellableTask; @@ -41,9 +42,9 @@ /** * NB prior to 8.12 this was a TransportMasterNodeReadAction so for BwC it must be registered with the TransportService (i.e. a - * HandledTransportAction) until we no longer need to support {@link org.elasticsearch.TransportVersions#CLUSTER_FEATURES_ADDED} and - * earlier. + * HandledTransportAction) until we no longer need to support calling this action remotely. 
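The @UpdateForV9 markers added here flag code that survives only for wire compatibility with pre-8.12 callers and is meant to be deleted in the next major version. The annotation's own definition is not part of this diff; a source-retained marker of roughly this shape would be enough (a sketch, not the actual org.elasticsearch.core.UpdateForV9):

    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;

    // Source-retained marker for "delete this when v9 development starts"; attachable
    // to the kinds of elements annotated in this diff. Illustrative only.
    @Retention(RetentionPolicy.SOURCE)
    @Target({ ElementType.TYPE, ElementType.CONSTRUCTOR, ElementType.METHOD, ElementType.FIELD })
    @interface UpdateForV9Sketch {
    }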
*/ +@UpdateForV9 // remove the HandledTransportAction superclass, this action need not be registered with the TransportService public class TransportGetAliasesAction extends TransportLocalClusterStateAction<GetAliasesRequest, GetAliasesResponse> { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(TransportGetAliasesAction.class); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestBuilder.java index 7e6b31271ae90..3d6bf0ff15bb1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestBuilder.java @@ -18,10 +18,6 @@ */ public class CloseIndexRequestBuilder extends AcknowledgedRequestBuilder<CloseIndexRequest, CloseIndexResponse, CloseIndexRequestBuilder> { - public CloseIndexRequestBuilder(ElasticsearchClient client, CloseIndexAction action) { - super(client, action, new CloseIndexRequest()); - } - public CloseIndexRequestBuilder(ElasticsearchClient client, CloseIndexAction action, String... indices) { super(client, action, new CloseIndexRequest(indices)); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java index 1cec71d2abe53..87334afa3ed8a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java @@ -50,6 +50,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -111,7 +112,7 @@ public TransportAction( this.taskQueue = clusterService.createTaskQueue("auto-create", Priority.URGENT, batchExecutionContext -> { final var listener = new AllocationActionMultiListener<CreateIndexResponse>(threadPool.getThreadContext()); final var taskContexts = batchExecutionContext.taskContexts(); - final var successfulRequests = Maps.<CreateIndexRequest, String>newMapWithExpectedSize(taskContexts.size()); + final var successfulRequests = Maps.<CreateIndexRequest, List<String>>newMapWithExpectedSize(taskContexts.size()); var state = batchExecutionContext.initialState(); for (final var taskContext : taskContexts) { final var task = taskContext.getTask(); @@ -169,6 +170,13 @@ public void onFailure(Exception e) { private ClusterStateAckListener getAckListener( String indexName, AllocationActionMultiListener<CreateIndexResponse> allocationActionMultiListener + ) { + return getAckListener(List.of(indexName), allocationActionMultiListener); + } + + private ClusterStateAckListener getAckListener( + List<String> indexNames, + AllocationActionMultiListener<CreateIndexResponse> allocationActionMultiListener ) { return new ClusterStateAckListener() { @Override @@ -180,22 +188,22 @@ public boolean mustAck(DiscoveryNode discoveryNode) { public void onAllNodesAcked() { ActiveShardsObserver.waitForActiveShards( clusterService, - new String[] { indexName }, + indexNames.toArray(String[]::new), ActiveShardCount.DEFAULT, request.timeout(), allocationActionMultiListener.delay(listener) - .map(shardsAcked -> new CreateIndexResponse(true, shardsAcked, indexName)) + .map(shardsAcked -> new CreateIndexResponse(true, shardsAcked, indexNames.get(0))) ); } @Override public void onAckFailure(Exception e) { - allocationActionMultiListener.delay(listener).onResponse(new CreateIndexResponse(false, false, indexName)); +
allocationActionMultiListener.delay(listener).onResponse(new CreateIndexResponse(false, false, indexNames.get(0))); } @Override public void onAckTimeout() { - allocationActionMultiListener.delay(listener).onResponse(new CreateIndexResponse(false, false, indexName)); + allocationActionMultiListener.delay(listener).onResponse(new CreateIndexResponse(false, false, indexNames.get(0))); } @Override @@ -212,7 +220,7 @@ public TimeValue ackTimeout() { */ ClusterState execute( ClusterState currentState, - Map successfulRequests, + Map> successfulRequests, ClusterStateTaskExecutor.TaskContext taskContext, AllocationActionMultiListener allocationActionMultiListener ) throws Exception { @@ -255,9 +263,13 @@ ClusterState execute( rerouteCompletionIsNotRequired() ); - final var indexName = clusterState.metadata().dataStreams().get(request.index()).getIndices().get(0).getName(); - taskContext.success(getAckListener(indexName, allocationActionMultiListener)); - successfulRequests.put(request, indexName); + final var dataStream = clusterState.metadata().dataStreams().get(request.index()); + final var backingIndexName = dataStream.getIndices().get(0).getName(); + final var indexNames = dataStream.getFailureIndices().isEmpty() + ? List.of(backingIndexName) + : List.of(backingIndexName, dataStream.getFailureIndices().get(0).getName()); + taskContext.success(getAckListener(indexNames, allocationActionMultiListener)); + successfulRequests.put(request, indexNames); return clusterState; } else { final var indexName = IndexNameExpressionResolver.resolveDateMathExpression(request.index()); @@ -272,7 +284,7 @@ ClusterState execute( if (shouldAutoCreate == false) { // The index already exists. taskContext.success(getAckListener(indexName, allocationActionMultiListener)); - successfulRequests.put(request, indexName); + successfulRequests.put(request, List.of(indexName)); return currentState; } } @@ -318,7 +330,7 @@ ClusterState execute( rerouteCompletionIsNotRequired() ); taskContext.success(getAckListener(indexName, allocationActionMultiListener)); - successfulRequests.put(request, indexName); + successfulRequests.put(request, List.of(indexName)); return clusterState; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index d3773a49df4dc..29af167679451 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -79,20 +79,18 @@ protected void shardOperationOnPrimary( IndexShard primary, ActionListener> listener ) { - ActionListener.completeWith(listener, () -> { - primary.flush(shardRequest.getRequest()); + primary.flush(shardRequest.getRequest(), listener.map(flushed -> { logger.trace("{} flush request executed on primary", primary.shardId()); return new PrimaryResult<>(shardRequest, new ReplicationResponse()); - }); + })); } @Override protected void shardOperationOnReplica(ShardFlushRequest request, IndexShard replica, ActionListener listener) { - ActionListener.completeWith(listener, () -> { - replica.flush(request.getRequest()); + replica.flush(request.getRequest(), listener.map(flushed -> { logger.trace("{} flush request executed on replica", replica.shardId()); return new ReplicaResult(); - }); + })); } // TODO: Remove this transition in 9.0 diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequestBuilder.java index 060ead9deb246..85a31925901d4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequestBuilder.java @@ -18,10 +18,6 @@ */ public class OpenIndexRequestBuilder extends AcknowledgedRequestBuilder { - public OpenIndexRequestBuilder(ElasticsearchClient client, OpenIndexAction action) { - super(client, action, new OpenIndexRequest()); - } - public OpenIndexRequestBuilder(ElasticsearchClient client, OpenIndexAction action, String... indices) { super(client, action, new OpenIndexRequest(indices)); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java index 9f8ac48feb861..a203f810ebf3d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java @@ -16,10 +16,6 @@ public class DeleteIndexTemplateRequestBuilder extends MasterNodeOperationReques AcknowledgedResponse, DeleteIndexTemplateRequestBuilder> { - public DeleteIndexTemplateRequestBuilder(ElasticsearchClient client, DeleteIndexTemplateAction action) { - super(client, action, new DeleteIndexTemplateRequest()); - } - public DeleteIndexTemplateRequestBuilder(ElasticsearchClient client, DeleteIndexTemplateAction action, String name) { super(client, action, new DeleteIndexTemplateRequest(name)); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java index 8eb9d0b93e6b1..194ac7b77f65c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java @@ -15,10 +15,6 @@ public class GetIndexTemplatesRequestBuilder extends MasterNodeReadOperationRequ GetIndexTemplatesResponse, GetIndexTemplatesRequestBuilder> { - public GetIndexTemplatesRequestBuilder(ElasticsearchClient client, GetIndexTemplatesAction action) { - super(client, action, new GetIndexTemplatesRequest()); - } - public GetIndexTemplatesRequestBuilder(ElasticsearchClient client, GetIndexTemplatesAction action, String... 
names) { super(client, action, new GetIndexTemplatesRequest(names)); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java index 4ab0b6bd221e9..af40637db6703 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java @@ -53,6 +53,7 @@ import java.util.stream.Collectors; import static java.util.Collections.emptyMap; +import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.isDataStreamsLifecycleOnlyMode; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findConflictingV1Templates; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findConflictingV2Templates; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findV2Template; @@ -69,6 +70,7 @@ public class TransportSimulateIndexTemplateAction extends TransportMasterNodeRea private final SystemIndices systemIndices; private final Set indexSettingProviders; private final ClusterSettings clusterSettings; + private final boolean isDslOnlyMode; @Inject public TransportSimulateIndexTemplateAction( @@ -100,6 +102,7 @@ public TransportSimulateIndexTemplateAction( this.systemIndices = systemIndices; this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders(); this.clusterSettings = clusterService.getClusterSettings(); + this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings()); } @Override @@ -146,6 +149,7 @@ protected void masterOperation( matchingTemplate, request.getIndexName(), stateWithTemplate, + isDslOnlyMode, xContentRegistry, indicesService, systemIndices, @@ -218,6 +222,7 @@ public static Template resolveTemplate( final String matchingTemplate, final String indexName, final ClusterState simulatedState, + final boolean isDslOnlyMode, final NamedXContentRegistry xContentRegistry, final IndicesService indicesService, final SystemIndices systemIndices, @@ -304,6 +309,9 @@ public static Template resolveTemplate( Settings settings = Settings.builder().put(templateSettings).put(additionalSettings.build()).build(); DataStreamLifecycle lifecycle = resolveLifecycle(simulatedState.metadata(), matchingTemplate); + if (template.getDataStreamTemplate() != null && lifecycle == null && isDslOnlyMode) { + lifecycle = DataStreamLifecycle.DEFAULT; + } return new Template(settings, mergedMapping, aliasesByName, lifecycle); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java index b99f436dd86f9..1f35d0b8a1268 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java @@ -39,6 +39,7 @@ import java.util.Map; import java.util.Set; +import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.isDataStreamsLifecycleOnlyMode; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findConflictingV1Templates; import static 
org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findConflictingV2Templates; @@ -56,6 +57,7 @@ public class TransportSimulateTemplateAction extends TransportMasterNodeReadActi private final SystemIndices systemIndices; private final Set indexSettingProviders; private final ClusterSettings clusterSettings; + private final boolean isDslOnlyMode; @Inject public TransportSimulateTemplateAction( @@ -87,6 +89,7 @@ public TransportSimulateTemplateAction( this.systemIndices = systemIndices; this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders(); this.clusterSettings = clusterService.getClusterSettings(); + this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings()); } @Override @@ -162,6 +165,7 @@ protected void masterOperation( matchingTemplate, temporaryIndexName, stateWithTemplate, + isDslOnlyMode, xContentRegistry, indicesService, systemIndices, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java index 17439f2312036..7dc19ff52ce84 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java @@ -24,10 +24,6 @@ public class PutIndexTemplateRequestBuilder extends MasterNodeOperationRequestBu AcknowledgedResponse, PutIndexTemplateRequestBuilder> { - public PutIndexTemplateRequestBuilder(ElasticsearchClient client, PutIndexTemplateAction action) { - super(client, action, new PutIndexTemplateRequest()); - } - public PutIndexTemplateRequestBuilder(ElasticsearchClient client, PutIndexTemplateAction action, String name) { super(client, action, new PutIndexTemplateRequest(name)); } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index 76259d899c90a..c2b6c666d829a 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.ingest.SimulateIndexResponse; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -505,7 +506,9 @@ public void writeThin(StreamOutput out) throws IOException { } private void writeResponseType(StreamOutput out) throws IOException { - if (response instanceof IndexResponse) { + if (response instanceof SimulateIndexResponse) { + out.writeByte((byte) 4); + } else if (response instanceof IndexResponse) { out.writeByte((byte) 0); } else if (response instanceof DeleteResponse) { out.writeByte((byte) 1); @@ -523,6 +526,7 @@ private static DocWriteResponse readResponse(ShardId shardId, StreamInput in) th case 1 -> new DeleteResponse(shardId, in); case 2 -> null; case 3 -> new UpdateResponse(shardId, in); + case 4 -> new SimulateIndexResponse(in); default -> throw new IllegalArgumentException("Unexpected type [" + type + "]"); }; } @@ -534,6 +538,7 @@ private static DocWriteResponse readResponse(StreamInput in) throws IOException case 1 -> new DeleteResponse(in); case 
2 -> null; case 3 -> new UpdateResponse(in); + case 4 -> new SimulateIndexResponse(in); default -> throw new IllegalArgumentException("Unexpected type [" + type + "]"); }; } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java index 3b6e69d16bae3..f1280587a0c55 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java @@ -8,7 +8,6 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -19,6 +18,7 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RestApiVersion; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.rest.action.document.RestBulkAction; @@ -430,32 +430,32 @@ public void parse( } } + @UpdateForV9 + // Warnings will need to be replaced with XContentEOFException from 9.x + private static void warnBulkActionNotProperlyClosed(String message) { + deprecationLogger.compatibleCritical(STRICT_ACTION_PARSING_WARNING_KEY, message); + } + private static void checkBulkActionIsProperlyClosed(XContentParser parser) throws IOException { XContentParser.Token token; try { token = parser.nextToken(); } catch (XContentEOFException ignore) { - assert Version.CURRENT.major == Version.V_7_17_0.major + 1; - deprecationLogger.compatibleCritical( - STRICT_ACTION_PARSING_WARNING_KEY, + warnBulkActionNotProperlyClosed( "A bulk action wasn't closed properly with the closing brace. Malformed objects are currently accepted but will be " + "rejected in a future version." ); return; } if (token != XContentParser.Token.END_OBJECT) { - assert Version.CURRENT.major == Version.V_7_17_0.major + 1; - deprecationLogger.compatibleCritical( - STRICT_ACTION_PARSING_WARNING_KEY, + warnBulkActionNotProperlyClosed( "A bulk action object contained multiple keys. Additional keys are currently ignored but will be rejected in a " + "future version." ); return; } if (parser.nextToken() != null) { - assert Version.CURRENT.major == Version.V_7_17_0.major + 1; - deprecationLogger.compatibleCritical( - STRICT_ACTION_PARSING_WARNING_KEY, + warnBulkActionNotProperlyClosed( "A bulk action contained trailing data after the closing brace. This is currently ignored but will be rejected in a " + "future version." 
); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/Retry.java b/server/src/main/java/org/elasticsearch/action/bulk/Retry.java index 6503c207e8290..33fb81a6520cb 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/Retry.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/Retry.java @@ -66,7 +66,7 @@ public PlainActionFuture<BulkResponse> withBackoff( BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, BulkRequest bulkRequest ) { - PlainActionFuture<BulkResponse> future = PlainActionFuture.newFuture(); + PlainActionFuture<BulkResponse> future = new PlainActionFuture<>(); withBackoff(consumer, bulkRequest, future); return future; } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchAction.java b/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkAction.java similarity index 55% rename from server/src/main/java/org/elasticsearch/action/search/SearchAction.java rename to server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkAction.java index 61d7a3355dc8f..a799c60fe7b38 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkAction.java @@ -6,17 +6,16 @@ * Side Public License, v 1. */ -package org.elasticsearch.action.search; +package org.elasticsearch.action.bulk; import org.elasticsearch.action.ActionType; -public class SearchAction extends ActionType<SearchResponse> { +public class SimulateBulkAction extends ActionType<BulkResponse> { - public static final SearchAction INSTANCE = new SearchAction(); - public static final String NAME = "indices:data/read/search"; + public static final SimulateBulkAction INSTANCE = new SimulateBulkAction(); + public static final String NAME = "indices:data/write/simulate/bulk"; - private SearchAction() { - super(NAME, SearchResponse::new); + private SimulateBulkAction() { + super(NAME, BulkResponse::new); } - } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java new file mode 100644 index 0000000000000..c167c88954b38 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.bulk; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; + +import java.io.IOException; +import java.util.Map; + +/** + * This extends BulkRequest with support for providing substitute pipeline definitions. In a user request, the pipeline substitutions + * will look something like this: + * + * "pipeline_substitutions": { + * "my-pipeline-1": { + * "processors": [ + * { + * "set": { + * "field": "my-new-boolean-field", + * "value": true + * } + * } + * ] + * }, + * "my-pipeline-2": { + * "processors": [ + * { + * "set": { + * "field": "my-new-boolean-field", + * "value": true + * }, + * "rename": { + * "field": "old_field", + * "target_field": "new field" + * } + * } + * ] + * } + * } + * + * The pipelineSubstitutions Map held by this class is intended to be the result of XContentHelper.convertToMap().
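The Retry.withBackoff change above keeps the same adapter shape: allocate the future, hand it to the callback-based overload as the listener, and return it. Reduced to standard Java, with CompletableFuture standing in for PlainActionFuture and hypothetical type parameters:

    import java.util.concurrent.CompletableFuture;
    import java.util.function.BiConsumer;

    class RetrySketch {
        // Create the future, pass it to the callback-based variant as its listener,
        // and return it to the caller, mirroring the withBackoff overload above.
        static <Req, Resp> CompletableFuture<Resp> withBackoff(BiConsumer<Req, CompletableFuture<Resp>> withBackoffAsync, Req request) {
            CompletableFuture<Resp> future = new CompletableFuture<>();
            withBackoffAsync.accept(request, future); // the async path completes the future
            return future;
        }
    }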
The top-level keys + * are the pipelineIds ("my-pipeline-1" and "my-pipeline-2" in the example above). The values are the Maps of "processors" to the List of + * processor definitions. + */ +public class SimulateBulkRequest extends BulkRequest { + private final Map> pipelineSubstitutions; + + /** + * @param pipelineSubstitutions The pipeline definitions that are to be used in place of any pre-existing pipeline definitions with + * the same pipelineId. The key of the map is the pipelineId, and the value the pipeline definition as + * parsed by XContentHelper.convertToMap(). + */ + public SimulateBulkRequest(@Nullable Map> pipelineSubstitutions) { + super(); + this.pipelineSubstitutions = pipelineSubstitutions; + } + + @SuppressWarnings("unchecked") + public SimulateBulkRequest(StreamInput in) throws IOException { + super(in); + this.pipelineSubstitutions = (Map>) in.readGenericValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeGenericValue(pipelineSubstitutions); + } + + public Map> getPipelineSubstitutions() { + return pipelineSubstitutions; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index f11baec87de9b..b89b5e2de7924 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -17,6 +17,7 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteRequest.OpType; import org.elasticsearch.action.DocWriteResponse; @@ -45,6 +46,7 @@ import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Assertions; @@ -92,6 +94,7 @@ public class TransportBulkAction extends HandledTransportAction bulkAction; private final ThreadPool threadPool; private final ClusterService clusterService; private final IngestService ingestService; @@ -141,8 +144,39 @@ public TransportBulkAction( SystemIndices systemIndices, LongSupplier relativeTimeProvider ) { - super(BulkAction.NAME, transportService, actionFilters, BulkRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); + this( + BulkAction.INSTANCE, + BulkRequest::new, + threadPool, + transportService, + clusterService, + ingestService, + client, + actionFilters, + indexNameExpressionResolver, + indexingPressure, + systemIndices, + relativeTimeProvider + ); + } + + TransportBulkAction( + ActionType bulkAction, + Writeable.Reader requestReader, + ThreadPool threadPool, + TransportService transportService, + ClusterService clusterService, + IngestService ingestService, + NodeClient client, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + IndexingPressure indexingPressure, + SystemIndices systemIndices, + LongSupplier relativeTimeProvider + ) { + super(bulkAction.name(), transportService, actionFilters, requestReader, EsExecutors.DIRECT_EXECUTOR_SERVICE); Objects.requireNonNull(relativeTimeProvider); 
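Since SimulateBulkRequest stores its substitutions as the output of XContentHelper.convertToMap(), the "my-pipeline-1" example in the javadoc above corresponds to a nested map of this shape (plain collections used here for illustration):

    import java.util.List;
    import java.util.Map;

    class PipelineSubstitutionsExample {
        // pipelineId -> { "processors": [ { "set": { ... } } ] }, matching the javadoc example.
        static Map<String, Map<String, Object>> example() {
            return Map.of(
                "my-pipeline-1",
                Map.<String, Object>of(
                    "processors",
                    List.of(Map.of("set", Map.<String, Object>of("field", "my-new-boolean-field", "value", true)))
                )
            );
        }
    }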
+ this.bulkAction = bulkAction; this.threadPool = threadPool; this.clusterService = clusterService; this.ingestService = ingestService; @@ -267,7 +301,6 @@ protected void doRun() { protected void doInternalExecute(Task task, BulkRequest bulkRequest, String executorName, ActionListener listener) { final long startTime = relativeTime(); - final AtomicArray responses = new AtomicArray<>(bulkRequest.requests.size()); boolean hasIndexRequestsWithPipelines = false; final Metadata metadata = clusterService.state().getMetadata(); @@ -301,7 +334,7 @@ protected void doInternalExecute(Task task, BulkRequest bulkRequest, String exec if (clusterService.localNode().isIngestNode()) { processBulkIndexIngestRequest(task, bulkRequest, executorName, l); } else { - ingestForwarder.forwardIngestRequest(BulkAction.INSTANCE, bulkRequest, l); + ingestForwarder.forwardIngestRequest(bulkAction, bulkRequest, l); } }); return; @@ -333,6 +366,30 @@ protected void doInternalExecute(Task task, BulkRequest bulkRequest, String exec } // Step 3: create all the indices that are missing, if there are any missing. start the bulk after all the creates come back. + createMissingIndicesAndIndexData( + task, + bulkRequest, + executorName, + listener, + autoCreateIndices, + indicesThatCannotBeCreated, + startTime + ); + } + + /* + * This method is responsible for creating any missing indices and indexing the data in the BulkRequest + */ + protected void createMissingIndicesAndIndexData( + Task task, + BulkRequest bulkRequest, + String executorName, + ActionListener listener, + Set autoCreateIndices, + Map indicesThatCannotBeCreated, + long startTime + ) { + final AtomicArray responses = new AtomicArray<>(bulkRequest.requests.size()); if (autoCreateIndices.isEmpty()) { executeBulk(task, bulkRequest, startTime, listener, executorName, responses, indicesThatCannotBeCreated); } else { @@ -383,6 +440,14 @@ protected void doRun() { } } + /* + * This returns the IngestService to be used for the given request. The default implementation ignores the request and always returns + * the same ingestService, but child classes might use information in the request in creating an IngestService specific to that request. 
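The getIngestService hook above follows the template-method pattern: the base action ignores the request and returns its shared service, and TransportSimulateBulkAction (added later in this diff) decorates that service per request via SimulateIngestService. A reduced sketch with hypothetical types (IngestLike is not an Elasticsearch interface):

    class BaseAction {
        interface IngestLike {
            String run(String doc);
        }

        private final IngestLike shared = doc -> doc;

        // Default: ignore the request and hand back the shared service.
        protected IngestLike getIngestService(String request) {
            return shared;
        }
    }

    class SimulatingAction extends BaseAction {
        // Decorate rather than replace, mirroring new SimulateIngestService(rawIngestService, request).
        @Override
        protected IngestLike getIngestService(String request) {
            IngestLike raw = super.getIngestService(request);
            return doc -> raw.run(doc) + " [simulated for " + request + "]";
        }
    }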
+ */ + protected IngestService getIngestService(BulkRequest request) { + return ingestService; + } + static void prohibitAppendWritesInBackingIndices(DocWriteRequest writeRequest, Metadata metadata) { DocWriteRequest.OpType opType = writeRequest.opType(); if ((opType == OpType.CREATE || opType == OpType.INDEX) == false) { @@ -488,7 +553,7 @@ private static boolean setResponseFailureIfIndexMatches( return false; } - private long buildTookInMillis(long startTimeNanos) { + protected long buildTookInMillis(long startTimeNanos) { return TimeUnit.NANOSECONDS.toMillis(relativeTime() - startTimeNanos); } @@ -806,7 +871,7 @@ private void processBulkIndexIngestRequest( ) { final long ingestStartTimeInNanos = System.nanoTime(); final BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original); - ingestService.executeBulkRequest( + getIngestService(original).executeBulkRequest( original.numberOfActions(), () -> bulkRequestModifier, bulkRequestModifier::markItemAsDropped, diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java new file mode 100644 index 0000000000000..7e2fef88c7680 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.bulk; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.SimulateIndexResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexingPressure; +import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.ingest.IngestService; +import org.elasticsearch.ingest.SimulateIngestService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.Map; +import java.util.Set; + +public class TransportSimulateBulkAction extends TransportBulkAction { + @Inject + public TransportSimulateBulkAction( + ThreadPool threadPool, + TransportService transportService, + ClusterService clusterService, + IngestService ingestService, + NodeClient client, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + IndexingPressure indexingPressure, + SystemIndices systemIndices + ) { + super( + SimulateBulkAction.INSTANCE, + SimulateBulkRequest::new, + threadPool, + transportService, + clusterService, + ingestService, + client, + actionFilters, + indexNameExpressionResolver, + indexingPressure, + systemIndices, + System::nanoTime + ); + } + + /* + * This overrides indexData in TransportBulkAction in order to _not_ actually 
create any indices or index any data. Instead, each + * request gets a corresponding CREATE response, using information from the request. + */ + @Override + protected void createMissingIndicesAndIndexData( + Task task, + BulkRequest bulkRequest, + String executorName, + ActionListener listener, + Set autoCreateIndices, + Map indicesThatCannotBeCreated, + long startTime + ) { + final AtomicArray responses = new AtomicArray<>(bulkRequest.requests.size()); + for (int i = 0; i < bulkRequest.requests.size(); i++) { + DocWriteRequest request = bulkRequest.requests.get(i); + assert request instanceof IndexRequest; // This action is only ever called with IndexRequests + responses.set( + i, + BulkItemResponse.success( + 0, + DocWriteRequest.OpType.CREATE, + new SimulateIndexResponse( + request.id(), + request.index(), + request.version(), + ((IndexRequest) request).source(), + ((IndexRequest) request).getContentType(), + ((IndexRequest) request).getExecutedPipelines() + ) + ) + ); + } + listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTime))); + } + + /* + * This overrides TransportSimulateBulkAction's getIngestService to allow us to provide an IngestService that handles pipeline + * substitutions defined in the request. + */ + @Override + protected IngestService getIngestService(BulkRequest request) { + IngestService rawIngestService = super.getIngestService(request); + return new SimulateIngestService(rawIngestService, request); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index 9c1fb63a6b8d0..7530fc18acb59 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -307,6 +307,24 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla builder.endArray(); } builder.field(DataStream.GENERATION_FIELD.getPreferredName(), dataStream.getGeneration()); + if (DataStream.isFailureStoreEnabled()) { + builder.field(DataStream.FAILURE_INDICES_FIELD.getPreferredName()); + builder.startArray(); + for (Index failureStore : dataStream.getFailureIndices()) { + builder.startObject(); + failureStore.toXContentFragment(builder); + IndexProperties indexProperties = indexSettingsValues.get(failureStore); + if (indexProperties != null) { + builder.field(PREFER_ILM.getPreferredName(), indexProperties.preferIlm()); + if (indexProperties.ilmPolicyName() != null) { + builder.field(ILM_POLICY_FIELD.getPreferredName(), indexProperties.ilmPolicyName()); + } + builder.field(MANAGED_BY.getPreferredName(), indexProperties.managedBy.displayValue); + } + builder.endObject(); + } + builder.endArray(); + } if (dataStream.getMetadata() != null) { builder.field(DataStream.METADATA_FIELD.getPreferredName(), dataStream.getMetadata()); } @@ -327,6 +345,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla builder.field(SYSTEM_FIELD.getPreferredName(), dataStream.isSystem()); builder.field(ALLOW_CUSTOM_ROUTING.getPreferredName(), dataStream.isAllowCustomRouting()); builder.field(REPLICATED.getPreferredName(), dataStream.isReplicated()); + if (DataStream.isFailureStoreEnabled()) { + builder.field(DataStream.FAILURE_STORE_FIELD.getPreferredName(), dataStream.isFailureStore()); + } if (timeSeries != null) { 
builder.startObject(TIME_SERIES.getPreferredName()); builder.startArray(TEMPORAL_RANGES.getPreferredName()); diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java index 5cfdd2b796b14..29f8e4aba35f8 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java @@ -21,10 +21,6 @@ public class DeleteRequestBuilder extends ReplicationRequestBuilder { - public DeleteRequestBuilder(ElasticsearchClient client, DeleteAction action) { - super(client, action, new DeleteRequest()); - } - public DeleteRequestBuilder(ElasticsearchClient client, DeleteAction action, @Nullable String index) { super(client, action, new DeleteRequest(index)); } diff --git a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java index c2008823b0523..a2f4d6408a3a4 100644 --- a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java +++ b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java @@ -8,14 +8,12 @@ package org.elasticsearch.action.downsample; import org.elasticsearch.TransportVersions; -import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.MasterNodeRequest; -import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.TimeValue; @@ -164,10 +162,4 @@ public boolean equals(Object obj) { } } - public static class RequestBuilder extends ActionRequestBuilder { - - protected RequestBuilder(ElasticsearchClient client, DownsampleAction action) { - super(client, action, new Request()); - } - } } diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainAction.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainAction.java deleted file mode 100644 index 7668a48d623da..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainAction.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.explain; - -import org.elasticsearch.action.ActionType; - -/** - * Entry point for the explain feature. 
- */ -public class ExplainAction extends ActionType { - - public static final ExplainAction INSTANCE = new ExplainAction(); - public static final String NAME = "indices:data/read/explain"; - - private ExplainAction() { - super(NAME, ExplainResponse::new); - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java index 4a9ae67c60e1e..9ae05687649ea 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.explain; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.single.shard.SingleShardOperationRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.Strings; @@ -20,11 +21,7 @@ */ public class ExplainRequestBuilder extends SingleShardOperationRequestBuilder { - ExplainRequestBuilder(ElasticsearchClient client, ExplainAction action) { - super(client, action, new ExplainRequest()); - } - - public ExplainRequestBuilder(ElasticsearchClient client, ExplainAction action, String index, String id) { + public ExplainRequestBuilder(ElasticsearchClient client, ActionType action, String index, String id) { super(client, action, new ExplainRequest().index(index).id(id)); } diff --git a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java index 0b6a0a3276646..d889f8fac8113 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java @@ -11,6 +11,7 @@ import org.apache.lucene.search.Explanation; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; import org.elasticsearch.cluster.ClusterState; @@ -48,6 +49,7 @@ // TODO: AggregatedDfs. Currently the idf can be different then when executing a normal search with explain. public class TransportExplainAction extends TransportSingleShardAction { + public static final ActionType TYPE = new ActionType<>("indices:data/read/explain", ExplainResponse::new); private final SearchService searchService; @Inject @@ -60,7 +62,7 @@ public TransportExplainAction( IndexNameExpressionResolver indexNameExpressionResolver ) { super( - ExplainAction.NAME, + TYPE.name(), threadPool, clusterService, transportService, diff --git a/server/src/main/java/org/elasticsearch/action/get/GetResponse.java b/server/src/main/java/org/elasticsearch/action/get/GetResponse.java index 7af64bed9f3cb..6871c60f11a15 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetResponse.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetResponse.java @@ -88,13 +88,6 @@ public long getPrimaryTerm() { return getResult.getPrimaryTerm(); } - /** - * The source of the document if exists. - */ - public byte[] getSourceAsBytes() { - return getResult.source(); - } - /** * Returns the internal source bytes, as they are returned without munging (for example, * might still be compressed). 
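A recurring refactor in this diff deletes single-use ActionType subclasses (ExplainAction here, ClearScrollAction and ClosePointInTimeAction below) in favour of an inline TYPE constant on the transport action, as TransportExplainAction now does. The shape of that change, sketched against a minimal stand-in for ActionType:

    import java.util.function.Supplier;

    // Minimal stand-in for ActionType<Response>; just enough structure to show the refactor.
    class MiniActionType<R> {
        final String name;
        final Supplier<R> responseReader;

        MiniActionType(String name, Supplier<R> responseReader) {
            this.name = name;
            this.responseReader = responseReader;
        }
    }

    class MiniTransportExplainAction {
        // After: the name and response reader live on the transport action itself,
        // mirroring new ActionType<>("indices:data/read/explain", ExplainResponse::new).
        static final MiniActionType<Object> TYPE = new MiniActionType<>("indices:data/read/explain", Object::new);
    }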
@@ -132,7 +125,7 @@ public Map<String, Object> getSourceAsMap() throws ElasticsearchParseException { } public Map<String, Object> getSource() { - return getResult.getSource(); + return getResult.sourceAsMap(); } public Map<String, DocumentField> getFields() { diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java b/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java index 9dccdfc64620e..a9c0c8ef42380 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java @@ -37,7 +37,7 @@ public class IndexResponse extends DocWriteResponse { * information about the pipelines executed. An empty list means that there were no pipelines executed. */ @Nullable - private final List<String> executedPipelines; + protected final List<String> executedPipelines; public IndexResponse(ShardId shardId, StreamInput in) throws IOException { super(shardId, in); diff --git a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java index 32e1154a8af0a..fdc0e7ba42d92 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java @@ -14,10 +14,6 @@ public class DeletePipelineRequestBuilder extends ActionRequestBuilder<DeletePipelineRequest, AcknowledgedResponse> { - public DeletePipelineRequestBuilder(ElasticsearchClient client, DeletePipelineAction action) { - super(client, action, new DeletePipelineRequest()); - } - public DeletePipelineRequestBuilder(ElasticsearchClient client, DeletePipelineAction action, String id) { super(client, action, new DeletePipelineRequest(id)); } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineRequestBuilder.java index 9d11fddc5f92b..48d5fa0f0968a 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineRequestBuilder.java @@ -16,10 +16,6 @@ public class GetPipelineRequestBuilder extends MasterNodeReadOperationRequestBui GetPipelineResponse, GetPipelineRequestBuilder> { - public GetPipelineRequestBuilder(ElasticsearchClient client, GetPipelineAction action) { - super(client, action, new GetPipelineRequest()); - } - public GetPipelineRequestBuilder(ElasticsearchClient client, GetPipelineAction action, String[] ids) { super(client, action, new GetPipelineRequest(ids)); } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java index 0a68e13a24465..f7a90b94d37ca 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java @@ -16,10 +16,6 @@ public class PutPipelineRequestBuilder extends ActionRequestBuilder<PutPipelineRequest, AcknowledgedResponse> { - public PutPipelineRequestBuilder(ElasticsearchClient client, PutPipelineAction action) { - super(client, action, new PutPipelineRequest()); - } - public PutPipelineRequestBuilder( ElasticsearchClient client, PutPipelineAction action, diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulateIndexResponse.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulateIndexResponse.java new file mode 100644 index
0000000000000..3363f3caa164b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulateIndexResponse.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.ingest; + +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.replication.ReplicationResponse; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.List; + +/** + * This is an IndexResponse that is specifically for simulate requests. Unlike typical IndexResponses, we need to include the original + * source in a SimulateIndexResponse, and don't need most other fields. This has to extend IndexResponse though so that it can be used by + * BulkItemResponse in IngestService. + */ +public class SimulateIndexResponse extends IndexResponse { + private final BytesReference source; + private final XContentType sourceXContentType; + + @SuppressWarnings("this-escape") + public SimulateIndexResponse(StreamInput in) throws IOException { + super(in); + this.source = in.readBytesReference(); + this.sourceXContentType = XContentType.valueOf(in.readString()); + setShardInfo(new ReplicationResponse.ShardInfo(0, 0)); + } + + @SuppressWarnings("this-escape") + public SimulateIndexResponse( + String id, + String index, + long version, + BytesReference source, + XContentType sourceXContentType, + List pipelines + ) { + // We don't actually care about most of the IndexResponse fields: + super(new ShardId(index, "", 0), id == null ? 
"" : id, 0, 0, version, true, pipelines); + this.source = source; + this.sourceXContentType = sourceXContentType; + setShardInfo(new ReplicationResponse.ShardInfo(0, 0)); + } + + @Override + public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("_id", getId()); + builder.field("_index", getShardId().getIndexName()); + builder.field("_version", getVersion()); + builder.field("_source", XContentHelper.convertToMap(source, false, sourceXContentType).v2()); + assert executedPipelines != null : "executedPipelines is null when it shouldn't be - we always list pipelines in simulate mode"; + builder.array("executed_pipelines", executedPipelines.toArray()); + return builder; + } + + @Override + public RestStatus status() { + return RestStatus.CREATED; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBytesReference(source); + out.writeString(sourceXContentType.name()); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("SimulateIndexResponse["); + builder.append("index=").append(getIndex()); + try { + builder.append(",source=").append(XContentHelper.convertToJson(source, false, sourceXContentType)); + } catch (IOException e) { + throw new RuntimeException(e); + } + builder.append(",pipelines=[").append(String.join(", ", executedPipelines)); + return builder.append("]]").toString(); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequestBuilder.java index 92ee01f552da4..93f5ab9b78913 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequestBuilder.java @@ -15,13 +15,6 @@ public class SimulatePipelineRequestBuilder extends ActionRequestBuilder { - /** - * Create a new builder for {@link SimulatePipelineRequest}s - */ - public SimulatePipelineRequestBuilder(ElasticsearchClient client, SimulatePipelineAction action) { - super(client, action, new SimulatePipelineRequest()); - } - /** * Create a new builder for {@link SimulatePipelineRequest}s */ diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index b56cb0ca5926c..82c2f020a0962 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -160,13 +160,16 @@ abstract class AbstractSearchAsyncAction exten this.executor = executor; this.request = request; this.task = task; - this.listener = ActionListener.runAfter(listener, this::releaseContext); + this.listener = ActionListener.runAfter(listener, () -> Releasables.close(releasables)); this.nodeIdToConnection = nodeIdToConnection; this.concreteIndexBoosts = concreteIndexBoosts; this.clusterStateVersion = clusterState.version(); this.minTransportVersion = clusterState.getMinTransportVersion(); this.aliasFilter = aliasFilter; this.results = resultConsumer; + // register the release of the query consumer to free up the circuit breaker memory + // at the end of the search + addReleasable(resultConsumer::decRef); this.clusters = clusters; } @@ -189,10 +192,6 @@ public void addReleasable(Releasable releasable) { 
releasables.add(releasable); } - public void releaseContext() { - Releasables.close(releasables); - } - /** * Builds how long it took to execute the search. */ @@ -260,7 +259,7 @@ private boolean checkMinimumVersion(GroupShardsIterator sha if (it.getTargetNodeIds().isEmpty() == false) { boolean isCompatible = it.getTargetNodeIds().stream().anyMatch(nodeId -> { Transport.Connection conn = getConnection(it.getClusterAlias(), nodeId); - return conn == null ? true : conn.getVersion().onOrAfter(request.minCompatibleShardNode()); + return conn == null || conn.getNode().getVersion().onOrAfter(request.minCompatibleShardNode()); }); if (isCompatible == false) { return false; @@ -746,7 +745,7 @@ final void onPhaseDone() { // as a tribute to @kimchy aka. finishHim() public final Transport.Connection getConnection(String clusterAlias, String nodeId) { Transport.Connection conn = nodeIdToConnection.apply(clusterAlias, nodeId); Version minVersion = request.minCompatibleShardNode(); - if (minVersion != null && conn != null && conn.getVersion().before(minVersion)) { + if (minVersion != null && conn != null && conn.getNode().getVersion().before(minVersion)) { throw new VersionMismatchException("One of the shards is incompatible with the required minimum version [{}]", minVersion); } return conn; diff --git a/server/src/main/java/org/elasticsearch/action/search/ArraySearchPhaseResults.java b/server/src/main/java/org/elasticsearch/action/search/ArraySearchPhaseResults.java index 9f61042320f3e..b4fd0107f731f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ArraySearchPhaseResults.java +++ b/server/src/main/java/org/elasticsearch/action/search/ArraySearchPhaseResults.java @@ -9,7 +9,10 @@ package org.elasticsearch.action.search; import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.search.SearchPhaseResult; +import org.elasticsearch.transport.LeakTracker; import java.util.stream.Stream; @@ -19,6 +22,8 @@ class ArraySearchPhaseResults extends SearchPhaseResults { final AtomicArray results; + private final RefCounted refCounted = LeakTracker.wrap(AbstractRefCounted.of(this::doClose)); + ArraySearchPhaseResults(int size) { super(size); this.results = new AtomicArray<>(size); @@ -32,9 +37,16 @@ Stream getSuccessfulResults() { void consumeResult(Result result, Runnable next) { assert results.get(result.getShardIndex()) == null : "shardIndex: " + result.getShardIndex() + " is already set"; results.set(result.getShardIndex(), result); + result.incRef(); next.run(); } + protected void doClose() { + for (Result result : getAtomicArray().asList()) { + result.decRef(); + } + } + boolean hasResult(int shardIndex) { return results.get(shardIndex) != null; } @@ -43,4 +55,24 @@ boolean hasResult(int shardIndex) { AtomicArray getAtomicArray() { return results; } + + @Override + public void incRef() { + refCounted.incRef(); + } + + @Override + public boolean tryIncRef() { + return refCounted.tryIncRef(); + } + + @Override + public boolean decRef() { + return refCounted.decRef(); + } + + @Override + public boolean hasReferences() { + return refCounted.hasReferences(); + } } diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java index 6e553f254ee8b..9900ee9d824ae 100644 --- 
a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java @@ -379,7 +379,7 @@ private boolean checkMinimumVersion(GroupShardsIterator sha if (it.getTargetNodeIds().isEmpty() == false) { boolean isCompatible = it.getTargetNodeIds().stream().anyMatch(nodeId -> { Transport.Connection conn = getConnection(new SendingTarget(it.getClusterAlias(), nodeId)); - return conn == null || conn.getVersion().onOrAfter(request.minCompatibleShardNode()); + return conn == null || conn.getNode().getVersion().onOrAfter(request.minCompatibleShardNode()); }); if (isCompatible == false) { return false; @@ -419,7 +419,7 @@ public void onPhaseFailure(String msg, Exception cause) { public Transport.Connection getConnection(SendingTarget sendingTarget) { Transport.Connection conn = nodeIdToConnection.apply(sendingTarget.clusterAlias, sendingTarget.nodeId); Version minVersion = request.minCompatibleShardNode(); - if (minVersion != null && conn != null && conn.getVersion().before(minVersion)) { + if (minVersion != null && conn != null && conn.getNode().getVersion().before(minVersion)) { throw new VersionMismatchException("One of the shards is incompatible with the required minimum version [{}]", minVersion); } return conn; @@ -480,6 +480,26 @@ synchronized FixedBitSet getPossibleMatches() { Stream getSuccessfulResults() { return Stream.empty(); } + + @Override + public void incRef() { + + } + + @Override + public boolean tryIncRef() { + return false; + } + + @Override + public boolean decRef() { + return false; + } + + @Override + public boolean hasReferences() { + return false; + } } private GroupShardsIterator getIterator( diff --git a/server/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java deleted file mode 100644 index ceee61bc47934..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.search; - -import org.elasticsearch.action.ActionType; - -public class ClearScrollAction extends ActionType { - - public static final ClearScrollAction INSTANCE = new ClearScrollAction(); - public static final String NAME = "indices:data/read/scroll/clear"; - - private ClearScrollAction() { - super(NAME, ClearScrollResponse::new); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/search/ClearScrollRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/ClearScrollRequestBuilder.java index 42b734715bd89..2311a5f65eb40 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ClearScrollRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/ClearScrollRequestBuilder.java @@ -9,13 +9,14 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionType; import org.elasticsearch.client.internal.ElasticsearchClient; import java.util.List; public class ClearScrollRequestBuilder extends ActionRequestBuilder { - public ClearScrollRequestBuilder(ElasticsearchClient client, ClearScrollAction action) { + public ClearScrollRequestBuilder(ElasticsearchClient client, ActionType action) { super(client, action, new ClearScrollRequest()); } diff --git a/server/src/main/java/org/elasticsearch/action/search/ClosePointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/ClosePointInTimeAction.java deleted file mode 100644 index ae9757b5b516d..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/search/ClosePointInTimeAction.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.search; - -import org.elasticsearch.action.ActionType; - -public class ClosePointInTimeAction extends ActionType { - - public static final ClosePointInTimeAction INSTANCE = new ClosePointInTimeAction(); - public static final String NAME = "indices:data/read/close_point_in_time"; - - private ClosePointInTimeAction() { - super(NAME, ClosePointInTimeResponse::new); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/search/CountedCollector.java b/server/src/main/java/org/elasticsearch/action/search/CountedCollector.java index 34b33770efd55..d5605b280f385 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CountedCollector.java +++ b/server/src/main/java/org/elasticsearch/action/search/CountedCollector.java @@ -25,6 +25,7 @@ final class CountedCollector { CountedCollector(ArraySearchPhaseResults resultConsumer, int expectedOps, Runnable onFinish, SearchPhaseContext context) { this.resultConsumer = resultConsumer; + resultConsumer.incRef(); this.counter = new CountDown(expectedOps); this.onFinish = onFinish; this.context = context; @@ -37,7 +38,11 @@ final class CountedCollector { void countDown() { assert counter.isCountedDown() == false : "more operations executed than specified"; if (counter.countDown()) { - onFinish.run(); + try { + onFinish.run(); + } finally { + resultConsumer.decRef(); + } } } diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index e010e840d3f2d..ce2c86be4b4e6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -66,7 +66,7 @@ final class DfsQueryPhase extends SearchPhase { // register the release of the query consumer to free up the circuit breaker memory // at the end of the search - context.addReleasable(queryResult); + context.addReleasable(queryResult::decRef); } @Override @@ -95,7 +95,7 @@ public void run() { connection, querySearchRequest, context.getTask(), - new SearchActionListener(shardTarget, shardIndex) { + new SearchActionListener<>(shardTarget, shardIndex) { @Override protected void innerOnResponse(QuerySearchResult response) { diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index d0a4ca14ee4f3..e8d3ded154f55 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -9,10 +9,8 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.search.ScoreDoc; -import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.search.RescoreDocIds; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.dfs.AggregatedDfs; @@ -20,7 +18,6 @@ import org.elasticsearch.search.fetch.ShardFetchSearchRequest; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchContextId; -import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.transport.Transport; @@ -70,6 +67,7 @@ final class FetchSearchPhase 
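The CountedCollector change above pins the result consumer with an incRef in the constructor and releases it in a finally block when the last shard reports in, so the consumer stays alive for the whole collection even if onFinish throws. A stripped-down sketch of that balance (CountedRelease is hypothetical; CountDown is the same helper used in the patch):

import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.core.RefCounted;

final class CountedRelease {
    private final RefCounted resultConsumer;
    private final CountDown counter;
    private final Runnable onFinish;

    CountedRelease(RefCounted resultConsumer, int expectedOps, Runnable onFinish) {
        this.resultConsumer = resultConsumer;
        resultConsumer.incRef(); // hold the consumer alive for the lifetime of this collector
        this.counter = new CountDown(expectedOps);
        this.onFinish = onFinish;
    }

    void countDown() {
        if (counter.countDown()) {
            try {
                onFinish.run();
            } finally {
                resultConsumer.decRef(); // balance the constructor's incRef even if onFinish throws
            }
        }
    }
}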
extends SearchPhase { ); } this.fetchResults = new ArraySearchPhaseResults<>(resultConsumer.getNumShards()); + context.addReleasable(fetchResults::decRef); this.queryResults = resultConsumer.getAtomicArray(); this.aggregatedDfs = aggregatedDfs; this.nextPhaseFactory = nextPhaseFactory; @@ -99,40 +97,32 @@ public void onFailure(Exception e) { private void innerRun() throws Exception { final int numShards = context.getNumShards(); - final boolean isScrollSearch = context.getRequest().scroll() != null; - final List phaseResults = queryResults.asList(); final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = resultConsumer.reduce(); // Usually when there is a single shard, we force the search type QUERY_THEN_FETCH. But when there's kNN, we might // still use DFS_QUERY_THEN_FETCH, which does not perform the "query and fetch" optimization during the query phase. final boolean queryAndFetchOptimization = queryResults.length() == 1 && context.getRequest().hasKnnSearch() == false && reducedQueryPhase.rankCoordinatorContext() == null; - final Runnable finishPhase = () -> moveToNextPhase( - queryResults, - reducedQueryPhase, - queryAndFetchOptimization ? queryResults : fetchResults.getAtomicArray() - ); if (queryAndFetchOptimization) { - assert phaseResults.isEmpty() || phaseResults.get(0).fetchResult() != null - : "phaseResults empty [" + phaseResults.isEmpty() + "], single result: " + phaseResults.get(0).fetchResult(); + assert assertConsistentWithQueryAndFetchOptimization(); // query AND fetch optimization - finishPhase.run(); + moveToNextPhase(reducedQueryPhase, queryResults); } else { ScoreDoc[] scoreDocs = reducedQueryPhase.sortedTopDocs().scoreDocs(); - final List[] docIdsToLoad = SearchPhaseController.fillDocIdsToLoad(numShards, scoreDocs); // no docs to fetch -- sidestep everything and return if (scoreDocs.length == 0) { // we have to release contexts here to free up resources - phaseResults.stream().map(SearchPhaseResult::queryResult).forEach(this::releaseIrrelevantSearchContext); - finishPhase.run(); + queryResults.asList().stream().map(SearchPhaseResult::queryResult).forEach(this::releaseIrrelevantSearchContext); + moveToNextPhase(reducedQueryPhase, fetchResults.getAtomicArray()); } else { - final ScoreDoc[] lastEmittedDocPerShard = isScrollSearch + final ScoreDoc[] lastEmittedDocPerShard = context.getRequest().scroll() != null ? 
SearchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, numShards) : null; + final List[] docIdsToLoad = SearchPhaseController.fillDocIdsToLoad(numShards, scoreDocs); final CountedCollector counter = new CountedCollector<>( fetchResults, docIdsToLoad.length, // we count down every shard in the result no matter if we got any results or not - finishPhase, + () -> moveToNextPhase(reducedQueryPhase, fetchResults.getAtomicArray()), context ); for (int i = 0; i < docIdsToLoad.length; i++) { @@ -149,66 +139,43 @@ private void innerRun() throws Exception { // in any case we count down this result since we don't talk to this shard anymore counter.countDown(); } else { - SearchShardTarget shardTarget = queryResult.getSearchShardTarget(); - Transport.Connection connection = context.getConnection(shardTarget.getClusterAlias(), shardTarget.getNodeId()); - ShardFetchSearchRequest fetchSearchRequest = createFetchRequest( - queryResult.queryResult().getContextId(), - i, - entry, - lastEmittedDocPerShard, - context.getOriginalIndices(queryResult.getShardIndex()), - queryResult.getShardSearchRequest(), - queryResult.getRescoreDocIds() - ); - executeFetch( - queryResult.getShardIndex(), - shardTarget, - counter, - fetchSearchRequest, - queryResult.queryResult(), - connection - ); + executeFetch(queryResult, counter, entry, (lastEmittedDocPerShard != null) ? lastEmittedDocPerShard[i] : null); } } } } } - protected ShardFetchSearchRequest createFetchRequest( - ShardSearchContextId contextId, - int index, - List entry, - ScoreDoc[] lastEmittedDocPerShard, - OriginalIndices originalIndices, - ShardSearchRequest shardSearchRequest, - RescoreDocIds rescoreDocIds - ) { - final ScoreDoc lastEmittedDoc = (lastEmittedDocPerShard != null) ? lastEmittedDocPerShard[index] : null; - return new ShardFetchSearchRequest( - originalIndices, - contextId, - shardSearchRequest, - entry, - lastEmittedDoc, - rescoreDocIds, - aggregatedDfs - ); + private boolean assertConsistentWithQueryAndFetchOptimization() { + var phaseResults = queryResults.asList(); + assert phaseResults.isEmpty() || phaseResults.get(0).fetchResult() != null + : "phaseResults empty [" + phaseResults.isEmpty() + "], single result: " + phaseResults.get(0).fetchResult(); + return true; } private void executeFetch( - final int shardIndex, - final SearchShardTarget shardTarget, + SearchPhaseResult queryResult, final CountedCollector counter, - final ShardFetchSearchRequest fetchSearchRequest, - final QuerySearchResult querySearchResult, - final Transport.Connection connection + final List entry, + ScoreDoc lastEmittedDocForShard ) { + final SearchShardTarget shardTarget = queryResult.getSearchShardTarget(); + final int shardIndex = queryResult.getShardIndex(); + final ShardSearchContextId contextId = queryResult.queryResult().getContextId(); context.getSearchTransport() .sendExecuteFetch( - connection, - fetchSearchRequest, + context.getConnection(shardTarget.getClusterAlias(), shardTarget.getNodeId()), + new ShardFetchSearchRequest( + context.getOriginalIndices(queryResult.getShardIndex()), + contextId, + queryResult.getShardSearchRequest(), + entry, + lastEmittedDocForShard, + queryResult.getRescoreDocIds(), + aggregatedDfs + ), context.getTask(), - new SearchActionListener(shardTarget, shardIndex) { + new SearchActionListener<>(shardTarget, shardIndex) { @Override public void innerOnResponse(FetchSearchResult result) { try { @@ -222,14 +189,14 @@ public void innerOnResponse(FetchSearchResult result) { @Override public void onFailure(Exception e) { try 
{ - logger.debug(() -> "[" + fetchSearchRequest.contextId() + "] Failed to execute fetch phase", e); + logger.debug(() -> "[" + contextId + "] Failed to execute fetch phase", e); progressListener.notifyFetchFailure(shardIndex, shardTarget, e); counter.onFailure(shardIndex, shardTarget, e); } finally { // the search context might not be cleared on the node where the fetch was executed for example // because the action was rejected by the thread pool. in this case we need to send a dedicated // request to clear the search context. - releaseIrrelevantSearchContext(querySearchResult); + releaseIrrelevantSearchContext(queryResult.queryResult()); } } } @@ -260,16 +227,14 @@ private void releaseIrrelevantSearchContext(QuerySearchResult queryResult) { } private void moveToNextPhase( - AtomicArray queryPhaseResults, SearchPhaseController.ReducedQueryPhase reducedQueryPhase, AtomicArray fetchResultsArr ) { final InternalSearchResponse internalResponse = SearchPhaseController.merge( context.getRequest().scroll() != null, reducedQueryPhase, - fetchResultsArr.asList(), - fetchResultsArr::get + fetchResultsArr ); - context.executeNextPhase(this, nextPhaseFactory.apply(internalResponse, queryPhaseResults)); + context.executeNextPhase(this, nextPhaseFactory.apply(internalResponse, queryResults)); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java deleted file mode 100644 index faea4b88e5c78..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
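The moveToNextPhase refactor above (and the matching SearchPhaseController change further down) passes the fetch results as a single AtomicArray instead of the old Collection-plus-IntFunction pair, because the array already provides both the positional lookup and the iteration view. A tiny illustration, with placeholder values:

import org.elasticsearch.common.util.concurrent.AtomicArray;

final class AtomicArrayDemo {
    static void demo() {
        AtomicArray<String> fetchResults = new AtomicArray<>(3);
        fetchResults.set(0, "shard-0-result");
        fetchResults.set(2, "shard-2-result");
        String byShardIndex = fetchResults.get(2);  // positional lookup, replaces the IntFunction
        var nonNullResults = fetchResults.asList(); // non-null iteration, replaces the Collection
        assert byShardIndex != null && nonNullResults.size() == 2;
    }
}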
- */ - -package org.elasticsearch.action.search; - -import org.elasticsearch.action.ActionType; - -public class MultiSearchAction extends ActionType { - - public static final MultiSearchAction INSTANCE = new MultiSearchAction(); - public static final String NAME = "indices:data/read/msearch"; - - private MultiSearchAction() { - super(NAME, MultiSearchResponse::new); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java index 57c536f3d371e..20888d652c8ac 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.ElasticsearchClient; @@ -17,7 +18,7 @@ */ public class MultiSearchRequestBuilder extends ActionRequestBuilder { - public MultiSearchRequestBuilder(ElasticsearchClient client, MultiSearchAction action) { + public MultiSearchRequestBuilder(ElasticsearchClient client, ActionType action) { super(client, action, new MultiSearchRequest()); } diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java index 02b2b9b99e68f..aee631fb5d4cf 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java @@ -19,8 +19,11 @@ import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.common.xcontent.ChunkedToXContentObject; +import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.transport.LeakTracker; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; @@ -134,19 +137,58 @@ public Exception getFailure() { private final Item[] items; private final long tookInMillis; + private final RefCounted refCounted = LeakTracker.wrap(new AbstractRefCounted() { + @Override + protected void closeInternal() { + for (int i = 0; i < items.length; i++) { + Item item = items[i]; + var r = item.response; + if (r != null) { + r.decRef(); + items[i] = null; + } + } + } + }); + public MultiSearchResponse(StreamInput in) throws IOException { super(in); items = in.readArray(Item::new, Item[]::new); tookInMillis = in.readVLong(); } + /** + * @param items individual search responses, the elements in this array are considered as owned by this instance for ref-counting + * purposes if their {@link Item#response} is non-null + */ public MultiSearchResponse(Item[] items, long tookInMillis) { this.items = items; this.tookInMillis = tookInMillis; } + @Override + public void incRef() { + refCounted.incRef(); + } + + @Override + public boolean tryIncRef() { + return refCounted.tryIncRef(); + } + + @Override + public boolean decRef() { + return refCounted.decRef(); + } + + @Override + public boolean hasReferences() { + return refCounted.hasReferences(); + } + @Override public 
Iterator iterator() { + assert hasReferences(); return Iterators.forArray(items); } @@ -154,6 +196,7 @@ public Iterator iterator() { * The list of responses, the order is the same as the one provided in the request. */ public Item[] getResponses() { + assert hasReferences(); return this.items; } @@ -166,12 +209,14 @@ public TimeValue getTook() { @Override public void writeTo(StreamOutput out) throws IOException { + assert hasReferences(); out.writeArray(items); out.writeVLong(tookInMillis); } @Override public Iterator toXContentChunked(ToXContent.Params params) { + assert hasReferences(); return Iterators.concat( ChunkedToXContentHelper.startObject(), Iterators.single((b, p) -> b.field("took", tookInMillis).startArray(Fields.RESPONSES)), diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeAction.java deleted file mode 100644 index 560f8aea1da5b..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeAction.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.search; - -import org.elasticsearch.action.ActionType; - -public class OpenPointInTimeAction extends ActionType { - public static final String NAME = "indices:data/read/open_point_in_time"; - public static final OpenPointInTimeAction INSTANCE = new OpenPointInTimeAction(); - - private OpenPointInTimeAction() { - super(NAME, OpenPointInTimeResponse::new); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java index 633e56b97a833..39813a883c428 100644 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -38,6 +39,8 @@ public final class OpenPointInTimeRequest extends ActionRequest implements Indic @Nullable private String preference; + private QueryBuilder indexFilter; + public static final IndicesOptions DEFAULT_INDICES_OPTIONS = SearchRequest.DEFAULT_INDICES_OPTIONS; public OpenPointInTimeRequest(String... 
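With MultiSearchResponse now ref-counted, the wrapped AbstractRefCounted releases each item's response once the last reference is dropped, and the accessors assert liveness first. A hypothetical consumer that keeps the response beyond the listener callback therefore takes and later releases its own reference:

import org.elasticsearch.action.search.MultiSearchResponse;

final class RetainingHandler {
    private MultiSearchResponse retained;

    void onResponse(MultiSearchResponse response) {
        response.incRef(); // the sender decRefs after the listener returns, so keep our own ref
        retained = response;
    }

    void finished() {
        retained.decRef(); // balance the incRef above once the items are no longer needed
        retained = null;
    }
}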
indices) { @@ -54,6 +57,9 @@ public OpenPointInTimeRequest(StreamInput in) throws IOException { if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { this.maxConcurrentShardRequests = in.readVInt(); } + if (in.getTransportVersion().onOrAfter(TransportVersions.PIT_WITH_INDEX_FILTER)) { + this.indexFilter = in.readOptionalNamedWriteable(QueryBuilder.class); + } } @Override @@ -67,6 +73,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeVInt(maxConcurrentShardRequests); } + if (out.getTransportVersion().onOrAfter(TransportVersions.PIT_WITH_INDEX_FILTER)) { + out.writeOptionalNamedWriteable(indexFilter); + } } @Override @@ -153,6 +162,14 @@ public void maxConcurrentShardRequests(int maxConcurrentShardRequests) { this.maxConcurrentShardRequests = maxConcurrentShardRequests; } + public void indexFilter(QueryBuilder indexFilter) { + this.indexFilter = indexFilter; + } + + public QueryBuilder indexFilter() { + return indexFilter; + } + @Override public boolean allowsRemoteIndices() { return true; diff --git a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java index ee956b5179902..b7b113601560b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java +++ b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java @@ -39,7 +39,6 @@ import java.util.function.Consumer; import java.util.function.Supplier; -import static java.util.stream.Collectors.toCollection; import static org.elasticsearch.action.search.SearchPhaseController.getTopDocsSize; import static org.elasticsearch.action.search.SearchPhaseController.mergeTopDocs; import static org.elasticsearch.action.search.SearchPhaseController.setShardIndex; @@ -52,7 +51,7 @@ * needed to reduce the aggregations is estimated and a {@link CircuitBreakingException} is thrown if it * exceeds the maximum memory allowed in this breaker. */ -public class QueryPhaseResultConsumer extends ArraySearchPhaseResults<QuerySearchResult> implements Releasable { +public class QueryPhaseResultConsumer extends ArraySearchPhaseResults<QuerySearchResult> { private static final Logger logger = LogManager.getLogger(QueryPhaseResultConsumer.class); private final Executor executor; @@ -105,8 +104,12 @@ public QueryPhaseResultConsumer( } @Override - public void close() { - Releasables.close(pendingMerges); + protected void doClose() { + try { + super.doClose(); + } finally { + pendingMerges.close(); + } } @Override @@ -269,12 +272,9 @@ public synchronized void close() { assert circuitBreakerBytes >= 0; } - List<Releasable> toRelease = buffer.stream().map(b -> b::releaseAggs).collect(toCollection(ArrayList::new)); - toRelease.add(() -> { - circuitBreaker.addWithoutBreaking(-circuitBreakerBytes); - circuitBreakerBytes = 0; - }); - Releasables.close(toRelease); + releaseBuffer(); + circuitBreaker.addWithoutBreaking(-circuitBreakerBytes); + circuitBreakerBytes = 0; if (hasPendingMerges()) { // This is a theoretically unreachable exception.
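The new indexFilter field only goes over the wire when both nodes understand it, the usual pattern for adding an optional field to a serialized request. A minimal sketch of the symmetric read/write pair (VersionGatedField and the helper names are hypothetical; the version constant and stream calls are the ones used above):

import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.query.QueryBuilder;

import java.io.IOException;

final class VersionGatedField {
    // older nodes never send the field, so it simply stays null
    static QueryBuilder readIndexFilter(StreamInput in) throws IOException {
        if (in.getTransportVersion().onOrAfter(TransportVersions.PIT_WITH_INDEX_FILTER)) {
            return in.readOptionalNamedWriteable(QueryBuilder.class);
        }
        return null;
    }

    // the field is silently dropped when talking to an older node
    static void writeIndexFilter(StreamOutput out, QueryBuilder indexFilter) throws IOException {
        if (out.getTransportVersion().onOrAfter(TransportVersions.PIT_WITH_INDEX_FILTER)) {
            out.writeOptionalNamedWriteable(indexFilter);
        }
    }
}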
@@ -300,11 +300,10 @@ void sortBuffer() { } } - synchronized long addWithoutBreaking(long size) { + synchronized void addWithoutBreaking(long size) { circuitBreaker.addWithoutBreaking(size); circuitBreakerBytes += size; maxAggsCurrentBufferSize = Math.max(maxAggsCurrentBufferSize, circuitBreakerBytes); - return circuitBreakerBytes; } synchronized long addEstimateAndMaybeBreak(long estimatedSize) { @@ -350,8 +349,7 @@ public void consume(QuerySearchResult result, Runnable next) { addEstimateAndMaybeBreak(aggsSize); } catch (Exception exc) { result.releaseAggs(); - buffer.forEach(QuerySearchResult::releaseAggs); - buffer.clear(); + releaseBuffer(); onMergeFailure(exc); next.run(); return; @@ -379,6 +377,11 @@ public void consume(QuerySearchResult result, Runnable next) { } } + private void releaseBuffer() { + buffer.forEach(QuerySearchResult::releaseAggs); + buffer.clear(); + } + private synchronized void onMergeFailure(Exception exc) { if (hasFailure()) { assert circuitBreakerBytes == 0; diff --git a/server/src/main/java/org/elasticsearch/action/search/RestClosePointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/RestClosePointInTimeAction.java index a9da16bd62026..64702501581ea 100644 --- a/server/src/main/java/org/elasticsearch/action/search/RestClosePointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/RestClosePointInTimeAction.java @@ -41,7 +41,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC clearRequest = ClosePointInTimeRequest.fromXContent(parser); } return channel -> client.execute( - ClosePointInTimeAction.INSTANCE, + TransportClosePointInTimeAction.TYPE, clearRequest, new RestToXContentListener<>(channel, ClosePointInTimeResponse::status) ); diff --git a/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java index 815deac07dfcd..0e7f3f9111842 100644 --- a/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java @@ -17,9 +17,13 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import java.io.IOException; import java.util.List; +import static org.elasticsearch.index.query.AbstractQueryBuilder.parseTopLevelQuery; import static org.elasticsearch.rest.RestRequest.Method.POST; @ServerlessScope(Scope.PUBLIC) @@ -36,7 +40,7 @@ public List routes() { } @Override - public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final OpenPointInTimeRequest openRequest = new OpenPointInTimeRequest(indices); openRequest.indicesOptions(IndicesOptions.fromRequest(request, OpenPointInTimeRequest.DEFAULT_INDICES_OPTIONS)); @@ -50,6 +54,20 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC ); openRequest.maxConcurrentShardRequests(maxConcurrentShardRequests); } - return channel -> client.execute(OpenPointInTimeAction.INSTANCE, openRequest, new RestToXContentListener<>(channel)); + + 
request.withContentOrSourceParamParserOrNull(parser -> { + if (parser != null) { + PARSER.parse(parser, openRequest, null); + } + }); + + return channel -> client.execute(TransportOpenPointInTimeAction.TYPE, openRequest, new RestToXContentListener<>(channel)); + } + + private static final ObjectParser PARSER = new ObjectParser<>("open_point_in_time_request"); + private static final ParseField INDEX_FILTER_FIELD = new ParseField("index_filter"); + + static { + PARSER.declareObject(OpenPointInTimeRequest::indexFilter, (p, c) -> parseTopLevelQuery(p), INDEX_FILTER_FIELD); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index c7ad250892160..2fcb792f821c9 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -64,6 +64,7 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction clusters ); this.queryPhaseResultConsumer = queryPhaseResultConsumer; + addReleasable(queryPhaseResultConsumer::decRef); this.progressListener = task.getProgressListener(); // don't build the SearchShard list (can be expensive) if the SearchProgressListener won't use it if (progressListener != SearchProgressListener.NOOP) { @@ -90,7 +91,7 @@ protected SearchPhase getNextPhase(final SearchPhaseResults res final List dfsSearchResults = results.getAtomicArray().asList(); final AggregatedDfs aggregatedDfs = SearchPhaseController.aggregateDfs(dfsSearchResults); final List mergedKnnResults = SearchPhaseController.mergeKnnResults(getRequest(), dfsSearchResults); - + queryPhaseResultConsumer.incRef(); return new DfsQueryPhase( dfsSearchResults, aggregatedDfs, diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 5af5c4c2ec602..0662e94b519d9 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.lucene.grouping.TopFieldGroups; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchHit; @@ -60,7 +61,6 @@ import java.util.concurrent.Executor; import java.util.function.BiFunction; import java.util.function.Consumer; -import java.util.function.IntFunction; import java.util.function.Supplier; public final class SearchPhaseController { @@ -351,52 +351,58 @@ public static List[] fillDocIdsToLoad(int numShards, ScoreDoc[] shardDo public static InternalSearchResponse merge( boolean ignoreFrom, ReducedQueryPhase reducedQueryPhase, - Collection fetchResults, - IntFunction resultsLookup + AtomicArray fetchResultsArray ) { if (reducedQueryPhase.isEmptyResult) { return InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; } ScoreDoc[] sortedDocs = reducedQueryPhase.sortedTopDocs.scoreDocs; - SearchHits hits = getHits(reducedQueryPhase, ignoreFrom, fetchResults, resultsLookup); - if (reducedQueryPhase.suggest != null) { - if (fetchResults.isEmpty() == false) { - int currentOffset = 
hits.getHits().length; - for (CompletionSuggestion suggestion : reducedQueryPhase.suggest.filter(CompletionSuggestion.class)) { - final List suggestionOptions = suggestion.getOptions(); - for (int scoreDocIndex = currentOffset; scoreDocIndex < currentOffset + suggestionOptions.size(); scoreDocIndex++) { - ScoreDoc shardDoc = sortedDocs[scoreDocIndex]; - SearchPhaseResult searchResultProvider = resultsLookup.apply(shardDoc.shardIndex); - if (searchResultProvider == null) { - // this can happen if we are hitting a shard failure during the fetch phase - // in this case we referenced the shard result via the ScoreDoc but never got a - // result from fetch. - // TODO it would be nice to assert this in the future - continue; - } - FetchSearchResult fetchResult = searchResultProvider.fetchResult(); - final int index = fetchResult.counterGetAndIncrement(); - assert index < fetchResult.hits().getHits().length - : "not enough hits fetched. index [" + index + "] length: " + fetchResult.hits().getHits().length; - SearchHit hit = fetchResult.hits().getHits()[index]; - CompletionSuggestion.Entry.Option suggestOption = suggestionOptions.get(scoreDocIndex - currentOffset); - hit.score(shardDoc.score); - hit.shard(fetchResult.getSearchShardTarget()); - suggestOption.setHit(hit); - } - currentOffset += suggestionOptions.size(); + var fetchResults = fetchResultsArray.asList(); + SearchHits hits = getHits(reducedQueryPhase, ignoreFrom, fetchResultsArray); + if (reducedQueryPhase.suggest != null && fetchResults.isEmpty() == false) { + mergeSuggest(reducedQueryPhase, fetchResultsArray, hits, sortedDocs); + } + return reducedQueryPhase.buildResponse(hits, fetchResults); + } + + private static void mergeSuggest( + ReducedQueryPhase reducedQueryPhase, + AtomicArray fetchResultsArray, + SearchHits hits, + ScoreDoc[] sortedDocs + ) { + int currentOffset = hits.getHits().length; + for (CompletionSuggestion suggestion : reducedQueryPhase.suggest.filter(CompletionSuggestion.class)) { + final List suggestionOptions = suggestion.getOptions(); + for (int scoreDocIndex = currentOffset; scoreDocIndex < currentOffset + suggestionOptions.size(); scoreDocIndex++) { + ScoreDoc shardDoc = sortedDocs[scoreDocIndex]; + SearchPhaseResult searchResultProvider = fetchResultsArray.get(shardDoc.shardIndex); + if (searchResultProvider == null) { + // this can happen if we are hitting a shard failure during the fetch phase + // in this case we referenced the shard result via the ScoreDoc but never got a + // result from fetch. + // TODO it would be nice to assert this in the future + continue; } - assert currentOffset == sortedDocs.length : "expected no more score doc slices"; + FetchSearchResult fetchResult = searchResultProvider.fetchResult(); + final int index = fetchResult.counterGetAndIncrement(); + assert index < fetchResult.hits().getHits().length + : "not enough hits fetched. 
index [" + index + "] length: " + fetchResult.hits().getHits().length; + SearchHit hit = fetchResult.hits().getHits()[index]; + CompletionSuggestion.Entry.Option suggestOption = suggestionOptions.get(scoreDocIndex - currentOffset); + hit.score(shardDoc.score); + hit.shard(fetchResult.getSearchShardTarget()); + suggestOption.setHit(hit); } + currentOffset += suggestionOptions.size(); } - return reducedQueryPhase.buildResponse(hits, fetchResults); + assert currentOffset == sortedDocs.length : "expected no more score doc slices"; } private static SearchHits getHits( ReducedQueryPhase reducedQueryPhase, boolean ignoreFrom, - Collection fetchResults, - IntFunction resultsLookup + AtomicArray fetchResultsArray ) { SortedTopDocs sortedTopDocs = reducedQueryPhase.sortedTopDocs; int sortScoreIndex = -1; @@ -408,6 +414,7 @@ private static SearchHits getHits( } } } + var fetchResults = fetchResultsArray.asList(); // clean the fetch counter for (SearchPhaseResult entry : fetchResults) { entry.fetchResult().initCounter(); @@ -422,7 +429,7 @@ private static SearchHits getHits( if (fetchResults.isEmpty() == false) { for (int i = 0; i < numSearchHits; i++) { ScoreDoc shardDoc = sortedTopDocs.scoreDocs[i]; - SearchPhaseResult fetchResultProvider = resultsLookup.apply(shardDoc.shardIndex); + SearchPhaseResult fetchResultProvider = fetchResultsArray.get(shardDoc.shardIndex); if (fetchResultProvider == null) { // this can happen if we are hitting a shard failure during the fetch phase // in this case we referenced the shard result via the ScoreDoc but never got a @@ -737,7 +744,7 @@ public record ReducedQueryPhase( /** * Creates a new search response from the given merged hits. - * @see #merge(boolean, ReducedQueryPhase, Collection, IntFunction) + * @see #merge(boolean, ReducedQueryPhase, AtomicArray) */ public InternalSearchResponse buildResponse(SearchHits hits, Collection fetchResults) { return new InternalSearchResponse( @@ -753,10 +760,8 @@ public InternalSearchResponse buildResponse(SearchHits hits, Collection fetchResults) { if (profileBuilder == null) { - assert fetchResults.stream() - .map(SearchPhaseResult::fetchResult) - .filter(r -> r != null) - .allMatch(r -> r.profileResult() == null) : "found fetch profile without search profile"; + assert fetchResults.stream().map(SearchPhaseResult::fetchResult).allMatch(r -> r == null || r.profileResult() == null) + : "found fetch profile without search profile"; return null; } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseResults.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseResults.java index edabbc86b4b31..11b8e0a0792a3 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseResults.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseResults.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.search.SearchPhaseResult; import java.util.stream.Stream; @@ -16,7 +17,7 @@ /** * This class acts as a basic result collection that can be extended to do on-the-fly reduction or result processing */ -abstract class SearchPhaseResults { +abstract class SearchPhaseResults implements RefCounted { private final int numShards; SearchPhaseResults(int numShards) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchProgressActionListener.java 
b/server/src/main/java/org/elasticsearch/action/search/SearchProgressActionListener.java index 26466215a3e85..95e9b4cedeba5 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchProgressActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchProgressActionListener.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.ActionListener; /** - * An {@link ActionListener} for search requests that allows to track progress of the {@link SearchAction}. + * An {@link ActionListener} for search requests that allows tracking the progress of the {@link TransportSearchAction}. * See {@link SearchProgressListener}. */ public abstract class SearchProgressActionListener extends SearchProgressListener implements ActionListener<SearchResponse> {} diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchProgressListener.java b/server/src/main/java/org/elasticsearch/action/search/SearchProgressListener.java index c6b0022593179..096f2606d3f02 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchProgressListener.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchProgressListener.java @@ -23,7 +23,7 @@ import java.util.stream.StreamSupport; /** - * A listener that allows to track progress of the {@link SearchAction}. + * A listener that allows tracking the progress of the {@link TransportSearchAction}. */ public abstract class SearchProgressListener { private static final Logger logger = LogManager.getLogger(SearchProgressListener.class); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index 2dfd46182266c..8cf4ee9b75f76 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -74,9 +74,6 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<SearchPhaseResult> { - public SearchRequestBuilder(ElasticsearchClient client, SearchAction action) { + public SearchRequestBuilder(ElasticsearchClient client, ActionType<SearchResponse> action) { super(client, action, new SearchRequest()); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java deleted file mode 100644 index 25f0daab932da..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1.
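With the request builders generalized to accept any ActionType carrying the right response type, the dedicated *Action singleton classes can be deleted one by one. A small sketch of a post-change call site (BuilderFactory and newSearchBuilder are hypothetical):

import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.internal.ElasticsearchClient;

final class BuilderFactory {
    // any ActionType<SearchResponse> works; no concrete SearchAction class is needed
    static SearchRequestBuilder newSearchBuilder(ElasticsearchClient client, ActionType<SearchResponse> type) {
        return new SearchRequestBuilder(client, type);
    }
}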
- */ - -package org.elasticsearch.action.search; - -import org.elasticsearch.action.ActionType; - -public class SearchScrollAction extends ActionType { - - public static final SearchScrollAction INSTANCE = new SearchScrollAction(); - public static final String NAME = "indices:data/read/scroll"; - - private SearchScrollAction() { - super(NAME, SearchResponse::new); - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java index df16c107a2619..fc1ccfb00d6ce 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java @@ -240,12 +240,7 @@ protected final void sendResponse( final AtomicArray fetchResults ) { try { - final InternalSearchResponse internalResponse = SearchPhaseController.merge( - true, - queryPhase, - fetchResults.asList(), - fetchResults::get - ); + final InternalSearchResponse internalResponse = SearchPhaseController.merge(true, queryPhase, fetchResults); // the scroll ID never changes we always return the same ID. This ID contains all the shards and their context ids // such that we can talk to them again in the next roundtrip. String scrollId = null; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java index bf6517e97a842..bad0ed488d03b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java @@ -99,13 +99,12 @@ public void run() { connection, shardFetchRequest, task, - new SearchActionListener(querySearchResult.getSearchShardTarget(), index) { + new SearchActionListener<>(querySearchResult.getSearchShardTarget(), index) { @Override protected void innerOnResponse(FetchSearchResult response) { fetchResults.setOnce(response.getShardIndex(), response); - if (counter.countDown()) { - sendResponse(reducedQueryPhase, fetchResults); - } + response.incRef(); + consumeResponse(counter, reducedQueryPhase); } @Override @@ -124,13 +123,20 @@ public void onFailure(Exception t) { } else { // the counter is set to the total size of docIdsToLoad // which can have null values so we have to count them down too - if (counter.countDown()) { - sendResponse(reducedQueryPhase, fetchResults); - } + consumeResponse(counter, reducedQueryPhase); } } } }; } + private void consumeResponse(CountDown counter, SearchPhaseController.ReducedQueryPhase reducedQueryPhase) { + if (counter.countDown()) { + sendResponse(reducedQueryPhase, fetchResults); + for (FetchSearchResult fetchSearchResult : fetchResults.asList()) { + fetchSearchResult.decRef(); + } + } + } + } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java index e8348d189fcbc..4de27b8430417 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionType; import 
org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.Scroll; @@ -18,11 +19,11 @@ */ public class SearchScrollRequestBuilder extends ActionRequestBuilder { - public SearchScrollRequestBuilder(ElasticsearchClient client, SearchScrollAction action) { + public SearchScrollRequestBuilder(ElasticsearchClient client, ActionType action) { super(client, action, new SearchScrollRequest()); } - public SearchScrollRequestBuilder(ElasticsearchClient client, SearchScrollAction action, String scrollId) { + public SearchScrollRequestBuilder(ElasticsearchClient client, ActionType action, String scrollId) { super(client, action, new SearchScrollRequest(scrollId)); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchShardsAction.java deleted file mode 100644 index f4bfc2623fe1c..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/search/SearchShardsAction.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.search; - -import org.elasticsearch.action.ActionType; - -public class SearchShardsAction extends ActionType { - public static final String NAME = "indices:admin/search/search_shards"; - public static final SearchShardsAction INSTANCE = new SearchShardsAction(); - - private SearchShardsAction() { - super(NAME, SearchShardsResponse::new); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 800ad7afbb8db..e46d26c3532ad 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -8,8 +8,6 @@ package org.elasticsearch.action.search; -import org.elasticsearch.TransportVersions; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.IndicesRequest; @@ -26,10 +24,8 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Nullable; -import org.elasticsearch.search.CanMatchShardResponse; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.dfs.DfsSearchResult; @@ -58,12 +54,9 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.Arrays; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.function.BiFunction; /** @@ -82,7 +75,6 @@ public class SearchTransportService { public static final String QUERY_FETCH_SCROLL_ACTION_NAME = 
"indices:data/read/search[phase/query+fetch/scroll]"; public static final String FETCH_ID_SCROLL_ACTION_NAME = "indices:data/read/search[phase/fetch/id/scroll]"; public static final String FETCH_ID_ACTION_NAME = "indices:data/read/search[phase/fetch/id]"; - public static final String QUERY_CAN_MATCH_NAME = "indices:data/read/search[can_match]"; public static final String QUERY_CAN_MATCH_NODE_NAME = "indices:data/read/search[can_match][n]"; private final TransportService transportService; @@ -137,79 +129,20 @@ public void sendFreeContext( public void sendCanMatch( Transport.Connection connection, - final ShardSearchRequest request, + final CanMatchNodeRequest request, SearchTask task, - final ActionListener listener + final ActionListener listener ) { transportService.sendChildRequest( connection, - QUERY_CAN_MATCH_NAME, + QUERY_CAN_MATCH_NODE_NAME, request, task, TransportRequestOptions.EMPTY, - new ActionListenerResponseHandler<>(listener, CanMatchShardResponse::new, TransportResponseHandler.TRANSPORT_WORKER) + new ActionListenerResponseHandler<>(listener, CanMatchNodeResponse::new, TransportResponseHandler.TRANSPORT_WORKER) ); } - public void sendCanMatch( - Transport.Connection connection, - final CanMatchNodeRequest request, - SearchTask task, - final ActionListener listener - ) { - if (connection.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0) - && connection.getNode().getVersion().onOrAfter(Version.V_7_16_0)) { - transportService.sendChildRequest( - connection, - QUERY_CAN_MATCH_NODE_NAME, - request, - task, - TransportRequestOptions.EMPTY, - new ActionListenerResponseHandler<>(listener, CanMatchNodeResponse::new, TransportResponseHandler.TRANSPORT_WORKER) - ); - } else { - // BWC layer: translate into shard-level requests - final List shardSearchRequests = request.createShardSearchRequests(); - final AtomicReferenceArray results = new AtomicReferenceArray<>( - shardSearchRequests.size() - ); - final CountDown counter = new CountDown(shardSearchRequests.size()); - final Runnable maybeFinish = () -> { - if (counter.countDown()) { - final CanMatchNodeResponse.ResponseOrFailure[] responses = - new CanMatchNodeResponse.ResponseOrFailure[shardSearchRequests.size()]; - for (int i = 0; i < responses.length; i++) { - responses[i] = results.get(i); - } - final CanMatchNodeResponse response = new CanMatchNodeResponse(Arrays.asList(responses)); - listener.onResponse(response); - } - }; - for (int i = 0; i < shardSearchRequests.size(); i++) { - final ShardSearchRequest shardSearchRequest = shardSearchRequests.get(i); - final int finalI = i; - try { - sendCanMatch(connection, shardSearchRequest, task, new ActionListener<>() { - @Override - public void onResponse(CanMatchShardResponse response) { - results.set(finalI, new CanMatchNodeResponse.ResponseOrFailure(response)); - maybeFinish.run(); - } - - @Override - public void onFailure(Exception e) { - results.set(finalI, new CanMatchNodeResponse.ResponseOrFailure(e)); - maybeFinish.run(); - } - }); - } catch (Exception e) { - results.set(finalI, new CanMatchNodeResponse.ResponseOrFailure(e)); - maybeFinish.run(); - } - } - } - } - public void sendClearAllScrollContexts(Transport.Connection connection, final ActionListener listener) { transportService.sendRequest( connection, @@ -347,7 +280,7 @@ void sendExecuteMultiSearch(final MultiSearchRequest request, SearchTask task, f final Transport.Connection connection = transportService.getConnection(transportService.getLocalNode()); transportService.sendChildRequest( connection, - 
MultiSearchAction.NAME, + TransportMultiSearchAction.TYPE.name(), request, task, new ConnectionCountingHandler<>(listener, MultiSearchResponse::new, clientConnections, connection.getNode().getId()) @@ -565,24 +498,11 @@ public static void registerRequestHandler(TransportService transportService, Sea ); TransportActionProxy.registerProxyAction(transportService, FETCH_ID_ACTION_NAME, true, FetchSearchResult::new); - // this is cheap, it does not fetch during the rewrite phase, so we can let it quickly execute on a networking thread - transportService.registerRequestHandler( - QUERY_CAN_MATCH_NAME, - EsExecutors.DIRECT_EXECUTOR_SERVICE, - ShardSearchRequest::new, - (request, channel, task) -> { - searchService.canMatch(request, new ChannelActionListener<>(channel)); - } - ); - TransportActionProxy.registerProxyAction(transportService, QUERY_CAN_MATCH_NAME, true, CanMatchShardResponse::new); - transportService.registerRequestHandler( QUERY_CAN_MATCH_NODE_NAME, transportService.getThreadPool().executor(ThreadPool.Names.SEARCH_COORDINATION), CanMatchNodeRequest::new, - (request, channel, task) -> { - searchService.canMatch(request, new ChannelActionListener<>(channel)); - } + (request, channel, task) -> searchService.canMatch(request, new ChannelActionListener<>(channel)) ); TransportActionProxy.registerProxyAction(transportService, QUERY_CAN_MATCH_NODE_NAME, true, CanMatchNodeResponse::new); } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java index 8de2815a9d416..e1a6bb6c42b2e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.service.ClusterService; @@ -19,6 +20,9 @@ public class TransportClearScrollAction extends HandledTransportAction { + public static final String NAME = "indices:data/read/scroll/clear"; + + public static final ActionType TYPE = new ActionType<>(NAME, ClearScrollResponse::new); private final ClusterService clusterService; private final SearchTransportService searchTransportService; @@ -29,7 +33,7 @@ public TransportClearScrollAction( ActionFilters actionFilters, SearchTransportService searchTransportService ) { - super(ClearScrollAction.NAME, transportService, actionFilters, ClearScrollRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); + super(TYPE.name(), transportService, actionFilters, ClearScrollRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.clusterService = clusterService; this.searchTransportService = searchTransportService; } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportClosePointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportClosePointInTimeAction.java index 0434cb2f5895e..338e63d6af2a6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportClosePointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportClosePointInTimeAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionListener; +import 
org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.service.ClusterService; @@ -22,6 +23,10 @@ public class TransportClosePointInTimeAction extends HandledTransportAction { + public static final ActionType TYPE = new ActionType<>( + "indices:data/read/close_point_in_time", + ClosePointInTimeResponse::new + ); private final ClusterService clusterService; private final SearchTransportService searchTransportService; private final NamedWriteableRegistry namedWriteableRegistry; @@ -34,13 +39,7 @@ public TransportClosePointInTimeAction( SearchTransportService searchTransportService, NamedWriteableRegistry namedWriteableRegistry ) { - super( - ClosePointInTimeAction.NAME, - transportService, - actionFilters, - ClosePointInTimeRequest::new, - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + super(TYPE.name(), transportService, actionFilters, ClosePointInTimeRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.clusterService = clusterService; this.searchTransportService = searchTransportService; this.namedWriteableRegistry = namedWriteableRegistry; diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java index f51c700c8c8c9..a7d971069f96d 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.internal.node.NodeClient; @@ -34,6 +35,8 @@ public class TransportMultiSearchAction extends HandledTransportAction { + public static final String NAME = "indices:data/read/msearch"; + public static final ActionType TYPE = new ActionType<>(NAME, MultiSearchResponse::new); private static final Logger logger = LogManager.getLogger(TransportMultiSearchAction.class); private final int allocatedProcessors; private final ThreadPool threadPool; @@ -50,7 +53,7 @@ public TransportMultiSearchAction( ActionFilters actionFilters, NodeClient client ) { - super(MultiSearchAction.NAME, transportService, actionFilters, MultiSearchRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); + super(TYPE.name(), transportService, actionFilters, MultiSearchRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.threadPool = threadPool; this.clusterService = clusterService; this.allocatedProcessors = EsExecutors.allocatedProcessors(settings); @@ -67,7 +70,7 @@ public TransportMultiSearchAction( LongSupplier relativeTimeProvider, NodeClient client ) { - super(MultiSearchAction.NAME, transportService, actionFilters, MultiSearchRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); + super(TYPE.name(), transportService, actionFilters, MultiSearchRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.threadPool = threadPool; this.clusterService = clusterService; this.allocatedProcessors = allocatedProcessors; @@ -183,9 +186,15 @@ private void handleResponse(final int responseSlot, final MultiSearchResponse.It } private void finish() { - listener.onResponse( - new MultiSearchResponse(responses.toArray(new 
MultiSearchResponse.Item[responses.length()]), buildTookInMillis()) + final var response = new MultiSearchResponse( + responses.toArray(new MultiSearchResponse.Item[responses.length()]), + buildTookInMillis() ); + try { + listener.onResponse(response); + } finally { + response.decRef(); + } } /** diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java index ae3c735e079e9..2bc642e6c0907 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java @@ -8,8 +8,11 @@ package org.elasticsearch.action.search; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.support.ActionFilters; @@ -28,6 +31,7 @@ import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchContextId; @@ -47,7 +51,14 @@ import java.util.function.BiFunction; public class TransportOpenPointInTimeAction extends HandledTransportAction { + + private static final Logger logger = LogManager.getLogger(TransportOpenPointInTimeAction.class); + public static final String OPEN_SHARD_READER_CONTEXT_NAME = "indices:data/read/open_reader_context"; + public static final ActionType TYPE = new ActionType<>( + "indices:data/read/open_point_in_time", + OpenPointInTimeResponse::new + ); private final TransportSearchAction transportSearchAction; private final SearchTransportService searchTransportService; @@ -62,13 +73,7 @@ public TransportOpenPointInTimeAction( TransportSearchAction transportSearchAction, SearchTransportService searchTransportService ) { - super( - OpenPointInTimeAction.NAME, - transportService, - actionFilters, - OpenPointInTimeRequest::new, - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + super(TYPE.name(), transportService, actionFilters, OpenPointInTimeRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.transportService = transportService; this.transportSearchAction = transportSearchAction; this.searchService = searchService; @@ -93,7 +98,8 @@ protected void doExecute(Task task, OpenPointInTimeRequest request, ActionListen .indicesOptions(request.indicesOptions()) .preference(request.preference()) .routing(request.routing()) - .allowPartialSearchResults(false); + .allowPartialSearchResults(false) + .source(new SearchSourceBuilder().query(request.indexFilter())); searchRequest.setMaxConcurrentShardRequests(request.maxConcurrentShardRequests()); searchRequest.setCcsMinimizeRoundtrips(false); transportSearchAction.executeRequest((SearchTask) task, searchRequest, listener.map(r -> { @@ -125,6 +131,63 @@ public SearchPhase newSearchPhase( boolean preFilter, ThreadPool threadPool, SearchResponse.Clusters clusters + ) { + if (SearchService.canRewriteToMatchNone(searchRequest.source())) { + return new CanMatchPreFilterSearchPhase( + 
logger, + searchTransportService, + connectionLookup, + aliasFilter, + concreteIndexBoosts, + threadPool.executor(ThreadPool.Names.SEARCH_COORDINATION), + searchRequest, + shardIterators, + timeProvider, + task, + false, + searchService.getCoordinatorRewriteContextProvider(timeProvider::absoluteStartMillis), + listener.delegateFailureAndWrap( + (searchResponseActionListener, searchShardIterators) -> openPointInTimePhase( + task, + searchRequest, + executor, + searchShardIterators, + timeProvider, + connectionLookup, + clusterState, + aliasFilter, + concreteIndexBoosts, + clusters + ).start() + ) + ); + } else { + return openPointInTimePhase( + task, + searchRequest, + executor, + shardIterators, + timeProvider, + connectionLookup, + clusterState, + aliasFilter, + concreteIndexBoosts, + clusters + ); + } + } + + SearchPhase openPointInTimePhase( + SearchTask task, + SearchRequest searchRequest, + Executor executor, + GroupShardsIterator shardIterators, + TransportSearchAction.SearchTimeProvider timeProvider, + BiFunction connectionLookup, + ClusterState clusterState, + Map aliasFilter, + Map concreteIndexBoosts, + SearchResponse.Clusters clusters ) { assert searchRequest.getMaxConcurrentShardRequests() == pitRequest.maxConcurrentShardRequests() : searchRequest.getMaxConcurrentShardRequests() + " != " + pitRequest.maxConcurrentShardRequests(); diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index a2d01e226b4ed..38d448a8a9372 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -14,6 +14,7 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.ShardOperationFailedException; @@ -111,6 +112,8 @@ public class TransportSearchAction extends HandledTransportAction { + public static final String NAME = "indices:data/read/search"; + public static final ActionType TYPE = new ActionType<>(NAME, SearchResponse::new); private static final Logger logger = LogManager.getLogger(TransportSearchAction.class); private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(TransportSearchAction.class); public static final String FROZEN_INDICES_DEPRECATION_MESSAGE = "Searching frozen indices [{}] is deprecated." 
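// The hunks above move each search action's name and ActionType constant onto its
// Transport* class, retiring the standalone *Action holder classes. A minimal sketch
// of the resulting call site, assuming an injected Client and a caller-supplied
// listener (the index name is a placeholder):
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.TransportSearchAction;
import org.elasticsearch.client.internal.Client;

class SearchCallSite {
    static void search(Client client, ActionListener<SearchResponse> listener) {
        // Resolve the action via the TYPE constant hosted on the transport action,
        // rather than the removed SearchAction.INSTANCE holder.
        client.execute(TransportSearchAction.TYPE, new SearchRequest("my-index"), listener);
    }
}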
@@ -160,7 +163,7 @@ public TransportSearchAction( NamedWriteableRegistry namedWriteableRegistry, ExecutorSelector executorSelector ) { - super(SearchAction.NAME, transportService, actionFilters, SearchRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); + super(TYPE.name(), transportService, actionFilters, SearchRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.threadPool = threadPool; this.circuitBreaker = circuitBreakerService.getBreaker(CircuitBreaker.REQUEST); this.searchPhaseController = searchPhaseController; @@ -722,7 +725,7 @@ Map createFinalResponse() { ); transportService.sendRequest( connection, - SearchShardsAction.NAME, + TransportSearchShardsAction.TYPE.name(), searchShardsRequest, TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(singleListener, SearchShardsResponse::new, responseExecutor) diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java index 9874bcfb56c6a..0a2b496a5eb8a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; @@ -26,6 +27,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.parseScrollId; public class TransportSearchScrollAction extends HandledTransportAction { + public static final ActionType TYPE = new ActionType<>("indices:data/read/scroll", SearchResponse::new); private static final Logger logger = LogManager.getLogger(TransportSearchScrollAction.class); private final ClusterService clusterService; private final SearchTransportService searchTransportService; @@ -37,7 +39,7 @@ public TransportSearchScrollAction( ActionFilters actionFilters, SearchTransportService searchTransportService ) { - super(SearchScrollAction.NAME, transportService, actionFilters, SearchScrollRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); + super(TYPE.name(), transportService, actionFilters, SearchScrollRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.clusterService = clusterService; this.searchTransportService = searchTransportService; } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java index 4c8ade4d78ead..0d1672c77cbed 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; @@ -41,6 +42,9 @@ * An internal search shards API performs the can_match phase and returns target shards of indices that might match a query. 
*/ public class TransportSearchShardsAction extends HandledTransportAction { + + public static final String NAME = "indices:admin/search/search_shards"; + public static final ActionType TYPE = new ActionType<>(NAME, SearchShardsResponse::new); private final TransportService transportService; private final TransportSearchAction transportSearchAction; private final SearchService searchService; @@ -61,7 +65,7 @@ public TransportSearchShardsAction( IndexNameExpressionResolver indexNameExpressionResolver ) { super( - SearchShardsAction.NAME, + TYPE.name(), transportService, actionFilters, SearchShardsRequest::new, @@ -79,7 +83,6 @@ public TransportSearchShardsAction( @Override protected void doExecute(Task task, SearchShardsRequest searchShardsRequest, ActionListener listener) { - assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SEARCH_COORDINATION); final long relativeStartNanos = System.nanoTime(); SearchRequest original = new SearchRequest(searchShardsRequest.indices()).indicesOptions(searchShardsRequest.indicesOptions()) .routing(searchShardsRequest.routing()) diff --git a/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java b/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java index a4836ca322035..c746bc9acf2a1 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java @@ -28,6 +28,7 @@ public ChannelActionListener(TransportChannel channel) { @Override public void onResponse(Response response) { + response.incRef(); // acquire reference that will be released by channel.sendResponse below ActionListener.run(this, l -> l.channel.sendResponse(response)); } diff --git a/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java b/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java index b360443a396d1..721983b6af0e7 100644 --- a/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java +++ b/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java @@ -30,10 +30,6 @@ public class PlainActionFuture implements ActionFuture, ActionListener { - public static PlainActionFuture newFuture() { - return new PlainActionFuture<>(); - } - @Override public void onResponse(T result) { set(result); @@ -442,13 +438,13 @@ private static RuntimeException unwrapEsException(ElasticsearchException esEx) { } public static T get(CheckedConsumer, E> e) throws E { - PlainActionFuture fut = newFuture(); + PlainActionFuture fut = new PlainActionFuture<>(); e.accept(fut); return fut.actionGet(); } public static T get(CheckedConsumer, E> e, long timeout, TimeUnit unit) throws E { - PlainActionFuture fut = newFuture(); + PlainActionFuture fut = new PlainActionFuture<>(); e.accept(fut); return fut.actionGet(timeout, unit); } diff --git a/server/src/main/java/org/elasticsearch/action/support/RefCountingListener.java b/server/src/main/java/org/elasticsearch/action/support/RefCountingListener.java index f4d580a44621f..ff5c3115e569b 100644 --- a/server/src/main/java/org/elasticsearch/action/support/RefCountingListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/RefCountingListener.java @@ -10,13 +10,13 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Releasable; import java.util.Objects; import 
java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; /** * A mechanism to complete a listener on the completion of some (dynamic) collection of other actions. Basic usage is as follows: @@ -176,7 +176,7 @@ public String toString() { * It is also invalid to complete the returned listener more than once. Doing so will trip an assertion if assertions are enabled, but * will be ignored otherwise. */ - public ActionListener acquire(Consumer consumer) { + public ActionListener acquire(CheckedConsumer consumer) { final var ref = refs.acquire(); final var consumerRef = new AtomicReference<>(Objects.requireNonNull(consumer)); return new ActionListener<>() { @@ -187,10 +187,12 @@ public void onResponse(Response response) { if (acquiredConsumer == null) { assert false : "already closed"; } else { - acquiredConsumer.accept(response); + try { + acquiredConsumer.accept(response); + } catch (Exception e) { + addException(e); + } } - } catch (Exception e) { - addException(e); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/RefCountingRunnable.java b/server/src/main/java/org/elasticsearch/action/support/RefCountingRunnable.java index d05f698749a3b..8dcc801f10c30 100644 --- a/server/src/main/java/org/elasticsearch/action/support/RefCountingRunnable.java +++ b/server/src/main/java/org/elasticsearch/action/support/RefCountingRunnable.java @@ -63,7 +63,6 @@ public final class RefCountingRunnable implements Releasable { private static final Logger logger = LogManager.getLogger(RefCountingRunnable.class); - static final String ALREADY_CLOSED_MESSAGE = "already closed, cannot acquire or release any further refs"; private final RefCounted refCounted; @@ -86,14 +85,11 @@ public RefCountingRunnable(Runnable delegate) { * will be ignored otherwise. This deviates from the contract of {@link java.io.Closeable}. */ public Releasable acquire() { - if (refCounted.tryIncRef()) { - // All refs are considered equal so there's no real need to allocate a new object here, although note that this deviates - // (subtly) from the docs for Closeable#close() which indicate that it should be idempotent. But only if assertions are - // disabled, and if assertions are enabled then we are asserting that we never double-close these things anyway. - return Releasables.assertOnce(this); - } - assert false : ALREADY_CLOSED_MESSAGE; - throw new IllegalStateException(ALREADY_CLOSED_MESSAGE); + refCounted.mustIncRef(); + // All refs are considered equal so there's no real need to allocate a new object here, although note that this deviates (subtly) + // from the docs for Closeable#close() which indicate that it should be idempotent. But only if assertions are disabled, and if + // assertions are enabled then we are asserting that we never double-close these things anyway. 
+ return Releasables.assertOnce(this); } /** diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index 194b4852c16d7..19c7561ccdb15 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -228,7 +228,7 @@ protected String[] resolveConcreteIndexNames(ClusterState clusterState, Request @Override protected void doExecute(Task task, Request request, ActionListener listener) { // workaround for https://github.com/elastic/elasticsearch/issues/97916 - TODO remove this when we can - request.incRef(); + request.mustIncRef(); executor.execute(ActionRunnable.wrapReleasing(listener, request::decRef, l -> doExecuteForked(task, request, listener))); } @@ -474,7 +474,7 @@ class NodeRequest extends TransportRequest implements IndicesRequest { } NodeRequest(Request indicesLevelRequest, List shards, String nodeId) { - indicesLevelRequest.incRef(); + indicesLevelRequest.mustIncRef(); this.indicesLevelRequest = indicesLevelRequest; this.shards = shards; this.nodeId = nodeId; diff --git a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java index 71964d737e8d2..b771f6cc512d1 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java @@ -169,7 +169,7 @@ protected void doExecute(Task task, final Request request, ActionListener li protected abstract Response shardOperation(Request request, ShardId shardId) throws IOException; protected void asyncShardOperation(Request request, ShardId shardId, ActionListener listener) throws IOException { - getExecutor(request, shardId).execute(ActionRunnable.supply(listener, () -> shardOperation(request, shardId))); + getExecutor(request, shardId).execute(ActionRunnable.supplyAndDecRef(listener, () -> shardOperation(request, shardId))); } protected abstract Writeable.Reader getResponseReader(); diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java index af87fd8cddb4b..e94a619c7785e 100644 --- a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java @@ -290,7 +290,7 @@ public void writeTo(StreamOutput out) throws IOException { protected NodeTaskRequest(TasksRequest tasksRequest) { super(); - tasksRequest.incRef(); + tasksRequest.mustIncRef(); this.tasksRequest = tasksRequest; } @@ -356,14 +356,6 @@ private class NodeTasksResponse extends TransportResponse { this.exceptions = exceptions; } - public String getNodeId() { - return nodeId; - } - - public List getExceptions() { - return exceptions; - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(nodeId); diff --git a/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java b/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java index 0228dc7cc61ea..f75997d92b678 100644 --- 
a/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java @@ -110,7 +110,7 @@ public Task exe transportAction(action), request, localConnection, - new SafelyWrappedActionListener<>(listener) + ActionListener.assertOnce(listener) ); } @@ -148,27 +148,4 @@ public NamedWriteableRegistry getNamedWriteableRegistry() { return namedWriteableRegistry; } - private record SafelyWrappedActionListener(ActionListener listener) implements ActionListener { - - @Override - public void onResponse(Response response) { - try { - listener.onResponse(response); - } catch (Exception e) { - assert false : new AssertionError("callback must handle its own exceptions", e); - throw e; - } - } - - @Override - public void onFailure(Exception e) { - try { - listener.onFailure(e); - } catch (Exception ex) { - ex.addSuppressed(e); - assert false : new AssertionError("callback must handle its own exceptions", ex); - throw ex; - } - } - } } diff --git a/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java index 98bad4d3dd74c..82d0f2fb85847 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java @@ -247,10 +247,10 @@ import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteRequestBuilder; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.explain.ExplainAction; import org.elasticsearch.action.explain.ExplainRequest; import org.elasticsearch.action.explain.ExplainRequestBuilder; import org.elasticsearch.action.explain.ExplainResponse; +import org.elasticsearch.action.explain.TransportExplainAction; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequestBuilder; @@ -280,21 +280,21 @@ import org.elasticsearch.action.ingest.SimulatePipelineRequest; import org.elasticsearch.action.ingest.SimulatePipelineRequestBuilder; import org.elasticsearch.action.ingest.SimulatePipelineResponse; -import org.elasticsearch.action.search.ClearScrollAction; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.ClearScrollRequestBuilder; import org.elasticsearch.action.search.ClearScrollResponse; -import org.elasticsearch.action.search.MultiSearchAction; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.MultiSearchRequestBuilder; import org.elasticsearch.action.search.MultiSearchResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchScrollAction; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.SearchScrollRequestBuilder; +import org.elasticsearch.action.search.TransportClearScrollAction; +import org.elasticsearch.action.search.TransportMultiSearchAction; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchScrollAction; import 
org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.termvectors.MultiTermVectorsAction; @@ -320,11 +320,14 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; public abstract class AbstractClient implements Client { @@ -361,7 +364,7 @@ public final Ac ActionType action, Request request ) { - PlainActionFuture actionFuture = PlainActionFuture.newFuture(); + PlainActionFuture actionFuture = new RefCountedFuture<>(); execute(action, request, actionFuture); return actionFuture; } @@ -506,47 +509,47 @@ public MultiGetRequestBuilder prepareMultiGet() { @Override public ActionFuture search(final SearchRequest request) { - return execute(SearchAction.INSTANCE, request); + return execute(TransportSearchAction.TYPE, request); } @Override public void search(final SearchRequest request, final ActionListener listener) { - execute(SearchAction.INSTANCE, request, listener); + execute(TransportSearchAction.TYPE, request, listener); } @Override public SearchRequestBuilder prepareSearch(String... indices) { - return new SearchRequestBuilder(this, SearchAction.INSTANCE).setIndices(indices); + return new SearchRequestBuilder(this, TransportSearchAction.TYPE).setIndices(indices); } @Override public ActionFuture searchScroll(final SearchScrollRequest request) { - return execute(SearchScrollAction.INSTANCE, request); + return execute(TransportSearchScrollAction.TYPE, request); } @Override public void searchScroll(final SearchScrollRequest request, final ActionListener listener) { - execute(SearchScrollAction.INSTANCE, request, listener); + execute(TransportSearchScrollAction.TYPE, request, listener); } @Override public SearchScrollRequestBuilder prepareSearchScroll(String scrollId) { - return new SearchScrollRequestBuilder(this, SearchScrollAction.INSTANCE, scrollId); + return new SearchScrollRequestBuilder(this, TransportSearchScrollAction.TYPE, scrollId); } @Override public ActionFuture multiSearch(MultiSearchRequest request) { - return execute(MultiSearchAction.INSTANCE, request); + return execute(TransportMultiSearchAction.TYPE, request); } @Override public void multiSearch(MultiSearchRequest request, ActionListener listener) { - execute(MultiSearchAction.INSTANCE, request, listener); + execute(TransportMultiSearchAction.TYPE, request, listener); } @Override public MultiSearchRequestBuilder prepareMultiSearch() { - return new MultiSearchRequestBuilder(this, MultiSearchAction.INSTANCE); + return new MultiSearchRequestBuilder(this, TransportMultiSearchAction.TYPE); } @Override @@ -586,32 +589,32 @@ public MultiTermVectorsRequestBuilder prepareMultiTermVectors() { @Override public ExplainRequestBuilder prepareExplain(String index, String id) { - return new ExplainRequestBuilder(this, ExplainAction.INSTANCE, index, id); + return new ExplainRequestBuilder(this, TransportExplainAction.TYPE, index, id); } @Override public ActionFuture explain(ExplainRequest request) { - return execute(ExplainAction.INSTANCE, request); + return execute(TransportExplainAction.TYPE, request); } @Override public void explain(ExplainRequest 
request, ActionListener listener) { - execute(ExplainAction.INSTANCE, request, listener); + execute(TransportExplainAction.TYPE, request, listener); } @Override public void clearScroll(ClearScrollRequest request, ActionListener listener) { - execute(ClearScrollAction.INSTANCE, request, listener); + execute(TransportClearScrollAction.TYPE, request, listener); } @Override public ActionFuture clearScroll(ClearScrollRequest request) { - return execute(ClearScrollAction.INSTANCE, request); + return execute(TransportClearScrollAction.TYPE, request); } @Override public ClearScrollRequestBuilder prepareClearScroll() { - return new ClearScrollRequestBuilder(this, ClearScrollAction.INSTANCE); + return new ClearScrollRequestBuilder(this, TransportClearScrollAction.TYPE); } @Override @@ -1598,4 +1601,34 @@ protected void } }; } + + /** + * Same as {@link PlainActionFuture} but for use with {@link RefCounted} result types. Unlike {@code PlainActionFuture} this future + * acquires a reference to its result. This means that the result reference must be released by a call to {@link RefCounted#decRef()} + * on the result before it goes out of scope. + * @param reference counted result type + */ + private static class RefCountedFuture extends PlainActionFuture { + + @Override + public final void onResponse(R result) { + result.mustIncRef(); + if (set(result) == false) { + result.decRef(); + } + } + + private final AtomicBoolean getCalled = new AtomicBoolean(false); + + @Override + public R get() throws InterruptedException, ExecutionException { + final boolean firstCall = getCalled.compareAndSet(false, true); + if (firstCall == false) { + final IllegalStateException ise = new IllegalStateException("must only call .get() once per instance to avoid leaks"); + assert false : ise; + throw ise; + } + return super.get(); + } + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java b/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java index b400269265224..0392ca2e6581a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java @@ -92,26 +92,6 @@ public boolean indexRoutingTableChanged(String index) { return true; } - /** - * Returns the indices created in this event - */ - public List indicesCreated() { - if (metadataChanged() == false) { - return Collections.emptyList(); - } - List created = null; - for (Map.Entry cursor : state.metadata().indices().entrySet()) { - String index = cursor.getKey(); - if (previousState.metadata().hasIndex(index) == false) { - if (created == null) { - created = new ArrayList<>(); - } - created.add(index); - } - } - return created == null ? 
Collections.emptyList() : created; - } - /** * Returns the indices deleted in this event */ diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java b/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java index ae68bfafdd6c5..95cc53376af59 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java @@ -12,12 +12,14 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ChunkedToXContentObject; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.IdentityHashMap; @@ -50,24 +52,24 @@ public ClusterFeatures(Map<String, Set<String>> nodeFeatures) { .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, e -> Set.copyOf(e.getValue()))); } - private Set<String> calculateAllNodeFeatures() { + public static Set<String> calculateAllNodeFeatures(Collection<Set<String>> nodeFeatures) { if (nodeFeatures.isEmpty()) { return Set.of(); } Set<String> allNodeFeatures = null; - for (Set<String> featureSet : nodeFeatures.values()) { + for (Set<String> featureSet : nodeFeatures) { if (allNodeFeatures == null) { allNodeFeatures = new HashSet<>(featureSet); } else { allNodeFeatures.retainAll(featureSet); } } - return Set.copyOf(allNodeFeatures); + return allNodeFeatures; } /** - * Returns the features reported by each node in the cluster. + * The features reported by each node in the cluster. * <p>
    * NOTE: This should not be used directly. * Please use {@link org.elasticsearch.features.FeatureService#clusterHasFeature} instead. @@ -76,17 +78,28 @@ public Map<String, Set<String>> nodeFeatures() { return nodeFeatures; } + /** + * The features in all nodes in the cluster. + * <p>
    + * NOTE: This should not be used directly. + * Please use {@link org.elasticsearch.features.FeatureService#clusterHasFeature} instead. + */ + public Set<String> allNodeFeatures() { + if (allNodeFeatures == null) { + allNodeFeatures = Set.copyOf(calculateAllNodeFeatures(nodeFeatures.values())); + } + return allNodeFeatures; + } + /** * {@code true} if {@code feature} is present on all nodes in the cluster. * <p>
    * NOTE: This should not be used directly, as it does not read historical features. * Please use {@link org.elasticsearch.features.FeatureService#clusterHasFeature} instead. */ + @SuppressForbidden(reason = "directly reading cluster features") public boolean clusterHasFeature(NodeFeature feature) { - if (allNodeFeatures == null) { - allNodeFeatures = calculateAllNodeFeatures(); - } - return allNodeFeatures.contains(feature.id()); + return allNodeFeatures().contains(feature.id()); } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index ee94008372dab..5f682804a5b88 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -8,7 +8,6 @@ package org.elasticsearch.cluster; -import org.elasticsearch.Version; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.ComponentTemplateMetadata; @@ -65,6 +64,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.health.metadata.HealthMetadataService; import org.elasticsearch.health.node.selection.HealthNodeTaskExecutor; @@ -77,6 +77,7 @@ import org.elasticsearch.snapshots.SnapshotsInfoService; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskResultsService; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.upgrades.FeatureMigrationResults; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -126,7 +127,8 @@ public ClusterModule( SnapshotsInfoService snapshotsInfoService, ThreadPool threadPool, SystemIndices systemIndices, - WriteLoadForecaster writeLoadForecaster + WriteLoadForecaster writeLoadForecaster, + TelemetryProvider telemetryProvider ) { this.clusterPlugins = clusterPlugins; this.deciderList = createAllocationDeciders(settings, clusterService.getClusterSettings(), clusterPlugins); @@ -138,7 +140,8 @@ public ClusterModule( clusterPlugins, clusterService, this::reconcile, - writeLoadForecaster + writeLoadForecaster, + telemetryProvider ); this.clusterService = clusterService; this.indexNameExpressionResolver = new IndexNameExpressionResolver(threadPool.getThreadContext(), systemIndices); @@ -373,6 +376,7 @@ private static void addAllocationDecider(Map, AllocationDecider> decide } } + @UpdateForV9 // in v9 there is only one allocator private static ShardsAllocator createShardsAllocator( Settings settings, ClusterSettings clusterSettings, @@ -380,7 +384,8 @@ private static ShardsAllocator createShardsAllocator( List clusterPlugins, ClusterService clusterService, DesiredBalanceReconcilerAction reconciler, - WriteLoadForecaster writeLoadForecaster + WriteLoadForecaster writeLoadForecaster, + TelemetryProvider telemetryProvider ) { Map> allocators = new HashMap<>(); allocators.put(BALANCED_ALLOCATOR, () -> new BalancedShardsAllocator(clusterSettings, writeLoadForecaster)); @@ -391,7 +396,8 @@ private static ShardsAllocator createShardsAllocator( new BalancedShardsAllocator(clusterSettings, writeLoadForecaster), threadPool, clusterService, - reconciler + reconciler, + telemetryProvider ) ); @@ -404,7 
+410,6 @@ private static ShardsAllocator createShardsAllocator( }); } String allocatorName = SHARDS_ALLOCATOR_TYPE_SETTING.get(settings); - assert Version.CURRENT.major == Version.V_7_17_0.major + 1; // in v9 there is only one allocator Supplier allocatorSupplier = allocators.get(allocatorName); if (allocatorSupplier == null) { throw new IllegalArgumentException("Unknown ShardsAllocator [" + allocatorName + "]"); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index 565e43455d8d7..e861ff3ecf27e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -818,12 +818,6 @@ public DiscoveryNodes nodes() { return nodes; } - // Deprecate to keep downstream projects compiling - @Deprecated(forRemoval = true) - public Builder putTransportVersion(String nodeId, TransportVersion transportVersion) { - return putCompatibilityVersions(nodeId, transportVersion, Map.of()); - } - public Builder putCompatibilityVersions( String nodeId, TransportVersion transportVersion, @@ -840,12 +834,6 @@ public Builder putCompatibilityVersions(String nodeId, CompatibilityVersions ver return this; } - // Deprecate to keep downstream projects compiling - @Deprecated(forRemoval = true) - public Builder compatibilityVersions(Map versions) { - return nodeIdsToCompatibilityVersions(versions); - } - public Builder nodeIdsToCompatibilityVersions(Map versions) { versions.forEach((key, value) -> Objects.requireNonNull(value, key)); // remove all versions not present in the new map @@ -1047,7 +1035,7 @@ private static TransportVersion inferTransportVersion(DiscoveryNode node) { TransportVersion tv; if (node.getVersion().before(Version.V_8_8_0)) { // 1-to-1 mapping between Version and TransportVersion - tv = TransportVersion.fromId(node.getVersion().id); + tv = TransportVersion.fromId(node.getPre811VersionId().getAsInt()); } else { // use the lowest value it could be for now tv = INFERRED_TRANSPORT_VERSION; diff --git a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index bd5fbe189ead5..1744bcc91b834 100644 --- a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; @@ -284,8 +285,9 @@ public void onFailure(Exception e) { private void fetchNodeStats() { final NodesStatsRequest nodesStatsRequest = new NodesStatsRequest("data:true"); + nodesStatsRequest.setIncludeShardsStats(false); nodesStatsRequest.clear(); - nodesStatsRequest.addMetric(NodesStatsRequest.Metric.FS.metricName()); + nodesStatsRequest.addMetric(NodesStatsRequestParameters.Metric.FS.metricName()); nodesStatsRequest.timeout(fetchTimeout); client.admin().cluster().nodesStats(nodesStatsRequest, ActionListener.releaseAfter(new ActionListener<>() { 
@Override diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 0f046d4ab94f1..1a079d03405d7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; @@ -221,11 +222,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public Iterator toXContentChunked(ToXContent.Params ignored) { - return Iterators.concat( - Iterators.single((builder, params) -> builder.startArray("snapshots")), - asStream().iterator(), - Iterators.single((builder, params) -> builder.endArray()) - ); + return Iterators.concat(ChunkedToXContentHelper.startArray("snapshots"), asStream().iterator(), ChunkedToXContentHelper.endArray()); } @Override @@ -346,14 +343,20 @@ private static boolean assertConsistentEntries(Map entries) { assert entry.repository().equals(repository) : "mismatched repository " + entry + " tracked under " + repository; for (Map.Entry shard : entry.shardsByRepoShardId().entrySet()) { final RepositoryShardId sid = shard.getKey(); + final ShardSnapshotStatus shardSnapshotStatus = shard.getValue(); assert assertShardStateConsistent( entriesForRepository, assignedShards, queuedShards, sid.indexName(), sid.shardId(), - shard.getValue() + shardSnapshotStatus ); + + assert entry.state() != State.ABORTED + || shardSnapshotStatus.state == ShardState.ABORTED + || shardSnapshotStatus.state().completed() + : sid + " is in state " + shardSnapshotStatus.state() + " in aborted snapshot " + entry.snapshot; } } // make sure in-flight-shard-states can be built cleanly for the entries without tripping assertions diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 0f84ecab5f8b2..51fca588699e2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.MasterServiceTaskQueue; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -60,7 +61,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -283,13 +283,13 @@ private static class ShardFailedTransportHandler implements TransportRequestHand private static final String TASK_SOURCE = "shard-failed"; @Override - public void messageReceived(FailedShardEntry request, TransportChannel channel, Task task) throws Exception { + public void messageReceived(FailedShardEntry request, TransportChannel channel, Task task) { logger.debug(() -> format("%s received shard failed for [%s]", request.getShardId(), request), request.failure); 
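// The SnapshotsInProgress hunk above swaps hand-built start/end-array chunks for
// ChunkedToXContentHelper. A minimal sketch of the same idiom; Wrapper and its items
// are hypothetical, and the enclosing serializer is assumed to supply the surrounding
// object, as it does for cluster-state customs:
import java.util.Iterator;
import java.util.List;

import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.xcontent.ChunkedToXContentHelper;
import org.elasticsearch.common.xcontent.ChunkedToXContentObject;
import org.elasticsearch.xcontent.ToXContent;

record Wrapper(List<? extends ToXContent> items) implements ChunkedToXContentObject {
    @Override
    public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) {
        // Emits the "items" array one element per chunk instead of building it eagerly.
        return Iterators.concat(ChunkedToXContentHelper.startArray("items"), items.iterator(), ChunkedToXContentHelper.endArray());
    }
}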
- var update = new FailedShardUpdateTask( - request, - new ChannelActionListener<>(channel).map(ignored -> TransportResponse.Empty.INSTANCE) + taskQueue.submitTask( + TASK_SOURCE, + new FailedShardUpdateTask(request, new ChannelActionListener<>(channel).map(ignored -> TransportResponse.Empty.INSTANCE)), + null ); - taskQueue.submitTask(TASK_SOURCE, update, null); } } @@ -423,7 +423,7 @@ public void clusterStatePublished(ClusterState newClusterState) { // The reroute called after failing some shards will not assign any shard back to the node on which it failed. If there were // no other options for a failed shard then it is left unassigned. However, absent other options it's better to try and // assign it again, even if that means putting it back on the node on which it previously failed: - final String reason = String.format(Locale.ROOT, "[%d] unassigned shards after failing shards", numberOfUnassignedShards); + final String reason = Strings.format("[%d] unassigned shards after failing shards", numberOfUnassignedShards); logger.trace("{}, scheduling a reroute", reason); rerouteService.reroute( reason, @@ -493,16 +493,15 @@ public void writeTo(StreamOutput out) throws IOException { @Override public String toString() { - List components = new ArrayList<>(6); - components.add("shard id [" + shardId + "]"); - components.add("allocation id [" + allocationId + "]"); - components.add("primary term [" + primaryTerm + "]"); - components.add("message [" + message + "]"); - components.add("markAsStale [" + markAsStale + "]"); - if (failure != null) { - components.add("failure [" + ExceptionsHelper.stackTrace(failure) + "]"); - } - return String.join(", ", components); + return Strings.format( + "FailedShardEntry{shardId [%s], allocationId [%s], primary term [%d], message [%s], markAsStale [%b], failure [%s]}", + shardId, + allocationId, + primaryTerm, + message, + markAsStale, + failure != null ? 
ExceptionsHelper.stackTrace(failure) : null + ); } @Override @@ -785,8 +784,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public String toString() { - return String.format( - Locale.ROOT, + return Strings.format( "StartedShardEntry{shardId [%s], allocationId [%s], primary term [%d], message [%s]}", shardId, allocationId, diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java index 113e8b0a7f388..402e170f1ea53 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java @@ -183,7 +183,7 @@ public Join handleStartJoin(StartJoinRequest startJoinRequest) { final String reason; if (electionWon == false) { reason = "failed election"; - } else if (startJoinRequest.getSourceNode().equals(localNode)) { + } else if (startJoinRequest.getMasterCandidateNode().equals(localNode)) { reason = "bumping term"; } else { reason = "standing down as leader"; @@ -200,7 +200,13 @@ public Join handleStartJoin(StartJoinRequest startJoinRequest) { joinVotes = new VoteCollection(); publishVotes = new VoteCollection(); - return new Join(localNode, startJoinRequest.getSourceNode(), getCurrentTerm(), getLastAcceptedTerm(), getLastAcceptedVersion()); + return new Join( + localNode, + startJoinRequest.getMasterCandidateNode(), + getCurrentTerm(), + getLastAcceptedTerm(), + getLastAcceptedVersion() + ); } /** @@ -211,12 +217,12 @@ public Join handleStartJoin(StartJoinRequest startJoinRequest) { * @throws CoordinationStateRejectedException if the arguments were incompatible with the current state of this object. 
*/ public boolean handleJoin(Join join) { - assert join.targetMatches(localNode) : "handling join " + join + " for the wrong node " + localNode; + assert join.masterCandidateMatches(localNode) : "handling join " + join + " for the wrong node " + localNode; - if (join.getTerm() != getCurrentTerm()) { - logger.debug("handleJoin: ignored join due to term mismatch (expected: [{}], actual: [{}])", getCurrentTerm(), join.getTerm()); + if (join.term() != getCurrentTerm()) { + logger.debug("handleJoin: ignored join due to term mismatch (expected: [{}], actual: [{}])", getCurrentTerm(), join.term()); throw new CoordinationStateRejectedException( - "incoming term " + join.getTerm() + " does not match current term " + getCurrentTerm() + "incoming term " + join.term() + " does not match current term " + getCurrentTerm() ); } @@ -226,30 +232,30 @@ public boolean handleJoin(Join join) { } final long lastAcceptedTerm = getLastAcceptedTerm(); - if (join.getLastAcceptedTerm() > lastAcceptedTerm) { + if (join.lastAcceptedTerm() > lastAcceptedTerm) { logger.debug( "handleJoin: ignored join as joiner has a better last accepted term (expected: <=[{}], actual: [{}])", lastAcceptedTerm, - join.getLastAcceptedTerm() + join.lastAcceptedTerm() ); throw new CoordinationStateRejectedException( "incoming last accepted term " - + join.getLastAcceptedTerm() + + join.lastAcceptedTerm() + " of join higher than current last accepted term " + lastAcceptedTerm ); } - if (join.getLastAcceptedTerm() == lastAcceptedTerm && join.getLastAcceptedVersion() > getLastAcceptedVersion()) { + if (join.lastAcceptedTerm() == lastAcceptedTerm && join.lastAcceptedVersion() > getLastAcceptedVersion()) { logger.debug( "handleJoin: ignored join as joiner has a better last accepted version (expected: <=[{}], actual: [{}]) in term {}", getLastAcceptedVersion(), - join.getLastAcceptedVersion(), + join.lastAcceptedVersion(), lastAcceptedTerm ); throw new CoordinationStateRejectedException( "incoming last accepted version " - + join.getLastAcceptedVersion() + + join.lastAcceptedVersion() + " of join higher than current last accepted version " + getLastAcceptedVersion() + " in term " @@ -274,7 +280,7 @@ public boolean handleJoin(Join join) { logger.debug( "handleJoin: added join {} from [{}] for election, electionWon={} lastAcceptedTerm={} lastAcceptedVersion={}", join, - join.getSourceNode(), + join.votingNode(), electionWon, lastAcceptedTerm, getLastAcceptedVersion() @@ -592,7 +598,7 @@ public boolean addVote(DiscoveryNode sourceNode) { } public boolean addJoinVote(Join join) { - final boolean added = addVote(join.getSourceNode()); + final boolean added = addVote(join.votingNode()); if (added) { joins.add(join); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 08e31e11ae256..3da890b37ade8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -13,7 +13,7 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ChannelActionListener; -import org.elasticsearch.action.support.ListenableActionFuture; +import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.client.internal.Client; @@ 
-63,6 +63,7 @@ import org.elasticsearch.discovery.SeedHostsProvider; import org.elasticsearch.discovery.SeedHostsResolver; import org.elasticsearch.discovery.TransportAddressConnector; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.monitor.NodeHealthService; import org.elasticsearch.monitor.StatusInfo; @@ -212,7 +213,7 @@ public Coordinator( LeaderHeartbeatService leaderHeartbeatService, PreVoteCollector.Factory preVoteCollectorFactory, CompatibilityVersions compatibilityVersions, - Set features + FeatureService featureService ) { this.settings = settings; this.transportService = transportService; @@ -238,7 +239,7 @@ public Coordinator( reconfigurator::maybeReconfigureAfterNewMasterIsElected, this::getLatestStoredStateAfterWinningAnElection, compatibilityVersions, - features + featureService ); this.joinValidationService = new JoinValidationService( settings, @@ -488,7 +489,7 @@ PublishWithJoinResponse handlePublishRequest(PublishRequest publishRequest) { } private static Optional joinWithDestination(Optional lastJoin, DiscoveryNode leader, long term) { - if (lastJoin.isPresent() && lastJoin.get().targetMatches(leader) && lastJoin.get().getTerm() == term) { + if (lastJoin.isPresent() && lastJoin.get().masterCandidateMatches(leader) && lastJoin.get().term() == term) { return lastJoin; } @@ -565,6 +566,10 @@ public void onFailure(Exception e) { }); } + /** + * Attempts to abdicate master position to a new master-eligible node in the cluster. + * Broadcasts {@link StartJoinRequest} for {@param newMaster} to each member of the cluster. + */ private void abdicateTo(DiscoveryNode newMaster) { assert Thread.holdsLock(mutex); assert mode == Mode.LEADER : "expected to be leader on abdication but was " + mode; @@ -594,7 +599,7 @@ private Optional ensureTermAtLeast(DiscoveryNode sourceNode, long targetTe private Join joinLeaderInTerm(StartJoinRequest startJoinRequest) { synchronized (mutex) { - logger.debug("joinLeaderInTerm: for [{}] with term {}", startJoinRequest.getSourceNode(), startJoinRequest.getTerm()); + logger.debug("joinLeaderInTerm: for [{}] with term {}", startJoinRequest.getMasterCandidateNode(), startJoinRequest.getTerm()); final Join join = coordinationState.get().handleStartJoin(startJoinRequest); lastJoin = Optional.of(join); peerFinder.setCurrentTerm(getCurrentTerm()); @@ -629,21 +634,11 @@ private void handleJoinRequest(JoinRequest joinRequest, ActionListener joi transportService.connectToNode(joinRequest.getSourceNode(), new ActionListener<>() { @Override public void onResponse(Releasable response) { - boolean retainConnection = false; - try { - validateJoinRequest( - joinRequest, - ActionListener.runBefore(joinListener, () -> Releasables.close(response)) - .delegateFailure((l, ignored) -> processJoinRequest(joinRequest, l)) - ); - retainConnection = true; - } catch (Exception e) { - joinListener.onFailure(e); - } finally { - if (retainConnection == false) { - Releasables.close(response); - } - } + validateJoinRequest( + joinRequest, + ActionListener.runBefore(joinListener, () -> Releasables.close(response)) + .delegateFailure((l, ignored) -> processJoinRequest(joinRequest, l)) + ); } @Override @@ -682,48 +677,39 @@ private void validateJoinRequest(JoinRequest joinRequest, ActionListener v // - if we're already master that it can make sense of the current cluster state. 
// - we have a healthy PING channel to the node - final ClusterState stateForJoinValidation = getStateForJoinValidationService(); - final ListenableActionFuture validateStateListener = new ListenableActionFuture<>(); - if (stateForJoinValidation != null) { - assert stateForJoinValidation.nodes().isLocalNodeElectedMaster(); - onJoinValidators.forEach(a -> a.accept(joinRequest.getSourceNode(), stateForJoinValidation)); - if (stateForJoinValidation.getBlocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) == false) { - // We do this in a couple of places including the cluster update thread. This one here is really just best effort to ensure - // we fail as fast as possible. - NodeJoinExecutor.ensureVersionBarrier( - joinRequest.getSourceNode().getVersion(), - stateForJoinValidation.getNodes().getMinNodeVersion() - ); - } - sendJoinValidate(joinRequest.getSourceNode(), validateStateListener); - } else { - sendJoinPing(joinRequest.getSourceNode(), TransportRequestOptions.Type.STATE, validateStateListener); - } + try (var listeners = new RefCountingListener(validateListener)) { + // The join will be rejected if any of these steps fail, but we wait them all to complete, particularly state validation, since + // the node will retry and we don't want lots of cluster states in flight. - sendJoinPing(joinRequest.getSourceNode(), TransportRequestOptions.Type.PING, new ActionListener<>() { - @Override - public void onResponse(Void ignored) { - validateStateListener.addListener(validateListener); - } + ActionListener.completeWith(listeners.acquire(), () -> { + final ClusterState stateForJoinValidation = getStateForJoinValidationService(); + if (stateForJoinValidation == null) { + return null; + } - @Override - public void onFailure(Exception e) { - // The join will be rejected, but we wait for the state validation to complete as well since the node will retry and we - // don't want lots of cluster states in flight. - validateStateListener.addListener(new ActionListener<>() { - @Override - public void onResponse(Void ignored) { - validateListener.onFailure(e); - } + assert stateForJoinValidation.nodes().isLocalNodeElectedMaster(); + onJoinValidators.forEach(a -> a.accept(joinRequest.getSourceNode(), stateForJoinValidation)); + if (stateForJoinValidation.getBlocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) == false) { + // We do this in a couple of places including the cluster update thread. This one here is really just best effort to + // ensure we fail as fast as possible. + NodeJoinExecutor.ensureVersionBarrier( + joinRequest.getSourceNode().getVersion(), + stateForJoinValidation.getNodes().getMinNodeVersion() + ); + } + sendJoinValidate(joinRequest.getSourceNode(), listeners.acquire()); + return null; + }); - @Override - public void onFailure(Exception e2) { - e2.addSuppressed(e); - validateListener.onFailure(e2); - } - }); + if (listeners.isFailing() == false) { + // We may not have sent a state for validation, so just ping both channel types. 
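// The validateJoinRequest rewrite above fans the validation steps out through a single
// RefCountingListener, so the outer listener completes exactly once, after every step
// has finished. A compact sketch of the idiom; stepOne and stepTwo are hypothetical
// stand-ins for sendJoinValidate and sendJoinPing:
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.RefCountingListener;

class ValidationFanOut {
    void stepOne(ActionListener<Void> l) { l.onResponse(null); } // placeholder async step
    void stepTwo(ActionListener<Void> l) { l.onResponse(null); } // placeholder async step

    void validate(ActionListener<Void> finalListener) {
        try (var listeners = new RefCountingListener(finalListener)) {
            ActionListener.completeWith(listeners.acquire(), () -> {
                // synchronous checks here may throw; completeWith routes the exception
                // into the acquired listener instead of letting it escape
                return null;
            });
            if (listeners.isFailing() == false) {
                stepOne(listeners.acquire());
                stepTwo(listeners.acquire());
            }
        } // closing the RefCountingListener releases the initial ref
    }
}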
+ sendJoinPing(joinRequest.getSourceNode(), TransportRequestOptions.Type.PING, listeners.acquire()); + sendJoinPing(joinRequest.getSourceNode(), TransportRequestOptions.Type.STATE, listeners.acquire()); } - }); + } catch (Exception e) { + logger.error("unexpected exception in validateJoinRequest", e); + assert false : e; + } } private void sendJoinValidate(DiscoveryNode discoveryNode, ActionListener listener) { @@ -775,7 +761,7 @@ private void processJoinRequest(JoinRequest joinRequest, ActionListener jo final CoordinationState coordState = coordinationState.get(); final boolean prevElectionWon = coordState.electionWon() - && optionalJoin.stream().allMatch(j -> j.getTerm() <= getCurrentTerm()); + && optionalJoin.stream().allMatch(j -> j.term() <= getCurrentTerm()); optionalJoin.ifPresent(this::handleJoin); joinAccumulator.handleJoinRequest( @@ -1394,7 +1380,7 @@ boolean missingJoinVoteFrom(DiscoveryNode node) { private void handleJoin(Join join) { synchronized (mutex) { - ensureTermAtLeast(getLocalNode(), join.getTerm()).ifPresent(this::handleJoin); + ensureTermAtLeast(getLocalNode(), join.term()).ifPresent(this::handleJoin); if (coordinationState.get().electionWon()) { // If we have already won the election then the actual join does not matter for election purposes, so swallow any exception @@ -2070,7 +2056,7 @@ private void cancelTimeoutHandlers() { } private void handleAssociatedJoin(Join join) { - if (join.getTerm() == getCurrentTerm() && missingJoinVoteFrom(join.getSourceNode())) { + if (join.term() == getCurrentTerm() && missingJoinVoteFrom(join.votingNode())) { logger.trace("handling {}", join); handleJoin(join); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Join.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Join.java index aacbed61b095a..d1fe472278f12 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Join.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Join.java @@ -16,110 +16,44 @@ /** * Triggered by a {@link StartJoinRequest}, instances of this class represent join votes, - * and have a source and target node. The source node is the node that provides the vote, - * and the target node is the node for which this vote is cast. A node will only cast - * a single vote per term, and this for a unique target node. The vote also carries - * information about the current state of the node that provided the vote, so that - * the receiver of the vote can determine if it has a more up-to-date state than the - * source node. + * and have a voting and master-candidate node. The voting node is the node that provides + * the vote, and the master-candidate node is the node for which this vote is cast. A join + * vote is cast to reform the cluster around a particular master-eligible node, to elect + * that node as the new master in a new term. + * + * A voting node will only cast a single vote per term. The vote includes information about + * the current state of the node casting the vote, so that the candidate for the vote can + * determine whether it has a more up-to-date state than the voting node. + * + * @param votingNode The node casting a vote for a master candidate. + * @param masterCandidateNode The master candidate node receiving the vote for election. 
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Join.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Join.java
index aacbed61b095a..d1fe472278f12 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/Join.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Join.java
@@ -16,110 +16,44 @@
 
 /**
  * Triggered by a {@link StartJoinRequest}, instances of this class represent join votes,
- * and have a source and target node. The source node is the node that provides the vote,
- * and the target node is the node for which this vote is cast. A node will only cast
- * a single vote per term, and this for a unique target node. The vote also carries
- * information about the current state of the node that provided the vote, so that
- * the receiver of the vote can determine if it has a more up-to-date state than the
- * source node.
+ * and have a voting and master-candidate node. The voting node is the node that provides
+ * the vote, and the master-candidate node is the node for which this vote is cast. A join
+ * vote is cast to reform the cluster around a particular master-eligible node, to elect
+ * that node as the new master in a new term.
+ *
+ * A voting node will only cast a single vote per term. The vote includes information about
+ * the current state of the node casting the vote, so that the candidate for the vote can
+ * determine whether it has a more up-to-date state than the voting node.
+ *
+ * @param votingNode The node casting a vote for a master candidate.
+ * @param masterCandidateNode The master candidate node receiving the vote for election.
+ * @param term
+ * @param lastAcceptedTerm
+ * @param lastAcceptedVersion
  */
-public class Join implements Writeable {
-    private final DiscoveryNode sourceNode;
-    private final DiscoveryNode targetNode;
-    private final long term;
-    private final long lastAcceptedTerm;
-    private final long lastAcceptedVersion;
-
-    public Join(DiscoveryNode sourceNode, DiscoveryNode targetNode, long term, long lastAcceptedTerm, long lastAcceptedVersion) {
+public record Join(DiscoveryNode votingNode, DiscoveryNode masterCandidateNode, long term, long lastAcceptedTerm, long lastAcceptedVersion)
+    implements
+        Writeable {
+    public Join {
         assert term >= 0;
         assert lastAcceptedTerm >= 0;
         assert lastAcceptedVersion >= 0;
-
-        this.sourceNode = sourceNode;
-        this.targetNode = targetNode;
-        this.term = term;
-        this.lastAcceptedTerm = lastAcceptedTerm;
-        this.lastAcceptedVersion = lastAcceptedVersion;
     }
 
     public Join(StreamInput in) throws IOException {
-        sourceNode = new DiscoveryNode(in);
-        targetNode = new DiscoveryNode(in);
-        term = in.readLong();
-        lastAcceptedTerm = in.readLong();
-        lastAcceptedVersion = in.readLong();
+        this(new DiscoveryNode(in), new DiscoveryNode(in), in.readLong(), in.readLong(), in.readLong());
     }
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        sourceNode.writeTo(out);
-        targetNode.writeTo(out);
+        votingNode.writeTo(out);
+        masterCandidateNode.writeTo(out);
         out.writeLong(term);
         out.writeLong(lastAcceptedTerm);
         out.writeLong(lastAcceptedVersion);
     }
 
-    public DiscoveryNode getSourceNode() {
-        return sourceNode;
-    }
-
-    public DiscoveryNode getTargetNode() {
-        return targetNode;
-    }
-
-    public boolean targetMatches(DiscoveryNode matchingNode) {
-        return targetNode.getId().equals(matchingNode.getId());
-    }
-
-    public long getLastAcceptedVersion() {
-        return lastAcceptedVersion;
-    }
-
-    public long getTerm() {
-        return term;
-    }
-
-    public long getLastAcceptedTerm() {
-        return lastAcceptedTerm;
-    }
-
-    @Override
-    public String toString() {
-        return "Join{"
-            + "term="
-            + term
-            + ", lastAcceptedTerm="
-            + lastAcceptedTerm
-            + ", lastAcceptedVersion="
-            + lastAcceptedVersion
-            + ", sourceNode="
-            + sourceNode
-            + ", targetNode="
-            + targetNode
-            + '}';
-    }
-
-    @Override
-    public boolean equals(Object o) {
-        if (this == o) return true;
-        if (o == null || getClass() != o.getClass()) return false;
-
-        Join join = (Join) o;
-
-        if (sourceNode.equals(join.sourceNode) == false) return false;
-        if (targetNode.equals(join.targetNode) == false) return false;
-        if (lastAcceptedVersion != join.lastAcceptedVersion) return false;
-        if (term != join.term) return false;
-        return lastAcceptedTerm == join.lastAcceptedTerm;
-    }
-
-    @Override
-    public int hashCode() {
-        int result = (int) (lastAcceptedVersion ^ (lastAcceptedVersion >>> 32));
-        result = 31 * result + sourceNode.hashCode();
-        result = 31 * result + targetNode.hashCode();
-        result = 31 * result + (int) (term ^ (term >>> 32));
-        result = 31 * result + (int) (lastAcceptedTerm ^ (lastAcceptedTerm >>> 32));
-        return result;
+    public boolean masterCandidateMatches(DiscoveryNode matchingNode) {
+        return masterCandidateNode.getId().equals(matchingNode.getId());
     }
 }
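An aside on the record conversion: the compact constructor keeps the old assertions, the `StreamInput` constructor delegates to the canonical one so wire and in-memory construction share validation, and the generated accessors (`term()`, `votingNode()`, ...) are what drive the `getTerm()` to `term()` renames in the callers. The same shape on a toy type:

```java
import java.io.IOException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

// Toy example only; not part of the change.
record Vote(long term, long lastAcceptedTerm) implements Writeable {
    Vote {
        assert term >= 0 && lastAcceptedTerm >= 0; // invariants survive the record conversion
    }

    Vote(StreamInput in) throws IOException {
        this(in.readLong(), in.readLong()); // wire constructor delegates, so validation is shared
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeLong(term);
        out.writeLong(lastAcceptedTerm);
    }
}
```

Equality, hash code, and `toString` now come from the record definition, which is why the hand-written implementations above could simply be deleted.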
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java
index e5dee6aeb67e2..d11d8ade2a036 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java
@@ -33,6 +33,7 @@
 import org.elasticsearch.core.Releasables;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.core.Tuple;
+import org.elasticsearch.features.FeatureService;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
 import org.elasticsearch.monitor.NodeHealthService;
 import org.elasticsearch.monitor.StatusInfo;
@@ -62,6 +63,12 @@
 import static org.elasticsearch.core.Strings.format;
 import static org.elasticsearch.monitor.StatusInfo.Status.UNHEALTHY;
 
+/**
+ * Handler for cluster join commands. A master-eligible node running for election will
+ * send a {@link StartJoinRequest} to each voting node in the cluster. A node that becomes
+ * aware of a new term and master will send a {@link Join} request to the new master, to
+ * re-form the cluster around the new master node.
+ */
 public class JoinHelper {
 
     private static final Logger logger = LogManager.getLogger(JoinHelper.class);
@@ -100,12 +107,12 @@ public class JoinHelper {
         Function<ClusterState, ClusterState> maybeReconfigureAfterMasterElection,
         ObjLongConsumer<ActionListener<ClusterState>> latestStoredStateSupplier,
         CompatibilityVersions compatibilityVersions,
-        Set<String> features
+        FeatureService featureService
     ) {
         this.joinTaskQueue = masterService.createTaskQueue(
             "node-join",
             Priority.URGENT,
-            new NodeJoinExecutor(allocationService, rerouteService, maybeReconfigureAfterMasterElection)
+            new NodeJoinExecutor(allocationService, rerouteService, featureService, maybeReconfigureAfterMasterElection)
         );
         this.clusterApplier = clusterApplier;
         this.transportService = transportService;
@@ -115,7 +122,7 @@ public class JoinHelper {
         this.joinReasonService = joinReasonService;
         this.latestStoredStateSupplier = latestStoredStateSupplier;
         this.compatibilityVersions = compatibilityVersions;
-        this.features = features;
+        this.features = featureService.getNodeFeatures().keySet();
 
         transportService.registerRequestHandler(
             JOIN_ACTION_NAME,
@@ -136,7 +143,7 @@ public class JoinHelper {
             false,
             StartJoinRequest::new,
             (request, channel, task) -> {
-                final DiscoveryNode destination = request.getSourceNode();
+                final DiscoveryNode destination = request.getMasterCandidateNode();
                 sendJoinRequest(destination, currentTermSupplier.getAsLong(), Optional.of(joinLeaderInTerm.apply(request)));
                 channel.sendResponse(Empty.INSTANCE);
             }
@@ -368,8 +375,8 @@ public void onFailure(Exception e) {
     }
 
     void sendStartJoinRequest(final StartJoinRequest startJoinRequest, final DiscoveryNode destination) {
-        assert startJoinRequest.getSourceNode().isMasterNode()
-            : "sending start-join request for master-ineligible " + startJoinRequest.getSourceNode();
+        assert startJoinRequest.getMasterCandidateNode().isMasterNode()
+            : "sending start-join request for master-ineligible " + startJoinRequest.getMasterCandidateNode();
         transportService.sendRequest(destination, START_JOIN_ACTION_NAME, startJoinRequest, new TransportResponseHandler.Empty() {
             @Override
             public Executor executor(ThreadPool threadPool) {
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinRequest.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinRequest.java
index 2ba65873738a0..a6a2f454694ae 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinRequest.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinRequest.java
@@ -60,7 +60,7 @@ public JoinRequest(
         long minimumTerm,
         Optional<Join> optionalJoin
     ) {
-        assert optionalJoin.isPresent() == false || optionalJoin.get().getSourceNode().equals(sourceNode);
+        assert optionalJoin.isPresent() == false || optionalJoin.get().votingNode().equals(sourceNode);
         this.sourceNode = sourceNode;
         this.compatibilityVersions = compatibilityVersions;
         this.features = features;
@@ -76,7 +76,10 @@ public JoinRequest(StreamInput in) throws IOException {
         } else {
             // there's a 1-1 mapping from Version to TransportVersion before 8.8.0
             // no known mapping versions here
-            compatibilityVersions = new CompatibilityVersions(TransportVersion.fromId(sourceNode.getVersion().id), Map.of());
+            compatibilityVersions = new CompatibilityVersions(
+                TransportVersion.fromId(sourceNode.getPre811VersionId().getAsInt()),
+                Map.of()
+            );
         }
         if (in.getTransportVersion().onOrAfter(TransportVersions.CLUSTER_FEATURES_ADDED)) {
             features = in.readCollectionAsSet(StreamInput::readString);
@@ -121,7 +124,7 @@ public long getTerm() {
         // If the join is also present then its term will normally equal the corresponding term, but we do not require callers to
         // obtain the term and the join in a synchronized fashion so it's possible that they disagree. Also older nodes do not share the
         // minimum term, so for BWC we can take it from the join if present.
-        return Math.max(minimumTerm, optionalJoin.map(Join::getTerm).orElse(0L));
+        return Math.max(minimumTerm, optionalJoin.map(Join::term).orElse(0L));
     }
 
     public Optional<Join> getOptionalJoin() {
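The BWC comment in `JoinRequest#getTerm()` reduces to a single rule, isolated here as a hypothetical helper:

```java
import java.util.Optional;
import org.elasticsearch.cluster.coordination.Join;

class EffectiveTermSketch {
    // Older nodes don't send minimumTerm, so the join's own term (0 when absent) acts as the floor.
    static long effectiveTerm(long minimumTerm, Optional<Join> optionalJoin) {
        return Math.max(minimumTerm, optionalJoin.map(Join::term).orElse(0L));
    }
}
```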
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java
index d9911ad12df84..6ba35d6aec25a 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java
@@ -12,7 +12,7 @@
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.TransportVersion;
-import org.elasticsearch.Version;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionRunnable;
 import org.elasticsearch.cluster.ClusterState;
@@ -30,6 +30,7 @@
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.RefCounted;
 import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.core.UpdateForV9;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.node.NodeClosedException;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -148,10 +149,23 @@ public JoinValidationService(
     }
 
     public void validateJoin(DiscoveryNode discoveryNode, ActionListener<Void> listener) {
-        if (discoveryNode.getVersion().onOrAfter(Version.V_8_3_0)) {
+        // This node isn't in the cluster yet so ClusterState#getMinTransportVersion() doesn't apply, we must obtain a specific connection
+        // so we can check its transport version to decide how to proceed.
+
+        final Transport.Connection connection;
+        try {
+            connection = transportService.getConnection(discoveryNode);
+            assert connection != null;
+        } catch (Exception e) {
+            assert e instanceof NodeNotConnectedException : e;
+            listener.onFailure(e);
+            return;
+        }
+
+        if (connection.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) {
             if (executeRefs.tryIncRef()) {
                 try {
-                    execute(new JoinValidation(discoveryNode, listener));
+                    execute(new JoinValidation(discoveryNode, connection, listener));
                 } finally {
                     executeRefs.decRef();
                 }
@@ -159,39 +173,44 @@ public void validateJoin(DiscoveryNode discoveryNode, ActionListener<Void> liste
                 listener.onFailure(new NodeClosedException(transportService.getLocalNode()));
             }
         } else {
-            final var responseHandler = TransportResponseHandler.empty(responseExecutor, listener.delegateResponse((l, e) -> {
-                logger.warn(() -> "failed to validate incoming join request from node [" + discoveryNode + "]", e);
-                listener.onFailure(
-                    new IllegalStateException(
-                        String.format(
-                            Locale.ROOT,
-                            "failure when sending a join validation request from [%s] to [%s]",
-                            transportService.getLocalNode().descriptionWithoutAttributes(),
-                            discoveryNode.descriptionWithoutAttributes()
-                        ),
-                        e
-                    )
-                );
-            }));
-            final var clusterState = clusterStateSupplier.get();
-            if (clusterState != null) {
-                assert clusterState.nodes().isLocalNodeElectedMaster();
-                transportService.sendRequest(
-                    discoveryNode,
-                    JOIN_VALIDATE_ACTION_NAME,
-                    new ValidateJoinRequest(clusterState),
-                    REQUEST_OPTIONS,
-                    responseHandler
-                );
-            } else {
-                transportService.sendRequest(
-                    discoveryNode,
-                    JoinHelper.JOIN_PING_ACTION_NAME,
-                    TransportRequest.Empty.INSTANCE,
-                    REQUEST_OPTIONS,
-                    responseHandler
-                );
-            }
+            legacyValidateJoin(discoveryNode, listener, connection);
+        }
+    }
+
+    @UpdateForV9
+    private void legacyValidateJoin(DiscoveryNode discoveryNode, ActionListener<Void> listener, Transport.Connection connection) {
+        final var responseHandler = TransportResponseHandler.empty(responseExecutor, listener.delegateResponse((l, e) -> {
+            logger.warn(() -> "failed to validate incoming join request from node [" + discoveryNode + "]", e);
+            listener.onFailure(
+                new IllegalStateException(
+                    String.format(
+                        Locale.ROOT,
+                        "failure when sending a join validation request from [%s] to [%s]",
+                        transportService.getLocalNode().descriptionWithoutAttributes(),
+                        discoveryNode.descriptionWithoutAttributes()
+                    ),
+                    e
+                )
+            );
+        }));
+        final var clusterState = clusterStateSupplier.get();
+        if (clusterState != null) {
+            assert clusterState.nodes().isLocalNodeElectedMaster();
+            transportService.sendRequest(
+                connection,
+                JOIN_VALIDATE_ACTION_NAME,
+                new ValidateJoinRequest(clusterState),
+                REQUEST_OPTIONS,
+                responseHandler
+            );
+        } else {
+            transportService.sendRequest(
+                connection,
+                JoinHelper.JOIN_PING_ACTION_NAME,
+                TransportRequest.Empty.INSTANCE,
+                REQUEST_OPTIONS,
+                responseHandler
+            );
         }
     }
 
@@ -312,27 +331,22 @@ public String toString() {
 
     private class JoinValidation extends ActionRunnable<Void> {
         private final DiscoveryNode discoveryNode;
+        private final Transport.Connection connection;
 
-        JoinValidation(DiscoveryNode discoveryNode, ActionListener<Void> listener) {
+        JoinValidation(DiscoveryNode discoveryNode, Transport.Connection connection, ActionListener<Void> listener) {
             super(listener);
             this.discoveryNode = discoveryNode;
+            this.connection = connection;
         }
 
         @Override
-        protected void doRun() throws Exception {
-            assert discoveryNode.getVersion().onOrAfter(Version.V_8_3_0) : discoveryNode.getVersion();
+        protected void doRun() {
+            assert connection.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0) : discoveryNode.getVersion();
             // NB these things never run concurrently to each other, or to the cache cleaner (see IMPLEMENTATION NOTES above) so it is safe
             // to do these (non-atomic) things to the (unsynchronized) statesByVersion map.
-            Transport.Connection connection;
-            try {
-                connection = transportService.getConnection(discoveryNode);
-            } catch (NodeNotConnectedException e) {
-                listener.onFailure(e);
-                return;
-            }
-            var version = connection.getTransportVersion();
-            var cachedBytes = statesByVersion.get(version);
-            var bytes = maybeSerializeClusterState(cachedBytes, discoveryNode, version);
+            var transportVersion = connection.getTransportVersion();
+            var cachedBytes = statesByVersion.get(transportVersion);
+            var bytes = maybeSerializeClusterState(cachedBytes, discoveryNode, transportVersion);
             if (bytes == null) {
                 // Normally if we're not the master then the Coordinator sends a ping message just to validate connectivity instead of
                 // getting here. But if we were the master when the Coordinator checked then we might not be the master any more, so we
@@ -349,12 +363,11 @@ protected void doRun() throws Exception {
                 );
                 return;
             }
-            assert bytes.hasReferences() : "already closed";
-            bytes.incRef();
+            bytes.mustIncRef();
             transportService.sendRequest(
                 connection,
                 JOIN_VALIDATE_ACTION_NAME,
-                new BytesTransportRequest(bytes, version),
+                new BytesTransportRequest(bytes, transportVersion),
                 REQUEST_OPTIONS,
                 new CleanableResponseHandler<>(
                     listener.map(ignored -> null),
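A hedged aside on `JoinValidation#doRun` above: it keeps one serialized cluster state per transport version and reuses it for every joining node at that version. A toy version of the cache idea (the real code uses ref-counted bytes plus a cache cleaner rather than `computeIfAbsent`):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

// Toy illustration only: serialize at most once per wire version, share across joins.
class PerVersionStateCache<V, B> {
    private final Map<V, B> statesByVersion = new ConcurrentHashMap<>();

    B bytesFor(V transportVersion, Function<V, B> serialize) {
        return statesByVersion.computeIfAbsent(transportVersion, serialize);
    }
}
```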
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java
index 00086c42ed4ae..480f1d5503d61 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java
@@ -12,6 +12,7 @@
 import org.elasticsearch.TransportVersion;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.cluster.ClusterFeatures;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateTaskExecutor;
 import org.elasticsearch.cluster.NotMasterException;
@@ -25,6 +26,7 @@
 import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.version.CompatibilityVersions;
 import org.elasticsearch.common.Priority;
+import org.elasticsearch.features.FeatureService;
 import org.elasticsearch.index.IndexVersion;
 import org.elasticsearch.index.IndexVersions;
 import org.elasticsearch.persistent.PersistentTasksCustomMetadata;
@@ -34,6 +36,7 @@
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
@@ -55,19 +58,22 @@ public class NodeJoinExecutor implements ClusterStateTaskExecutor<JoinTask> {
 
     private final AllocationService allocationService;
     private final RerouteService rerouteService;
+    private final FeatureService featureService;
     private final Function<ClusterState, ClusterState> maybeReconfigureAfterMasterElection;
 
-    public NodeJoinExecutor(AllocationService allocationService, RerouteService rerouteService) {
-        this(allocationService, rerouteService, Function.identity());
+    public NodeJoinExecutor(AllocationService allocationService, RerouteService rerouteService, FeatureService featureService) {
+        this(allocationService, rerouteService, featureService, Function.identity());
     }
 
     public NodeJoinExecutor(
         AllocationService allocationService,
         RerouteService rerouteService,
+        FeatureService featureService,
         Function<ClusterState, ClusterState> maybeReconfigureAfterMasterElection
     ) {
         this.allocationService = allocationService;
         this.rerouteService = rerouteService;
+        this.featureService = featureService;
         this.maybeReconfigureAfterMasterElection = maybeReconfigureAfterMasterElection;
     }
 
@@ -123,6 +129,7 @@ public ClusterState execute(BatchExecutionContext<JoinTask> batchExecutionContex
         DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(newState.nodes());
         Map<String, CompatibilityVersions> compatibilityVersionsMap = new HashMap<>(newState.compatibilityVersions());
         Map<String, Set<String>> nodeFeatures = new HashMap<>(newState.nodeFeatures());
+        Set<String> allNodesFeatures = ClusterFeatures.calculateAllNodeFeatures(nodeFeatures.values());
 
         assert nodesBuilder.isLocalNodeElectedMaster();
 
@@ -155,16 +162,17 @@ public ClusterState execute(BatchExecutionContext<JoinTask> batchExecutionContex
                     if (enforceVersionBarrier) {
                         ensureVersionBarrier(node.getVersion(), minClusterNodeVersion);
                         CompatibilityVersions.ensureVersionsCompatibility(compatibilityVersions, compatibilityVersionsMap.values());
-                        // TODO: enforce feature ratchet barrier
                     }
                     blockForbiddenVersions(compatibilityVersions.transportVersion());
                     ensureNodesCompatibility(node.getVersion(), minClusterNodeVersion, maxClusterNodeVersion);
+                    enforceNodeFeatureBarrier(node.getId(), allNodesFeatures, features);
                     // we do this validation quite late to prevent race conditions between nodes joining and importing dangling indices
                     // we have to reject nodes that don't support all indices we have in this cluster
                     ensureIndexCompatibility(node.getMinIndexVersion(), node.getMaxIndexVersion(), initialState.getMetadata());
                     nodesBuilder.add(node);
                     compatibilityVersionsMap.put(node.getId(), compatibilityVersions);
                     nodeFeatures.put(node.getId(), features);
+                    allNodesFeatures.retainAll(features);
                     nodesChanged = true;
                     minClusterNodeVersion = Version.min(minClusterNodeVersion, node.getVersion());
                     maxClusterNodeVersion = Version.max(maxClusterNodeVersion, node.getVersion());
@@ -444,6 +452,16 @@ public static void ensureVersionBarrier(Version joiningNodeVersion, Version minC
         }
     }
 
+    private void enforceNodeFeatureBarrier(String nodeId, Set<String> existingNodesFeatures, Set<String> newNodeFeatures) {
+        // prevent join if it does not have one or more features that all other nodes have
+        Set<String> missingFeatures = new HashSet<>(existingNodesFeatures);
+        missingFeatures.removeAll(newNodeFeatures);
+
+        if (missingFeatures.isEmpty() == false) {
+            throw new IllegalStateException("Node " + nodeId + " is missing required features " + missingFeatures);
+        }
+    }
+
     public static Collection<BiConsumer<DiscoveryNode, ClusterState>> addBuiltInJoinValidators(
         Collection<BiConsumer<DiscoveryNode, ClusterState>> onJoinValidators
     ) {
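The new feature barrier is plain set arithmetic. Shown in isolation as a hypothetical helper combining `enforceNodeFeatureBarrier` with the `retainAll` that follows it: a node may only join if it has every feature shared by all current nodes, and the shared set can only shrink, so features ratchet forward.

```java
import java.util.HashSet;
import java.util.Set;

class FeatureBarrierSketch {
    // Throws if the joining node lacks a universally-present feature; otherwise returns
    // the new "features on all nodes" set (the intersection with the newcomer's set).
    static Set<String> admit(Set<String> allNodesFeatures, Set<String> joiningNodeFeatures) {
        Set<String> missing = new HashSet<>(allNodesFeatures);
        missing.removeAll(joiningNodeFeatures);
        if (missing.isEmpty() == false) {
            throw new IllegalStateException("joining node is missing required features " + missing);
        }
        Set<String> updated = new HashSet<>(allNodesFeatures);
        updated.retainAll(joiningNodeFeatures);
        return updated;
    }
}
```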
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java
index 9e7383a4c3f14..6afb85bdf629e 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java
@@ -366,8 +366,8 @@ public void onResponse(PublishWithJoinResponse response) {
 
             if (response.getJoin().isPresent()) {
                 final Join join = response.getJoin().get();
-                assert discoveryNode.equals(join.getSourceNode());
-                assert join.getTerm() == response.getPublishResponse().getTerm() : response;
+                assert discoveryNode.equals(join.votingNode());
+                assert join.term() == response.getPublishResponse().getTerm() : response;
                 logger.trace("handling join within publish response: {}", join);
                 onJoin(join);
             } else {
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/StartJoinRequest.java b/server/src/main/java/org/elasticsearch/cluster/coordination/StartJoinRequest.java
index df26646d154c6..cb492f39f9337 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/StartJoinRequest.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/StartJoinRequest.java
@@ -16,34 +16,38 @@
 
 /**
  * Represents the action of requesting a join vote (see {@link Join}) from a node.
- * The source node represents the node that is asking for join votes.
+ *
+ * A {@link StartJoinRequest} is broadcast to each node in the cluster, requesting
+ * that each node join the new cluster formed around the master candidate node in a
+ * new term. The sender is either the new master candidate or the current master
+ * abdicating to another eligible node in the cluster.
  */
 public class StartJoinRequest extends TransportRequest {
 
-    private final DiscoveryNode sourceNode;
+    private final DiscoveryNode masterCandidateNode;
 
     private final long term;
 
-    public StartJoinRequest(DiscoveryNode sourceNode, long term) {
-        this.sourceNode = sourceNode;
+    public StartJoinRequest(DiscoveryNode masterCandidateNode, long term) {
+        this.masterCandidateNode = masterCandidateNode;
         this.term = term;
     }
 
     public StartJoinRequest(StreamInput input) throws IOException {
         super(input);
-        this.sourceNode = new DiscoveryNode(input);
+        this.masterCandidateNode = new DiscoveryNode(input);
         this.term = input.readLong();
     }
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        sourceNode.writeTo(out);
+        masterCandidateNode.writeTo(out);
         out.writeLong(term);
     }
 
-    public DiscoveryNode getSourceNode() {
-        return sourceNode;
+    public DiscoveryNode getMasterCandidateNode() {
+        return masterCandidateNode;
     }
 
     public long getTerm() {
@@ -52,7 +56,7 @@ public long getTerm() {
 
     @Override
     public String toString() {
-        return "StartJoinRequest{" + "term=" + term + ",node=" + sourceNode + "}";
+        return "StartJoinRequest{" + "term=" + term + ",node=" + masterCandidateNode + "}";
     }
 
     @Override
@@ -63,12 +67,12 @@ public boolean equals(Object o) {
         StartJoinRequest that = (StartJoinRequest) o;
 
         if (term != that.term) return false;
-        return sourceNode.equals(that.sourceNode);
+        return masterCandidateNode.equals(that.masterCandidateNode);
     }
 
     @Override
     public int hashCode() {
-        int result = sourceNode.hashCode();
+        int result = masterCandidateNode.hashCode();
         result = 31 * result + (int) (term ^ (term >>> 32));
         return result;
     }
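Tying the renames together, a sketch of the vote round-trip the new Javadocs describe (hypothetical helper; it uses only constructors and accessors that appear in this diff): the candidate broadcasts a `StartJoinRequest` for a new term, and each voting node answers with a `Join` naming itself and the candidate.

```java
import org.elasticsearch.cluster.coordination.Join;
import org.elasticsearch.cluster.coordination.StartJoinRequest;
import org.elasticsearch.cluster.node.DiscoveryNode;

class VoteFlowSketch {
    // The voting node's reply to a start-join: a vote for the candidate in the new term,
    // carrying the voter's last-accepted state so the candidate can compare freshness.
    static Join voteFor(DiscoveryNode localNode, StartJoinRequest startJoin, long lastAcceptedTerm, long lastAcceptedVersion) {
        Join vote = new Join(localNode, startJoin.getMasterCandidateNode(), startJoin.getTerm(), lastAcceptedTerm, lastAcceptedVersion);
        assert vote.masterCandidateMatches(startJoin.getMasterCandidateNode());
        return vote;
    }
}
```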
diff --git a/server/src/main/java/org/elasticsearch/cluster/desirednodes/DesiredNodesSettingsValidator.java b/server/src/main/java/org/elasticsearch/cluster/desirednodes/DesiredNodesSettingsValidator.java
index 51f90b9610805..a2df7c234680a 100644
--- a/server/src/main/java/org/elasticsearch/cluster/desirednodes/DesiredNodesSettingsValidator.java
+++ b/server/src/main/java/org/elasticsearch/cluster/desirednodes/DesiredNodesSettingsValidator.java
@@ -16,14 +16,16 @@
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Locale;
+import java.util.function.Consumer;
 import java.util.stream.Collectors;
 
 import static java.lang.String.format;
 
-public class DesiredNodesSettingsValidator {
+public class DesiredNodesSettingsValidator implements Consumer<List<DesiredNode>> {
 
     private record DesiredNodeValidationError(int position, @Nullable String externalId, RuntimeException exception) {}
 
-    public void validate(List<DesiredNode> nodes) {
+    @Override
+    public void accept(List<DesiredNode> nodes) {
         final List<DesiredNodeValidationError> validationErrors = new ArrayList<>();
         for (int i = 0; i < nodes.size(); i++) {
             final DesiredNode node = nodes.get(i);
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java
index faa3010adbf72..a0dd7bc3e9eef 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java
@@ -111,59 +111,7 @@ public static Builder builder() {
         return new Builder();
     }
 
-    /**
-     * @deprecated use {@link Builder} instead
-     */
-    @Deprecated(forRemoval = true)
-    public ComposableIndexTemplate(
-        List<String> indexPatterns,
-        @Nullable Template template,
-        @Nullable List<String> componentTemplates,
-        @Nullable Long priority,
-        @Nullable Long version,
-        @Nullable Map<String, Object> metadata
-    ) {
-        this(indexPatterns, template, componentTemplates, priority, version, metadata, null, null, null, null);
-    }
-
-    /**
-     * @deprecated use {@link Builder} instead
-     */
-    @Deprecated(forRemoval = true)
-    public ComposableIndexTemplate(
-        List<String> indexPatterns,
-        @Nullable Template template,
-        @Nullable List<String> componentTemplates,
-        @Nullable Long priority,
-        @Nullable Long version,
-        @Nullable Map<String, Object> metadata,
-        @Nullable DataStreamTemplate dataStreamTemplate
-    ) {
-        this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, null, null, null);
-    }
-
-    /**
-     * @deprecated use {@link Builder} instead
-     */
-    @Deprecated(forRemoval = true)
-    public ComposableIndexTemplate(
-        List<String> indexPatterns,
-        @Nullable Template template,
-        @Nullable List<String> componentTemplates,
-        @Nullable Long priority,
-        @Nullable Long version,
-        @Nullable Map<String, Object> metadata,
-        @Nullable DataStreamTemplate dataStreamTemplate,
-        @Nullable Boolean allowAutoCreate
-    ) {
-        this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, allowAutoCreate, null, null);
-    }
-
-    /**
-     * @deprecated use {@link Builder} instead
-     */
-    @Deprecated(forRemoval = true)
-    public ComposableIndexTemplate(
+    private ComposableIndexTemplate(
         List<String> indexPatterns,
         @Nullable Template template,
         @Nullable List<String> componentTemplates,
@@ -416,28 +364,42 @@ public static class DataStreamTemplate implements Writeable, ToXContentObject {
 
         private static final ParseField HIDDEN = new ParseField("hidden");
         private static final ParseField ALLOW_CUSTOM_ROUTING = new ParseField("allow_custom_routing");
+        private static final ParseField FAILURE_STORE = new ParseField("failure_store");
 
         public static final ConstructingObjectParser<DataStreamTemplate, Void> PARSER = new ConstructingObjectParser<>(
             "data_stream_template",
             false,
-            args -> new DataStreamTemplate(args[0] != null && (boolean) args[0], args[1] != null && (boolean) args[1])
+            args -> new DataStreamTemplate(
+                args[0] != null && (boolean) args[0],
+                args[1] != null && (boolean) args[1],
+                DataStream.isFailureStoreEnabled() && args[2] != null && (boolean) args[2]
+            )
         );
 
         static {
            PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), HIDDEN);
             PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), ALLOW_CUSTOM_ROUTING);
+            if (DataStream.isFailureStoreEnabled()) {
+                PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FAILURE_STORE);
+            }
         }
 
         private final boolean hidden;
         private final boolean allowCustomRouting;
+        private final boolean failureStore;
 
         public DataStreamTemplate() {
-            this(false, false);
+            this(false, false, false);
         }
 
         public DataStreamTemplate(boolean hidden, boolean allowCustomRouting) {
+            this(hidden, allowCustomRouting, false);
+        }
+
+        public DataStreamTemplate(boolean hidden, boolean allowCustomRouting, boolean failureStore) {
             this.hidden = hidden;
             this.allowCustomRouting = allowCustomRouting;
+            this.failureStore = failureStore;
         }
 
         DataStreamTemplate(StreamInput in) throws IOException {
@@ -455,6 +417,11 @@ public DataStreamTemplate(boolean hidden, boolean allowCustomRouting) {
                 boolean value = in.readBoolean();
                 assert value == false : "expected false, because this used to be an optional enum that never got set";
             }
+            if (in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)) {
+                failureStore = in.readBoolean();
+            } else {
+                failureStore = false;
+            }
         }
 
         /**
@@ -483,6 +450,10 @@ public boolean isAllowCustomRouting() {
             return allowCustomRouting;
         }
 
+        public boolean hasFailureStore() {
+            return failureStore;
+        }
+
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             out.writeBoolean(hidden);
@@ -493,6 +464,9 @@ public void writeTo(StreamOutput out) throws IOException {
                 // See comment in constructor.
                 out.writeBoolean(false);
             }
+            if (out.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)) {
+                out.writeBoolean(failureStore);
+            }
         }
 
         @Override
@@ -500,6 +474,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
             builder.startObject();
             builder.field("hidden", hidden);
             builder.field(ALLOW_CUSTOM_ROUTING.getPreferredName(), allowCustomRouting);
+            if (DataStream.isFailureStoreEnabled()) {
+                builder.field(FAILURE_STORE.getPreferredName(), failureStore);
+            }
             builder.endObject();
             return builder;
         }
@@ -509,12 +486,12 @@ public boolean equals(Object o) {
             if (this == o) return true;
             if (o == null || getClass() != o.getClass()) return false;
             DataStreamTemplate that = (DataStreamTemplate) o;
-            return hidden == that.hidden && allowCustomRouting == that.allowCustomRouting;
+            return hidden == that.hidden && allowCustomRouting == that.allowCustomRouting && failureStore == that.failureStore;
         }
 
         @Override
         public int hashCode() {
-            return Objects.hash(hidden, allowCustomRouting);
+            return Objects.hash(hidden, allowCustomRouting, failureStore);
         }
     }
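For illustration, opting a template's data stream into the failure store through the new three-argument constructor (in JSON templates this is the new `failure_store` flag under `data_stream`, honoured only while the feature flag is enabled):

```java
import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;

class FailureStoreTemplateSketch {
    static ComposableIndexTemplate.DataStreamTemplate withFailureStore() {
        // hidden=false, allowCustomRouting=false, failureStore=true
        return new ComposableIndexTemplate.DataStreamTemplate(false, false, true);
    }
}
```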
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
index c5cf0b29f6273..34d8515d2dfdd 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
@@ -11,6 +11,7 @@
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.PointValues;
 import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.TransportVersion;
 import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration;
@@ -26,6 +27,7 @@
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.time.DateFormatter;
 import org.elasticsearch.common.time.DateFormatters;
+import org.elasticsearch.common.util.FeatureFlag;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.core.Tuple;
@@ -66,7 +68,15 @@
 
 public final class DataStream implements SimpleDiffable<DataStream>, ToXContentObject, IndexAbstraction {
 
+    public static final FeatureFlag FAILURE_STORE_FEATURE_FLAG = new FeatureFlag("failure_store");
+    public static final TransportVersion ADDED_FAILURE_STORE_TRANSPORT_VERSION = TransportVersions.DATA_STREAM_FAILURE_STORE_ADDED;
+
+    public static boolean isFailureStoreEnabled() {
+        return FAILURE_STORE_FEATURE_FLAG.isEnabled();
+    }
+
     public static final String BACKING_INDEX_PREFIX = ".ds-";
+    public static final String FAILURE_STORE_PREFIX = ".fs-";
     public static final DateFormatter DATE_FORMATTER = DateFormatter.forPattern("uuuu.MM.dd");
     public static final String TIMESTAMP_FIELD_NAME = "@timestamp";
     // Timeseries indices' leaf readers should be sorted by desc order of their timestamp field, as it allows search time optimizations
@@ -100,6 +110,8 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO
     private final IndexMode indexMode;
     @Nullable
     private final DataStreamLifecycle lifecycle;
+    private final boolean failureStore;
+    private final List<Index> failureIndices;
 
     public DataStream(
         String name,
@@ -111,7 +123,9 @@ public DataStream(
         boolean system,
         boolean allowCustomRouting,
         IndexMode indexMode,
-        DataStreamLifecycle lifecycle
+        DataStreamLifecycle lifecycle,
+        boolean failureStore,
+        List<Index> failureIndices
     ) {
         this(
             name,
@@ -124,7 +138,9 @@ public DataStream(
             System::currentTimeMillis,
             allowCustomRouting,
             indexMode,
-            lifecycle
+            lifecycle,
+            failureStore,
+            failureIndices
         );
     }
 
@@ -140,7 +156,9 @@ public DataStream(
         LongSupplier timeProvider,
         boolean allowCustomRouting,
         IndexMode indexMode,
-        DataStreamLifecycle lifecycle
+        DataStreamLifecycle lifecycle,
+        boolean failureStore,
+        List<Index> failureIndices
     ) {
         this.name = name;
         this.indices = List.copyOf(indices);
@@ -155,6 +173,8 @@ public DataStream(
         this.allowCustomRouting = allowCustomRouting;
         this.indexMode = indexMode;
         this.lifecycle = lifecycle;
+        this.failureStore = failureStore;
+        this.failureIndices = failureIndices;
         assert assertConsistent(this.indices);
     }
 
@@ -170,7 +190,7 @@ public DataStream(
         boolean allowCustomRouting,
         IndexMode indexMode
     ) {
-        this(name, indices, generation, metadata, hidden, replicated, system, allowCustomRouting, indexMode, null);
+        this(name, indices, generation, metadata, hidden, replicated, system, allowCustomRouting, indexMode, null, false, List.of());
     }
 
     private static boolean assertConsistent(List<Index> indices) {
@@ -207,6 +227,10 @@ public long getGeneration() {
         return generation;
     }
 
+    public List<Index> getFailureIndices() {
+        return failureIndices;
+    }
+
     @Override
     public Index getWriteIndex() {
         return indices.get(indices.size() - 1);
@@ -327,6 +351,16 @@ public boolean isAllowCustomRouting() {
         return allowCustomRouting;
     }
 
+    /**
+     * Determines if this data stream should persist ingest pipeline and mapping failures from bulk requests to a locally
+     * configured failure store.
+     *
+     * @return Whether this data stream should store ingestion failures.
+     */
+    public boolean isFailureStore() {
+        return failureStore;
+    }
+
     @Nullable
     public IndexMode getIndexMode() {
         return indexMode;
@@ -369,7 +403,20 @@ public DataStream unsafeRollover(Index writeIndex, long generation, boolean time
         List<Index> backingIndices = new ArrayList<>(indices);
         backingIndices.add(writeIndex);
-        return new DataStream(name, backingIndices, generation, metadata, hidden, false, system, allowCustomRouting, indexMode, lifecycle);
+        return new DataStream(
+            name,
+            backingIndices,
+            generation,
+            metadata,
+            hidden,
+            false,
+            system,
+            allowCustomRouting,
+            indexMode,
+            lifecycle,
+            failureStore,
+            failureIndices
+        );
     }
 
     /**
@@ -444,7 +491,9 @@ public DataStream removeBackingIndex(Index index) {
             system,
             allowCustomRouting,
             indexMode,
-            lifecycle
+            lifecycle,
+            failureStore,
+            failureIndices
         );
     }
 
@@ -487,7 +536,9 @@ public DataStream replaceBackingIndex(Index existingBackingIndex, Index newBacki
             system,
             allowCustomRouting,
             indexMode,
-            lifecycle
+            lifecycle,
+            failureStore,
+            failureIndices
         );
     }
 
@@ -545,7 +596,9 @@ public DataStream addBackingIndex(Metadata clusterMetadata, Index index) {
             system,
             allowCustomRouting,
             indexMode,
-            lifecycle
+            lifecycle,
+            failureStore,
+            failureIndices
         );
     }
 
@@ -561,7 +614,9 @@ public DataStream promoteDataStream() {
             timeProvider,
             allowCustomRouting,
             indexMode,
-            lifecycle
+            lifecycle,
+            failureStore,
+            failureIndices
         );
     }
 
@@ -595,7 +650,9 @@ public DataStream snapshot(Collection<String> indicesInSnapshot) {
             system,
             allowCustomRouting,
             indexMode,
-            lifecycle
+            lifecycle,
+            failureStore,
+            failureIndices
         );
     }
 
@@ -778,9 +835,28 @@ public static String getDefaultBackingIndexName(String dataStreamName, long gene
         );
     }
 
+    /**
+     * Generates the name of the index that conforms to the default naming convention for failure store indices
+     * on data streams given the specified data stream name, generation, and time.
+     *
+     * @param dataStreamName name of the data stream
+     * @param generation generation of the data stream
+     * @param epochMillis creation time for the failure store index
+     * @return failure store index name
+     */
+    public static String getDefaultFailureStoreName(String dataStreamName, long generation, long epochMillis) {
+        return String.format(
+            Locale.ROOT,
+            FAILURE_STORE_PREFIX + "%s-%s-%06d",
+            dataStreamName,
+            DATE_FORMATTER.formatMillis(epochMillis),
+            generation
+        );
+    }
+
     public DataStream(StreamInput in) throws IOException {
         this(
-            in.readString(),
+            readName(in),
             readIndices(in),
             in.readVLong(),
             in.readMap(),
@@ -789,12 +865,19 @@ public DataStream(StreamInput in) throws IOException {
             in.readBoolean(),
             in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0) ? in.readBoolean() : false,
             in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0) ? in.readOptionalEnum(IndexMode.class) : null,
-            in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020) ? in.readOptionalWriteable(DataStreamLifecycle::new) : null
+            in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020) ? in.readOptionalWriteable(DataStreamLifecycle::new) : null,
+            in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) ? in.readBoolean() : false,
+            in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) ? readIndices(in) : List.of()
         );
     }
 
+    static String readName(StreamInput in) throws IOException {
+        String name = in.readString();
+        in.readString(); // TODO: clear out the timestamp field, which is a constant https://github.com/elastic/elasticsearch/issues/101991
+        return name;
+    }
+
     static List<Index> readIndices(StreamInput in) throws IOException {
-        in.readString(); // timestamp field, which is always @timestamp
         return in.readCollectionAsImmutableList(Index::new);
     }
 
@@ -805,7 +888,7 @@ public static Diff<DataStream> readDiffFrom(StreamInput in) throws IOException {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(name);
-        out.writeString(TIMESTAMP_FIELD_NAME);
+        out.writeString(TIMESTAMP_FIELD_NAME); // TODO: clear this out in the future https://github.com/elastic/elasticsearch/issues/101991
         out.writeCollection(indices);
         out.writeVLong(generation);
         out.writeGenericMap(metadata);
@@ -821,6 +904,10 @@ public void writeTo(StreamOutput out) throws IOException {
         if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
             out.writeOptionalWriteable(lifecycle);
         }
+        if (out.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)) {
+            out.writeBoolean(failureStore);
+            out.writeCollection(failureIndices);
+        }
     }
 
     public static final ParseField NAME_FIELD = new ParseField("name");
@@ -834,6 +921,8 @@ public void writeTo(StreamOutput out) throws IOException {
     public static final ParseField ALLOW_CUSTOM_ROUTING = new ParseField("allow_custom_routing");
     public static final ParseField INDEX_MODE = new ParseField("index_mode");
     public static final ParseField LIFECYCLE = new ParseField("lifecycle");
+    public static final ParseField FAILURE_STORE_FIELD = new ParseField("failure_store");
+    public static final ParseField FAILURE_INDICES_FIELD = new ParseField("failure_indices");
 
     @SuppressWarnings("unchecked")
     private static final ConstructingObjectParser<DataStream, Void> PARSER = new ConstructingObjectParser<>(
@@ -848,7 +937,9 @@ public void writeTo(StreamOutput out) throws IOException {
             args[6] != null && (boolean) args[6],
             args[7] != null && (boolean) args[7],
             args[8] != null ? IndexMode.fromString((String) args[8]) : null,
-            (DataStreamLifecycle) args[9]
+            (DataStreamLifecycle) args[9],
+            DataStream.isFailureStoreEnabled() && args[10] != null && (boolean) args[10],
+            DataStream.isFailureStoreEnabled() && args[11] != null ? (List<Index>) args[11] : List.of()
         )
     );
 
@@ -871,6 +962,14 @@ public void writeTo(StreamOutput out) throws IOException {
         PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), ALLOW_CUSTOM_ROUTING);
         PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), INDEX_MODE);
         PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> DataStreamLifecycle.fromXContent(p), LIFECYCLE);
+        if (DataStream.isFailureStoreEnabled()) {
+            PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FAILURE_STORE_FIELD);
+            PARSER.declareObjectArray(
+                ConstructingObjectParser.optionalConstructorArg(),
+                (p, c) -> Index.fromXContent(p),
+                FAILURE_INDICES_FIELD
+            );
+        }
     }
 
     public static DataStream fromXContent(XContentParser parser) throws IOException {
@@ -895,6 +994,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla
             .endObject();
         builder.xContentList(INDICES_FIELD.getPreferredName(), indices);
         builder.field(GENERATION_FIELD.getPreferredName(), generation);
+        if (DataStream.isFailureStoreEnabled() && failureIndices.isEmpty() == false) {
+            builder.xContentList(FAILURE_INDICES_FIELD.getPreferredName(), failureIndices);
+        }
         if (metadata != null) {
             builder.field(METADATA_FIELD.getPreferredName(), metadata);
         }
@@ -902,6 +1004,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla
         builder.field(REPLICATED_FIELD.getPreferredName(), replicated);
         builder.field(SYSTEM_FIELD.getPreferredName(), system);
         builder.field(ALLOW_CUSTOM_ROUTING.getPreferredName(), allowCustomRouting);
+        if (DataStream.isFailureStoreEnabled()) {
+            builder.field(FAILURE_STORE_FIELD.getPreferredName(), failureStore);
+        }
         if (indexMode != null) {
             builder.field(INDEX_MODE.getPreferredName(), indexMode);
         }
@@ -927,12 +1032,27 @@ public boolean equals(Object o) {
             && replicated == that.replicated
             && allowCustomRouting == that.allowCustomRouting
             && indexMode == that.indexMode
-            && Objects.equals(lifecycle, that.lifecycle);
+            && Objects.equals(lifecycle, that.lifecycle)
+            && failureStore == that.failureStore
+            && failureIndices.equals(that.failureIndices);
     }
 
     @Override
     public int hashCode() {
-        return Objects.hash(name, indices, generation, metadata, hidden, system, replicated, allowCustomRouting, indexMode, lifecycle);
+        return Objects.hash(
+            name,
+            indices,
+            generation,
+            metadata,
+            hidden,
+            system,
+            replicated,
+            allowCustomRouting,
+            indexMode,
+            lifecycle,
+            failureStore,
+            failureIndices
+        );
    }
 
     @Override
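The naming scheme added above mirrors the backing-index convention with `.fs-` in place of `.ds-`. For example (the date component depends on the supplied timestamp):

```java
import org.elasticsearch.cluster.metadata.DataStream;

class FailureStoreNamingExample {
    public static void main(String[] args) {
        // Produces something like ".fs-logs-app-2023.11.20-000001" for generation 1,
        // parallel to getDefaultBackingIndexName's ".ds-logs-app-2023.11.20-000001".
        System.out.println(DataStream.getDefaultFailureStoreName("logs-app", 1, System.currentTimeMillis()));
    }
}
```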
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java
index a3a6e34d445d2..8d7ce0525e943 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java
@@ -19,6 +19,7 @@
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
@@ -49,6 +50,19 @@ public class DataStreamLifecycle implements SimpleDiffable<DataStreamLifecycle>,
 
     // Versions over the wire
     public static final TransportVersion ADDED_ENABLED_FLAG_VERSION = TransportVersions.V_8_500_057;
 
+    public static final String DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME = "data_streams.lifecycle_only.mode";
+
+    /**
+     * Check if {@link #DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME} is present and set to {@code true}, indicating that
+     * we're running in a cluster configuration that is only expecting to use data streams lifecycles.
+     *
+     * @param settings the node settings
+     * @return true if {@link #DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME} is present and set
+     */
+    public static boolean isDataStreamsLifecycleOnlyMode(final Settings settings) {
+        return settings.getAsBoolean(DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME, false);
+    }
+
     public static final Setting<RolloverConfiguration> CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING = new Setting<>(
         "cluster.lifecycle.default.rollover",
         "max_age=auto,max_primary_shard_size=50gb,min_docs=1,max_primary_shard_docs=200000000",
@@ -57,6 +71,8 @@ public class DataStreamLifecycle implements SimpleDiffable<DataStreamLifecycle>,
         Setting.Property.NodeScope
     );
 
+    public static final DataStreamLifecycle DEFAULT = new DataStreamLifecycle();
+
     public static final String DATA_STREAM_LIFECYCLE_ORIGIN = "data_stream_lifecycle";
 
     public static final ParseField ENABLED_FIELD = new ParseField("enabled");
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java
index b50b1e0a74d93..0446b479b191d 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java
@@ -10,7 +10,6 @@
 
 import org.apache.lucene.util.automaton.Automaton;
 import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.Version;
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.support.IndicesOptions;
@@ -63,7 +62,6 @@ public class IndexNameExpressionResolver {
     private static final Predicate<String> ALWAYS_TRUE = s -> true;
 
     public static final String EXCLUDED_DATA_STREAMS_KEY = "es.excluded_ds";
-    public static final Version SYSTEM_INDEX_ENFORCEMENT_VERSION = Version.V_8_0_0;
     public static final IndexVersion SYSTEM_INDEX_ENFORCEMENT_INDEX_VERSION = IndexVersions.V_8_0_0;
 
     private final ThreadContext threadContext;
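A quick sketch of the new lifecycle-only mode switch; its effect shows up in `createDataStream` below, where a data stream whose template resolves to no lifecycle falls back to `DataStreamLifecycle.DEFAULT`:

```java
import org.elasticsearch.cluster.metadata.DataStreamLifecycle;
import org.elasticsearch.common.settings.Settings;

class DslOnlyModeExample {
    public static void main(String[] args) {
        Settings nodeSettings = Settings.builder()
            .put(DataStreamLifecycle.DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME, true)
            .build();
        System.out.println(DataStreamLifecycle.isDataStreamsLifecycleOnlyMode(nodeSettings)); // true
    }
}
```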
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java
index 52b522ec5ddaa..d500a8b8e6876 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java
@@ -46,6 +46,8 @@
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.stream.Collectors;
 
+import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.isDataStreamsLifecycleOnlyMode;
+
 public class MetadataCreateDataStreamService {
 
     private static final Logger logger = LogManager.getLogger(MetadataCreateDataStreamService.class);
@@ -53,6 +55,7 @@ public class MetadataCreateDataStreamService {
     private final ThreadPool threadPool;
     private final ClusterService clusterService;
     private final MetadataCreateIndexService metadataCreateIndexService;
+    private final boolean isDslOnlyMode;
 
     public MetadataCreateDataStreamService(
         ThreadPool threadPool,
@@ -62,17 +65,23 @@ public MetadataCreateDataStreamService(
         this.threadPool = threadPool;
         this.clusterService = clusterService;
         this.metadataCreateIndexService = metadataCreateIndexService;
+        this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings());
     }
 
     public void createDataStream(CreateDataStreamClusterStateUpdateRequest request, ActionListener<AcknowledgedResponse> finalListener) {
         AtomicReference<String> firstBackingIndexRef = new AtomicReference<>();
+        AtomicReference<String> firstFailureStoreRef = new AtomicReference<>();
         ActionListener<AcknowledgedResponse> listener = finalListener.delegateFailureAndWrap((l, response) -> {
             if (response.isAcknowledged()) {
                 String firstBackingIndexName = firstBackingIndexRef.get();
                 assert firstBackingIndexName != null;
+                String firstFailureStoreName = firstFailureStoreRef.get();
+                var waitForIndices = firstFailureStoreName == null
+                    ? new String[] { firstBackingIndexName }
+                    : new String[] { firstBackingIndexName, firstFailureStoreName };
                 ActiveShardsObserver.waitForActiveShards(
                     clusterService,
-                    new String[] { firstBackingIndexName },
+                    waitForIndices,
                     ActiveShardCount.DEFAULT,
                     request.masterNodeTimeout(),
                     l.map(shardsAcked -> AcknowledgedResponse.TRUE)
@@ -87,8 +96,18 @@ public void createDataStream(CreateDataStreamClusterStateUpdateRequest request,
             new AckedClusterStateUpdateTask(Priority.HIGH, request, delegate.clusterStateUpdate()) {
                 @Override
                 public ClusterState execute(ClusterState currentState) throws Exception {
-                    ClusterState clusterState = createDataStream(metadataCreateIndexService, currentState, request, delegate.reroute());
-                    firstBackingIndexRef.set(clusterState.metadata().dataStreams().get(request.name).getIndices().get(0).getName());
+                    ClusterState clusterState = createDataStream(
+                        metadataCreateIndexService,
+                        currentState,
+                        isDslOnlyMode,
+                        request,
+                        delegate.reroute()
+                    );
+                    DataStream createdDataStream = clusterState.metadata().dataStreams().get(request.name);
+                    firstBackingIndexRef.set(createdDataStream.getIndices().get(0).getName());
+                    if (createdDataStream.getFailureIndices().isEmpty() == false) {
+                        firstFailureStoreRef.set(createdDataStream.getFailureIndices().get(0).getName());
+                    }
                     return clusterState;
                 }
             }
@@ -103,9 +122,9 @@ private void submitUnbatchedTask(@SuppressWarnings("SameParameterValue") String
     public ClusterState createDataStream(
         CreateDataStreamClusterStateUpdateRequest request,
         ClusterState current,
-        ActionListener<Void> listener
+        ActionListener<Void> rerouteListener
     ) throws Exception {
-        return createDataStream(metadataCreateIndexService, current, request, listener);
+        return createDataStream(metadataCreateIndexService, current, isDslOnlyMode, request, rerouteListener);
     }
 
     public static final class CreateDataStreamClusterStateUpdateRequest extends ClusterStateUpdateRequest<
@@ -157,15 +176,20 @@ public boolean performReroute() {
         public SystemDataStreamDescriptor getSystemDataStreamDescriptor() {
             return descriptor;
         }
+
+        long getStartTime() {
+            return startTime;
+        }
     }
 
     static ClusterState createDataStream(
         MetadataCreateIndexService metadataCreateIndexService,
         ClusterState currentState,
+        boolean isDslOnlyMode,
         CreateDataStreamClusterStateUpdateRequest request,
-        ActionListener<Void> listener
+        ActionListener<Void> rerouteListener
     ) throws Exception {
-        return createDataStream(metadataCreateIndexService, currentState, request, List.of(), null, listener);
+        return createDataStream(metadataCreateIndexService, currentState, isDslOnlyMode, request, List.of(), null, rerouteListener);
     }
 
     /**
@@ -181,10 +205,11 @@ static ClusterState createDataStream(
     static ClusterState createDataStream(
         MetadataCreateIndexService metadataCreateIndexService,
         ClusterState currentState,
+        boolean isDslOnlyMode,
         CreateDataStreamClusterStateUpdateRequest request,
         List<IndexMetadata> backingIndices,
         IndexMetadata writeIndex,
-        ActionListener<Void> listener
+        ActionListener<Void> rerouteListener
     ) throws Exception {
         String dataStreamName = request.name;
         SystemDataStreamDescriptor systemDataStreamDescriptor = request.getSystemDataStreamDescriptor();
@@ -213,6 +238,11 @@ static ClusterState createDataStream(
                 "data_stream [" + dataStreamName + "] must not start with '" + DataStream.BACKING_INDEX_PREFIX + "'"
             );
         }
+        if (dataStreamName.startsWith(DataStream.FAILURE_STORE_PREFIX)) {
+            throw new IllegalArgumentException(
+                "data_stream [" + dataStreamName + "] must not start with '" + DataStream.FAILURE_STORE_PREFIX + "'"
+            );
+        }
 
         final var metadata = currentState.metadata();
         final boolean isSystem = systemDataStreamDescriptor != null;
@@ -220,43 +250,47 @@ static ClusterState createDataStream(
             ? systemDataStreamDescriptor.getComposableIndexTemplate()
             : lookupTemplateForDataStream(dataStreamName, currentState.metadata());
 
-        if (writeIndex == null) {
-            String firstBackingIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, 1, request.startTime);
-            CreateIndexClusterStateUpdateRequest createIndexRequest = new CreateIndexClusterStateUpdateRequest(
-                "initialize_data_stream",
-                firstBackingIndexName,
-                firstBackingIndexName
-            ).dataStreamName(dataStreamName)
-                .systemDataStreamDescriptor(systemDataStreamDescriptor)
-                .nameResolvedInstant(request.startTime)
-                .performReroute(request.performReroute())
-                .setMatchingTemplate(template);
-
+        // If we need to create a failure store, do so first. Do not reroute during the creation since we will do
+        // that as part of creating the backing index if required.
+        IndexMetadata failureStoreIndex = null;
+        if (template.getDataStreamTemplate().hasFailureStore()) {
             if (isSystem) {
-                createIndexRequest.settings(SystemIndexDescriptor.DEFAULT_SETTINGS);
-            } else {
-                createIndexRequest.settings(MetadataRolloverService.HIDDEN_INDEX_SETTINGS);
+                throw new IllegalArgumentException("Failure stores are not supported on system data streams");
             }
+            String failureStoreIndexName = DataStream.getDefaultFailureStoreName(dataStreamName, 1, request.getStartTime());
+            currentState = createFailureStoreIndex(
+                metadataCreateIndexService,
+                currentState,
+                request,
+                dataStreamName,
+                template,
+                failureStoreIndexName
+            );
+            failureStoreIndex = currentState.metadata().index(failureStoreIndexName);
+        }
 
-            try {
-                currentState = metadataCreateIndexService.applyCreateIndexRequest(currentState, createIndexRequest, false, listener);
-            } catch (ResourceAlreadyExistsException e) {
-                // Rethrow as ElasticsearchStatusException, so that bulk transport action doesn't ignore it during
-                // auto index/data stream creation.
-                // (otherwise bulk execution fails later, because data stream will also not have been created)
-                throw new ElasticsearchStatusException(
-                    "data stream could not be created because backing index [{}] already exists",
-                    RestStatus.BAD_REQUEST,
-                    e,
-                    firstBackingIndexName
-                );
-            }
+        if (writeIndex == null) {
+            String firstBackingIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, 1, request.getStartTime());
+            currentState = createBackingIndex(
+                metadataCreateIndexService,
+                currentState,
+                request,
+                rerouteListener,
+                dataStreamName,
+                systemDataStreamDescriptor,
+                isSystem,
+                template,
+                firstBackingIndexName
+            );
             writeIndex = currentState.metadata().index(firstBackingIndexName);
         } else {
-            listener.onResponse(null);
+            rerouteListener.onResponse(null);
         }
         assert writeIndex != null;
         assert writeIndex.mapping() != null : "no mapping found for backing index [" + writeIndex.getIndex().getName() + "]";
+        assert template.getDataStreamTemplate().hasFailureStore() == false || failureStoreIndex != null;
+        assert failureStoreIndex == null || failureStoreIndex.mapping() != null
+            : "no mapping found for failure store [" + failureStoreIndex.getIndex().getName() + "]";
 
         List<Index> dsBackingIndices = backingIndices.stream()
             .map(IndexMetadata::getIndex)
@@ -267,6 +301,7 @@ static ClusterState createDataStream(
         final DataStreamLifecycle lifecycle = isSystem
             ? MetadataIndexTemplateService.resolveLifecycle(template, systemDataStreamDescriptor.getComponentTemplates())
             : MetadataIndexTemplateService.resolveLifecycle(template, metadata.componentTemplates());
+        List<Index> failureIndices = failureStoreIndex == null ? List.of() : List.of(failureStoreIndex.getIndex());
         DataStream newDataStream = new DataStream(
             dataStreamName,
             dsBackingIndices,
@@ -277,7 +312,9 @@ static ClusterState createDataStream(
             isSystem,
             template.getDataStreamTemplate().isAllowCustomRouting(),
             indexMode,
-            lifecycle
+            lifecycle == null && isDslOnlyMode ? DataStreamLifecycle.DEFAULT : lifecycle,
+            template.getDataStreamTemplate().hasFailureStore(),
+            failureIndices
         );
         Metadata.Builder builder = Metadata.builder(currentState.metadata()).put(newDataStream);
 
@@ -301,6 +338,92 @@ static ClusterState createDataStream(
         return ClusterState.builder(currentState).metadata(builder).build();
     }
 
+    private static ClusterState createBackingIndex(
+        MetadataCreateIndexService metadataCreateIndexService,
+        ClusterState currentState,
+        CreateDataStreamClusterStateUpdateRequest request,
+        ActionListener<Void> rerouteListener,
+        String dataStreamName,
+        SystemDataStreamDescriptor systemDataStreamDescriptor,
+        boolean isSystem,
+        ComposableIndexTemplate template,
+        String firstBackingIndexName
+    ) throws Exception {
+        CreateIndexClusterStateUpdateRequest createIndexRequest = new CreateIndexClusterStateUpdateRequest(
+            "initialize_data_stream",
+            firstBackingIndexName,
+            firstBackingIndexName
+        ).dataStreamName(dataStreamName)
+            .systemDataStreamDescriptor(systemDataStreamDescriptor)
+            .nameResolvedInstant(request.getStartTime())
+            .performReroute(request.performReroute())
+            .setMatchingTemplate(template);
+
+        if (isSystem) {
+            createIndexRequest.settings(SystemIndexDescriptor.DEFAULT_SETTINGS);
+        } else {
+            createIndexRequest.settings(MetadataRolloverService.HIDDEN_INDEX_SETTINGS);
+        }
+
+        try {
+            currentState = metadataCreateIndexService.applyCreateIndexRequest(currentState, createIndexRequest, false, rerouteListener);
+        } catch (ResourceAlreadyExistsException e) {
+            // Rethrow as ElasticsearchStatusException, so that bulk transport action doesn't ignore it during
+            // auto index/data stream creation.
+            // (otherwise bulk execution fails later, because data stream will also not have been created)
+            throw new ElasticsearchStatusException(
+                "data stream could not be created because backing index [{}] already exists",
+                RestStatus.BAD_REQUEST,
+                e,
+                firstBackingIndexName
+            );
+        }
+        return currentState;
+    }
+
+    private static ClusterState createFailureStoreIndex(
+        MetadataCreateIndexService metadataCreateIndexService,
+        ClusterState currentState,
+        CreateDataStreamClusterStateUpdateRequest request,
+        String dataStreamName,
+        ComposableIndexTemplate template,
+        String failureStoreIndexName
+    ) throws Exception {
+        if (DataStream.isFailureStoreEnabled() == false) {
+            return currentState;
+        }
+
+        CreateIndexClusterStateUpdateRequest createIndexRequest = new CreateIndexClusterStateUpdateRequest(
+            "initialize_data_stream",
+            failureStoreIndexName,
+            failureStoreIndexName
+        ).dataStreamName(dataStreamName)
+            .nameResolvedInstant(request.getStartTime())
+            .performReroute(false)
+            .setMatchingTemplate(template)
+            .settings(MetadataRolloverService.HIDDEN_INDEX_SETTINGS);
+
+        try {
+            currentState = metadataCreateIndexService.applyCreateIndexRequest(
+                currentState,
+                createIndexRequest,
+                false,
+                AllocationActionListener.rerouteCompletionIsNotRequired()
+            );
+        } catch (ResourceAlreadyExistsException e) {
+            // Rethrow as ElasticsearchStatusException, so that bulk transport action doesn't ignore it during
+            // auto index/data stream creation.
+            // (otherwise bulk execution fails later, because data stream will also not have been created)
+            throw new ElasticsearchStatusException(
+                "data stream could not be created because failure store index [{}] already exists",
+                RestStatus.BAD_REQUEST,
+                e,
+                failureStoreIndexName
+            );
+        }
+        return currentState;
+    }
+
     public static ComposableIndexTemplate lookupTemplateForDataStream(String dataStreamName, Metadata metadata) {
         final String v2Template = MetadataIndexTemplateService.findV2Template(metadata, dataStreamName, false);
         if (v2Template == null) {
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java
index 8423a5ad37334..2ebcad22185fd 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java
@@ -166,7 +166,9 @@ static ClusterState updateDataLifecycle(
                     dataStream.isSystem(),
                     dataStream.isAllowCustomRouting(),
                     dataStream.getIndexMode(),
-                    lifecycle
+                    lifecycle,
+                    dataStream.isFailureStore(),
+                    dataStream.getFailureIndices()
                 )
             );
         }
The JSON content is as follows: + * { + * "_doc": { + * "dynamic": false, + * "_routing": { + * "required": false + * }, + * "properties": { + * "@timestamp": { + * "type": "date", + * "ignore_malformed": false + * }, + * "document": { + * "properties": { + * "id": { + * "type": "keyword" + * }, + * "routing": { + * "type": "keyword" + * }, + * "index": { + * "type": "keyword" + * } + * } + * }, + * "error": { + * "properties": { + * "message": { + * "type": "wildcard" + * }, + * "stack_trace": { + * "type": "text" + * }, + * "type": { + * "type": "keyword" + * }, + * "pipeline": { + * "type": "keyword" + * }, + * "pipeline_trace": { + * "type": "keyword" + * }, + * "processor": { + * "type": "keyword" + * } + * } + * } + * } + * } + * } + */ + DATA_STREAM_FAILURE_STORE_MAPPING = new CompressedXContent( + (builder, params) -> builder.startObject(MapperService.SINGLE_MAPPING_NAME) + .field("dynamic", false) + .startObject(RoutingFieldMapper.NAME) + .field("required", false) + .endObject() + .startObject("properties") + .startObject(DEFAULT_TIMESTAMP_FIELD) + .field("type", DateFieldMapper.CONTENT_TYPE) + .field("ignore_malformed", false) + .endObject() + .startObject("document") + .startObject("properties") + // document.source is unmapped so that it can be persisted in source only without worrying that the document might cause + // a mapping error + .startObject("id") + .field("type", "keyword") + .endObject() + .startObject("routing") + .field("type", "keyword") + .endObject() + .startObject("index") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .startObject("error") + .startObject("properties") + .startObject("message") + .field("type", "wildcard") + .endObject() + .startObject("stack_trace") + .field("type", "text") + .endObject() + .startObject("type") + .field("type", "keyword") + .endObject() + .startObject("pipeline") + .field("type", "keyword") + .endObject() + .startObject("pipeline_trace") + .field("type", "keyword") + .endObject() + .startObject("processor") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + ); + } catch (IOException e) { throw new AssertionError(e); } @@ -1338,6 +1444,10 @@ public static List collectMappings( final String indexName ) { Objects.requireNonNull(template, "Composable index template must be provided"); + // Check if this is a failure store index, and if it is, discard any template mappings. Failure store mappings are predefined. 
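
Below is a minimal, self-contained sketch of the name-prefix dispatch that this `collectMappings` change introduces. The prefix literals are assumptions for illustration only; the real constants are `DataStream.BACKING_INDEX_PREFIX` and `DataStream.FAILURE_STORE_PREFIX`, and the real return values are the compressed mapping snippets shown in the hunk above.

```java
import java.util.List;

class MappingDispatchSketch {
    static final String BACKING_INDEX_PREFIX = ".ds-"; // assumed literal, for illustration
    static final String FAILURE_STORE_PREFIX = ".fs-"; // assumed literal, for illustration

    // Mirrors the shape of the change: a failure store index gets a fixed, predefined
    // mapping set, while a backing index keeps composing template mappings as before.
    static List<String> mappingsFor(String indexName) {
        if (indexName.startsWith(FAILURE_STORE_PREFIX)) {
            return List.of("failure-store-mapping", "data-stream-mapping-snippet");
        }
        return List.of("component-template-mappings", "index-template-mappings", "@timestamp-snippet");
    }

    public static void main(String[] args) {
        System.out.println(mappingsFor(".fs-logs-app-000001")); // fixed failure store mappings
        System.out.println(mappingsFor(".ds-logs-app-000001")); // composed template mappings
    }
}
```
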
+ if (template.getDataStreamTemplate() != null && indexName.startsWith(DataStream.FAILURE_STORE_PREFIX)) { + return List.of(DATA_STREAM_FAILURE_STORE_MAPPING, ComposableIndexTemplate.DataStreamTemplate.DATA_STREAM_MAPPING_SNIPPET); + } List mappings = template.composedOf() .stream() .map(componentTemplates::get) @@ -1348,7 +1458,7 @@ public static List collectMappings( .collect(Collectors.toCollection(LinkedList::new)); // Add the actual index template's mappings, since it takes the highest precedence Optional.ofNullable(template.template()).map(Template::mappings).ifPresent(mappings::add); - if (template.getDataStreamTemplate() != null && indexName.startsWith(DataStream.BACKING_INDEX_PREFIX)) { + if (template.getDataStreamTemplate() != null && isDataStreamIndex(indexName)) { // add a default mapping for the `@timestamp` field, at the lowest precedence, to make bootstrapping data streams more // straightforward as all backing indices are required to have a timestamp field if (template.getDataStreamTemplate().isAllowCustomRouting()) { @@ -1359,7 +1469,7 @@ public static List collectMappings( } // Only include _timestamp mapping snippet if creating backing index. - if (indexName.startsWith(DataStream.BACKING_INDEX_PREFIX)) { + if (isDataStreamIndex(indexName)) { // Only if template has data stream definition this should be added and // adding this template last, since _timestamp field should have highest precedence: if (template.getDataStreamTemplate() != null) { @@ -1369,6 +1479,10 @@ public static List collectMappings( return Collections.unmodifiableList(mappings); } + private static boolean isDataStreamIndex(String indexName) { + return indexName.startsWith(DataStream.BACKING_INDEX_PREFIX) || indexName.startsWith(DataStream.FAILURE_STORE_PREFIX); + } + /** * Resolve index settings for the given list of v1 templates, templates are apply in reverse * order since they should be provided in order of priority/order diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java index b268be27e17ac..f7fa34d76498a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java @@ -41,6 +41,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; +import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.isDataStreamsLifecycleOnlyMode; import static org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService.createDataStream; public class MetadataMigrateToDataStreamService { @@ -63,6 +64,7 @@ public class MetadataMigrateToDataStreamService { private final IndicesService indexServices; private final ThreadContext threadContext; private final MetadataCreateIndexService metadataCreateIndexService; + private final boolean isDslOnlyMode; public MetadataMigrateToDataStreamService( ThreadPool threadPool, @@ -74,6 +76,7 @@ public MetadataMigrateToDataStreamService( this.indexServices = indexServices; this.threadContext = threadPool.getThreadContext(); this.metadataCreateIndexService = metadataCreateIndexService; + this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings()); } public void migrateToDataStream( @@ -104,7 +107,7 @@ public void migrateToDataStream( @Override public ClusterState execute(ClusterState currentState) throws Exception { - 
ClusterState clusterState = migrateToDataStream(currentState, indexMetadata -> { + ClusterState clusterState = migrateToDataStream(currentState, isDslOnlyMode, indexMetadata -> { try { return indexServices.createIndexMapperServiceForValidation(indexMetadata); } catch (IOException e) { @@ -125,6 +128,7 @@ private void submitUnbatchedTask(@SuppressWarnings("SameParameterValue") String static ClusterState migrateToDataStream( ClusterState currentState, + boolean isDslOnlyMode, Function mapperSupplier, MigrateToDataStreamClusterStateUpdateRequest request, MetadataCreateIndexService metadataCreateIndexService, @@ -155,6 +159,7 @@ static ClusterState migrateToDataStream( return createDataStream( metadataCreateIndexService, currentState, + isDslOnlyMode, req, backingIndices, currentState.metadata().index(writeIndex), diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java index ecc26d15d001f..15fab193dad57 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java @@ -36,27 +36,72 @@ public class ShutdownShardMigrationStatus implements Writeable, ChunkedToXConten public static final String NODE_ALLOCATION_DECISION_KEY = "node_allocation_decision"; private final SingleNodeShutdownMetadata.Status status; + private final long startedShards; + private final long relocatingShards; + private final long initializingShards; private final long shardsRemaining; @Nullable private final String explanation; @Nullable private final ShardAllocationDecision allocationDecision; - public ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status status, long shardsRemaining) { - this(status, shardsRemaining, null, null); + public ShutdownShardMigrationStatus( + SingleNodeShutdownMetadata.Status status, + long shardsRemaining, + @Nullable String explanation, + @Nullable ShardAllocationDecision allocationDecision + ) { + this(status, -1, -1, -1, shardsRemaining, explanation, null); } - public ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status status, long shardsRemaining, @Nullable String explanation) { - this(status, shardsRemaining, explanation, null); + public ShutdownShardMigrationStatus( + SingleNodeShutdownMetadata.Status status, + long startedShards, + long relocatingShards, + long initializingShards + ) { + this( + status, + startedShards, + relocatingShards, + initializingShards, + startedShards + relocatingShards + initializingShards, + null, + null + ); } public ShutdownShardMigrationStatus( SingleNodeShutdownMetadata.Status status, + long startedShards, + long relocatingShards, + long initializingShards, + @Nullable String explanation + ) { + this( + status, + startedShards, + relocatingShards, + initializingShards, + startedShards + relocatingShards + initializingShards, + explanation, + null + ); + } + + private ShutdownShardMigrationStatus( + SingleNodeShutdownMetadata.Status status, + long startedShards, + long relocatingShards, + long initializingShards, long shardsRemaining, @Nullable String explanation, @Nullable ShardAllocationDecision allocationDecision ) { this.status = Objects.requireNonNull(status, "status must not be null"); + this.startedShards = startedShards; + this.relocatingShards = relocatingShards; + this.initializingShards = initializingShards; this.shardsRemaining = shardsRemaining; 
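
The constructor chain above funnels every public variant of `ShutdownShardMigrationStatus` through a single private canonical constructor, with -1 marking counts that were never computed and `shardsRemaining` otherwise derived as the sum of the three per-state counts. A self-contained sketch of that pattern, with illustrative names:

```java
// Sketch of the canonical-constructor pattern: -1 means "count not reported"
// (e.g. the value came from a node that predates the new fields), and the
// remaining total is otherwise derived from the per-state counts.
record ShardCounts(long started, long relocating, long initializing, long remaining) {
    static ShardCounts of(long started, long relocating, long initializing) {
        return new ShardCounts(started, relocating, initializing, started + relocating + initializing);
    }

    static ShardCounts legacy(long remaining) {
        return new ShardCounts(-1, -1, -1, remaining); // only the total is known
    }
}
```
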
this.explanation = explanation; this.allocationDecision = allocationDecision; @@ -64,7 +109,17 @@ public ShutdownShardMigrationStatus( public ShutdownShardMigrationStatus(StreamInput in) throws IOException { this.status = in.readEnum(SingleNodeShutdownMetadata.Status.class); - this.shardsRemaining = in.readLong(); + if (in.getTransportVersion().onOrAfter(TransportVersions.SHUTDOWN_MIGRATION_STATUS_INCLUDE_COUNTS)) { + this.startedShards = in.readZLong(); + this.relocatingShards = in.readZLong(); + this.initializingShards = in.readZLong(); + this.shardsRemaining = in.readZLong(); + } else { + this.startedShards = -1; + this.relocatingShards = -1; + this.initializingShards = -1; + this.shardsRemaining = in.readLong(); + } this.explanation = in.readOptionalString(); if (in.getTransportVersion().onOrAfter(ALLOCATION_DECISION_ADDED_VERSION)) { this.allocationDecision = in.readOptionalWriteable(ShardAllocationDecision::new); @@ -99,6 +154,11 @@ public Iterator toXContentChunked(ToXContent.Params params private XContentBuilder buildHeader(XContentBuilder builder) throws IOException { builder.field("status", status); + if (startedShards != -1) { + builder.field("started_shards", startedShards); + builder.field("relocating_shards", relocatingShards); + builder.field("initializing_shards", initializingShards); + } builder.field("shard_migrations_remaining", shardsRemaining); if (Objects.nonNull(explanation)) { builder.field("explanation", explanation); @@ -109,7 +169,14 @@ private XContentBuilder buildHeader(XContentBuilder builder) throws IOException @Override public void writeTo(StreamOutput out) throws IOException { out.writeEnum(status); - out.writeLong(shardsRemaining); + if (out.getTransportVersion().onOrAfter(TransportVersions.SHUTDOWN_MIGRATION_STATUS_INCLUDE_COUNTS)) { + out.writeZLong(startedShards); + out.writeZLong(relocatingShards); + out.writeZLong(initializingShards); + out.writeZLong(shardsRemaining); + } else { + out.writeLong(shardsRemaining); + } out.writeOptionalString(explanation); if (out.getTransportVersion().onOrAfter(ALLOCATION_DECISION_ADDED_VERSION)) { out.writeOptionalWriteable(allocationDecision); @@ -119,9 +186,12 @@ public void writeTo(StreamOutput out) throws IOException { @Override public boolean equals(Object o) { if (this == o) return true; - if ((o instanceof ShutdownShardMigrationStatus) == false) return false; + if (o == null || getClass() != o.getClass()) return false; ShutdownShardMigrationStatus that = (ShutdownShardMigrationStatus) o; - return shardsRemaining == that.shardsRemaining + return startedShards == that.startedShards + && relocatingShards == that.relocatingShards + && initializingShards == that.initializingShards + && shardsRemaining == that.shardsRemaining && status == that.status && Objects.equals(explanation, that.explanation) && Objects.equals(allocationDecision, that.allocationDecision); @@ -129,7 +199,7 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(status, shardsRemaining, explanation, allocationDecision); + return Objects.hash(status, startedShards, relocatingShards, initializingShards, shardsRemaining, explanation, allocationDecision); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index bd15d924c9c19..8e257ff2c7a54 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -114,6 +114,7 @@ public class IndexShardRoutingTable { allShardsStarted = false; } } + assert primary != null || shards.isEmpty() : shards; this.primary = primary; this.replicas = CollectionUtils.wrapUnmodifiableOrEmptySingleton(replicas); this.activeShards = CollectionUtils.wrapUnmodifiableOrEmptySingleton(activeShards); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java index 5f477a9ca66df..ea0ee630ef073 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java @@ -8,7 +8,6 @@ package org.elasticsearch.cluster.routing.allocation; -import org.elasticsearch.Version; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -16,6 +15,7 @@ import org.elasticsearch.common.unit.RelativeByteSizeValue; import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; import java.util.Iterator; import java.util.List; @@ -152,7 +152,11 @@ public class DiskThresholdSettings { private volatile TimeValue rerouteInterval; static { - assert Version.CURRENT.major == Version.V_7_0_0.major + 1; // this check is unnecessary in v9 + checkAutoReleaseIndexEnabled(); + } + + @UpdateForV9 // this check is unnecessary in v9 + private static void checkAutoReleaseIndexEnabled() { final String AUTO_RELEASE_INDEX_ENABLED_KEY = "es.disk.auto_release_flood_stage_block"; final String property = System.getProperty(AUTO_RELEASE_INDEX_ENABLED_KEY); if (property != null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 438c81b5fbb98..64f88ac1e2417 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -41,6 +41,7 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.Tuple; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.gateway.PriorityComparator; import org.elasticsearch.index.shard.ShardId; @@ -150,6 +151,7 @@ public BalancedShardsAllocator(ClusterSettings clusterSettings, WriteLoadForecas * * Once {@link org.elasticsearch.Version#V_7_17_0} goes out of scope, start to properly reject such bad values. 
     */
+    @UpdateForV9
     private static float ensureValidThreshold(float threshold) {
         if (1.0f <= threshold) {
             return threshold;
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputation.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputation.java
index eecd0a7410513..7cdffc3a5bf24 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputation.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputation.java
@@ -10,12 +10,12 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
-import org.elasticsearch.threadpool.ThreadPool;

 import java.util.Objects;
-import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executor;
 import java.util.concurrent.atomic.AtomicReference;

 /**
@@ -27,15 +27,15 @@ public abstract class ContinuousComputation<T> {

     private static final Logger logger = LogManager.getLogger(ContinuousComputation.class);

-    private final ExecutorService executorService;
+    private final Executor executor;
     private final AtomicReference<T> enqueuedInput = new AtomicReference<>();
     private final Processor processor = new Processor();

     /**
-     * @param threadPool Each computation runs on a {@code GENERIC} thread from this thread pool. At most one task executes at once.
+     * @param executor the {@link Executor} with which to execute the computation
      */
-    public ContinuousComputation(ThreadPool threadPool) {
-        this.executorService = threadPool.generic();
+    public ContinuousComputation(Executor executor) {
+        this.executor = executor;
     }

     /**
@@ -44,7 +44,7 @@ public ContinuousComputation(ThreadPool threadPool) {
     public void onNewInput(T input) {
         assert input != null;
         if (enqueuedInput.getAndSet(Objects.requireNonNull(input)) == null) {
-            executorService.execute(processor);
+            executor.execute(processor);
         }
     }

@@ -74,6 +74,7 @@ private class Processor extends AbstractRunnable {
         @Override
         public void onFailure(Exception e) {
+            logger.error(Strings.format("unexpected error processing [%s]", ContinuousComputation.this), e);
             assert false : e;
         }

@@ -85,14 +86,16 @@ public void onRejection(Exception e) {
         }

         @Override
-        protected void doRun() throws Exception {
+        protected void doRun() {
             final T input = enqueuedInput.get();
             assert input != null;
-            processInput(input);
-
-            if (enqueuedInput.compareAndSet(input, null) == false) {
-                executorService.execute(this);
+            try {
+                processInput(input);
+            } finally {
+                if (enqueuedInput.compareAndSet(input, null) == false) {
+                    executor.execute(this);
+                }
             }
         }
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java
index 7d24872cf51dc..60a6ec2e49899 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java
@@ -51,6 +51,7 @@ public class DesiredBalanceComputer {

     private final ThreadPool threadPool;
     private final ShardsAllocator delegateAllocator;
+    // stats
     protected final MeanMetric iterations = new MeanMetric();

     public static final
Setting PROGRESS_LOG_INTERVAL_SETTING = Setting.timeSetting( diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java index 6fac97e34d022..dc3cbfa8b5ae8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java @@ -29,6 +29,10 @@ import org.elasticsearch.gateway.PriorityComparator; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.telemetry.metric.DoubleGauge; +import org.elasticsearch.telemetry.metric.DoubleWithAttributes; +import org.elasticsearch.telemetry.metric.LongGaugeMetric; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.threadpool.ThreadPool; import java.util.Comparator; @@ -69,13 +73,57 @@ public class DesiredBalanceReconciler { private final NodeAllocationOrdering allocationOrdering = new NodeAllocationOrdering(); private final NodeAllocationOrdering moveOrdering = new NodeAllocationOrdering(); - public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool threadPool) { + // stats + /** + * Number of unassigned shards during last reconciliation + */ + protected final LongGaugeMetric unassignedShards; + /** + * Total number of assigned shards during last reconciliation + */ + protected final LongGaugeMetric totalAllocations; + /** + * Number of assigned shards during last reconciliation that are not allocated on desired node and need to be moved + */ + protected final LongGaugeMetric undesiredAllocations; + private final DoubleGauge undesiredAllocationsRatio; + + public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool threadPool, MeterRegistry meterRegistry) { this.undesiredAllocationLogInterval = new FrequencyCappedAction(threadPool); clusterSettings.initializeAndWatch(UNDESIRED_ALLOCATIONS_LOG_INTERVAL_SETTING, this.undesiredAllocationLogInterval::setMinInterval); clusterSettings.initializeAndWatch( UNDESIRED_ALLOCATIONS_LOG_THRESHOLD_SETTING, value -> this.undesiredAllocationsLogThreshold = value ); + + unassignedShards = LongGaugeMetric.create( + meterRegistry, + "es.allocator.desired_balance.shards.unassigned", + "Unassigned shards count", + "{shard}" + ); + totalAllocations = LongGaugeMetric.create( + meterRegistry, + "es.allocator.desired_balance.shards.count", + "Total shards count", + "{shard}" + ); + undesiredAllocations = LongGaugeMetric.create( + meterRegistry, + "es.allocator.desired_balance.allocations.undesired", + "Count of shards allocated on undesired nodes", + "{shard}" + ); + undesiredAllocationsRatio = meterRegistry.registerDoubleGauge( + "es.allocator.desired_balance.allocations.undesired_ratio", + "Ratio of undesired allocations to shard count", + "1", + () -> { + var total = totalAllocations.get(); + var undesired = undesiredAllocations.get(); + return new DoubleWithAttributes(total != 0 ? 
(double) undesired / total : 0.0); + } + ); } public void reconcile(DesiredBalance desiredBalance, RoutingAllocation allocation) { @@ -445,8 +493,9 @@ private void balance() { return; } - long allAllocations = 0; - long undesiredAllocations = 0; + int unassignedShards = routingNodes.unassigned().size() + routingNodes.unassigned().ignored().size(); + int totalAllocations = 0; + int undesiredAllocations = 0; // Iterate over all started shards and try to move any which are on undesired nodes. In the presence of throttling shard // movements, the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are offloading the @@ -454,7 +503,7 @@ private void balance() { for (final var iterator = OrderedShardsIterator.create(routingNodes, moveOrdering); iterator.hasNext();) { final var shardRouting = iterator.next(); - allAllocations++; + totalAllocations++; if (shardRouting.started() == false) { // can only rebalance started shards @@ -504,10 +553,14 @@ private void balance() { } } - maybeLogUndesiredAllocationsWarning(allAllocations, undesiredAllocations, routingNodes.size()); + DesiredBalanceReconciler.this.unassignedShards.set(unassignedShards); + DesiredBalanceReconciler.this.undesiredAllocations.set(undesiredAllocations); + DesiredBalanceReconciler.this.totalAllocations.set(totalAllocations); + + maybeLogUndesiredAllocationsWarning(totalAllocations, undesiredAllocations, routingNodes.size()); } - private void maybeLogUndesiredAllocationsWarning(long allAllocations, long undesiredAllocations, int nodeCount) { + private void maybeLogUndesiredAllocationsWarning(int allAllocations, int undesiredAllocations, int nodeCount) { // more shards than cluster can relocate with one reroute final boolean nonEmptyRelocationBacklog = undesiredAllocations > 2L * nodeCount; final boolean warningThresholdReached = undesiredAllocations > undesiredAllocationsLogThreshold * allAllocations; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java index 11d2317f5bcea..64f1eb704a2f3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; @@ -77,14 +78,16 @@ public DesiredBalanceShardsAllocator( ShardsAllocator delegateAllocator, ThreadPool threadPool, ClusterService clusterService, - DesiredBalanceReconcilerAction reconciler + DesiredBalanceReconcilerAction reconciler, + TelemetryProvider telemetryProvider ) { this( delegateAllocator, threadPool, clusterService, new DesiredBalanceComputer(clusterSettings, threadPool, delegateAllocator), - reconciler + reconciler, + telemetryProvider ); } @@ -93,14 +96,19 @@ public DesiredBalanceShardsAllocator( ThreadPool threadPool, ClusterService clusterService, DesiredBalanceComputer desiredBalanceComputer, - DesiredBalanceReconcilerAction reconciler + DesiredBalanceReconcilerAction reconciler, + TelemetryProvider telemetryProvider ) { this.delegateAllocator = delegateAllocator; 
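
The `undesired_ratio` gauge registered above computes its value lazily from the two long gauges each time the meter registry polls it, guarding the zero-denominator case. A self-contained sketch of that derived-gauge pattern, with `AtomicLong` standing in for the diff's `LongGaugeMetric`:

```java
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.DoubleSupplier;

class DerivedGaugeSketch {
    final AtomicLong totalAllocations = new AtomicLong();
    final AtomicLong undesiredAllocations = new AtomicLong();

    // Sampled on demand, exactly when the registry reads the gauge.
    final DoubleSupplier undesiredRatio = () -> {
        long total = totalAllocations.get();
        return total != 0 ? (double) undesiredAllocations.get() / total : 0.0;
    };

    public static void main(String[] args) {
        DerivedGaugeSketch sketch = new DerivedGaugeSketch();
        sketch.totalAllocations.set(200);
        sketch.undesiredAllocations.set(5);
        System.out.println(sketch.undesiredRatio.getAsDouble()); // 0.025
    }
}
```
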
this.threadPool = threadPool; this.reconciler = reconciler; this.desiredBalanceComputer = desiredBalanceComputer; - this.desiredBalanceReconciler = new DesiredBalanceReconciler(clusterService.getClusterSettings(), threadPool); - this.desiredBalanceComputation = new ContinuousComputation<>(threadPool) { + this.desiredBalanceReconciler = new DesiredBalanceReconciler( + clusterService.getClusterSettings(), + threadPool, + telemetryProvider.getMeterRegistry() + ); + this.desiredBalanceComputation = new ContinuousComputation<>(threadPool.generic()) { @Override protected void processInput(DesiredBalanceInput desiredBalanceInput) { @@ -141,7 +149,7 @@ private DesiredBalance getInitialDesiredBalance() { @Override public String toString() { - return "DesiredBalanceShardsAllocator#updateDesiredBalanceAndReroute"; + return "DesiredBalanceShardsAllocator#allocate"; } }; this.queue = new PendingListenersQueue(); @@ -272,7 +280,10 @@ public DesiredBalanceStats getStats() { desiredBalanceComputer.iterations.sum(), computedShardMovements.sum(), cumulativeComputationTime.count(), - cumulativeReconciliationTime.count() + cumulativeReconciliationTime.count(), + desiredBalanceReconciler.unassignedShards.get(), + desiredBalanceReconciler.totalAllocations.get(), + desiredBalanceReconciler.undesiredAllocations.get() ); } @@ -282,6 +293,10 @@ private void onNoLongerMaster() { queue.completeAllAsNotMaster(); pendingDesiredBalanceMoves.clear(); desiredBalanceReconciler.clear(); + + desiredBalanceReconciler.unassignedShards.set(0); + desiredBalanceReconciler.totalAllocations.set(0); + desiredBalanceReconciler.undesiredAllocations.set(0); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java index b8a1d3e1b899d..8a95b947735f1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java @@ -19,6 +19,8 @@ import java.io.IOException; +import static org.elasticsearch.TransportVersions.ADDITIONAL_DESIRED_BALANCE_RECONCILIATION_STATS; + public record DesiredBalanceStats( long lastConvergedIndex, boolean computationActive, @@ -28,7 +30,10 @@ public record DesiredBalanceStats( long computationIterations, long computedShardMovements, long cumulativeComputationTime, - long cumulativeReconciliationTime + long cumulativeReconciliationTime, + long unassignedShards, + long totalAllocations, + long undesiredAllocations ) implements Writeable, ToXContentObject { private static final TransportVersion COMPUTED_SHARD_MOVEMENTS_VERSION = TransportVersions.V_8_8_0; @@ -50,7 +55,10 @@ public static DesiredBalanceStats readFrom(StreamInput in) throws IOException { in.readVLong(), in.getTransportVersion().onOrAfter(COMPUTED_SHARD_MOVEMENTS_VERSION) ? in.readVLong() : -1, in.readVLong(), - in.readVLong() + in.readVLong(), + in.getTransportVersion().onOrAfter(ADDITIONAL_DESIRED_BALANCE_RECONCILIATION_STATS) ? in.readVLong() : -1, + in.getTransportVersion().onOrAfter(ADDITIONAL_DESIRED_BALANCE_RECONCILIATION_STATS) ? in.readVLong() : -1, + in.getTransportVersion().onOrAfter(ADDITIONAL_DESIRED_BALANCE_RECONCILIATION_STATS) ? 
in.readVLong() : -1 ); } @@ -67,6 +75,11 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeVLong(cumulativeComputationTime); out.writeVLong(cumulativeReconciliationTime); + if (out.getTransportVersion().onOrAfter(ADDITIONAL_DESIRED_BALANCE_RECONCILIATION_STATS)) { + out.writeVLong(unassignedShards); + out.writeVLong(totalAllocations); + out.writeVLong(undesiredAllocations); + } } @Override @@ -81,7 +94,21 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("computed_shard_movements", computedShardMovements); builder.humanReadableField("computation_time_in_millis", "computation_time", new TimeValue(cumulativeComputationTime)); builder.humanReadableField("reconciliation_time_in_millis", "reconciliation_time", new TimeValue(cumulativeReconciliationTime)); + builder.field("unassigned_shards", unassignedShards); + builder.field("total_allocations", totalAllocations); + builder.field("undesired_allocations", undesiredAllocations); + builder.field("undesired_allocations_ratio", undesiredAllocationsRatio()); builder.endObject(); return builder; } + + public double undesiredAllocationsRatio() { + if (unassignedShards == -1 || totalAllocations == -1 || undesiredAllocations == -1) { + return -1.0; + } else if (totalAllocations == 0) { + return 0.0; + } else { + return (double) undesiredAllocations / totalAllocations; + } + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index fe001480e5f46..0e0d15a02d042 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.DiskUsage; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -27,6 +26,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.UpdateForV9; import java.util.Map; @@ -69,6 +69,7 @@ public class DiskThresholdDecider extends AllocationDecider { public static final String NAME = "disk_threshold"; + @UpdateForV9 public static final Setting ENABLE_FOR_SINGLE_DATA_NODE = Setting.boolSetting( "cluster.routing.allocation.disk.watermark.enable_for_single_data_node", true, @@ -98,7 +99,6 @@ public void validate(Boolean value) { public DiskThresholdDecider(Settings settings, ClusterSettings clusterSettings) { this.diskThresholdSettings = new DiskThresholdSettings(settings, clusterSettings); - assert Version.CURRENT.major < 9 : "remove enable_for_single_data_node in 9"; // get deprecation warnings. 
boolean enabledForSingleDataNode = ENABLE_FOR_SINGLE_DATA_NODE.get(settings); assert enabledForSingleDataNode; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java index be04022685b85..b562ba8e9482d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java @@ -12,7 +12,8 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.index.shard.ShardId; + +import java.util.Objects; /** * This {@link org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider} prevents shards that @@ -65,26 +66,52 @@ private static Decision canMove(ShardRouting shardRouting, RoutingAllocation all return YES_NOT_RUNNING; } - final ShardId shardId = shardRouting.shardId(); - return snapshotsInProgress.asStream() - .filter(entry -> entry.hasShardsInInitState() && entry.isClone() == false) - .map(entry -> entry.shards().get(shardId)) - .filter( - shardSnapshotStatus -> shardSnapshotStatus != null - && shardSnapshotStatus.state().completed() == false - && shardSnapshotStatus.nodeId() != null - && shardSnapshotStatus.nodeId().equals(shardRouting.currentNodeId()) - ) - .findAny() - .map( - shardSnapshotStatus -> allocation.decision( + if (shardRouting.currentNodeId() == null) { + // Shard is not assigned to a node + return YES_NOT_SNAPSHOTTED; + } + + for (final var entriesByRepo : snapshotsInProgress.entriesByRepo()) { + for (final var entry : entriesByRepo) { + if (entry.isClone()) { + // clones do not run on data nodes + continue; + } + + if (entry.hasShardsInInitState() == false) { + // this snapshot has no running shard snapshots + // (NB this means we let ABORTED shards move without waiting for them to complete) + continue; + } + + final var shardSnapshotStatus = entry.shards().get(shardRouting.shardId()); + + if (shardSnapshotStatus == null) { + // this snapshot is not snapshotting the shard to allocate + continue; + } + + if (shardSnapshotStatus.state().completed()) { + // this shard snapshot is complete + continue; + } + + if (Objects.equals(shardRouting.currentNodeId(), shardSnapshotStatus.nodeId()) == false) { + // this shard snapshot is allocated to a different node + continue; + } + + return allocation.decision( Decision.THROTTLE, NAME, - "waiting for snapshotting of shard [%s] to complete on this node [%s]", - shardId, - shardSnapshotStatus.nodeId() - ) - ) - .orElse(YES_NOT_SNAPSHOTTED); + "waiting for snapshot [%s] of shard [%s] to complete on node [%s]", + entry.snapshot(), + shardRouting.shardId(), + shardRouting.currentNodeId() + ); + } + } + + return YES_NOT_SNAPSHOTTED; } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorService.java index 76ca9f88b4b58..74da033fd8811 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorService.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorService.java @@ -249,7 +249,7 @@ static void updateShardAllocationStatus( ); public static final String ENABLE_TIER_ACTION_GUIDE = "https://ela.st/enable-tier"; - public static final Map ACTION_ENABLE_TIERS_LOOKUP = DataTier.ALL_DATA_TIERS.stream() + private static final Map ACTION_ENABLE_TIERS_LOOKUP = DataTier.ALL_DATA_TIERS.stream() .collect( Collectors.toUnmodifiableMap( tier -> tier, @@ -276,7 +276,7 @@ static void updateShardAllocationStatus( INCREASE_SHARD_LIMIT_ACTION_GUIDE ); - public static final Map ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP = DataTier.ALL_DATA_TIERS + private static final Map ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP = DataTier.ALL_DATA_TIERS .stream() .collect( Collectors.toUnmodifiableMap( @@ -307,7 +307,7 @@ static void updateShardAllocationStatus( INCREASE_CLUSTER_SHARD_LIMIT_ACTION_GUIDE ); - public static final Map ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP = DataTier.ALL_DATA_TIERS + private static final Map ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP = DataTier.ALL_DATA_TIERS .stream() .collect( Collectors.toUnmodifiableMap( @@ -405,6 +405,7 @@ static void updateShardAllocationStatus( TIER_CAPACITY_ACTION_GUIDE ); + // Visible for testing public static final Map ACTION_INCREASE_TIER_CAPACITY_LOOKUP = DataTier.ALL_DATA_TIERS.stream() .collect( Collectors.toUnmodifiableMap( @@ -622,11 +623,11 @@ List diagnoseAllocationResults( ClusterState state, List nodeAllocationResults ) { - IndexMetadata index = state.metadata().index(shardRouting.index()); + IndexMetadata indexMetadata = state.metadata().index(shardRouting.index()); List diagnosisDefs = new ArrayList<>(); - if (index != null) { - diagnosisDefs.addAll(checkIsAllocationDisabled(index, nodeAllocationResults)); - diagnosisDefs.addAll(checkDataTierRelatedIssues(index, nodeAllocationResults, state)); + if (indexMetadata != null) { + diagnosisDefs.addAll(checkIsAllocationDisabled(indexMetadata, nodeAllocationResults)); + diagnosisDefs.addAll(checkNodeRoleRelatedIssues(indexMetadata, nodeAllocationResults, state, shardRouting)); } if (diagnosisDefs.isEmpty()) { diagnosisDefs.add(ACTION_CHECK_ALLOCATION_EXPLAIN_API); @@ -640,7 +641,7 @@ List diagnoseAllocationResults( * @param outcome The outcome expected * @return A predicate that returns true if the decision exists and matches the expected outcome, false otherwise. */ - private static Predicate hasDeciderResult(String deciderName, Decision.Type outcome) { + protected static Predicate hasDeciderResult(String deciderName, Decision.Type outcome) { return (nodeResult) -> { Decision decision = nodeResult.getCanAllocateDecision(); return decision != null && decision.getDecisions().stream().anyMatch(d -> deciderName.equals(d.label()) && outcome == d.type()); @@ -676,26 +677,29 @@ List checkIsAllocationDisabled(IndexMetadata indexMetadata } /** - * Generates a list of diagnoses for common problems that keep a shard from allocating to nodes in a data tier. + * Generates a list of diagnoses for common problems that keep a shard from allocating to nodes depending on their role; + * a very common example of such a case are data tiers. * @param indexMetadata Index metadata for the shard being diagnosed. * @param nodeAllocationResults allocation decision results for all nodes in the cluster. * @param clusterState the current cluster state. 
+ * @param shardRouting the shard the nodeAllocationResults refer to * @return A list of diagnoses for the provided unassigned shard */ - public List checkDataTierRelatedIssues( + protected List checkNodeRoleRelatedIssues( IndexMetadata indexMetadata, List nodeAllocationResults, - ClusterState clusterState + ClusterState clusterState, + ShardRouting shardRouting ) { List diagnosisDefs = new ArrayList<>(); - if (indexMetadata.getTierPreference().size() > 0) { + if (indexMetadata.getTierPreference().isEmpty() == false) { List dataTierAllocationResults = nodeAllocationResults.stream() .filter(hasDeciderResult(DATA_TIER_ALLOCATION_DECIDER_NAME, Decision.Type.YES)) .toList(); if (dataTierAllocationResults.isEmpty()) { // Shard must be allocated on specific tiers but no nodes were enabled for those tiers. for (String tier : indexMetadata.getTierPreference()) { - Optional.ofNullable(ACTION_ENABLE_TIERS_LOOKUP.get(tier)).ifPresent(diagnosisDefs::add); + Optional.ofNullable(getAddNodesWithRoleAction(tier)).ifPresent(diagnosisDefs::add); } } else { // Collect the nodes from the tiers this index is allowed on @@ -719,29 +723,29 @@ public List checkDataTierRelatedIssues( // Run checks for data tier specific problems diagnosisDefs.addAll( - checkDataTierAtShardLimit(indexMetadata, clusterState, dataTierAllocationResults, dataTierNodes, preferredTier) + checkNodesWithRoleAtShardLimit(indexMetadata, clusterState, dataTierAllocationResults, dataTierNodes, preferredTier) ); diagnosisDefs.addAll(checkDataTierShouldMigrate(indexMetadata, dataTierAllocationResults, preferredTier, dataTierNodes)); - checkNotEnoughNodesInDataTier(dataTierAllocationResults, preferredTier).ifPresent(diagnosisDefs::add); + checkNotEnoughNodesWithRole(dataTierAllocationResults, preferredTier).ifPresent(diagnosisDefs::add); } } return diagnosisDefs; } - private List checkDataTierAtShardLimit( + protected List checkNodesWithRoleAtShardLimit( IndexMetadata indexMetadata, ClusterState clusterState, - List dataTierAllocationResults, - Set dataTierNodes, - @Nullable String preferredTier + List nodeRoleAllocationResults, + Set nodesWithRoles, + @Nullable String role ) { - // All tier nodes at shards limit? - if (dataTierAllocationResults.stream().allMatch(hasDeciderResult(ShardsLimitAllocationDecider.NAME, Decision.Type.NO))) { + // All applicable nodes at shards limit? 
+ if (nodeRoleAllocationResults.stream().allMatch(hasDeciderResult(ShardsLimitAllocationDecider.NAME, Decision.Type.NO))) { List diagnosisDefs = new ArrayList<>(); - // We need the routing nodes for the tiers this index is allowed on to determine the offending shard limits - List dataTierRoutingNodes = clusterState.getRoutingNodes() + // We need the routing nodes for the role this index is allowed on to determine the offending shard limits + List candidateNodes = clusterState.getRoutingNodes() .stream() - .filter(routingNode -> dataTierNodes.contains(routingNode.node())) + .filter(routingNode -> nodesWithRoles.contains(routingNode.node())) .toList(); // Determine which total_shards_per_node settings are present @@ -752,34 +756,29 @@ private List checkDataTierAtShardLimit( // Determine which total_shards_per_node settings are keeping things from allocating boolean clusterShardsPerNodeShouldChange = false; if (clusterShardsPerNode > 0) { - int minShardCountInTier = dataTierRoutingNodes.stream() - .map(RoutingNode::numberOfOwningShards) - .min(Integer::compareTo) - .orElse(-1); - clusterShardsPerNodeShouldChange = minShardCountInTier >= clusterShardsPerNode; + int minShardCount = candidateNodes.stream().map(RoutingNode::numberOfOwningShards).min(Integer::compareTo).orElse(-1); + clusterShardsPerNodeShouldChange = minShardCount >= clusterShardsPerNode; } boolean indexShardsPerNodeShouldChange = false; if (indexShardsPerNode > 0) { - int minShardCountInTier = dataTierRoutingNodes.stream() + int minShardCount = candidateNodes.stream() .map(routingNode -> routingNode.numberOfOwningShardsForIndex(indexMetadata.getIndex())) .min(Integer::compareTo) .orElse(-1); - indexShardsPerNodeShouldChange = minShardCountInTier >= indexShardsPerNode; + indexShardsPerNodeShouldChange = minShardCount >= indexShardsPerNode; } // Add appropriate diagnosis - if (preferredTier != null) { - // We cannot allocate the shard to the most preferred tier because a shard limit is reached. + if (role != null) { + // We cannot allocate the shard to the most preferred role because a shard limit is reached. if (clusterShardsPerNodeShouldChange) { - Optional.ofNullable(ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP.get(preferredTier)) - .ifPresent(diagnosisDefs::add); + Optional.ofNullable(getIncreaseShardLimitClusterSettingAction(role)).ifPresent(diagnosisDefs::add); } if (indexShardsPerNodeShouldChange) { - Optional.ofNullable(ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP.get(preferredTier)).ifPresent(diagnosisDefs::add); + Optional.ofNullable(getIncreaseShardLimitIndexSettingAction(role)).ifPresent(diagnosisDefs::add); } } else { - // We couldn't determine a desired tier. This is likely because there are no tiers in the cluster, - // only `data` nodes. Give a generic ask for increasing the shard limit. + // We couldn't determine a desired role. Give a generic ask for increasing the shard limit. if (clusterShardsPerNodeShouldChange) { diagnosisDefs.add(ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING); } @@ -838,16 +837,16 @@ private static List checkDataTierShouldMigrate( } } - private static Optional checkNotEnoughNodesInDataTier( - List dataTierAllocationResults, - @Nullable String preferredTier + protected Optional checkNotEnoughNodesWithRole( + List nodeAllocationResults, + @Nullable String role ) { - // Not enough tier nodes to hold shards on different nodes? 
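
The renames in this hunk (`checkDataTier*` to `checkNodesWithRole*`, `public static` to overridable `protected`) turn the tier-specific diagnosis flow into a template method: the base class keeps the control flow, and the `get*Action(role)` hooks added at the end of the hunk let subclasses supply diagnoses for node roles other than data tiers. A minimal sketch of that shape, with illustrative names only:

```java
// Template-method sketch: the base class owns the checks; subclasses override
// the lookup hook to map their own node roles to remediation actions.
abstract class RoleDiagnosisSketch {
    String diagnose(String role, boolean allNodesAtShardLimit) {
        if (allNodesAtShardLimit) {
            String action = getIncreaseShardLimitAction(role);
            return action != null ? action : "increase the cluster-wide shard limit";
        }
        return "no shard-limit issue for role [" + role + "]";
    }

    // Hook: the default service only knows data tiers; other implementations
    // can recognize additional roles here.
    abstract String getIncreaseShardLimitAction(String role);
}
```
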
- if (dataTierAllocationResults.stream().allMatch(hasDeciderResult(SameShardAllocationDecider.NAME, Decision.Type.NO))) { - // We couldn't determine a desired tier. This is likely because there are no tiers in the cluster, - // only `data` nodes. Give a generic ask for increasing the shard limit. - if (preferredTier != null) { - return Optional.ofNullable(ACTION_INCREASE_TIER_CAPACITY_LOOKUP.get(preferredTier)); + // Not enough nodes to hold shards on different nodes? + if (nodeAllocationResults.stream().allMatch(hasDeciderResult(SameShardAllocationDecider.NAME, Decision.Type.NO))) { + // We couldn't determine a desired role. This is likely because there are no nodes with the relevant role in the cluster. + // Give a generic ask for increasing the shard limit. + if (role != null) { + return Optional.ofNullable(getIncreaseNodeWithRoleCapacityAction(role)); } else { return Optional.of(ACTION_INCREASE_NODE_CAPACITY); } @@ -856,6 +855,26 @@ private static Optional checkNotEnoughNodesInDataTier( } } + @Nullable + public Diagnosis.Definition getAddNodesWithRoleAction(String role) { + return ACTION_ENABLE_TIERS_LOOKUP.get(role); + } + + @Nullable + public Diagnosis.Definition getIncreaseShardLimitIndexSettingAction(String role) { + return ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP.get(role); + } + + @Nullable + public Diagnosis.Definition getIncreaseShardLimitClusterSettingAction(String role) { + return ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP.get(role); + } + + @Nullable + public Diagnosis.Definition getIncreaseNodeWithRoleCapacityAction(String role) { + return ACTION_INCREASE_TIER_CAPACITY_LOOKUP.get(role); + } + public class ShardAllocationStatus { protected final ShardAllocationCounts primaries = new ShardAllocationCounts(); protected final ShardAllocationCounts replicas = new ShardAllocationCounts(); diff --git a/server/src/main/java/org/elasticsearch/cluster/service/TransportFeatures.java b/server/src/main/java/org/elasticsearch/cluster/service/TransportFeatures.java new file mode 100644 index 0000000000000..bfecc577f7a47 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/service/TransportFeatures.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.service; + +import org.elasticsearch.Version; +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; + +import java.util.Map; + +public class TransportFeatures implements FeatureSpecification { + @Override + public Map getHistoricalFeatures() { + // transport version was introduced in 8.8.0, but we need to wait until all nodes are >8.8.0 + // to properly detect when we need to fix transport versions + return Map.of(TransportVersionsFixupListener.FIX_TRANSPORT_VERSION, Version.V_8_8_1); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java b/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java index a54130aec95b6..e77d44e5ad71e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java @@ -9,7 +9,6 @@ package org.elasticsearch.cluster.service; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; @@ -26,6 +25,9 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; import org.elasticsearch.threadpool.Scheduler; @@ -47,10 +49,13 @@ * due to the master node not understanding cluster state with transport versions added in 8.8.0. * Any nodes with the inferred placeholder cluster state is then refreshed with their actual transport version */ +@UpdateForV9 // this can be removed in v9 public class TransportVersionsFixupListener implements ClusterStateListener { private static final Logger logger = LogManager.getLogger(TransportVersionsFixupListener.class); + static final NodeFeature FIX_TRANSPORT_VERSION = new NodeFeature("transport.fix_transport_version"); + private static final TimeValue RETRY_TIME = TimeValue.timeValueSeconds(30); private final MasterServiceTaskQueue taskQueue; @@ -58,13 +63,20 @@ public class TransportVersionsFixupListener implements ClusterStateListener { private final Scheduler scheduler; private final Executor executor; private final Set pendingNodes = Collections.synchronizedSet(new HashSet<>()); + private final FeatureService featureService; - public TransportVersionsFixupListener(ClusterService service, ClusterAdminClient client, ThreadPool threadPool) { + public TransportVersionsFixupListener( + ClusterService service, + ClusterAdminClient client, + FeatureService featureService, + ThreadPool threadPool + ) { // there tends to be a lot of state operations on an upgrade - this one is not time-critical, // so use LOW priority. It just needs to be run at some point after upgrade. 
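
`TransportFeatures` above registers `FIX_TRANSPORT_VERSION` as a historical feature mapped to 8.8.1. Assuming historical features are treated as present on every node at or above their mapped version, the `featureService.clusterHasFeature(...)` call swapped in later in this hunk behaves like the old `getMinNodeVersion().after(Version.V_8_8_0)` check. A self-contained sketch of that equivalence, under exactly that assumption:

```java
import java.util.List;

class FeatureGateSketch {
    // Assumption: a historical feature is present on every node whose version is
    // at or above the version it is mapped to (8.8.1 in TransportFeatures above),
    // so "cluster has feature" reduces to "minimum node version >= mapped version".
    static boolean clusterHasFeature(List<int[]> nodeVersions, int[] mappedVersion) {
        return nodeVersions.stream().allMatch(node -> compare(node, mappedVersion) >= 0);
    }

    static int compare(int[] a, int[] b) {
        for (int i = 0; i < 3; i++) {
            if (a[i] != b[i]) return Integer.compare(a[i], b[i]);
        }
        return 0;
    }

    public static void main(String[] args) {
        List<int[]> cluster = List.of(new int[] { 8, 8, 1 }, new int[] { 8, 9, 0 });
        System.out.println(clusterHasFeature(cluster, new int[] { 8, 8, 1 })); // true: fixup may run
        cluster = List.of(new int[] { 8, 8, 0 }, new int[] { 8, 9, 0 });
        System.out.println(clusterHasFeature(cluster, new int[] { 8, 8, 1 })); // false: wait for upgrade
    }
}
```
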
this( service.createTaskQueue("fixup-transport-versions", Priority.LOW, new TransportVersionUpdater()), client, + featureService, threadPool, threadPool.executor(ThreadPool.Names.CLUSTER_COORDINATION) ); @@ -73,11 +85,13 @@ public TransportVersionsFixupListener(ClusterService service, ClusterAdminClient TransportVersionsFixupListener( MasterServiceTaskQueue taskQueue, ClusterAdminClient client, + FeatureService featureService, Scheduler scheduler, Executor executor ) { this.taskQueue = taskQueue; this.client = client; + this.featureService = featureService; this.scheduler = scheduler; this.executor = executor; } @@ -139,7 +153,7 @@ public void clusterChanged(ClusterChangedEvent event) { // if the min node version > 8.8.0, and the cluster state has some transport versions == 8.8.0, // then refresh all inferred transport versions to their real versions // now that everything should understand cluster state with transport versions - if (event.state().nodes().getMinNodeVersion().after(Version.V_8_8_0) + if (featureService.clusterHasFeature(event.state(), FIX_TRANSPORT_VERSION) && event.state().getMinTransportVersion().equals(INFERRED_TRANSPORT_VERSION)) { // find all the relevant nodes diff --git a/server/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java b/server/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java index b7afe8211184a..a12071f9c27e3 100644 --- a/server/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java +++ b/server/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.indices.breaker.BreakerSettings; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; +import org.elasticsearch.telemetry.metric.LongCounter; import java.util.concurrent.atomic.AtomicLong; @@ -29,17 +30,25 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker { private final Logger logger; private final HierarchyCircuitBreakerService parent; private final String name; + private final LongCounter trippedCountMeter; /** * Create a circuit breaker that will break if the number of estimated * bytes grows above the limit. All estimations will be multiplied by * the given overheadConstant. Uses the given oldBreaker to initialize * the starting offset. 
+ * @param trippedCountMeter the counter used to report the tripped count metric * @param settings settings to configure this breaker * @param parent parent circuit breaker service to delegate tripped breakers to * @param name the name of the breaker */ - public ChildMemoryCircuitBreaker(BreakerSettings settings, Logger logger, HierarchyCircuitBreakerService parent, String name) { + public ChildMemoryCircuitBreaker( + LongCounter trippedCountMeter, + BreakerSettings settings, + Logger logger, + HierarchyCircuitBreakerService parent, + String name + ) { this.name = name; this.limitAndOverhead = new LimitAndOverhead(settings.getLimit(), settings.getOverhead()); this.durability = settings.getDurability(); @@ -48,6 +57,7 @@ public ChildMemoryCircuitBreaker(BreakerSettings settings, Logger logger, Hierar this.logger = logger; logger.trace(() -> format("creating ChildCircuitBreaker with settings %s", settings)); this.parent = parent; + this.trippedCountMeter = trippedCountMeter; } /** @@ -58,6 +68,7 @@ public ChildMemoryCircuitBreaker(BreakerSettings settings, Logger logger, Hierar public void circuitBreak(String fieldName, long bytesNeeded) { final long memoryBytesLimit = this.limitAndOverhead.limit; this.trippedCount.incrementAndGet(); + this.trippedCountMeter.increment(); final String message = "[" + this.name + "] Data too large, data for [" diff --git a/server/src/main/java/org/elasticsearch/common/cache/Cache.java b/server/src/main/java/org/elasticsearch/common/cache/Cache.java index 7cd6fa471040a..98a4b90da73d5 100644 --- a/server/src/main/java/org/elasticsearch/common/cache/Cache.java +++ b/server/src/main/java/org/elasticsearch/common/cache/Cache.java @@ -102,8 +102,8 @@ void setExpireAfterAccessNanos(long expireAfterAccessNanos) { this.entriesExpireAfterAccess = true; } - // pkg-private for testing - long getExpireAfterAccessNanos() { + // public for testing + public long getExpireAfterAccessNanos() { return this.expireAfterAccessNanos; } diff --git a/server/src/main/java/org/elasticsearch/common/inject/ModulesBuilder.java b/server/src/main/java/org/elasticsearch/common/inject/ModulesBuilder.java index 6b702f41e7c5d..60e6fa5fff22a 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/ModulesBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/ModulesBuilder.java @@ -22,6 +22,11 @@ public ModulesBuilder add(Module... 
newModules) { return this; } + public <T> T bindToInstance(Class<T> cls, T instance) { + modules.add(b -> b.bind(cls).toInstance(instance)); + return instance; + } + @Override public Iterator<Module> iterator() { return modules.iterator(); diff --git a/server/src/main/java/org/elasticsearch/common/network/CloseableChannel.java b/server/src/main/java/org/elasticsearch/common/network/CloseableChannel.java index 3e184c41ef006..b0d1ec931b0be 100644 --- a/server/src/main/java/org/elasticsearch/common/network/CloseableChannel.java +++ b/server/src/main/java/org/elasticsearch/common/network/CloseableChannel.java @@ -83,7 +83,7 @@ static <C extends CloseableChannel> void closeChannels(List<C> channels, boolean if (blocking) { ArrayList<PlainActionFuture<Void>> futures = new ArrayList<>(channels.size()); for (final C channel : channels) { - PlainActionFuture<Void> closeFuture = PlainActionFuture.newFuture(); + PlainActionFuture<Void> closeFuture = new PlainActionFuture<>(); channel.addCloseListener(closeFuture); futures.add(closeFuture); } diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 69e61e7e70001..8e469973c0f08 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -80,6 +80,7 @@ import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexingPressure; +import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.indices.IndexingMemoryController; import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.IndicesRequestCache; @@ -425,6 +426,7 @@ public void apply(Settings value, Settings current, Settings previous) { ScriptService.CONTEXTS_ALLOWED_SETTING, IndicesService.INDICES_CACHE_CLEAN_INTERVAL_SETTING, IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY, + IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_EXPIRE, IndicesRequestCache.INDICES_CACHE_QUERY_SIZE, IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE, HunspellService.HUNSPELL_LAZY_LOAD, @@ -577,6 +579,8 @@ public void apply(Settings value, Settings current, Settings previous) { IndicesClusterStateService.SHARD_LOCK_RETRY_TIMEOUT_SETTING, IngestSettings.GROK_WATCHDOG_INTERVAL, IngestSettings.GROK_WATCHDOG_MAX_EXECUTION_TIME, - TDigestExecutionHint.SETTING + TDigestExecutionHint.SETTING, + MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT_SETTING, + MergePolicyConfig.DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT_SETTING ).filter(Objects::nonNull).collect(Collectors.toSet()); } diff --git a/server/src/main/java/org/elasticsearch/common/unit/Processors.java b/server/src/main/java/org/elasticsearch/common/unit/Processors.java index c71005678fc0d..481ced3bb31d6 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/Processors.java +++ b/server/src/main/java/org/elasticsearch/common/unit/Processors.java @@ -56,7 +56,7 @@ public static Processors of(Double count) { } if (validNumberOfProcessors(count) == false) { - throw new IllegalArgumentException("processors must be a non-negative number; provided [" + count + "]"); + throw new IllegalArgumentException("processors must be a positive number; provided [" + count + "]"); } return new Processors(count); diff --git a/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java b/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java index 72a2fc41a9a12..2c623882afe14 100644 ---
a/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java @@ -64,6 +64,10 @@ public byte set(long index, byte value) { @Override public boolean get(long index, int len, BytesRef ref) { assert index + len <= size(); + if (len == 0) { + ref.length = 0; + return false; + } int pageIndex = pageIndex(index); final int indexInPage = indexInPage(index); if (indexInPage + len <= pageSize()) { diff --git a/server/src/main/java/org/elasticsearch/common/util/CancellableSingleObjectCache.java b/server/src/main/java/org/elasticsearch/common/util/CancellableSingleObjectCache.java index 467efff9e72c4..081dec3f6b7db 100644 --- a/server/src/main/java/org/elasticsearch/common/util/CancellableSingleObjectCache.java +++ b/server/src/main/java/org/elasticsearch/common/util/CancellableSingleObjectCache.java @@ -192,7 +192,7 @@ private final class CachedItem extends AbstractRefCounted { CachedItem(Key key) { this.key = key; - incRef(); // start with a refcount of 2 so we're not closed while adding the first listener + mustIncRef(); // start with a refcount of 2 so we're not closed while adding the first listener this.future.addListener(new ActionListener<>() { @Override public void onResponse(Value value) { diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThrottledIterator.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThrottledIterator.java index d6ac42a9211c9..34236b957dea2 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThrottledIterator.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThrottledIterator.java @@ -88,7 +88,7 @@ private void run() { } } try (var itemRefs = new ItemRefCounted()) { - itemRefs.incRef(); + itemRefs.mustIncRef(); itemConsumer.accept(Releasables.releaseOnce(itemRefs::decRef), item); } catch (Exception e) { logger.error(Strings.format("exception when processing [%s] with [%s]", item, itemConsumer), e); @@ -108,7 +108,7 @@ private class ItemRefCounted extends AbstractRefCounted implements Releasable { private boolean isRecursive = true; ItemRefCounted() { - refs.incRef(); + refs.mustIncRef(); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java index 0a2b631f2b545..5a49896cf1a36 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java @@ -94,7 +94,7 @@ public static Iterator wrapWithObject(String return Iterators.concat(startObject(name), iterator, endObject()); } - private static Iterator map(String name, Map map, Function, ToXContent> toXContent) { + public static Iterator map(String name, Map map, Function, ToXContent> toXContent) { return wrapWithObject(name, Iterators.map(map.entrySet().iterator(), toXContent)); } diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index fabc10e336368..0552335ab092d 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import 
org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.coordination.Coordinator; @@ -26,6 +25,7 @@ import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; @@ -35,6 +35,8 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.gateway.GatewayMetaState; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.monitor.NodeHealthService; @@ -62,7 +64,7 @@ /** * A module for loading classes for node discovery. */ -public class DiscoveryModule { +public class DiscoveryModule extends AbstractModule { private static final Logger logger = LogManager.getLogger(DiscoveryModule.class); public static final String MULTI_NODE_DISCOVERY_TYPE = "multi-node"; @@ -112,7 +114,7 @@ public DiscoveryModule( NodeHealthService nodeHealthService, CircuitBreakerService circuitBreakerService, CompatibilityVersions compatibilityVersions, - Set features + FeatureService featureService ) { final Collection> joinValidators = new ArrayList<>(); final Map> hostProviders = new HashMap<>(); @@ -172,19 +174,7 @@ public DiscoveryModule( throw new IllegalArgumentException("Unknown election strategy " + ELECTION_STRATEGY_SETTING.get(settings)); } - if (LEGACY_MULTI_NODE_DISCOVERY_TYPE.equals(discoveryType)) { - assert Version.CURRENT.major == Version.V_7_0_0.major + 1; - DeprecationLogger.getLogger(DiscoveryModule.class) - .critical( - DeprecationCategory.SETTINGS, - "legacy-discovery-type", - "Support for setting [{}] to [{}] is deprecated and will be removed in a future version. Set this setting to [{}] " - + "instead.", - DISCOVERY_TYPE_SETTING.getKey(), - LEGACY_MULTI_NODE_DISCOVERY_TYPE, - MULTI_NODE_DISCOVERY_TYPE - ); - } + checkLegacyMultiNodeDiscoveryType(discoveryType); this.reconfigurator = getReconfigurator(settings, clusterSettings, clusterCoordinationPlugins); var preVoteCollectorFactory = getPreVoteCollectorFactory(clusterCoordinationPlugins); @@ -215,7 +205,7 @@ public DiscoveryModule( leaderHeartbeatService, preVoteCollectorFactory, compatibilityVersions, - features + featureService ); } else { throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]"); @@ -224,6 +214,22 @@ public DiscoveryModule( logger.info("using discovery type [{}] and seed hosts providers {}", discoveryType, seedProviderNames); } + @UpdateForV9 + private static void checkLegacyMultiNodeDiscoveryType(String discoveryType) { + if (LEGACY_MULTI_NODE_DISCOVERY_TYPE.equals(discoveryType)) { + DeprecationLogger.getLogger(DiscoveryModule.class) + .critical( + DeprecationCategory.SETTINGS, + "legacy-discovery-type", + "Support for setting [{}] to [{}] is deprecated and will be removed in a future version. 
Set this setting to [{}] " + + "instead.", + DISCOVERY_TYPE_SETTING.getKey(), + LEGACY_MULTI_NODE_DISCOVERY_TYPE, + MULTI_NODE_DISCOVERY_TYPE + ); + } + } + // visible for testing static Reconfigurator getReconfigurator( Settings settings, @@ -285,6 +291,12 @@ public static boolean isSingleNodeDiscovery(Settings settings) { return SINGLE_NODE_DISCOVERY_TYPE.equals(DISCOVERY_TYPE_SETTING.get(settings)); } + @Override + protected void configure() { + bind(Coordinator.class).toInstance(coordinator); + bind(Reconfigurator.class).toInstance(reconfigurator); + } + public Coordinator getCoordinator() { return coordinator; } diff --git a/server/src/main/java/org/elasticsearch/env/Environment.java b/server/src/main/java/org/elasticsearch/env/Environment.java index 44cf74c4339a6..2f738eb1412a5 100644 --- a/server/src/main/java/org/elasticsearch/env/Environment.java +++ b/server/src/main/java/org/elasticsearch/env/Environment.java @@ -326,11 +326,7 @@ public static FileStore getFileStore(final Path path) throws IOException { public static long getUsableSpace(Path path) throws IOException { long freeSpaceInBytes = Environment.getFileStore(path).getUsableSpace(); - - /* See: https://bugs.openjdk.java.net/browse/JDK-8162520 */ - if (freeSpaceInBytes < 0) { - freeSpaceInBytes = Long.MAX_VALUE; - } + assert freeSpaceInBytes >= 0; return freeSpaceInBytes; } diff --git a/server/src/main/java/org/elasticsearch/env/NodeMetadata.java b/server/src/main/java/org/elasticsearch/env/NodeMetadata.java index 77415bbaea949..2122e5fcc8b6c 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeMetadata.java +++ b/server/src/main/java/org/elasticsearch/env/NodeMetadata.java @@ -10,6 +10,7 @@ import org.elasticsearch.Build; import org.elasticsearch.Version; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.gateway.MetadataStateFormat; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -154,23 +155,19 @@ public void setNodeVersionId(int nodeVersionId) { this.nodeVersion = Version.fromId(nodeVersionId); } - public void setPreviousNodeVersionId(int previousNodeVersionId) { - this.previousNodeVersion = Version.fromId(previousNodeVersionId); - } - public void setOldestIndexVersion(int oldestIndexVersion) { this.oldestIndexVersion = IndexVersion.fromId(oldestIndexVersion); } + private Version getVersionOrFallbackToEmpty() { + return Objects.requireNonNullElse(this.nodeVersion, Version.V_EMPTY); + } + public NodeMetadata build() { - final Version nodeVersion; + @UpdateForV9 // version is required in the node metadata from v9 onwards + final Version nodeVersion = getVersionOrFallbackToEmpty(); final IndexVersion oldestIndexVersion; - if (this.nodeVersion == null) { - assert Version.CURRENT.major <= Version.V_7_0_0.major + 1 : "version is required in the node metadata from v9 onwards"; - nodeVersion = Version.V_EMPTY; - } else { - nodeVersion = this.nodeVersion; - } + if (this.previousNodeVersion == null) { previousNodeVersion = nodeVersion; } diff --git a/server/src/main/java/org/elasticsearch/features/FeatureData.java b/server/src/main/java/org/elasticsearch/features/FeatureData.java new file mode 100644 index 0000000000000..273617205ee47 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/features/FeatureData.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.features; + +import org.elasticsearch.Version; +import org.elasticsearch.common.Strings; + +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; +import java.util.Set; +import java.util.TreeMap; + +import static org.elasticsearch.features.FeatureService.CLUSTER_FEATURES_ADDED_VERSION; + +/** + * Reads and consolidates the features exposed by a list of {@link FeatureSpecification}s, grouping them into historical features and node + * features for the consumption of {@link FeatureService}. + */ +public class FeatureData { + private final NavigableMap<Version, Set<String>> historicalFeatures; + private final Map<String, NodeFeature> nodeFeatures; + + private FeatureData(NavigableMap<Version, Set<String>> historicalFeatures, Map<String, NodeFeature> nodeFeatures) { + this.historicalFeatures = historicalFeatures; + this.nodeFeatures = nodeFeatures; + } + + public static FeatureData createFromSpecifications(List<? extends FeatureSpecification> specs) { + Map<String, FeatureSpecification> allFeatures = new HashMap<>(); + + NavigableMap<Version, Set<String>> historicalFeatures = new TreeMap<>(); + Map<String, NodeFeature> nodeFeatures = new HashMap<>(); + for (FeatureSpecification spec : specs) { + for (var hfe : spec.getHistoricalFeatures().entrySet()) { + FeatureSpecification existing = allFeatures.putIfAbsent(hfe.getKey().id(), spec); + // the same SPI class can be loaded multiple times if it's in the base classloader + if (existing != null && existing.getClass() != spec.getClass()) { + throw new IllegalArgumentException( + Strings.format("Duplicate feature - [%s] is declared by both [%s] and [%s]", hfe.getKey().id(), existing, spec) + ); + } + + if (hfe.getValue().after(CLUSTER_FEATURES_ADDED_VERSION)) { + throw new IllegalArgumentException( + Strings.format( + "Historical feature [%s] declared by [%s] for version [%s] is not a historical version", + hfe.getKey().id(), + spec, + hfe.getValue() + ) + ); + } + + historicalFeatures.computeIfAbsent(hfe.getValue(), k -> new HashSet<>()).add(hfe.getKey().id()); + } + + for (NodeFeature f : spec.getFeatures()) { + FeatureSpecification existing = allFeatures.putIfAbsent(f.id(), spec); + if (existing != null && existing.getClass() != spec.getClass()) { + throw new IllegalArgumentException( + Strings.format("Duplicate feature - [%s] is declared by both [%s] and [%s]", f.id(), existing, spec) + ); + } + + nodeFeatures.put(f.id(), f); + } + } + + return new FeatureData(consolidateHistoricalFeatures(historicalFeatures), Map.copyOf(nodeFeatures)); + } + + private static NavigableMap<Version, Set<String>> consolidateHistoricalFeatures( + NavigableMap<Version, Set<String>> declaredHistoricalFeatures + ) { + // update each version by adding in all features from previous versions + Set<String> featureAggregator = new HashSet<>(); + for (Map.Entry<Version, Set<String>> versions : declaredHistoricalFeatures.entrySet()) { + featureAggregator.addAll(versions.getValue()); + versions.setValue(Set.copyOf(featureAggregator)); + } + + return Collections.unmodifiableNavigableMap(declaredHistoricalFeatures); + } + + public NavigableMap<Version, Set<String>> getHistoricalFeatures() { + return historicalFeatures; + } + + public Map<String, NodeFeature> getNodeFeatures() { + return nodeFeatures; + } +} diff --git a/server/src/main/java/org/elasticsearch/features/FeatureService.java b/server/src/main/java/org/elasticsearch/features/FeatureService.java index 5d7632a91b0b8..1d60627656b9e 100644 ---
a/server/src/main/java/org/elasticsearch/features/FeatureService.java +++ b/server/src/main/java/org/elasticsearch/features/FeatureService.java @@ -10,19 +10,14 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.common.Strings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.NavigableMap; import java.util.Set; -import java.util.TreeMap; /** * Manages information on the features supported by nodes in the cluster @@ -39,72 +34,21 @@ public class FeatureService { public static final Version CLUSTER_FEATURES_ADDED_VERSION = Version.V_8_12_0; private final NavigableMap> historicalFeatures; - private final Set nodeFeatures; + private final Map nodeFeatures; public FeatureService(List specs) { - Map allFeatures = new HashMap<>(); - NavigableMap> historicalFeatures = new TreeMap<>(); - Set nodeFeatures = new HashSet<>(); - for (FeatureSpecification spec : specs) { - for (var hfe : spec.getHistoricalFeatures().entrySet()) { - FeatureSpecification existing = allFeatures.putIfAbsent(hfe.getKey().id(), spec); - // the same SPI class can be loaded multiple times if it's in the base classloader - if (existing != null && existing.getClass() != spec.getClass()) { - throw new IllegalArgumentException( - Strings.format("Duplicate feature - [%s] is declared by both [%s] and [%s]", hfe.getKey().id(), existing, spec) - ); - } + var featureData = FeatureData.createFromSpecifications(specs); + nodeFeatures = featureData.getNodeFeatures(); + historicalFeatures = featureData.getHistoricalFeatures(); - if (hfe.getValue().onOrAfter(CLUSTER_FEATURES_ADDED_VERSION)) { - throw new IllegalArgumentException( - Strings.format( - "Historical feature [%s] declared by [%s] for version [%s] is not a historical version", - hfe.getKey().id(), - spec, - hfe.getValue() - ) - ); - } - - historicalFeatures.computeIfAbsent(hfe.getValue(), k -> new HashSet<>()).add(hfe.getKey().id()); - } - - for (NodeFeature f : spec.getFeatures()) { - FeatureSpecification existing = allFeatures.putIfAbsent(f.id(), spec); - if (existing != null && existing.getClass() != spec.getClass()) { - throw new IllegalArgumentException( - Strings.format("Duplicate feature - [%s] is declared by both [%s] and [%s]", f.id(), existing, spec) - ); - } - - nodeFeatures.add(f.id()); - } - } - - this.historicalFeatures = consolidateHistoricalFeatures(historicalFeatures); - this.nodeFeatures = Set.copyOf(nodeFeatures); - - logger.info("Registered local node features {}", nodeFeatures.stream().sorted().toList()); - } - - private static NavigableMap> consolidateHistoricalFeatures( - NavigableMap> declaredHistoricalFeatures - ) { - // update each version by adding in all features from previous versions - Set featureAggregator = new HashSet<>(); - for (Map.Entry> versions : declaredHistoricalFeatures.entrySet()) { - featureAggregator.addAll(versions.getValue()); - versions.setValue(Set.copyOf(featureAggregator)); - } - - return Collections.unmodifiableNavigableMap(declaredHistoricalFeatures); + logger.info("Registered local node features {}", nodeFeatures.keySet().stream().sorted().toList()); } /** * The non-historical features supported by this node. 
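The FeatureService hunk above delegates all consolidation work to the new FeatureData class and widens getNodeFeatures() from a Set of feature ids to a Map keyed by id, so callers can recover the NodeFeature object itself. A before/after sketch for a consumer, using the "health.supports_health" id declared by HealthFeatures later in this diff (the class and method names here are illustrative):

import java.util.Map;

import org.elasticsearch.features.FeatureService;
import org.elasticsearch.features.NodeFeature;

public class FeatureLookupExample {
    static boolean supportsHealth(FeatureService featureService) {
        // before this change: Set<String> ids = featureService.getNodeFeatures();
        //                     return ids.contains("health.supports_health");
        Map<String, NodeFeature> features = featureService.getNodeFeatures();
        return features.containsKey("health.supports_health"); // id lookup still works, NodeFeature now available via get()
    }
}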
*/ - public Set<String> getNodeFeatures() { + public Map<String, NodeFeature> getNodeFeatures() { return nodeFeatures; } diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index a7cf7299a8502..e7b8eadb3f771 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Tuple; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.node.Node; @@ -184,7 +185,7 @@ private PersistedState createOnDiskPersistedState( long currentTerm = onDiskState.currentTerm; if (onDiskState.empty()) { - assert Version.CURRENT.major <= Version.V_7_0_0.major + 1 : "legacy metadata loader is not needed anymore from v9 onwards"; + @UpdateForV9 // legacy metadata loader is not needed anymore from v9 onwards final Tuple<Manifest, Metadata> legacyState = metaStateService.loadFullState(); if (legacyState.v1().isEmpty() == false) { metadata = legacyState.v2(); diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java index 4ba7c91d411f3..1db0ec7346a32 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -54,6 +55,7 @@ public MetaStateService(NodeEnvironment nodeEnv, NamedXContentRegistry namedXCon * meta state with globalGeneration -1 and empty meta data is returned. * @throws IOException if an IOException occurs while loading files, or there is no metadata referenced by the manifest file.
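Several hunks here, including the GatewayMetaState one above, replace assertions of the form assert Version.CURRENT.major <= Version.V_7_0_0.major + 1 with an @UpdateForV9 annotation, turning "delete this when 9.0 branches" into something findable by static search rather than a runtime assert. A plausible shape for such a marker annotation; the real definition in org.elasticsearch.core may differ in retention, targets, and javadoc:

package org.elasticsearch.core;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

/**
 * Marks code that must be revisited or removed when the major version becomes 9.
 */
@Retention(RetentionPolicy.SOURCE)
@Target({ ElementType.TYPE, ElementType.METHOD, ElementType.FIELD, ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE })
public @interface UpdateForV9 {
}

The LOCAL_VARIABLE, METHOD, and FIELD targets are consistent with the usages in this diff (a local in GatewayMetaState, a method in MetaStateService, a field in Engine); the remaining targets are guesses.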
*/ + @UpdateForV9 public Tuple loadFullState() throws IOException { final Manifest manifest = Manifest.FORMAT.loadLatestState(logger, namedXContentRegistry, nodeEnv.nodeDataPaths()); if (manifest == null) { diff --git a/server/src/main/java/org/elasticsearch/health/GetHealthAction.java b/server/src/main/java/org/elasticsearch/health/GetHealthAction.java index 0e4722a872c4e..c1efa58a50c86 100644 --- a/server/src/main/java/org/elasticsearch/health/GetHealthAction.java +++ b/server/src/main/java/org/elasticsearch/health/GetHealthAction.java @@ -67,6 +67,12 @@ public Response(final ClusterName clusterName, final List } } + public Response(final ClusterName clusterName, final List indicators, HealthStatus topLevelStatus) { + this.indicators = indicators; + this.clusterName = clusterName; + this.status = topLevelStatus; + } + public ClusterName getClusterName() { return clusterName; } diff --git a/server/src/main/java/org/elasticsearch/health/HealthFeatures.java b/server/src/main/java/org/elasticsearch/health/HealthFeatures.java index 89b6c998c8508..3a5d11f862efc 100644 --- a/server/src/main/java/org/elasticsearch/health/HealthFeatures.java +++ b/server/src/main/java/org/elasticsearch/health/HealthFeatures.java @@ -16,7 +16,7 @@ public class HealthFeatures implements FeatureSpecification { - public static final NodeFeature SUPPORTS_HEALTH = new NodeFeature("supports_health"); + public static final NodeFeature SUPPORTS_HEALTH = new NodeFeature("health.supports_health"); @Override public Map getHistoricalFeatures() { diff --git a/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java b/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java index ba8a8458b08cc..55b03ec1192c8 100644 --- a/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java +++ b/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java @@ -36,6 +36,8 @@ import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; +import static org.elasticsearch.health.HealthStatus.GREEN; + /** * This class periodically logs the results of the Health API to the standard Elasticsearch server log file. */ @@ -91,7 +93,18 @@ public class HealthPeriodicLogger implements ClusterStateListener, Closeable, Sc * @param client the client used to call the Health Service. * @param healthService the Health Service, where the actual Health API logic lives. */ - public HealthPeriodicLogger(Settings settings, ClusterService clusterService, Client client, HealthService healthService) { + public static HealthPeriodicLogger create( + Settings settings, + ClusterService clusterService, + Client client, + HealthService healthService + ) { + HealthPeriodicLogger logger = new HealthPeriodicLogger(settings, clusterService, client, healthService); + logger.registerListeners(); + return logger; + } + + private HealthPeriodicLogger(Settings settings, ClusterService clusterService, Client client, HealthService healthService) { this.settings = settings; this.clusterService = clusterService; this.client = client; @@ -101,11 +114,8 @@ public HealthPeriodicLogger(Settings settings, ClusterService clusterService, Cl this.enabled = ENABLED_SETTING.get(settings); } - /** - * Initializer method to avoid the publication of a self reference in the constructor. 
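The HealthPeriodicLogger hunk above swaps a public constructor plus init() for a private constructor plus a static create() that registers the cluster-state listener only after construction has finished, removing the this-escape the removed javadoc alludes to. A generic sketch of the create-then-register pattern with stand-in types (none of these names are Elasticsearch classes):

interface Event {}

interface Listener {
    void onEvent(Event event);
}

interface Bus {
    void addListener(Listener listener);
}

final class Service implements Listener {
    private final Bus bus;

    private Service(Bus bus) {
        // no registration here: 'this' must not escape before construction completes
        this.bus = bus;
    }

    static Service create(Bus bus) {
        Service service = new Service(bus);
        bus.addListener(service); // safe: the instance is fully initialized
        return service;
    }

    @Override
    public void onEvent(Event event) {
        // react to the event
    }
}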
- */ - public void init() { - if (this.enabled) { + private void registerListeners() { + if (enabled) { clusterService.addListener(this); } clusterService.getClusterSettings().addSettingsUpdateConsumer(ENABLED_SETTING, this::enable); @@ -194,7 +204,6 @@ static Map convertToLoggedFields(List ind // overall status final HealthStatus status = HealthStatus.merge(indicatorResults.stream().map(HealthIndicatorResult::status)); result.put(String.format(Locale.ROOT, "%s.overall.status", HEALTH_FIELD_PREFIX), status.xContentValue()); - result.put(MESSAGE_FIELD, String.format(Locale.ROOT, "health=%s", status.xContentValue())); // top-level status for each indicator indicatorResults.forEach((indicatorResult) -> { @@ -204,6 +213,18 @@ static Map convertToLoggedFields(List ind ); }); + // message field. Show the non-green indicators if they exist. + List nonGreen = indicatorResults.stream() + .filter(p -> p.status() != GREEN) + .map(HealthIndicatorResult::name) + .sorted() + .toList(); + if (nonGreen.isEmpty()) { + result.put(MESSAGE_FIELD, String.format(Locale.ROOT, "health=%s", status.xContentValue())); + } else { + result.put(MESSAGE_FIELD, String.format(Locale.ROOT, "health=%s [%s]", status.xContentValue(), String.join(",", nonGreen))); + } + return result; } diff --git a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java index b5a334e56e94c..4c2b589584bdc 100644 --- a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java +++ b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java @@ -131,8 +131,6 @@ public static class Builder { private Disk disk; private ShardLimits shardLimits; - private Builder() {} - private Builder(HealthMetadata healthMetadata) { this.disk = healthMetadata.diskMetadata; this.shardLimits = healthMetadata.shardLimitsMetadata; diff --git a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java index a6f6eb8750cac..177e4d471cf30 100644 --- a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java @@ -91,7 +91,7 @@ public abstract class AbstractHttpServerTransport extends AbstractLifecycleCompo private volatile BoundTransportAddress boundAddress; private final AtomicLong totalChannelsAccepted = new AtomicLong(); private final Map httpChannels = new ConcurrentHashMap<>(); - private final PlainActionFuture allClientsClosedListener = PlainActionFuture.newFuture(); + private final PlainActionFuture allClientsClosedListener = new PlainActionFuture<>(); private final RefCounted refCounted = AbstractRefCounted.of(() -> allClientsClosedListener.onResponse(null)); private final Set httpServerChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); private final long shutdownGracePeriodMillis; diff --git a/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java b/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java index 930b20b927bd8..24df7875f7e3d 100644 --- a/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java +++ b/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java @@ -23,7 +23,6 @@ import org.elasticsearch.rest.AbstractRestChannel; import org.elasticsearch.rest.ChunkedRestResponseBody; import org.elasticsearch.rest.LoggingChunkedRestResponseBody; -import 
org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; @@ -40,7 +39,7 @@ * The default rest channel for incoming requests. This class implements the basic logic for sending a rest * response. It will set necessary headers and ensure that bytes are released after the response is sent. */ -public class DefaultRestChannel extends AbstractRestChannel implements RestChannel { +public class DefaultRestChannel extends AbstractRestChannel { static final String CLOSE = "close"; static final String CONNECTION = "connection"; diff --git a/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java b/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java index 5e8fb556b2089..9991d42e013e3 100644 --- a/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java +++ b/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java @@ -9,13 +9,11 @@ package org.elasticsearch.index; import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; public abstract class AbstractIndexComponent { protected final Logger logger; - protected final DeprecationLogger deprecationLogger; protected final IndexSettings indexSettings; /** @@ -23,7 +21,6 @@ public abstract class AbstractIndexComponent { */ protected AbstractIndexComponent(IndexSettings indexSettings) { this.logger = Loggers.getLogger(getClass(), indexSettings.getIndex()); - this.deprecationLogger = DeprecationLogger.getLogger(getClass()); this.indexSettings = indexSettings; } diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index b038db5ac379a..2e600bbdc5ed4 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -48,6 +48,7 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.fielddata.FieldDataContext; +import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.fielddata.ordinals.GlobalOrdinalsAccounting; @@ -158,7 +159,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final IndexNameExpressionResolver expressionResolver; private final Supplier<Sort> indexSortSupplier; private final ValuesSourceRegistry valuesSourceRegistry; - private Supplier<DocumentParsingObserver> documentParsingObserverSupplier; + private final Supplier<DocumentParsingObserver> documentParsingObserverSupplier; @SuppressWarnings("this-escape") public IndexService( @@ -231,7 +232,7 @@ public IndexService( this.indexSortSupplier = () -> indexSettings.getIndexSortConfig() .buildIndexSort( mapperService::fieldType, - (fieldType, searchLookup) -> indexFieldData.getForField(fieldType, FieldDataContext.noRuntimeFields("index sort")) + (fieldType, searchLookup) -> loadFielddata(fieldType, FieldDataContext.noRuntimeFields("index sort")) ); } else { this.indexSortSupplier = () -> null; @@ -662,7 +663,7 @@ public SearchExecutionContext newSearchExecutionContext( shardRequestIndex, indexSettings, indexCache.bitsetFilterCache(), - indexFieldData::getForField, + this::loadFielddata, mapperService(), mapperService().mappingLookup(), similarityService(),
@@ -1293,4 +1294,7 @@ public static Map parseRuntimeMappings( return runtimeFieldTypes; } + public IndexFieldData loadFielddata(MappedFieldType fieldType, FieldDataContext fieldDataContext) { + return indexFieldData.getForField(fieldType, fieldDataContext); + } } diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index 6327c2ba53f54..b6bebcf6abb12 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.Version; import org.elasticsearch.core.Assertions; +import org.elasticsearch.core.UpdateForV9; import java.lang.reflect.Field; import java.util.Collection; @@ -44,6 +45,7 @@ private static IndexVersion def(int id, Version luceneVersion) { return new IndexVersion(id, luceneVersion); } + @UpdateForV9 // remove the index versions with which v9 will not need to interact public static final IndexVersion ZERO = def(0, Version.LATEST); public static final IndexVersion V_7_0_0 = def(7_00_00_99, Version.LUCENE_8_0_0); @@ -87,6 +89,7 @@ private static IndexVersion def(int id, Version luceneVersion) { public static final IndexVersion NEW_SPARSE_VECTOR = def(8_500_001, Version.LUCENE_9_7_0); public static final IndexVersion SPARSE_VECTOR_IN_FIELD_NAMES_SUPPORT = def(8_500_002, Version.LUCENE_9_7_0); public static final IndexVersion UPGRADE_LUCENE_9_8 = def(8_500_003, Version.LUCENE_9_8_0); + public static final IndexVersion ES_VERSION_8_12 = def(8_500_004, Version.LUCENE_9_8_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/index/IndexingPressure.java b/server/src/main/java/org/elasticsearch/index/IndexingPressure.java index 5a6cd66e2d5c1..d0bc8ad980dde 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexingPressure.java +++ b/server/src/main/java/org/elasticsearch/index/IndexingPressure.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -101,12 +102,14 @@ public Releasable markCoordinatingOperationStarted(int operations, long bytes, b false ); } + logger.trace(() -> Strings.format("adding [%d] coordinating operations and [%d] bytes", operations, bytes)); currentCoordinatingBytes.getAndAdd(bytes); currentCoordinatingOps.getAndAdd(operations); totalCombinedCoordinatingAndPrimaryBytes.getAndAdd(bytes); totalCoordinatingBytes.getAndAdd(bytes); totalCoordinatingOps.getAndAdd(operations); return wrapReleasable(() -> { + logger.trace(() -> Strings.format("removing [%d] coordinating operations and [%d] bytes", operations, bytes)); this.currentCombinedCoordinatingAndPrimaryBytes.getAndAdd(-bytes); this.currentCoordinatingBytes.getAndAdd(-bytes); this.currentCoordinatingOps.getAndAdd(-operations); @@ -153,12 +156,14 @@ public Releasable markPrimaryOperationStarted(int operations, long bytes, boolea false ); } + logger.trace(() -> Strings.format("adding [%d] primary operations and [%d] bytes", operations, bytes)); currentPrimaryBytes.getAndAdd(bytes); currentPrimaryOps.getAndAdd(operations); totalCombinedCoordinatingAndPrimaryBytes.getAndAdd(bytes); totalPrimaryBytes.getAndAdd(bytes); totalPrimaryOps.getAndAdd(operations); return wrapReleasable(() -> { + logger.trace(() -> 
Strings.format("removing [%d] primary operations and [%d] bytes", operations, bytes)); this.currentCombinedCoordinatingAndPrimaryBytes.getAndAdd(-bytes); this.currentPrimaryBytes.getAndAdd(-bytes); this.currentPrimaryOps.getAndAdd(-operations); diff --git a/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java b/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java index bd228db91c0e1..e6b2a861458d0 100644 --- a/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java +++ b/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java @@ -111,17 +111,33 @@ public final class MergePolicyConfig { private final Logger logger; private final boolean mergesEnabled; private volatile Type mergePolicyType; + private final ByteSizeValue defaultMaxMergedSegment; + private final ByteSizeValue defaultMaxTimeBasedMergedSegment; public static final double DEFAULT_EXPUNGE_DELETES_ALLOWED = 10d; public static final ByteSizeValue DEFAULT_FLOOR_SEGMENT = new ByteSizeValue(2, ByteSizeUnit.MB); public static final int DEFAULT_MAX_MERGE_AT_ONCE = 10; public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB); + public static final Setting<ByteSizeValue> DEFAULT_MAX_MERGED_SEGMENT_SETTING = Setting.byteSizeSetting( + "indices.merge.policy.max_merged_segment", + DEFAULT_MAX_MERGED_SEGMENT, + ByteSizeValue.ofBytes(1L), + ByteSizeValue.ofBytes(Long.MAX_VALUE), + Setting.Property.NodeScope + ); /** * Time-based data generally gets rolled over, so there is not much value in enforcing a maximum segment size, which has the side effect * of merging fewer segments together than the merge factor, which in turn increases write amplification. So we set an arbitrarily high * roof that serves as a protection that we expect to never hit. */ public static final ByteSizeValue DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT = new ByteSizeValue(100, ByteSizeUnit.GB); + public static final Setting<ByteSizeValue> DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT_SETTING = Setting.byteSizeSetting( + "indices.merge.policy.max_time_based_merged_segment", + DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT, + ByteSizeValue.ofBytes(1L), + ByteSizeValue.ofBytes(Long.MAX_VALUE), + Setting.Property.NodeScope + ); public static final double DEFAULT_SEGMENTS_PER_TIER = 10.0d; /** * A default value for {@link LogByteSizeMergePolicy}'s merge factor: 32. This default value differs from the Lucene default of 10 in * @@ -262,8 +278,8 @@ MergePolicy getMergePolicy(MergePolicyConfig config, boolean isTimeBasedIndex) { double forceMergeDeletesPctAllowed = indexSettings.getValue(INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING); // percentage ByteSizeValue floorSegment = indexSettings.getValue(INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING); int maxMergeAtOnce = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING); - // TODO is this really a good default number for max_merge_segment, what happens for large indices, - // won't they end up with many segments?
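The two settings added above are node-scope: an operator can change the default ceiling for merged segments once per node, while the per-index index.merge.policy.max_merged_segment setting still takes precedence when explicitly set (a value of 0 there means "use the default", as the setMaxMergedSegment hunk below shows). A small sketch of reading such a bounded byte-size setting; the 4gb override is an illustrative value:

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.index.MergePolicyConfig;

public class MergeDefaultsExample {
    public static void main(String[] args) {
        Settings nodeSettings = Settings.builder()
            .put("indices.merge.policy.max_merged_segment", "4gb") // hypothetical operator override
            .build();

        // falls back to DEFAULT_MAX_MERGED_SEGMENT (5gb) when the key is absent, and the
        // bounds declared above reject values outside [1 byte, Long.MAX_VALUE]
        ByteSizeValue maxMergedSegment = MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT_SETTING.get(nodeSettings);
        System.out.println(maxMergedSegment);
    }
}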
+ this.defaultMaxMergedSegment = DEFAULT_MAX_MERGED_SEGMENT_SETTING.get(indexSettings.getNodeSettings()); + this.defaultMaxTimeBasedMergedSegment = DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT_SETTING.get(indexSettings.getNodeSettings()); ByteSizeValue maxMergedSegment = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING); double segmentsPerTier = indexSettings.getValue(INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING); int mergeFactor = indexSettings.getValue(INDEX_MERGE_POLICY_MERGE_FACTOR_SETTING); @@ -315,8 +331,8 @@ void setMergeFactor(int mergeFactor) { void setMaxMergedSegment(ByteSizeValue maxMergedSegment) { // We use 0 as a placeholder for "unset". if (maxMergedSegment.getBytes() == 0) { - tieredMergePolicy.setMaxMergedSegmentMB(DEFAULT_MAX_MERGED_SEGMENT.getMbFrac()); - timeBasedMergePolicy.setMaxMergeMB(DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT.getMbFrac()); + tieredMergePolicy.setMaxMergedSegmentMB(defaultMaxMergedSegment.getMbFrac()); + timeBasedMergePolicy.setMaxMergeMB(defaultMaxTimeBasedMergedSegment.getMbFrac()); } else { tieredMergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); timeBasedMergePolicy.setMaxMergeMB(maxMergedSegment.getMbFrac()); diff --git a/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java b/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java index 7dd605c4c8a73..e19ee050c93a7 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java @@ -40,6 +40,7 @@ import org.apache.lucene.analysis.pt.PortugueseAnalyzer; import org.apache.lucene.analysis.ro.RomanianAnalyzer; import org.apache.lucene.analysis.ru.RussianAnalyzer; +import org.apache.lucene.analysis.sr.SerbianAnalyzer; import org.apache.lucene.analysis.sv.SwedishAnalyzer; import org.apache.lucene.analysis.th.ThaiAnalyzer; import org.apache.lucene.analysis.tr.TurkishAnalyzer; @@ -129,6 +130,7 @@ public static CharArraySet parseStemExclusion(Settings settings, CharArraySet de entry("_portuguese_", PortugueseAnalyzer.getDefaultStopSet()), entry("_romanian_", RomanianAnalyzer.getDefaultStopSet()), entry("_russian_", RussianAnalyzer.getDefaultStopSet()), + entry("_serbian_", SerbianAnalyzer.getDefaultStopSet()), entry("_sorani_", SoraniAnalyzer.getDefaultStopSet()), entry("_spanish_", SpanishAnalyzer.getDefaultStopSet()), entry("_swedish_", SwedishAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index a496429cc3e2b..43437529cd301 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -48,6 +48,7 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.DocumentParser; @@ -100,7 +101,8 @@ public abstract class Engine implements Closeable { - public static final String SYNC_COMMIT_ID = "sync_id"; // TODO: Remove sync_id in 9.0 + @UpdateForV9 // TODO: Remove sync_id in 9.0 + public static final String SYNC_COMMIT_ID = "sync_id"; public static final String HISTORY_UUID_KEY = "history_uuid"; public static final String FORCE_MERGE_UUID_KEY = "force_merge_uuid"; public static final String MIN_RETAINED_SEQNO = 
"min_retained_seq_no"; @@ -1140,7 +1142,7 @@ public void externalRefresh(String source, ActionListener */ // TODO: Remove or rename for increased clarity public void flush(boolean force, boolean waitIfOngoing) throws EngineException { - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); flush(force, waitIfOngoing, future); future.actionGet(); } @@ -1167,7 +1169,7 @@ public void flush(boolean force, boolean waitIfOngoing) throws EngineException { * a lucene commit if nothing needs to be committed. */ public final void flush() throws EngineException { - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); flush(false, false, future); future.actionGet(); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index d217f6b844fe8..6cdd86ce6c9a7 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -2469,7 +2469,7 @@ public IndexCommitRef acquireLastIndexCommit(final boolean flushFirst) throws En if (flushFirst) { logger.trace("start flush for snapshot"); // TODO: Split acquireLastIndexCommit into two apis one with blocking flushes one without - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); flush(false, true, future); future.actionGet(); logger.trace("finish flush for snapshot"); diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java index 76463807942a2..dbc3aadde2e9f 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java @@ -65,7 +65,7 @@ private NumericDoubleValues getNumericDocValues(LeafReaderContext context, doubl final BitSet rootDocs = nested.rootDocs(context); final DocIdSetIterator innerDocs = nested.innerDocs(context); final int maxChildren = nested.getNestedSort() != null ? nested.getNestedSort().getMaxChildren() : Integer.MAX_VALUE; - return sortMode.select(values, missingValue, rootDocs, innerDocs, context.reader().maxDoc(), maxChildren); + return sortMode.select(values, missingValue, rootDocs, innerDocs, maxChildren); } } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java index 4b8351f430e05..5dbcafcbdb5b8 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java @@ -60,7 +60,7 @@ private NumericDoubleValues getNumericDocValues(LeafReaderContext context, float final BitSet rootDocs = nested.rootDocs(context); final DocIdSetIterator innerDocs = nested.innerDocs(context); final int maxChildren = nested.getNestedSort() != null ? 
nested.getNestedSort().getMaxChildren() : Integer.MAX_VALUE; - return sortMode.select(values, missingValue, rootDocs, innerDocs, context.reader().maxDoc(), maxChildren); + return sortMode.select(values, missingValue, rootDocs, innerDocs, maxChildren); } } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java index 2e894ea304fdc..340af1c1f7347 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java @@ -10,16 +10,18 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.FilterLeafReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.OrdinalMap; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.packed.PackedInts; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData; import org.elasticsearch.index.fielddata.LeafOrdinalsFieldData; import org.elasticsearch.index.fielddata.plain.AbstractLeafOrdinalsFieldData; -import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.script.field.ToScriptFieldFactory; import java.io.IOException; @@ -37,7 +39,7 @@ public enum GlobalOrdinalsBuilder { public static IndexOrdinalsFieldData build( final IndexReader indexReader, IndexOrdinalsFieldData indexFieldData, - CircuitBreakerService breakerService, + CircuitBreaker breaker, Logger logger, ToScriptFieldFactory toScriptFieldFactory ) throws IOException { @@ -50,9 +52,26 @@ public static IndexOrdinalsFieldData build( atomicFD[i] = indexFieldData.load(indexReader.leaves().get(i)); subs[i] = atomicFD[i].getOrdinalsValues(); } - final OrdinalMap ordinalMap = OrdinalMap.build(null, subs, PackedInts.DEFAULT); + final TermsEnum[] termsEnums = new TermsEnum[subs.length]; + final long[] weights = new long[subs.length]; + // we assume that TermsEnum are visited sequentially, so we can share the counter between them + final long[] counter = new long[1]; + for (int i = 0; i < subs.length; ++i) { + termsEnums[i] = new FilterLeafReader.FilterTermsEnum(subs[i].termsEnum()) { + @Override + public BytesRef next() throws IOException { + // check parent circuit breaker every 65536 calls + if ((counter[0]++ & 0xFFFF) == 0) { + breaker.addEstimateBytesAndMaybeBreak(0L, "Global Ordinals"); + } + return in.next(); + } + }; + weights[i] = subs[i].getValueCount(); + } + final OrdinalMap ordinalMap = OrdinalMap.build(null, termsEnums, weights, PackedInts.DEFAULT); final long memorySizeInBytes = ordinalMap.ramBytesUsed(); - breakerService.getBreaker(CircuitBreaker.FIELDDATA).addWithoutBreaking(memorySizeInBytes); + breaker.addWithoutBreaking(memorySizeInBytes); TimeValue took = new TimeValue(System.nanoTime() - startTimeNS, TimeUnit.NANOSECONDS); if (logger.isDebugEnabled()) { @@ -108,5 +127,4 @@ public void close() {} took ); } - } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java index 610f4a19f1a52..b4b15a481411d 100644 --- 
a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java @@ -17,6 +17,7 @@ import org.apache.lucene.index.TermsEnum; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData; import org.elasticsearch.index.fielddata.LeafOrdinalsFieldData; @@ -28,6 +29,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; +import java.util.concurrent.ExecutionException; public abstract class AbstractIndexOrdinalsFieldData implements IndexOrdinalsFieldData { private static final Logger logger = LogManager.getLogger(AbstractIndexOrdinalsFieldData.class); @@ -82,6 +84,8 @@ public LeafOrdinalsFieldData load(LeafReaderContext context) { } catch (Exception e) { if (e instanceof ElasticsearchException) { throw (ElasticsearchException) e; + } else if (e instanceof ExecutionException && e.getCause() instanceof ElasticsearchException) { + throw (ElasticsearchException) e.getCause(); } else { throw new ElasticsearchException(e); } @@ -128,6 +132,8 @@ private IndexOrdinalsFieldData loadGlobalInternal(DirectoryReader indexReader) { } catch (Exception e) { if (e instanceof ElasticsearchException) { throw (ElasticsearchException) e; + } else if (e instanceof ExecutionException && e.getCause() instanceof ElasticsearchException) { + throw (ElasticsearchException) e.getCause(); } else { throw new ElasticsearchException(e); } @@ -136,7 +142,13 @@ private IndexOrdinalsFieldData loadGlobalInternal(DirectoryReader indexReader) { @Override public IndexOrdinalsFieldData loadGlobalDirect(DirectoryReader indexReader) throws Exception { - return GlobalOrdinalsBuilder.build(indexReader, this, breakerService, logger, toScriptFieldFactory); + return GlobalOrdinalsBuilder.build( + indexReader, + this, + breakerService.getBreaker(CircuitBreaker.FIELDDATA), + logger, + toScriptFieldFactory + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/fieldvisitor/StoredFieldLoader.java b/server/src/main/java/org/elasticsearch/index/fieldvisitor/StoredFieldLoader.java index 4f31f1fa5db8d..f04f646ff23bc 100644 --- a/server/src/main/java/org/elasticsearch/index/fieldvisitor/StoredFieldLoader.java +++ b/server/src/main/java/org/elasticsearch/index/fieldvisitor/StoredFieldLoader.java @@ -72,6 +72,28 @@ public List fieldsToLoad() { }; } + /** + * Creates a new StoredFieldLoader using a StoredFieldsSpec that is optimized + * for loading documents in order. 
+ */ + public static StoredFieldLoader fromSpecSequential(StoredFieldsSpec spec) { + if (spec.noRequirements()) { + return StoredFieldLoader.empty(); + } + List<String> fieldsToLoad = fieldsToLoad(spec.requiresSource(), spec.requiredStoredFields()); + return new StoredFieldLoader() { + @Override + public LeafStoredFieldLoader getLoader(LeafReaderContext ctx, int[] docs) throws IOException { + return new ReaderStoredFieldLoader(sequentialReader(ctx), spec.requiresSource(), spec.requiredStoredFields()); + } + + @Override + public List<String> fieldsToLoad() { + return fieldsToLoad; + } + }; + } + /** * Creates a StoredFieldLoader tuned for sequential reads of _source */ diff --git a/server/src/main/java/org/elasticsearch/index/get/GetResult.java b/server/src/main/java/org/elasticsearch/index/get/GetResult.java index 7d542d1e35275..52fc13abf200f 100644 --- a/server/src/main/java/org/elasticsearch/index/get/GetResult.java +++ b/server/src/main/java/org/elasticsearch/index/get/GetResult.java @@ -52,17 +52,16 @@ public class GetResult implements Writeable, Iterable<DocumentField>, ToXContent private static final String FOUND = "found"; private static final String FIELDS = "fields"; - private String index; - private String id; - private long version; - private long seqNo; - private long primaryTerm; - private boolean exists; + private final String index; + private final String id; + private final long version; + private final long seqNo; + private final long primaryTerm; + private final boolean exists; private final Map<String, DocumentField> documentFields; private final Map<String, DocumentField> metaFields; private Map<String, Object> sourceAsMap; private BytesReference source; - private byte[] sourceAsBytes; public GetResult(StreamInput in) throws IOException { index = in.readString(); @@ -155,20 +154,6 @@ public long getPrimaryTerm() { return primaryTerm; } - /** - * The source of the document if exists. - */ - public byte[] source() { - if (source == null) { - return null; - } - if (sourceAsBytes != null) { - return sourceAsBytes; - } - this.sourceAsBytes = BytesReference.toBytes(sourceRef()); - return this.sourceAsBytes; - } - /** * Returns the bytes reference, also uncompressing the source if needed.
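With the GetResult hunk above, the class becomes effectively immutable: the lazily cached byte[] accessor source() is gone, and (in the next hunk) the getSource() alias goes with it. Callers move to sourceRef() and sourceAsMap(); a migration sketch, where getResult stands for any GetResult instance:

import java.util.Map;

import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.index.get.GetResult;

public class GetResultMigration {
    void migrate(GetResult getResult) {
        // before: byte[] bytes = getResult.source();
        byte[] bytes = BytesReference.toBytes(getResult.sourceRef()); // only if a raw array is truly needed

        // before: Map<String, Object> source = getResult.getSource();
        Map<String, Object> source = getResult.sourceAsMap();
    }
}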
*/ @@ -229,10 +214,6 @@ public Map<String, Object> sourceAsMap() throws ElasticsearchParseException { return sourceAsMap; } - public Map<String, Object> getSource() { - return sourceAsMap(); - } - public Map<String, DocumentField> getMetadataFields() { return metaFields; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java index 90a295e5a25f2..11e57e030dfe7 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java @@ -8,6 +8,7 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.index.DocValues; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; @@ -15,171 +16,101 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.UnicodeUtil; -import org.elasticsearch.core.CheckedFunction; -import org.elasticsearch.index.fielddata.SortedBinaryDocValues; +import org.elasticsearch.common.io.stream.ByteArrayStreamInput; +import org.elasticsearch.index.mapper.BlockLoader.BlockFactory; import org.elasticsearch.index.mapper.BlockLoader.BooleanBuilder; import org.elasticsearch.index.mapper.BlockLoader.Builder; -import org.elasticsearch.index.mapper.BlockLoader.BuilderFactory; import org.elasticsearch.index.mapper.BlockLoader.BytesRefBuilder; import org.elasticsearch.index.mapper.BlockLoader.Docs; import org.elasticsearch.index.mapper.BlockLoader.DoubleBuilder; import org.elasticsearch.index.mapper.BlockLoader.IntBuilder; import org.elasticsearch.index.mapper.BlockLoader.LongBuilder; +import org.elasticsearch.search.fetch.StoredFieldsSpec; import java.io.IOException; /** * A reader that supports reading doc-values from a Lucene segment in Block fashion. */ -public abstract class BlockDocValuesReader { - public interface Factory { - BlockDocValuesReader build(int segment) throws IOException; - - boolean supportsOrdinals(); - - SortedSetDocValues ordinals(int segment) throws IOException; - } - - protected final Thread creationThread; +public abstract class BlockDocValuesReader implements BlockLoader.AllReader { + private final Thread creationThread; public BlockDocValuesReader() { this.creationThread = Thread.currentThread(); } - /** - * Returns the current doc that this reader is on. - */ - public abstract int docID(); + protected abstract int docId(); /** - * The {@link BlockLoader.Builder} for data of this type. + * Checks if the reader can be used to read a range of documents starting with the given docID by the current thread.
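The canReuse javadoc above describes the reuse contract that the BlockDocValuesReader hierarchy enforces: a reader may only be reused by the thread that created it, and only for a range that starts at or after its current position, because doc-values iterators advance forward only. A toy restatement of that rule, mirroring the implementation that follows:

final class ReuseCheck {
    private final Thread creationThread = Thread.currentThread();
    private int docId = -1; // position of the last document read

    boolean canReuse(int startingDocID) {
        // single-threaded and forward-only: doc-values iterators cannot seek backwards
        return creationThread == Thread.currentThread() && docId <= startingDocID;
    }
}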
*/ - public abstract Builder builder(BuilderFactory factory, int expectedCount); + @Override + public final boolean canReuse(int startingDocID) { + return creationThread == Thread.currentThread() && docId() <= startingDocID; + } - /** - * Reads the values of the given documents specified in the input block - */ - public abstract BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException; + @Override + public abstract String toString(); - /** - * Reads the values of the given document into the builder - */ - public abstract void readValuesFromSingleDoc(int docId, Builder builder) throws IOException; + public abstract static class DocValuesBlockLoader implements BlockLoader { + public abstract AllReader reader(LeafReaderContext context) throws IOException; - /** - * Checks if the reader can be used to read a range documents starting with the given docID by the current thread. - */ - public static boolean canReuse(BlockDocValuesReader reader, int startingDocID) { - return reader != null && reader.creationThread == Thread.currentThread() && reader.docID() <= startingDocID; - } + @Override + public final ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) throws IOException { + return reader(context); + } - public static BlockLoader booleans(String fieldName) { - return context -> { - SortedNumericDocValues docValues = DocValues.getSortedNumeric(context.reader(), fieldName); - NumericDocValues singleton = DocValues.unwrapSingleton(docValues); - if (singleton != null) { - return new SingletonBooleans(singleton); - } - return new Booleans(docValues); - }; - } + @Override + public final RowStrideReader rowStrideReader(LeafReaderContext context) throws IOException { + return reader(context); + } - public static BlockLoader bytesRefsFromOrds(String fieldName) { - return new BlockLoader() { - @Override - public BlockDocValuesReader reader(LeafReaderContext context) throws IOException { - SortedSetDocValues docValues = ordinals(context); - SortedDocValues singleton = DocValues.unwrapSingleton(docValues); - if (singleton != null) { - return new SingletonOrdinals(singleton); - } - return new Ordinals(docValues); - } + @Override + public final StoredFieldsSpec rowStrideStoredFieldSpec() { + return StoredFieldsSpec.NO_REQUIREMENTS; + } - @Override - public boolean supportsOrdinals() { - return true; - } + @Override + public boolean supportsOrdinals() { + return false; + } - @Override - public SortedSetDocValues ordinals(LeafReaderContext context) throws IOException { - return DocValues.getSortedSet(context.reader(), fieldName); - } - }; + @Override + public SortedSetDocValues ordinals(LeafReaderContext context) throws IOException { + throw new UnsupportedOperationException(); + } } - /** - * Load {@link BytesRef} values from doc values. Prefer {@link #bytesRefsFromOrds} if - * doc values are indexed with ordinals because that's generally much faster. It's - * possible to use this with field data, but generally should be avoided because field - * data has higher per invocation overhead. - */ - public static BlockLoader bytesRefsFromDocValues(CheckedFunction fieldData) { - return context -> new Bytes(fieldData.apply(context)); - } + public static class LongsBlockLoader extends DocValuesBlockLoader { + private final String fieldName; - /** - * Convert from the stored {@link long} into the {@link double} to load. - * Sadly, this will go megamorphic pretty quickly and slow us down, - * but it gets the job done for now. 
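The canReuse contract above (same creating thread, never moving backwards) implies a reuse pattern like the following hedged sketch; prev and firstDoc are illustrative caller state, not names from this diff:

    BlockLoader.ColumnAtATimeReader reader = (prev != null && prev.canReuse(firstDoc))
        ? prev                                           // same thread, moving forward: keep it
        : loader.columnAtATimeReader(leafReaderContext); // otherwise build a fresh reader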
- */ - public interface ToDouble { - double convert(long v); - } + public LongsBlockLoader(String fieldName) { + this.fieldName = fieldName; + } - /** - * Load {@code double} values from doc values. - */ - public static BlockLoader doubles(String fieldName, ToDouble toDouble) { - return context -> { - SortedNumericDocValues docValues = DocValues.getSortedNumeric(context.reader(), fieldName); - NumericDocValues singleton = DocValues.unwrapSingleton(docValues); - if (singleton != null) { - return new SingletonDoubles(singleton, toDouble); - } - return new Doubles(docValues, toDouble); - }; - } + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.longs(expectedCount); + } - /** - * Load {@code int} values from doc values. - */ - public static BlockLoader ints(String fieldName) { - return context -> { - SortedNumericDocValues docValues = DocValues.getSortedNumeric(context.reader(), fieldName); - NumericDocValues singleton = DocValues.unwrapSingleton(docValues); - if (singleton != null) { - return new SingletonInts(singleton); + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + SortedNumericDocValues docValues = context.reader().getSortedNumericDocValues(fieldName); + if (docValues != null) { + NumericDocValues singleton = DocValues.unwrapSingleton(docValues); + if (singleton != null) { + return new SingletonLongs(singleton); + } + return new Longs(docValues); } - return new Ints(docValues); - }; - } - - /** - * Load a block of {@code long}s from doc values. - */ - public static BlockLoader longs(String fieldName) { - return context -> { - SortedNumericDocValues docValues = DocValues.getSortedNumeric(context.reader(), fieldName); - NumericDocValues singleton = DocValues.unwrapSingleton(docValues); + NumericDocValues singleton = context.reader().getNumericDocValues(fieldName); if (singleton != null) { return new SingletonLongs(singleton); } - return new Longs(docValues); - }; - } - - /** - * Load blocks with only null. 
- */ - public static BlockLoader nulls() { - return context -> new Nulls(); + return new ConstantNullsReader(); + } } - @Override - public abstract String toString(); - private static class SingletonLongs extends BlockDocValuesReader { private final NumericDocValues numericDocValues; @@ -188,13 +119,8 @@ private static class SingletonLongs extends BlockDocValuesReader { } @Override - public BlockLoader.LongBuilder builder(BuilderFactory factory, int expectedCount) { - return factory.longsFromDocValues(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { - try (BlockLoader.LongBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { + try (BlockLoader.LongBuilder builder = factory.longsFromDocValues(docs.count())) { int lastDoc = -1; for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); @@ -213,7 +139,7 @@ public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IO } @Override - public void readValuesFromSingleDoc(int docId, Builder builder) throws IOException { + public void read(int docId, BlockLoader.StoredFields storedFields, Builder builder) throws IOException { BlockLoader.LongBuilder blockBuilder = (BlockLoader.LongBuilder) builder; if (numericDocValues.advanceExact(docId)) { blockBuilder.appendLong(numericDocValues.longValue()); @@ -223,13 +149,13 @@ public void readValuesFromSingleDoc(int docId, Builder builder) throws IOExcepti } @Override - public int docID() { + public int docId() { return numericDocValues.docID(); } @Override public String toString() { - return "SingletonLongs"; + return "BlockDocValuesReader.SingletonLongs"; } } @@ -242,13 +168,8 @@ private static class Longs extends BlockDocValuesReader { } @Override - public BlockLoader.LongBuilder builder(BuilderFactory factory, int expectedCount) { - return factory.longsFromDocValues(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { - try (BlockLoader.LongBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { + try (BlockLoader.LongBuilder builder = factory.longsFromDocValues(docs.count())) { for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); if (doc < this.docID) { @@ -261,7 +182,7 @@ public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IO } @Override - public void readValuesFromSingleDoc(int docId, Builder builder) throws IOException { + public void read(int docId, BlockLoader.StoredFields storedFields, Builder builder) throws IOException { read(docId, (LongBuilder) builder); } @@ -284,14 +205,44 @@ private void read(int doc, LongBuilder builder) throws IOException { } @Override - public int docID() { + public int docId() { // There is a .docID on the numericDocValues but it is often not implemented. 
return docID; } @Override public String toString() { - return "Longs"; + return "BlockDocValuesReader.Longs"; + } + } + + public static class IntsBlockLoader extends DocValuesBlockLoader { + private final String fieldName; + + public IntsBlockLoader(String fieldName) { + this.fieldName = fieldName; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.ints(expectedCount); + } + + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + SortedNumericDocValues docValues = context.reader().getSortedNumericDocValues(fieldName); + if (docValues != null) { + NumericDocValues singleton = DocValues.unwrapSingleton(docValues); + if (singleton != null) { + return new SingletonInts(singleton); + } + return new Ints(docValues); + } + NumericDocValues singleton = context.reader().getNumericDocValues(fieldName); + if (singleton != null) { + return new SingletonInts(singleton); + } + return new ConstantNullsReader(); } } @@ -303,13 +254,8 @@ private static class SingletonInts extends BlockDocValuesReader { } @Override - public IntBuilder builder(BuilderFactory factory, int expectedCount) { - return factory.intsFromDocValues(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { - try (BlockLoader.IntBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { + try (BlockLoader.IntBuilder builder = factory.intsFromDocValues(docs.count())) { int lastDoc = -1; for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); @@ -328,7 +274,7 @@ public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IO } @Override - public void readValuesFromSingleDoc(int docId, Builder builder) throws IOException { + public void read(int docId, BlockLoader.StoredFields storedFields, Builder builder) throws IOException { IntBuilder blockBuilder = (IntBuilder) builder; if (numericDocValues.advanceExact(docId)) { blockBuilder.appendInt(Math.toIntExact(numericDocValues.longValue())); @@ -338,13 +284,13 @@ public void readValuesFromSingleDoc(int docId, Builder builder) throws IOExcepti } @Override - public int docID() { + public int docId() { return numericDocValues.docID(); } @Override public String toString() { - return "SingletonInts"; + return "BlockDocValuesReader.SingletonInts"; } } @@ -357,13 +303,8 @@ private static class Ints extends BlockDocValuesReader { } @Override - public IntBuilder builder(BuilderFactory factory, int expectedCount) { - return factory.intsFromDocValues(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { - try (BlockLoader.IntBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { + try (BlockLoader.IntBuilder builder = factory.intsFromDocValues(docs.count())) { for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); if (doc < this.docID) { @@ -376,7 +317,7 @@ public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IO } @Override - public void readValuesFromSingleDoc(int docId, Builder builder) throws IOException { + public void read(int docId, BlockLoader.StoredFields storedFields, Builder builder) throws IOException { read(docId, (IntBuilder) builder); } @@ -399,14 +340,55 @@ private void read(int doc, IntBuilder builder) throws IOException { } @Override - public int 
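A hedged end-to-end sketch of the column-at-a-time path through one of these loaders; the field name is illustrative and blockFactory/docs are assumed to come from the compute engine:

    static void columnExample(LeafReaderContext ctx, BlockLoader.BlockFactory blockFactory, BlockLoader.Docs docs)
        throws IOException {
        BlockLoader loader = new BlockDocValuesReader.IntsBlockLoader("status_code");
        BlockLoader.ColumnAtATimeReader reader = loader.columnAtATimeReader(ctx);
        try (BlockLoader.Block block = reader.read(blockFactory, docs)) {
            // one entry per position in docs: a value, a multi-value group, or null
        }
    }

Note the fallback chain in reader(): sorted-numeric doc values, then a plain numeric singleton, then ConstantNullsReader, so a field missing from a segment yields null blocks instead of an error.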
docID() { - // There is a .docID on on the numericDocValues but it is often not implemented. + public int docId() { + // There is a .docID on the numericDocValues but it is often not implemented. return docID; } @Override public String toString() { - return "Ints"; + return "BlockDocValuesReader.Ints"; + } + } + + /** + * Convert from the stored {@link long} into the {@link double} to load. + * Sadly, this will go megamorphic pretty quickly and slow us down, + * but it gets the job done for now. + */ + public interface ToDouble { + double convert(long v); + } + + public static class DoublesBlockLoader extends DocValuesBlockLoader { + private final String fieldName; + private final ToDouble toDouble; + + public DoublesBlockLoader(String fieldName, ToDouble toDouble) { + this.fieldName = fieldName; + this.toDouble = toDouble; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.doubles(expectedCount); + } + + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + SortedNumericDocValues docValues = context.reader().getSortedNumericDocValues(fieldName); + if (docValues != null) { + NumericDocValues singleton = DocValues.unwrapSingleton(docValues); + if (singleton != null) { + return new SingletonDoubles(singleton, toDouble); + } + return new Doubles(docValues, toDouble); + } + NumericDocValues singleton = context.reader().getNumericDocValues(fieldName); + if (singleton != null) { + return new SingletonDoubles(singleton, toDouble); + } + return new ConstantNullsReader(); } } @@ -421,13 +403,8 @@ private static class SingletonDoubles extends BlockDocValuesReader { } @Override - public DoubleBuilder builder(BuilderFactory factory, int expectedCount) { - return factory.doublesFromDocValues(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { - try (BlockLoader.DoubleBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { + try (BlockLoader.DoubleBuilder builder = factory.doublesFromDocValues(docs.count())) { int lastDoc = -1; for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); @@ -447,7 +424,7 @@ public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IO } @Override - public void readValuesFromSingleDoc(int docId, Builder builder) throws IOException { + public void read(int docId, BlockLoader.StoredFields storedFields, Builder builder) throws IOException { this.docID = docId; DoubleBuilder blockBuilder = (DoubleBuilder) builder; if (docValues.advanceExact(this.docID)) { @@ -458,13 +435,13 @@ public void readValuesFromSingleDoc(int docId, Builder builder) throws IOExcepti } @Override - public int docID() { + public int docId() { return docID; } @Override public String toString() { - return "SingletonDoubles"; + return "BlockDocValuesReader.SingletonDoubles"; } } @@ -479,13 +456,8 @@ private static class Doubles extends BlockDocValuesReader { } @Override - public DoubleBuilder builder(BuilderFactory factory, int expectedCount) { - return factory.doublesFromDocValues(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { - try (BlockLoader.DoubleBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { + try (BlockLoader.DoubleBuilder builder = 
factory.doublesFromDocValues(docs.count())) { for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); if (doc < this.docID) { @@ -498,7 +470,7 @@ public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IO } @Override - public void readValuesFromSingleDoc(int docId, Builder builder) throws IOException { + public void read(int docId, BlockLoader.StoredFields storedFields, Builder builder) throws IOException { read(docId, (DoubleBuilder) builder); } @@ -521,13 +493,58 @@ private void read(int doc, DoubleBuilder builder) throws IOException { } @Override - public int docID() { + public int docId() { return docID; } @Override public String toString() { - return "Doubles"; + return "BlockDocValuesReader.Doubles"; + } + } + + public static class BytesRefsFromOrdsBlockLoader extends DocValuesBlockLoader { + private final String fieldName; + + public BytesRefsFromOrdsBlockLoader(String fieldName) { + this.fieldName = fieldName; + } + + @Override + public BytesRefBuilder builder(BlockFactory factory, int expectedCount) { + return factory.bytesRefs(expectedCount); + } + + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + SortedSetDocValues docValues = context.reader().getSortedSetDocValues(fieldName); + if (docValues != null) { + SortedDocValues singleton = DocValues.unwrapSingleton(docValues); + if (singleton != null) { + return new SingletonOrdinals(singleton); + } + return new Ordinals(docValues); + } + SortedDocValues singleton = context.reader().getSortedDocValues(fieldName); + if (singleton != null) { + return new SingletonOrdinals(singleton); + } + return new ConstantNullsReader(); + } + + @Override + public boolean supportsOrdinals() { + return true; + } + + @Override + public SortedSetDocValues ordinals(LeafReaderContext context) throws IOException { + return DocValues.getSortedSet(context.reader(), fieldName); + } + + @Override + public String toString() { + return "BytesRefsFromOrds[" + fieldName + "]"; } } @@ -539,12 +556,7 @@ private static class SingletonOrdinals extends BlockDocValuesReader { } @Override - public BytesRefBuilder builder(BuilderFactory factory, int expectedCount) { - return factory.bytesRefsFromDocValues(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { + public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { try (BlockLoader.SingletonOrdinalsBuilder builder = factory.singletonOrdinalsBuilder(ordinals, docs.count())) { for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); @@ -562,8 +574,8 @@ public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IO } @Override - public void readValuesFromSingleDoc(int doc, Builder builder) throws IOException { - if (ordinals.advanceExact(doc)) { + public void read(int docId, BlockLoader.StoredFields storedFields, Builder builder) throws IOException { + if (ordinals.advanceExact(docId)) { ((BytesRefBuilder) builder).appendBytesRef(ordinals.lookupOrd(ordinals.ordValue())); } else { builder.appendNull(); @@ -571,13 +583,13 @@ public void readValuesFromSingleDoc(int doc, Builder builder) throws IOException } @Override - public int docID() { + public int docId() { return ordinals.docID(); } @Override public String toString() { - return "SingletonOrdinals"; + return "BlockDocValuesReader.SingletonOrdinals"; } } @@ -589,13 +601,8 @@ private static class Ordinals extends BlockDocValuesReader { } @Override - public BytesRefBuilder 
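BytesRefsFromOrdsBlockLoader is the only loader here that advertises ordinals. A hedged sketch of the fast path that flag enables for a consumer (the grouping comment is illustrative):

    if (loader.supportsOrdinals()) {
        SortedSetDocValues ords = loader.ordinals(leafReaderContext);
        // operate on ordinals directly and resolve each distinct value once
        // via ords.lookupOrd(ordinal) instead of materializing every BytesRef
    }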
builder(BuilderFactory factory, int expectedCount) { - return factory.bytesRefsFromDocValues(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { - try (BytesRefBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { + try (BytesRefBuilder builder = factory.bytesRefsFromDocValues(docs.count())) { for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); if (doc < ordinals.docID()) { @@ -608,12 +615,12 @@ public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IO } @Override - public void readValuesFromSingleDoc(int doc, Builder builder) throws IOException { - read(doc, (BytesRefBuilder) builder); + public void read(int docId, BlockLoader.StoredFields storedFields, Builder builder) throws IOException { + read(docId, (BytesRefBuilder) builder); } - private void read(int doc, BytesRefBuilder builder) throws IOException { - if (false == ordinals.advanceExact(doc)) { + private void read(int docId, BytesRefBuilder builder) throws IOException { + if (false == ordinals.advanceExact(docId)) { builder.appendNull(); return; } @@ -630,32 +637,52 @@ private void read(int doc, BytesRefBuilder builder) throws IOException { } @Override - public int docID() { + public int docId() { return ordinals.docID(); } @Override public String toString() { - return "Ordinals"; + return "BlockDocValuesReader.Ordinals"; } } - private static class Bytes extends BlockDocValuesReader { - private final SortedBinaryDocValues docValues; - private int docID = -1; + public static class BytesRefsFromBinaryBlockLoader extends DocValuesBlockLoader { + private final String fieldName; - Bytes(SortedBinaryDocValues docValues) { - this.docValues = docValues; + public BytesRefsFromBinaryBlockLoader(String fieldName) { + this.fieldName = fieldName; } @Override - public BytesRefBuilder builder(BuilderFactory factory, int expectedCount) { - return factory.bytesRefsFromDocValues(expectedCount); + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.bytesRefs(expectedCount); + } + + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + BinaryDocValues docValues = context.reader().getBinaryDocValues(fieldName); + if (docValues == null) { + return new ConstantNullsReader(); + } + return new BytesRefsFromBinary(docValues); + } + } + + private static class BytesRefsFromBinary extends BlockDocValuesReader { + private final BinaryDocValues docValues; + private final ByteArrayStreamInput in = new ByteArrayStreamInput(); + private final BytesRef scratch = new BytesRef(); + + private int docID = -1; + + BytesRefsFromBinary(BinaryDocValues docValues) { + this.docValues = docValues; } @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { - try (BlockLoader.BytesRefBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { + try (BlockLoader.BytesRefBuilder builder = factory.bytesRefs(docs.count())) { for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); if (doc < docID) { @@ -668,7 +695,7 @@ public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IO } @Override - public void readValuesFromSingleDoc(int docId, Builder builder) throws IOException { + public void read(int docId, BlockLoader.StoredFields storedFields, Builder builder) throws IOException { 
read(docId, (BytesRefBuilder) builder); } @@ -678,27 +705,66 @@ private void read(int doc, BytesRefBuilder builder) throws IOException { builder.appendNull(); return; } - int count = docValues.docValueCount(); + BytesRef bytes = docValues.binaryValue(); + assert bytes.length > 0; + in.reset(bytes.bytes, bytes.offset, bytes.length); + int count = in.readVInt(); + scratch.bytes = bytes.bytes; + if (count == 1) { - // TODO read ords in ascending order. Buffers and stuff. - builder.appendBytesRef(docValues.nextValue()); + scratch.length = in.readVInt(); + scratch.offset = in.getPosition(); + builder.appendBytesRef(scratch); return; } builder.beginPositionEntry(); for (int v = 0; v < count; v++) { - builder.appendBytesRef(docValues.nextValue()); + scratch.length = in.readVInt(); + scratch.offset = in.getPosition(); + in.setPosition(scratch.offset + scratch.length); + builder.appendBytesRef(scratch); } builder.endPositionEntry(); } @Override - public int docID() { + public int docId() { return docID; } @Override public String toString() { - return "Bytes"; + return "BlockDocValuesReader.Bytes"; + } + } + + public static class BooleansBlockLoader extends DocValuesBlockLoader { + private final String fieldName; + + public BooleansBlockLoader(String fieldName) { + this.fieldName = fieldName; + } + + @Override + public BooleanBuilder builder(BlockFactory factory, int expectedCount) { + return factory.booleans(expectedCount); + } + + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + SortedNumericDocValues docValues = context.reader().getSortedNumericDocValues(fieldName); + if (docValues != null) { + NumericDocValues singleton = DocValues.unwrapSingleton(docValues); + if (singleton != null) { + return new SingletonBooleans(singleton); + } + return new Booleans(docValues); + } + NumericDocValues singleton = context.reader().getNumericDocValues(fieldName); + if (singleton != null) { + return new SingletonBooleans(singleton); + } + return new ConstantNullsReader(); } } @@ -710,13 +776,8 @@ private static class SingletonBooleans extends BlockDocValuesReader { } @Override - public BooleanBuilder builder(BuilderFactory factory, int expectedCount) { - return factory.booleansFromDocValues(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { - try (BlockLoader.BooleanBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { + try (BlockLoader.BooleanBuilder builder = factory.booleansFromDocValues(docs.count())) { int lastDoc = -1; for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); @@ -735,7 +796,7 @@ public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IO } @Override - public void readValuesFromSingleDoc(int docId, Builder builder) throws IOException { + public void read(int docId, BlockLoader.StoredFields storedFields, Builder builder) throws IOException { BooleanBuilder blockBuilder = (BooleanBuilder) builder; if (numericDocValues.advanceExact(docId)) { blockBuilder.appendBoolean(numericDocValues.longValue() != 0); @@ -745,13 +806,13 @@ public void readValuesFromSingleDoc(int docId, Builder builder) throws IOExcepti } @Override - public int docID() { + public int docId() { return numericDocValues.docID(); } @Override public String toString() { - return "SingletonBooleans"; + return "BlockDocValuesReader.SingletonBooleans"; } } @@ -764,13 +825,8 @@ private static class Booleans 
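The BytesRefsFromBinary read loop above implies a layout inside each binary doc value of a vInt value count followed by, per value, a vInt length and that many bytes. That layout is inferred from the reader, not from a documented format; a hedged sketch of producing a matching two-value entry:

    BytesStreamOutput out = new BytesStreamOutput();
    out.writeVInt(2);                                      // number of values in this doc
    out.writeVInt(3); out.writeBytes(new byte[] { 1, 2, 3 }, 0, 3);
    out.writeVInt(1); out.writeBytes(new byte[] { 9 }, 0, 1);
    // out.bytes() is shaped like what binaryValue() hands to BytesRefsFromBinary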
extends BlockDocValuesReader { } @Override - public BooleanBuilder builder(BuilderFactory factory, int expectedCount) { - return factory.booleansFromDocValues(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { - try (BlockLoader.BooleanBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { + try (BlockLoader.BooleanBuilder builder = factory.booleansFromDocValues(docs.count())) { for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); if (doc < this.docID) { @@ -783,7 +839,7 @@ public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IO } @Override - public void readValuesFromSingleDoc(int docId, Builder builder) throws IOException { + public void read(int docId, BlockLoader.StoredFields storedFields, Builder builder) throws IOException { read(docId, (BooleanBuilder) builder); } @@ -806,61 +862,14 @@ private void read(int doc, BooleanBuilder builder) throws IOException { } @Override - public int docID() { + public int docId() { // There is a .docID on the numericDocValues but it is often not implemented. return docID; } @Override public String toString() { - return "Booleans"; - } - } - - private static class Nulls extends BlockDocValuesReader { - private int docID = -1; - - @Override - public BlockLoader.Builder builder(BuilderFactory factory, int expectedCount) { - return factory.nulls(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { - try (BlockLoader.Builder builder = builder(factory, docs.count())) { - for (int i = 0; i < docs.count(); i++) { - builder.appendNull(); - } - return builder.build(); - } - } - - @Override - public void readValuesFromSingleDoc(int docId, Builder builder) { - this.docID = docId; - builder.appendNull(); - } - - @Override - public int docID() { - return docID; - } - - @Override - public String toString() { - return "Nulls"; - } - } - - /** - * Convert a {@link String} into a utf-8 {@link BytesRef}. - */ - protected static BytesRef toBytesRef(BytesRef scratch, String v) { - int len = UnicodeUtil.maxUTF8Length(v.length()); - if (scratch.bytes.length < len) { - scratch.bytes = new byte[len]; + return "BlockDocValuesReader.Booleans"; } - scratch.length = UnicodeUtil.UTF16toUTF8(v, 0, v.length(), scratch.bytes); - return scratch; } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockLoader.java index af53ab42d35d9..6e0329a61c51e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockLoader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockLoader.java @@ -13,8 +13,12 @@ import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.util.BytesRef; import org.elasticsearch.core.Releasable; +import org.elasticsearch.search.fetch.StoredFieldsSpec; +import org.elasticsearch.search.lookup.Source; import java.io.IOException; +import java.util.List; +import java.util.Map; /** * Interface for loading data in a block shape. Instances of this class @@ -22,26 +26,292 @@ */ public interface BlockLoader { /** - * Build a {@link LeafReaderContext leaf} level reader. + * The {@link BlockLoader.Builder} for data of this type. Called when + * loading from a multi-segment or unsorted block. 
*/ - BlockDocValuesReader reader(LeafReaderContext context) throws IOException; + Builder builder(BlockFactory factory, int expectedCount); + + interface Reader { + /** + * Checks if the reader can be used to read a range of documents starting with the given docID by the current thread. + */ + boolean canReuse(int startingDocID); + } + + interface ColumnAtATimeReader extends Reader { + /** + * Reads the values of all documents in {@code docs}. + */ + BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException; + } + + interface RowStrideReader extends Reader { + /** + * Reads the values of the given document into the builder. + */ + void read(int docId, StoredFields storedFields, Builder builder) throws IOException; + } + + interface AllReader extends ColumnAtATimeReader, RowStrideReader {} + + interface StoredFields { + Source source(); + + /** + * @return the ID for the current document + */ + String id(); + + /** + * @return the routing path for the current document + */ + String routing(); + + /** + * @return stored fields for the current document + */ + Map<String, List<Object>> storedFields(); + } + + ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) throws IOException; + + RowStrideReader rowStrideReader(LeafReaderContext context) throws IOException; + + StoredFieldsSpec rowStrideStoredFieldSpec(); /** * Does this loader support loading bytes via calling {@link #ordinals}. */ - default boolean supportsOrdinals() { - return false; - } + boolean supportsOrdinals(); /** * Load ordinals for the provided context. */ - default SortedSetDocValues ordinals(LeafReaderContext context) throws IOException { - throw new IllegalStateException("ordinals not supported"); + SortedSetDocValues ordinals(LeafReaderContext context) throws IOException; + + /** + * Load blocks with only null. + */ + BlockLoader CONSTANT_NULLS = new BlockLoader() { + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.nulls(expectedCount); + } + + @Override + public ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) { + return new ConstantNullsReader(); + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) { + return new ConstantNullsReader(); + } + + @Override + public StoredFieldsSpec rowStrideStoredFieldSpec() { + return StoredFieldsSpec.NO_REQUIREMENTS; + } + + @Override + public boolean supportsOrdinals() { + return false; + } + + @Override + public SortedSetDocValues ordinals(LeafReaderContext context) { + throw new UnsupportedOperationException(); + } + + @Override + public String toString() { + return "ConstantNull"; + } + }; + + /** + * Implementation of {@link ColumnAtATimeReader} and {@link RowStrideReader} that always + * loads {@code null}. + */ + class ConstantNullsReader implements AllReader { + @Override + public Block read(BlockFactory factory, Docs docs) throws IOException { + return factory.constantNulls(); + } + + @Override + public void read(int docId, StoredFields storedFields, Builder builder) throws IOException { + builder.appendNull(); + } + + @Override + public boolean canReuse(int startingDocID) { + return true; + } + + @Override + public String toString() { + return "constant_nulls"; + } } /** - * A list of documents to load. + * Load blocks with only {@code value}.
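A short example of where the shared constant-null loader fits: a field type with nothing to load can return it directly. The mapper below is hypothetical; only the blockLoader signature matches the mappers changed later in this diff.

    @Override
    public BlockLoader blockLoader(BlockLoaderContext blContext) {
        return BlockLoader.CONSTANT_NULLS; // every requested position loads null
    }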
+ */ + static BlockLoader constantBytes(BytesRef value) { + return new BlockLoader() { + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.bytesRefs(expectedCount); + } + + @Override + public ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) { + return new ColumnAtATimeReader() { + @Override + public Block read(BlockFactory factory, Docs docs) { + return factory.constantBytes(value); + } + + @Override + public boolean canReuse(int startingDocID) { + return true; + } + + @Override + public String toString() { + return "constant[" + value + "]"; + } + }; + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) { + return new RowStrideReader() { + @Override + public void read(int docId, StoredFields storedFields, Builder builder) { + ((BlockLoader.BytesRefBuilder) builder).appendBytesRef(value); + } + + @Override + public boolean canReuse(int startingDocID) { + return true; + } + + @Override + public String toString() { + return "constant[" + value + "]"; + } + }; + } + + @Override + public StoredFieldsSpec rowStrideStoredFieldSpec() { + return StoredFieldsSpec.NO_REQUIREMENTS; + } + + @Override + public boolean supportsOrdinals() { + return false; + } + + @Override + public SortedSetDocValues ordinals(LeafReaderContext context) { + throw new UnsupportedOperationException(); + } + + @Override + public String toString() { + return "ConstantBytes[" + value + "]"; + } + }; + } + + abstract class Delegating implements BlockLoader { + protected final BlockLoader delegate; + + protected Delegating(BlockLoader delegate) { + this.delegate = delegate; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return delegate.builder(factory, expectedCount); + } + + @Override + public ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) throws IOException { + ColumnAtATimeReader reader = delegate.columnAtATimeReader(context); + if (reader == null) { + return null; + } + return new ColumnAtATimeReader() { + @Override + public Block read(BlockFactory factory, Docs docs) throws IOException { + return reader.read(factory, docs); + } + + @Override + public boolean canReuse(int startingDocID) { + return reader.canReuse(startingDocID); + } + + @Override + public String toString() { + return "Delegating[to=" + delegatingTo() + ", impl=" + reader + "]"; + } + }; + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) throws IOException { + RowStrideReader reader = delegate.rowStrideReader(context); + if (reader == null) { + return null; + } + return new RowStrideReader() { + @Override + public void read(int docId, StoredFields storedFields, Builder builder) throws IOException { + reader.read(docId, storedFields, builder); + } + + @Override + public boolean canReuse(int startingDocID) { + return reader.canReuse(startingDocID); + } + + @Override + public String toString() { + return "Delegating[to=" + delegatingTo() + ", impl=" + reader + "]"; + } + }; + } + + @Override + public StoredFieldsSpec rowStrideStoredFieldSpec() { + return delegate.rowStrideStoredFieldSpec(); + } + + @Override + public boolean supportsOrdinals() { + return delegate.supportsOrdinals(); + } + + @Override + public SortedSetDocValues ordinals(LeafReaderContext context) throws IOException { + return delegate.ordinals(context); + } + + protected abstract String delegatingTo(); + + @Override + public final String toString() { + return "Delegating[to=" + delegatingTo() + ", impl=" + 
delegate + "]"; + } + } + + /** + * A list of documents to load. Documents are always in non-decreasing order. */ interface Docs { int count(); @@ -55,7 +325,7 @@ interface Docs { * production code. That implementation sits in the "compute" project. The is * also a test implementation, but there may be no more other implementations. */ - interface BuilderFactory { + interface BlockFactory { /** * Build a builder to load booleans as loaded from doc values. Doc values * load booleans deduplicated and in sorted order. @@ -112,11 +382,21 @@ interface BuilderFactory { LongBuilder longs(int expectedCount); /** - * Build a builder that can only load null values. - * TODO this should return a block directly instead of a builder + * Build a builder to load only {@code null}s. */ Builder nulls(int expectedCount); + /** + * Build a block that contains only {@code null}. + */ + Block constantNulls(); + + /** + * Build a block that contains {@code value} repeated + * {@code size} times. + */ + Block constantBytes(BytesRef value); + /** * Build a reader for reading keyword ordinals. */ @@ -129,7 +409,7 @@ interface BuilderFactory { * Marker interface for block results. The compute engine has a fleshed * out implementation. */ - interface Block {} + interface Block extends Releasable {} /** * A builder for typed values. For each document you may either call diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockLoaderStoredFieldsFromLeafLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockLoaderStoredFieldsFromLeafLoader.java new file mode 100644 index 0000000000000..0090935f51bc3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockLoaderStoredFieldsFromLeafLoader.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.index.fieldvisitor.LeafStoredFieldLoader; +import org.elasticsearch.search.lookup.Source; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +public class BlockLoaderStoredFieldsFromLeafLoader implements BlockLoader.StoredFields { + private final LeafStoredFieldLoader loader; + private final SourceLoader.Leaf sourceLoader; + private Source source; + + public BlockLoaderStoredFieldsFromLeafLoader(LeafStoredFieldLoader loader, SourceLoader.Leaf sourceLoader) { + this.loader = loader; + this.sourceLoader = sourceLoader; + } + + public void advanceTo(int doc) throws IOException { + loader.advanceTo(doc); + if (sourceLoader != null) { + source = sourceLoader.source(loader, doc); + } + } + + @Override + public Source source() { + return source; + } + + @Override + public String id() { + return loader.id(); + } + + @Override + public String routing() { + return loader.routing(); + } + + @Override + public Map> storedFields() { + return loader.storedFields(); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java index 1261a3612d3cb..12b5ff0e82a03 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java @@ -8,174 +8,34 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.index.fieldvisitor.LeafStoredFieldLoader; -import org.elasticsearch.index.fieldvisitor.StoredFieldLoader; -import org.elasticsearch.search.lookup.Source; +import org.apache.lucene.util.UnicodeUtil; +import org.elasticsearch.search.fetch.StoredFieldsSpec; import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.Set; /** * Loads values from {@code _source}. This whole process is very slow and cast-tastic, * so it doesn't really try to avoid megamorphic invocations. It's just going to be * slow. - * - * Note that this extends {@link BlockDocValuesReader} because it pretends to load - * doc values because, for now, ESQL only knows how to load things in a doc values - * order. */ -public abstract class BlockSourceReader extends BlockDocValuesReader { - /** - * Read {@code boolean}s from {@code _source}. - */ - public static BlockLoader booleans(ValueFetcher fetcher) { - StoredFieldLoader loader = StoredFieldLoader.create(true, Set.of()); - return context -> new BlockSourceReader(fetcher, loader.getLoader(context, null)) { - @Override - public BlockLoader.Builder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.booleans(expectedCount); - } - - @Override - protected void append(BlockLoader.Builder builder, Object v) { - ((BlockLoader.BooleanBuilder) builder).appendBoolean((Boolean) v); - } - - @Override - public String toString() { - return "SourceBooleans"; - } - }; - } - - /** - * Read {@link BytesRef}s from {@code _source}. 
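BlockLoaderStoredFieldsFromLeafLoader adapts a stored-field loader plus an optional source loader to the new StoredFields view. A hedged sketch of the row-stride loop this adapter enables; the StoredFieldLoader.fromSpec and SourceLoader.Leaf wiring are assumptions about the surrounding plumbing, not part of this diff:

    static void rowStrideExample(BlockLoader blockLoader, LeafReaderContext ctx, SourceLoader sourceLoader,
                                 BlockLoader.BlockFactory blockFactory, BlockLoader.Docs docs) throws IOException {
        StoredFieldsSpec spec = blockLoader.rowStrideStoredFieldSpec();
        BlockLoaderStoredFieldsFromLeafLoader storedFields = new BlockLoaderStoredFieldsFromLeafLoader(
            StoredFieldLoader.fromSpec(spec).getLoader(ctx, null),               // assumed factory
            spec.requiresSource() ? sourceLoader.leaf(ctx.reader(), null) : null // assumed signature
        );
        BlockLoader.RowStrideReader reader = blockLoader.rowStrideReader(ctx);
        try (BlockLoader.Builder builder = blockLoader.builder(blockFactory, docs.count())) {
            for (int i = 0; i < docs.count(); i++) {
                storedFields.advanceTo(docs.get(i)); // loads stored fields and, if required, _source
                reader.read(docs.get(i), storedFields, builder);
            }
            BlockLoader.Block block = builder.build(); // hand the block to the engine
        }
    }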
- */ - public static BlockLoader bytesRefs(ValueFetcher fetcher) { - StoredFieldLoader loader = StoredFieldLoader.create(true, Set.of()); - return context -> new BlockSourceReader(fetcher, loader.getLoader(context, null)) { - BytesRef scratch = new BytesRef(); - - @Override - public BlockLoader.Builder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.bytesRefs(expectedCount); - } - - @Override - protected void append(BlockLoader.Builder builder, Object v) { - ((BlockLoader.BytesRefBuilder) builder).appendBytesRef(toBytesRef(scratch, (String) v)); - } - - @Override - public String toString() { - return "SourceBytes"; - } - }; - } - - /** - * Read {@code double}s from {@code _source}. - */ - public static BlockLoader doubles(ValueFetcher fetcher) { - StoredFieldLoader loader = StoredFieldLoader.create(true, Set.of()); - return context -> new BlockSourceReader(fetcher, loader.getLoader(context, null)) { - @Override - public BlockLoader.Builder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.doubles(expectedCount); - } - - @Override - protected void append(BlockLoader.Builder builder, Object v) { - ((BlockLoader.DoubleBuilder) builder).appendDouble(((Number) v).doubleValue()); - } - - @Override - public String toString() { - return "SourceDoubles"; - } - }; - } - - /** - * Read {@code int}s from {@code _source}. - */ - public static BlockLoader ints(ValueFetcher fetcher) { - StoredFieldLoader loader = StoredFieldLoader.create(true, Set.of()); - return context -> new BlockSourceReader(fetcher, loader.getLoader(context, null)) { - @Override - public BlockLoader.Builder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.ints(expectedCount); - } - - @Override - protected void append(BlockLoader.Builder builder, Object v) { - ((BlockLoader.IntBuilder) builder).appendInt(((Number) v).intValue()); - } - - @Override - public String toString() { - return "SourceInts"; - } - }; - } - - /** - * Read {@code long}s from {@code _source}. 
- */ - public static BlockLoader longs(ValueFetcher fetcher) { - StoredFieldLoader loader = StoredFieldLoader.create(true, Set.of()); - return context -> new BlockSourceReader(fetcher, loader.getLoader(context, null)) { - @Override - public BlockLoader.Builder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.longs(expectedCount); - } - - @Override - protected void append(BlockLoader.Builder builder, Object v) { - ((BlockLoader.LongBuilder) builder).appendLong(((Number) v).longValue()); - } - - @Override - public String toString() { - return "SourceLongs"; - } - }; - } - +public abstract class BlockSourceReader implements BlockLoader.RowStrideReader { private final ValueFetcher fetcher; - private final LeafStoredFieldLoader loader; private final List ignoredValues = new ArrayList<>(); - private int docID = -1; - BlockSourceReader(ValueFetcher fetcher, LeafStoredFieldLoader loader) { + BlockSourceReader(ValueFetcher fetcher) { this.fetcher = fetcher; - this.loader = loader; - } - - @Override - public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoader.Docs docs) throws IOException { - try (BlockLoader.Builder builder = builder(factory, docs.count())) { - for (int i = 0; i < docs.count(); i++) { - int doc = docs.get(i); - if (doc < this.docID) { - throw new IllegalStateException("docs within same block must be in order"); - } - readValuesFromSingleDoc(doc, builder); - } - return builder.build(); - } } @Override - public void readValuesFromSingleDoc(int doc, BlockLoader.Builder builder) throws IOException { - this.docID = doc; - loader.advanceTo(doc); - List values = fetcher.fetchValues(Source.fromBytes(loader.source()), doc, ignoredValues); + public final void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws IOException { + List values = fetcher.fetchValues(storedFields.source(), docId, ignoredValues); ignoredValues.clear(); // TODO do something with these? 
- if (values == null) { + if (values == null || values.isEmpty()) { builder.appendNull(); return; } @@ -193,7 +53,213 @@ public void readValuesFromSingleDoc(int doc, BlockLoader.Builder builder) throws protected abstract void append(BlockLoader.Builder builder, Object v); @Override - public int docID() { - return docID; + public boolean canReuse(int startingDocID) { + return true; + } + + private abstract static class SourceBlockLoader implements BlockLoader { + @Override + public final ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) throws IOException { + return null; + } + + @Override + public final StoredFieldsSpec rowStrideStoredFieldSpec() { + return StoredFieldsSpec.NEEDS_SOURCE; + } + + @Override + public final boolean supportsOrdinals() { + return false; + } + + @Override + public final SortedSetDocValues ordinals(LeafReaderContext context) { + throw new UnsupportedOperationException(); + } + } + + public static class BooleansBlockLoader extends SourceBlockLoader { + private final ValueFetcher fetcher; + + public BooleansBlockLoader(ValueFetcher fetcher) { + this.fetcher = fetcher; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.booleans(expectedCount); + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) { + return new Booleans(fetcher); + } + } + + private static class Booleans extends BlockSourceReader { + Booleans(ValueFetcher fetcher) { + super(fetcher); + } + + @Override + protected void append(BlockLoader.Builder builder, Object v) { + ((BlockLoader.BooleanBuilder) builder).appendBoolean((Boolean) v); + } + + @Override + public String toString() { + return "BlockSourceReader.Booleans"; + } + } + + public static class BytesRefsBlockLoader extends SourceBlockLoader { + private final ValueFetcher fetcher; + + public BytesRefsBlockLoader(ValueFetcher fetcher) { + this.fetcher = fetcher; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.bytesRefs(expectedCount); + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) { + return new BytesRefs(fetcher); + } + } + + private static class BytesRefs extends BlockSourceReader { + BytesRef scratch = new BytesRef(); + + BytesRefs(ValueFetcher fetcher) { + super(fetcher); + } + + @Override + protected void append(BlockLoader.Builder builder, Object v) { + ((BlockLoader.BytesRefBuilder) builder).appendBytesRef(toBytesRef(scratch, (String) v)); + } + + @Override + public String toString() { + return "BlockSourceReader.Bytes"; + } + } + + public static class DoublesBlockLoader extends SourceBlockLoader { + private final ValueFetcher fetcher; + + public DoublesBlockLoader(ValueFetcher fetcher) { + this.fetcher = fetcher; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.doubles(expectedCount); + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) { + return new Doubles(fetcher); + } + } + + private static class Doubles extends BlockSourceReader { + Doubles(ValueFetcher fetcher) { + super(fetcher); + } + + @Override + protected void append(BlockLoader.Builder builder, Object v) { + ((BlockLoader.DoubleBuilder) builder).appendDouble(((Number) v).doubleValue()); + } + + @Override + public String toString() { + return "BlockSourceReader.Doubles"; + } + } + + public static class IntsBlockLoader extends SourceBlockLoader { + private final ValueFetcher fetcher; + + public 
IntsBlockLoader(ValueFetcher fetcher) { + this.fetcher = fetcher; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.ints(expectedCount); + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) { + return new Ints(fetcher); + } + } + + private static class Ints extends BlockSourceReader { + Ints(ValueFetcher fetcher) { + super(fetcher); + } + + @Override + protected void append(BlockLoader.Builder builder, Object v) { + ((BlockLoader.IntBuilder) builder).appendInt(((Number) v).intValue()); + } + + @Override + public String toString() { + return "BlockSourceReader.Ints"; + } + } + + public static class LongsBlockLoader extends SourceBlockLoader { + private final ValueFetcher fetcher; + + public LongsBlockLoader(ValueFetcher fetcher) { + this.fetcher = fetcher; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.longs(expectedCount); + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) { + return new Longs(fetcher); + } + } + + private static class Longs extends BlockSourceReader { + Longs(ValueFetcher fetcher) { + super(fetcher); + } + + @Override + protected void append(BlockLoader.Builder builder, Object v) { + ((BlockLoader.LongBuilder) builder).appendLong(((Number) v).longValue()); + } + + @Override + public String toString() { + return "BlockSourceReader.Longs"; + } + } + + /** + * Convert a {@link String} into a utf-8 {@link BytesRef}. + */ + static BytesRef toBytesRef(BytesRef scratch, String v) { + int len = UnicodeUtil.maxUTF8Length(v.length()); + if (scratch.bytes.length < len) { + scratch.bytes = new byte[len]; + } + scratch.length = UnicodeUtil.UTF16toUTF8(v, 0, v.length(), scratch.bytes); + return scratch; } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockStoredFieldsReader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockStoredFieldsReader.java index 5984482fd9441..0a6cde773ff48 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockStoredFieldsReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockStoredFieldsReader.java @@ -9,10 +9,11 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.index.fieldvisitor.LeafStoredFieldLoader; -import org.elasticsearch.index.fieldvisitor.StoredFieldLoader; import org.elasticsearch.index.mapper.BlockLoader.BytesRefBuilder; +import org.elasticsearch.search.fetch.StoredFieldsSpec; import java.io.IOException; import java.util.List; @@ -27,86 +28,101 @@ * doc values because, for now, ESQL only knows how to load things in a doc values * order. 
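The toBytesRef helper above reuses the caller's scratch buffer, growing it only when the worst-case UTF-8 size exceeds its capacity. A hedged usage note, valid within this package since the helper is package-private:

    BytesRef scratch = new BytesRef();
    BytesRef utf8 = BlockSourceReader.toBytesRef(scratch, "café");
    // utf8 aliases scratch.bytes: deep-copy it (e.g. BytesRef.deepCopyOf) before
    // the next call if the encoded value must outlive this iteration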
*/ -public abstract class BlockStoredFieldsReader extends BlockDocValuesReader { - public static BlockLoader bytesRefsFromBytesRefs(String field) { - StoredFieldLoader loader = StoredFieldLoader.create(false, Set.of(field)); - return context -> new Bytes(loader.getLoader(context, null), field) { - @Override - protected BytesRef toBytesRef(Object v) { - return (BytesRef) v; - } - }; +public abstract class BlockStoredFieldsReader implements BlockLoader.RowStrideReader { + @Override + public boolean canReuse(int startingDocID) { + return true; } - public static BlockLoader bytesRefsFromStrings(String field) { - StoredFieldLoader loader = StoredFieldLoader.create(false, Set.of(field)); - return context -> new Bytes(loader.getLoader(context, null), field) { - private final BytesRef scratch = new BytesRef(); + private abstract static class StoredFieldsBlockLoader implements BlockLoader { + protected final String field; - @Override - protected BytesRef toBytesRef(Object v) { - return toBytesRef(scratch, (String) v); - } - }; - } + StoredFieldsBlockLoader(String field) { + this.field = field; + } - public static BlockLoader id() { - StoredFieldLoader loader = StoredFieldLoader.create(false, Set.of(IdFieldMapper.NAME)); - return context -> new Id(loader.getLoader(context, null)); - } + @Override + public final ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) { + return null; + } - private final LeafStoredFieldLoader loader; - private int docID = -1; + @Override + public final StoredFieldsSpec rowStrideStoredFieldSpec() { + return new StoredFieldsSpec(false, false, Set.of(field)); + } - protected BlockStoredFieldsReader(LeafStoredFieldLoader loader) { - this.loader = loader; - } + @Override + public final boolean supportsOrdinals() { + return false; + } - @Override - public final BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoader.Docs docs) throws IOException { - try (BlockLoader.Builder builder = builder(factory, docs.count())) { - for (int i = 0; i < docs.count(); i++) { - readValuesFromSingleDoc(docs.get(i), builder); - } - return builder.build(); + @Override + public final SortedSetDocValues ordinals(LeafReaderContext context) { + throw new UnsupportedOperationException(); } } - @Override - public final void readValuesFromSingleDoc(int docId, BlockLoader.Builder builder) throws IOException { - if (docId < this.docID) { - throw new IllegalStateException("docs within same block must be in order"); + /** + * Load {@link BytesRef} blocks from stored {@link BytesRef}s. + */ + public static class BytesFromBytesRefsBlockLoader extends StoredFieldsBlockLoader { + public BytesFromBytesRefsBlockLoader(String field) { + super(field); + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.bytesRefs(expectedCount); + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) throws IOException { + return new Bytes(field) { + @Override + protected BytesRef toBytesRef(Object v) { + return (BytesRef) v; + } + }; } - this.docID = docId; - loader.advanceTo(docId); - read(loader, builder); } - protected abstract void read(LeafStoredFieldLoader loader, BlockLoader.Builder builder) throws IOException; + /** + * Load {@link BytesRef} blocks from stored {@link String}s. 
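These stored-field loaders return null from columnAtATimeReader; under the new API that null is a signal, not an error. A hedged sketch of the dispatch a consumer is expected to perform:

    BlockLoader.ColumnAtATimeReader column = blockLoader.columnAtATimeReader(leafReaderContext);
    if (column != null) {
        // bulk path, typically backed by doc values
    } else {
        BlockLoader.RowStrideReader rows = blockLoader.rowStrideReader(leafReaderContext);
        // per-document path driven by stored fields and/or _source
    }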
+ */ + public static class BytesFromStringsBlockLoader extends StoredFieldsBlockLoader { + public BytesFromStringsBlockLoader(String field) { + super(field); + } - @Override - public final int docID() { - return docID; + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.bytesRefs(expectedCount); + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) throws IOException { + return new Bytes(field) { + private final BytesRef scratch = new BytesRef(); + + @Override + protected BytesRef toBytesRef(Object v) { + return BlockSourceReader.toBytesRef(scratch, (String) v); + } + }; + } } private abstract static class Bytes extends BlockStoredFieldsReader { private final String field; - Bytes(LeafStoredFieldLoader loader, String field) { - super(loader); + Bytes(String field) { this.field = field; } - @Override - public BytesRefBuilder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.bytesRefs(expectedCount); - } - protected abstract BytesRef toBytesRef(Object v); @Override - protected void read(LeafStoredFieldLoader loader, BlockLoader.Builder builder) throws IOException { - List<Object> values = loader.storedFields().get(field); + public void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws IOException { + List<Object> values = storedFields.storedFields().get(field); if (values == null) { builder.appendNull(); return; @@ -128,21 +144,31 @@ public String toString() { } } - private static class Id extends BlockStoredFieldsReader { - private final BytesRef scratch = new BytesRef(); - - Id(LeafStoredFieldLoader loader) { - super(loader); + /** + * Load {@link BytesRef} blocks from the {@code _id} stored field. + */ + public static class IdBlockLoader extends StoredFieldsBlockLoader { + public IdBlockLoader() { + super(IdFieldMapper.NAME); } @Override - public BlockLoader.BytesRefBuilder builder(BlockLoader.BuilderFactory factory, int expectedCount) { + public Builder builder(BlockFactory factory, int expectedCount) { return factory.bytesRefs(expectedCount); } @Override - protected void read(LeafStoredFieldLoader loader, BlockLoader.Builder builder) throws IOException { - ((BytesRefBuilder) builder).appendBytesRef(toBytesRef(scratch, loader.id())); + public RowStrideReader rowStrideReader(LeafReaderContext context) throws IOException { + return new Id(); + } + } + + private static class Id extends BlockStoredFieldsReader { + private final BytesRef scratch = new BytesRef(); + + @Override + public void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws IOException { + ((BytesRefBuilder) builder).appendBytesRef(BlockSourceReader.toBytesRef(scratch, storedFields.id())); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java index a5793df3b82e0..7f175982dc28e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java @@ -257,9 +257,9 @@ public Boolean valueForDisplay(Object value) { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { if (hasDocValues()) { - return BlockDocValuesReader.booleans(name()); + return new BlockDocValuesReader.BooleansBlockLoader(name()); } - return BlockSourceReader.booleans(sourceValueFetcher(blContext.sourcePaths(name()))); + return new
BlockSourceReader.BooleansBlockLoader(sourceValueFetcher(blContext.sourcePaths(name()))); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanScriptBlockDocValuesReader.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanScriptBlockDocValuesReader.java index b59df56791fbe..953e13dc69eb0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanScriptBlockDocValuesReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanScriptBlockDocValuesReader.java @@ -8,14 +8,31 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.script.BooleanFieldScript; +import java.io.IOException; + /** * {@link BlockDocValuesReader} implementation for {@code boolean} scripts. */ public class BooleanScriptBlockDocValuesReader extends BlockDocValuesReader { - public static BlockLoader blockLoader(BooleanFieldScript.LeafFactory factory) { - return context -> new BooleanScriptBlockDocValuesReader(factory.newInstance(context)); + static class BooleanScriptBlockLoader extends DocValuesBlockLoader { + private final BooleanFieldScript.LeafFactory factory; + + BooleanScriptBlockLoader(BooleanFieldScript.LeafFactory factory) { + this.factory = factory; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.booleans(expectedCount); + } + + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + return new BooleanScriptBlockDocValuesReader(factory.newInstance(context)); + } } private final BooleanFieldScript script; @@ -26,19 +43,14 @@ public static BlockLoader blockLoader(BooleanFieldScript.LeafFactory factory) { } @Override - public int docID() { + public int docId() { return docId; } @Override - public BlockLoader.BooleanBuilder builder(BlockLoader.BuilderFactory factory, int expectedCount) { + public BlockLoader.Block read(BlockLoader.BlockFactory factory, BlockLoader.Docs docs) throws IOException { // Note that we don't emit falses before trues so we conform to the doc values contract and can use booleansFromDocValues - return factory.booleansFromDocValues(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoader.Docs docs) { - try (BlockLoader.BooleanBuilder builder = builder(factory, docs.count())) { + try (BlockLoader.BooleanBuilder builder = factory.booleans(docs.count())) { for (int i = 0; i < docs.count(); i++) { read(docs.get(i), builder); } @@ -47,7 +59,7 @@ public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoa } @Override - public void readValuesFromSingleDoc(int docId, BlockLoader.Builder builder) { + public void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws IOException { this.docId = docId; read(docId, (BlockLoader.BooleanBuilder) builder); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanScriptFieldType.java index 6e3876644567f..749bb279cfed4 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanScriptFieldType.java @@ -112,7 +112,7 @@ public DocValueFormat docValueFormat(String format, ZoneId timeZone) { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - return 
BooleanScriptBlockDocValuesReader.blockLoader(leafFactory(blContext.lookup())); + return new BooleanScriptBlockDocValuesReader.BooleanScriptBlockLoader(leafFactory(blContext.lookup())); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index 9d12fc6910d66..e90bea103c4cb 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -775,9 +775,9 @@ public Function pointReaderIfPossible() { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { if (hasDocValues()) { - return BlockDocValuesReader.longs(name()); + return new BlockDocValuesReader.LongsBlockLoader(name()); } - return BlockSourceReader.longs(sourceValueFetcher(blContext.sourcePaths(name()))); + return new BlockSourceReader.LongsBlockLoader(sourceValueFetcher(blContext.sourcePaths(name()))); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateScriptBlockDocValuesReader.java b/server/src/main/java/org/elasticsearch/index/mapper/DateScriptBlockDocValuesReader.java index ad630a71870a4..a5303f27573eb 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateScriptBlockDocValuesReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateScriptBlockDocValuesReader.java @@ -8,14 +8,31 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.script.DateFieldScript; +import java.io.IOException; + /** * {@link BlockDocValuesReader} implementation for date scripts. */ public class DateScriptBlockDocValuesReader extends BlockDocValuesReader { - public static BlockLoader blockLoader(DateFieldScript.LeafFactory factory) { - return context -> new DateScriptBlockDocValuesReader(factory.newInstance(context)); + static class DateScriptBlockLoader extends DocValuesBlockLoader { + private final DateFieldScript.LeafFactory factory; + + DateScriptBlockLoader(DateFieldScript.LeafFactory factory) { + this.factory = factory; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.longs(expectedCount); + } + + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + return new DateScriptBlockDocValuesReader(factory.newInstance(context)); + } } private final DateFieldScript script; @@ -26,18 +43,14 @@ public static BlockLoader blockLoader(DateFieldScript.LeafFactory factory) { } @Override - public int docID() { + public int docId() { return docId; } @Override - public BlockLoader.LongBuilder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.longs(expectedCount); // Note that we don't pre-sort our output so we can't use longsFromDocValues - } - - @Override - public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoader.Docs docs) { - try (BlockLoader.LongBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockLoader.BlockFactory factory, BlockLoader.Docs docs) throws IOException { + // Note that we don't pre-sort our output so we can't use factory.longsFromDocValues + try (BlockLoader.LongBuilder builder = factory.longs(docs.count())) { for (int i = 0; i < docs.count(); i++) { read(docs.get(i), builder); } @@ -46,7 +59,7 @@ public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoa } @Override - public void 
readValuesFromSingleDoc(int docId, BlockLoader.Builder builder) { + public void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws IOException { this.docId = docId; read(docId, (BlockLoader.LongBuilder) builder); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/DateScriptFieldType.java index 8252d571dce68..238f7488f6b54 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateScriptFieldType.java @@ -181,7 +181,7 @@ public DocValueFormat docValueFormat(@Nullable String format, ZoneId timeZone) { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - return DateScriptBlockDocValuesReader.blockLoader(leafFactory(blContext.lookup())); + return new DateScriptBlockDocValuesReader.DateScriptBlockLoader(leafFactory(blContext.lookup())); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptBlockDocValuesReader.java b/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptBlockDocValuesReader.java index 4e317a3ed11cb..a98f5ff661a78 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptBlockDocValuesReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptBlockDocValuesReader.java @@ -8,14 +8,31 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.script.DoubleFieldScript; +import java.io.IOException; + /** * {@link BlockDocValuesReader} implementation for {@code double} scripts. */ public class DoubleScriptBlockDocValuesReader extends BlockDocValuesReader { - public static BlockLoader blockLoader(DoubleFieldScript.LeafFactory factory) { - return context -> new DoubleScriptBlockDocValuesReader(factory.newInstance(context)); + static class DoubleScriptBlockLoader extends DocValuesBlockLoader { + private final DoubleFieldScript.LeafFactory factory; + + DoubleScriptBlockLoader(DoubleFieldScript.LeafFactory factory) { + this.factory = factory; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.doubles(expectedCount); + } + + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + return new DoubleScriptBlockDocValuesReader(factory.newInstance(context)); + } } private final DoubleFieldScript script; @@ -26,18 +43,14 @@ public static BlockLoader blockLoader(DoubleFieldScript.LeafFactory factory) { } @Override - public int docID() { + public int docId() { return docId; } @Override - public BlockLoader.DoubleBuilder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.doubles(expectedCount); // Note that we don't pre-sort our output so we can't use doublesFromDocValues - } - - @Override - public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoader.Docs docs) { - try (BlockLoader.DoubleBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockLoader.BlockFactory factory, BlockLoader.Docs docs) throws IOException { + // Note that we don't pre-sort our output so we can't use factory.doublesFromDocValues + try (BlockLoader.DoubleBuilder builder = factory.doubles(docs.count())) { for (int i = 0; i < docs.count(); i++) { read(docs.get(i), builder); } @@ -46,7 +59,7 @@ public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, 
BlockLoa } @Override - public void readValuesFromSingleDoc(int docId, BlockLoader.Builder builder) { + public void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws IOException { this.docId = docId; read(docId, (BlockLoader.DoubleBuilder) builder); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptFieldType.java index ef5c112ef212a..c3f7e782c219a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptFieldType.java @@ -107,7 +107,7 @@ public DocValueFormat docValueFormat(String format, ZoneId timeZone) { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - return DoubleScriptBlockDocValuesReader.blockLoader(leafFactory(blContext.lookup())); + return new DoubleScriptBlockDocValuesReader.DoubleScriptBlockLoader(leafFactory(blContext.lookup())); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java index b9ba0762e5117..2b4eec2bdd565 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java @@ -204,4 +204,11 @@ Set sourcePaths(String field) { return fieldToCopiedFields.containsKey(resolvedField) ? fieldToCopiedFields.get(resolvedField) : Set.of(resolvedField); } + + /** + * If field is a leaf multi-field return the path to the parent field. Otherwise, return null. + */ + public String parentField(String field) { + return fullSubfieldNameToParentPath.get(field); + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java index 5f987fd96ca66..1b2667fe9d2ea 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java @@ -80,42 +80,7 @@ public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - // TODO build a constant block directly - BytesRef bytes = new BytesRef(blContext.indexName()); - return context -> new BlockDocValuesReader() { - private int docId; - - @Override - public int docID() { - return docId; - } - - @Override - public BlockLoader.BytesRefBuilder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.bytesRefs(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoader.Docs docs) { - try (BlockLoader.BytesRefBuilder builder = builder(factory, docs.count())) { - for (int i = 0; i < docs.count(); i++) { - builder.appendBytesRef(bytes); - } - return builder.build(); - } - } - - @Override - public void readValuesFromSingleDoc(int docId, BlockLoader.Builder builder) { - this.docId = docId; - ((BlockLoader.BytesRefBuilder) builder).appendBytesRef(bytes); - } - - @Override - public String toString() { - return "Index"; - } - }; + return BlockLoader.constantBytes(new BytesRef(blContext.indexName())); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index 80fd384f15fb7..56a50c2dee0aa 
100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -408,7 +408,7 @@ public static Query rangeQuery( @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { if (hasDocValues()) { - return BlockDocValuesReader.bytesRefsFromOrds(name()); + return new BlockDocValuesReader.BytesRefsFromOrdsBlockLoader(name()); } return null; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpScriptBlockDocValuesReader.java b/server/src/main/java/org/elasticsearch/index/mapper/IpScriptBlockDocValuesReader.java index 23229a6533cdb..ff063555ff05d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpScriptBlockDocValuesReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpScriptBlockDocValuesReader.java @@ -8,14 +8,31 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.script.IpFieldScript; +import java.io.IOException; + /** * {@link BlockDocValuesReader} implementation for keyword scripts. */ public class IpScriptBlockDocValuesReader extends BlockDocValuesReader { - public static BlockLoader blockLoader(IpFieldScript.LeafFactory factory) { - return context -> new IpScriptBlockDocValuesReader(factory.newInstance(context)); + static class IpScriptBlockLoader extends DocValuesBlockLoader { + private final IpFieldScript.LeafFactory factory; + + IpScriptBlockLoader(IpFieldScript.LeafFactory factory) { + this.factory = factory; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.bytesRefs(expectedCount); + } + + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + return new IpScriptBlockDocValuesReader(factory.newInstance(context)); + } } private final IpFieldScript script; @@ -26,18 +43,14 @@ public static BlockLoader blockLoader(IpFieldScript.LeafFactory factory) { } @Override - public int docID() { + public int docId() { return docId; } @Override - public BlockLoader.BytesRefBuilder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.bytesRefs(expectedCount); // Note that we don't pre-sort our output so we can't use bytesRefsFromDocValues - } - - @Override - public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoader.Docs docs) { - try (BlockLoader.BytesRefBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockLoader.BlockFactory factory, BlockLoader.Docs docs) throws IOException { + // Note that we don't pre-sort our output so we can't use bytesRefsFromDocValues + try (BlockLoader.BytesRefBuilder builder = factory.bytesRefs(docs.count())) { for (int i = 0; i < docs.count(); i++) { read(docs.get(i), builder); } @@ -46,7 +59,7 @@ public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoa } @Override - public void readValuesFromSingleDoc(int docId, BlockLoader.Builder builder) { + public void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws IOException { this.docId = docId; read(docId, (BlockLoader.BytesRefBuilder) builder); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/IpScriptFieldType.java index 0e56b30e2d5d9..4a64184d5d164 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpScriptFieldType.java +++ 
b/server/src/main/java/org/elasticsearch/index/mapper/IpScriptFieldType.java @@ -211,6 +211,6 @@ private Query cidrQuery(String term, SearchExecutionContext context) { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - return IpScriptBlockDocValuesReader.blockLoader(leafFactory(blContext.lookup())); + return new IpScriptBlockDocValuesReader.IpScriptBlockLoader(leafFactory(blContext.lookup())); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index f15bb0069570f..b62113a586bba 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -580,7 +580,7 @@ NamedAnalyzer normalizer() { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { if (hasDocValues()) { - return BlockDocValuesReader.bytesRefsFromOrds(name()); + return new BlockDocValuesReader.BytesRefsFromOrdsBlockLoader(name()); } if (isSyntheticSource) { if (false == isStored()) { @@ -590,9 +590,9 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { + "] is only supported in synthetic _source index if it creates doc values or stored fields" ); } - return BlockStoredFieldsReader.bytesRefsFromBytesRefs(name()); + return new BlockStoredFieldsReader.BytesFromBytesRefsBlockLoader(name()); } - return BlockSourceReader.bytesRefs(sourceValueFetcher(blContext.sourcePaths(name()))); + return new BlockSourceReader.BytesRefsBlockLoader(sourceValueFetcher(blContext.sourcePaths(name()))); } @Override @@ -822,6 +822,10 @@ public void validateMatchedRoutingPath(final String routingPath) { ); } } + + public boolean hasNormalizer() { + return normalizer != Lucene.KEYWORD_ANALYZER; + } } private final boolean indexed; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordScriptBlockDocValuesReader.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordScriptBlockDocValuesReader.java index 6afbcae50d31f..df5ba51755c2a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordScriptBlockDocValuesReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordScriptBlockDocValuesReader.java @@ -8,15 +8,32 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.script.StringFieldScript; +import java.io.IOException; + /** * {@link BlockDocValuesReader} implementation for keyword scripts. 
*/ public class KeywordScriptBlockDocValuesReader extends BlockDocValuesReader { - public static BlockLoader blockLoader(StringFieldScript.LeafFactory factory) { - return context -> new KeywordScriptBlockDocValuesReader(factory.newInstance(context)); + static class KeywordScriptBlockLoader extends DocValuesBlockLoader { + private final StringFieldScript.LeafFactory factory; + + KeywordScriptBlockLoader(StringFieldScript.LeafFactory factory) { + this.factory = factory; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.bytesRefs(expectedCount); + } + + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + return new KeywordScriptBlockDocValuesReader(factory.newInstance(context)); + } } private final BytesRefBuilder bytesBuild = new BytesRefBuilder(); @@ -28,18 +45,14 @@ public static BlockLoader blockLoader(StringFieldScript.LeafFactory factory) { } @Override - public int docID() { + public int docId() { return docId; } @Override - public BlockLoader.BytesRefBuilder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.bytesRefs(expectedCount); // Note that we don't pre-sort our output so we can't use bytesRefsFromDocValues - } - - @Override - public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoader.Docs docs) { - try (BlockLoader.BytesRefBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockLoader.BlockFactory factory, BlockLoader.Docs docs) throws IOException { + // Note that we don't pre-sort our output so we can't use bytesRefsFromDocValues + try (BlockLoader.BytesRefBuilder builder = factory.bytesRefs(docs.count())) { for (int i = 0; i < docs.count(); i++) { read(docs.get(i), builder); } @@ -48,7 +61,7 @@ public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoa } @Override - public void readValuesFromSingleDoc(int docId, BlockLoader.Builder builder) { + public void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws IOException { this.docId = docId; read(docId, (BlockLoader.BytesRefBuilder) builder); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordScriptFieldType.java index 879a28d4c76c8..188f0ae508fcc 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordScriptFieldType.java @@ -112,7 +112,7 @@ public Object valueForDisplay(Object value) { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - return KeywordScriptBlockDocValuesReader.blockLoader(leafFactory(blContext.lookup())); + return new KeywordScriptBlockDocValuesReader.KeywordScriptBlockLoader(leafFactory(blContext.lookup())); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/LongScriptBlockDocValuesReader.java b/server/src/main/java/org/elasticsearch/index/mapper/LongScriptBlockDocValuesReader.java index 91c099cd2813b..73ad359147571 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/LongScriptBlockDocValuesReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/LongScriptBlockDocValuesReader.java @@ -8,14 +8,31 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.script.LongFieldScript; +import java.io.IOException; + /** * {@link 
BlockDocValuesReader} implementation for {@code long} scripts. */ public class LongScriptBlockDocValuesReader extends BlockDocValuesReader { - public static BlockLoader blockLoader(LongFieldScript.LeafFactory factory) { - return context -> new LongScriptBlockDocValuesReader(factory.newInstance(context)); + static class LongScriptBlockLoader extends DocValuesBlockLoader { + private final LongFieldScript.LeafFactory factory; + + LongScriptBlockLoader(LongFieldScript.LeafFactory factory) { + this.factory = factory; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.longs(expectedCount); + } + + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + return new LongScriptBlockDocValuesReader(factory.newInstance(context)); + } } private final LongFieldScript script; @@ -26,18 +43,14 @@ public static BlockLoader blockLoader(LongFieldScript.LeafFactory factory) { } @Override - public int docID() { + public int docId() { return docId; } @Override - public BlockLoader.LongBuilder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.longs(expectedCount); // Note that we don't pre-sort our output so we can't use longsFromDocValues - } - - @Override - public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoader.Docs docs) { - try (BlockLoader.LongBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockLoader.BlockFactory factory, BlockLoader.Docs docs) throws IOException { + // Note that we don't pre-sort our output so we can't use longsFromDocValues + try (BlockLoader.LongBuilder builder = factory.longs(docs.count())) { for (int i = 0; i < docs.count(); i++) { read(docs.get(i), builder); } @@ -46,7 +59,7 @@ public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoa } @Override - public void readValuesFromSingleDoc(int docId, BlockLoader.Builder builder) { + public void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws IOException { this.docId = docId; read(docId, (BlockLoader.LongBuilder) builder); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/LongScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/LongScriptFieldType.java index f89babe32d0a9..f099ee3625922 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/LongScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/LongScriptFieldType.java @@ -107,7 +107,7 @@ public DocValueFormat docValueFormat(String format, ZoneId timeZone) { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - return LongScriptBlockDocValuesReader.blockLoader(leafFactory(blContext.lookup())); + return new LongScriptBlockDocValuesReader.LongScriptBlockLoader(leafFactory(blContext.lookup())); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index b68bb1a2b1987..376cb1a10e2e6 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -658,6 +658,11 @@ public interface BlockLoaderContext { * Find the paths in {@code _source} that contain values for the field named {@code name}. */ Set sourcePaths(String name); + + /** + * If field is a leaf multi-field return the path to the parent field. Otherwise, return null. 
+ */ + String parentField(String field); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index deaac37508511..cbf2dd872da2f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -707,7 +707,7 @@ public boolean isMultiField(String field) { */ public synchronized List reloadSearchAnalyzers(AnalysisRegistry registry, @Nullable String resource, boolean preview) throws IOException { - logger.info("reloading search analyzers"); + logger.debug("reloading search analyzers for index [{}]", indexSettings.getIndex().getName()); // TODO this should bust the cache somehow. Tracked in https://github.com/elastic/elasticsearch/issues/66722 return indexAnalyzers.reload(registry, indexSettings, resource, preview); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java index 7c44f33fbafa5..4880ce5edc204 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java @@ -404,6 +404,13 @@ public Set sourcePaths(String field) { return fieldTypesLookup().sourcePaths(field); } + /** + * If field is a leaf multi-field return the path to the parent field. Otherwise, return null. + */ + public String parentField(String field) { + return fieldTypesLookup().parentField(field); + } + /** * Returns true if the index has mappings. An index does not have mappings only if it was created * without providing mappings explicitly, and no documents have yet been indexed in it. 
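The parentField additions above (FieldTypeLookup, MappingLookup, and MappedFieldType.BlockLoaderContext) let a block loader ask whether a field is a leaf multi-field and, if so, find its parent. A minimal sketch of the delegation this enables, mirroring the TextFieldType#blockLoader change later in this patch; the helper class and method names below are illustrative only and not part of the change:

    import org.elasticsearch.index.mapper.BlockLoader;
    import org.elasticsearch.index.mapper.KeywordFieldMapper;
    import org.elasticsearch.index.mapper.MappedFieldType;

    class ParentFieldDelegationSketch {
        // If "field" is a leaf multi-field (e.g. "title.raw" under "title") whose parent
        // is a keyword field with no normalizer and with doc values or stored values to
        // read from, reuse the parent's cheap loader instead of the field's own loader.
        static BlockLoader loaderFor(MappedFieldType.BlockLoaderContext blContext, MappedFieldType field) {
            String parentField = blContext.parentField(field.name()); // null unless a leaf multi-field
            if (parentField != null) {
                MappedFieldType parent = blContext.lookup().fieldType(parentField);
                if (parent instanceof KeywordFieldMapper.KeywordFieldType kwd
                    && kwd.hasNormalizer() == false
                    && (kwd.hasDocValues() || kwd.isStored())) {
                    return kwd.blockLoader(blContext); // exact values, loaded cheaply
                }
            }
            return field.blockLoader(blContext); // fall back to the field's own loader
        }
    }

The point of routing through the parent is that text sub-fields are always slow to load, while an un-normalized keyword parent holds the exact same bytes in doc values or stored fields, so its loader can stand in for the child's.
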
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 84e9e84fb8ceb..091e3c61764b0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -440,12 +440,12 @@ protected void writeValue(XContentBuilder b, long value) throws IOException { @Override BlockLoader blockLoaderFromDocValues(String fieldName) { - return BlockDocValuesReader.doubles(fieldName, l -> HalfFloatPoint.sortableShortToHalfFloat((short) l)); + return new BlockDocValuesReader.DoublesBlockLoader(fieldName, l -> HalfFloatPoint.sortableShortToHalfFloat((short) l)); } @Override BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher) { - return BlockSourceReader.doubles(sourceValueFetcher); + return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher); } }, FLOAT("float", NumericType.FLOAT) { @@ -602,12 +602,12 @@ protected void writeValue(XContentBuilder b, long value) throws IOException { @Override BlockLoader blockLoaderFromDocValues(String fieldName) { - return BlockDocValuesReader.doubles(fieldName, l -> NumericUtils.sortableIntToFloat((int) l)); + return new BlockDocValuesReader.DoublesBlockLoader(fieldName, l -> NumericUtils.sortableIntToFloat((int) l)); } @Override BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher) { - return BlockSourceReader.doubles(sourceValueFetcher); + return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher); } }, DOUBLE("double", NumericType.DOUBLE) { @@ -742,12 +742,12 @@ protected void writeValue(XContentBuilder b, long value) throws IOException { @Override BlockLoader blockLoaderFromDocValues(String fieldName) { - return BlockDocValuesReader.doubles(fieldName, NumericUtils::sortableLongToDouble); + return new BlockDocValuesReader.DoublesBlockLoader(fieldName, NumericUtils::sortableLongToDouble); } @Override BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher) { - return BlockSourceReader.doubles(sourceValueFetcher); + return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher); } }, BYTE("byte", NumericType.BYTE) { @@ -845,12 +845,12 @@ SourceLoader.SyntheticFieldLoader syntheticFieldLoader(String fieldName, String @Override BlockLoader blockLoaderFromDocValues(String fieldName) { - return BlockDocValuesReader.ints(fieldName); + return new BlockDocValuesReader.IntsBlockLoader(fieldName); } @Override BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher) { - return BlockSourceReader.ints(sourceValueFetcher); + return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher); } }, SHORT("short", NumericType.SHORT) { @@ -944,12 +944,12 @@ SourceLoader.SyntheticFieldLoader syntheticFieldLoader(String fieldName, String @Override BlockLoader blockLoaderFromDocValues(String fieldName) { - return BlockDocValuesReader.ints(fieldName); + return new BlockDocValuesReader.IntsBlockLoader(fieldName); } @Override BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher) { - return BlockSourceReader.ints(sourceValueFetcher); + return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher); } }, INTEGER("integer", NumericType.INT) { @@ -1111,12 +1111,12 @@ SourceLoader.SyntheticFieldLoader syntheticFieldLoader(String fieldName, String @Override BlockLoader blockLoaderFromDocValues(String fieldName) { - return BlockDocValuesReader.ints(fieldName); + return new 
BlockDocValuesReader.IntsBlockLoader(fieldName); } @Override BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher) { - return BlockSourceReader.ints(sourceValueFetcher); + return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher); } }, LONG("long", NumericType.LONG) { @@ -1248,12 +1248,12 @@ SourceLoader.SyntheticFieldLoader syntheticFieldLoader(String fieldName, String @Override BlockLoader blockLoaderFromDocValues(String fieldName) { - return BlockDocValuesReader.longs(fieldName); + return new BlockDocValuesReader.LongsBlockLoader(fieldName); } @Override BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher) { - return BlockSourceReader.longs(sourceValueFetcher); + return new BlockSourceReader.LongsBlockLoader(sourceValueFetcher); } }; @@ -1656,7 +1656,7 @@ public Function pointReaderIfPossible() { public BlockLoader blockLoader(BlockLoaderContext blContext) { if (indexMode == IndexMode.TIME_SERIES && metricType == TimeSeriesParams.MetricType.COUNTER) { // Counters are not supported by ESQL so we load them in null - return BlockDocValuesReader.nulls(); + return BlockLoader.CONSTANT_NULLS; } if (hasDocValues()) { return type.blockLoaderFromDocValues(name()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ProvidedIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ProvidedIdFieldMapper.java index f681d54ebbead..d8a4177ee3211 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ProvidedIdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ProvidedIdFieldMapper.java @@ -119,7 +119,7 @@ public Query termsQuery(Collection values, SearchExecutionContext context) { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - return BlockStoredFieldsReader.id(); + return new BlockStoredFieldsReader.IdBlockLoader(); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldBlockLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldBlockLoader.java new file mode 100644 index 0000000000000..63455379044f7 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldBlockLoader.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedSetDocValues; +import org.elasticsearch.search.fetch.StoredFieldsSpec; + +import java.io.IOException; +import java.util.Set; + +/** + * Load {@code _source} into blocks. 
+ */ +public final class SourceFieldBlockLoader implements BlockLoader { + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.bytesRefs(expectedCount); + } + + @Override + public ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) { + return null; + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) throws IOException { + return new Source(); + } + + @Override + public StoredFieldsSpec rowStrideStoredFieldSpec() { + return new StoredFieldsSpec(true, false, Set.of()); + } + + @Override + public boolean supportsOrdinals() { + return false; + } + + @Override + public SortedSetDocValues ordinals(LeafReaderContext context) { + throw new UnsupportedOperationException(); + } + + private static class Source extends BlockStoredFieldsReader { + @Override + public void read(int docId, StoredFields storedFields, Builder builder) throws IOException { + // TODO support appending BytesReference + ((BytesRefBuilder) builder).appendBytesRef(storedFields.source().internalSourceRef().toBytesRef()); + } + + @Override + public String toString() { + return "BlockStoredFieldsReader.Source"; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index 42121147d7f09..958db80ae64c2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -192,9 +192,11 @@ private IndexMode getIndexMode() { ); static final class SourceFieldType extends MappedFieldType { + private final boolean enabled; private SourceFieldType(boolean enabled) { super(NAME, false, enabled, false, TextSearchInfo.NONE, Collections.emptyMap()); + this.enabled = enabled; } @Override @@ -216,6 +218,14 @@ public Query existsQuery(SearchExecutionContext context) { public Query termQuery(Object value, SearchExecutionContext context) { throw new QueryShardException(context, "The _source field is not searchable"); } + + @Override + public BlockLoader blockLoader(BlockLoaderContext blContext) { + if (enabled) { + return new SourceFieldBlockLoader(); + } + return BlockLoader.CONSTANT_NULLS; + } } // nullable for bwc reasons diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 1949249b9be2d..1ae0489173ce3 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -678,6 +678,7 @@ public TextFieldType( super(name, indexed, stored, false, tsi, meta); fielddata = false; this.isSyntheticSource = isSyntheticSource; + // TODO block loader could use a "fast loading" delegate which isn't always the same - but frequently is. 
this.syntheticSourceDelegate = syntheticSourceDelegate; this.eagerGlobalOrdinals = eagerGlobalOrdinals; this.indexPhrases = indexPhrases; @@ -939,25 +940,46 @@ public boolean isAggregatable() { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { if (syntheticSourceDelegate != null) { - return syntheticSourceDelegate.blockLoader(blContext); + return new BlockLoader.Delegating(syntheticSourceDelegate.blockLoader(blContext)) { + @Override + protected String delegatingTo() { + return syntheticSourceDelegate.name(); + } + }; } - if (isSyntheticSource) { - if (isStored()) { - return BlockStoredFieldsReader.bytesRefsFromStrings(name()); + /* + * If this is a sub-text field try and return the parent's loader. Text + * fields will always be slow to load and if the parent is exact then we + * should use that instead. + */ + String parentField = blContext.parentField(name()); + if (parentField != null) { + MappedFieldType parent = blContext.lookup().fieldType(parentField); + if (parent.typeName().equals(KeywordFieldMapper.CONTENT_TYPE)) { + KeywordFieldMapper.KeywordFieldType kwd = (KeywordFieldMapper.KeywordFieldType) parent; + if (kwd.hasNormalizer() == false && (kwd.hasDocValues() || kwd.isStored())) { + return new BlockLoader.Delegating(kwd.blockLoader(blContext)) { + @Override + protected String delegatingTo() { + return kwd.name(); + } + }; + } } + } + if (isStored()) { + return new BlockStoredFieldsReader.BytesFromStringsBlockLoader(name()); + } + if (isSyntheticSource) { /* - * We *shouldn't fall to this exception. The mapping should be - * rejected because we've enabled synthetic source but not configured - * the index properly. But we give it a nice message anyway just in - * case. + * When we're in synthetic source mode we don't currently + * support text fields that are not stored and are not children + * of perfect keyword fields. We'd have to load from the parent + * field and then convert the result to a string. 
*/ - throw new IllegalArgumentException( - "fetching values from a text field [" - + name() - + "] is supported because synthetic _source is enabled and we don't have a way to load the fields" - ); + return null; } - return BlockSourceReader.bytesRefs(SourceValueFetcher.toString(blContext.sourcePaths(name()))); + return new BlockSourceReader.BytesRefsBlockLoader(SourceValueFetcher.toString(blContext.sourcePaths(name()))); } @Override @@ -1019,7 +1041,7 @@ protected BytesRef storedToBytesRef(Object stored) { throw new IllegalArgumentException( "fetching values from a text field [" + name() - + "] is supported because synthetic _source is enabled and we don't have a way to load the fields" + + "] is not supported because synthetic _source is enabled and we don't have a way to load the fields" ); } return new SourceValueFetcherSortedBinaryIndexFieldData.Builder( @@ -1034,6 +1056,10 @@ protected BytesRef storedToBytesRef(Object stored) { public boolean isSyntheticSource() { return isSyntheticSource; } + + KeywordFieldMapper.KeywordFieldType syntheticSourceDelegate() { + return syntheticSourceDelegate; + } } public static class ConstantScoreTextFieldType extends TextFieldType { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java index 9d43ef398feac..9245e78602eb7 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java @@ -89,7 +89,7 @@ public Query termsQuery(Collection values, SearchExecutionContext context) { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - return BlockStoredFieldsReader.id(); + return new BlockStoredFieldsReader.IdBlockLoader(); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java index 54a44dd55caa4..8f69f6afe47db 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java @@ -56,7 +56,7 @@ public ValueFetcher valueFetcher(SearchExecutionContext context, String format) @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - return BlockDocValuesReader.longs(name()); + return new BlockDocValuesReader.LongsBlockLoader(name()); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java index 51be6290df657..c15adfb3be116 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java @@ -113,10 +113,6 @@ private static class Defaults { public static final int DEPTH_LIMIT = 20; } - private static FlattenedFieldMapper toType(FieldMapper in) { - return (FlattenedFieldMapper) in; - } - private static Builder builder(Mapper in) { return ((FlattenedFieldMapper) in).builder; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldSyntheticWriterHelper.java b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldSyntheticWriterHelper.java index a79b37796bbe9..ba1e27dc1a0aa 100644 --- 
a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldSyntheticWriterHelper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldSyntheticWriterHelper.java @@ -188,10 +188,6 @@ private KeyValue(final String key, final String value) { this(value, new Prefix(key), new Suffix(key)); } - public Prefix prefix() { - return this.prefix; - } - public Suffix suffix() { return this.suffix; } diff --git a/server/src/main/java/org/elasticsearch/index/query/Operator.java b/server/src/main/java/org/elasticsearch/index/query/Operator.java index 45e7cbc76f891..20f843df04651 100644 --- a/server/src/main/java/org/elasticsearch/index/query/Operator.java +++ b/server/src/main/java/org/elasticsearch/index/query/Operator.java @@ -12,7 +12,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.util.CollectionUtils; import java.io.IOException; import java.util.Locale; @@ -48,9 +47,4 @@ public static Operator fromString(String op) { return valueOf(op.toUpperCase(Locale.ROOT)); } - private static IllegalArgumentException newOperatorException(String op) { - return new IllegalArgumentException( - "operator needs to be either " + CollectionUtils.arrayAsArrayList(values()) + ", but not [" + op + "]" - ); - } } diff --git a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java index c4806dbd3a0a8..143dfe7fe6e9d 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java @@ -334,6 +334,13 @@ public Set sourcePath(String fullName) { return mappingLookup.sourcePaths(fullName); } + /** + * If field is a leaf multi-field return the path to the parent field. Otherwise, return null. + */ + public String parentPath(String field) { + return mappingLookup.parentField(field); + } + /** * Will there be {@code _source}. 
*/ diff --git a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequestBuilder.java b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequestBuilder.java index 3a7f316de6f40..7f6465e9bce8a 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequestBuilder.java @@ -9,14 +9,14 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.client.internal.ElasticsearchClient; public class DeleteByQueryRequestBuilder extends AbstractBulkByScrollRequestBuilder { public DeleteByQueryRequestBuilder(ElasticsearchClient client, ActionType action) { - this(client, action, new SearchRequestBuilder(client, SearchAction.INSTANCE)); + this(client, action, new SearchRequestBuilder(client, TransportSearchAction.TYPE)); } private DeleteByQueryRequestBuilder(ElasticsearchClient client, ActionType action, SearchRequestBuilder search) { diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequestBuilder.java b/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequestBuilder.java index 68a0b948ef32d..e79d06ceba0b5 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequestBuilder.java @@ -11,8 +11,8 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.client.internal.ElasticsearchClient; public class ReindexRequestBuilder extends AbstractBulkIndexByScrollRequestBuilder { @@ -22,7 +22,7 @@ public ReindexRequestBuilder(ElasticsearchClient client, ActionType { public UpdateByQueryRequestBuilder(ElasticsearchClient client, ActionType action) { - this(client, action, new SearchRequestBuilder(client, SearchAction.INSTANCE)); + this(client, action, new SearchRequestBuilder(client, TransportSearchAction.TYPE)); } private UpdateByQueryRequestBuilder(ElasticsearchClient client, ActionType action, SearchRequestBuilder search) { diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index 2e39b13b34c78..0b3b15670ef78 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.gateway.WriteStateException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersions; @@ -466,13 +467,13 @@ public RetentionLeases loadRetentionLeases(final Path path) throws IOException { synchronized (retentionLeasePersistenceLock) { retentionLeases = RetentionLeases.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, path); } + return 
emptyIfNull(retentionLeases); + } - // TODO after backporting we expect this never to happen in 8.x, so adjust this to throw an exception instead. - assert Version.CURRENT.major <= 8 : "throw an exception instead of returning EMPTY on null"; - if (retentionLeases == null) { - return RetentionLeases.EMPTY; - } - return retentionLeases; + @UpdateForV9 + private static RetentionLeases emptyIfNull(RetentionLeases retentionLeases) { + // we expect never to see a null in 8.x, so adjust this to throw an exception from v9 onwards. + return retentionLeases == null ? RetentionLeases.EMPTY : retentionLeases; } private final Object retentionLeasePersistenceLock = new Object(); diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 2491d13784483..d7d67b3af159e 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1393,22 +1393,38 @@ public BulkStats bulkStats() { * If false is returned, no flush happened. */ public boolean flush(FlushRequest request) { + PlainActionFuture future = new PlainActionFuture<>(); + flush(request, future); + return future.actionGet(); + } + + /** + * Executes the given flush request against the engine. + * + * @param request the flush request + * @param listener to notify after full durability has been achieved. + * false if waitIfOngoing==false + * and an ongoing request is detected, else true. + * If false is returned, no flush happened. + */ + public void flush(FlushRequest request, ActionListener listener) { final boolean waitIfOngoing = request.waitIfOngoing(); final boolean force = request.force(); logger.trace("flush with {}", request); - /* - * We allow flushes while recovery since we allow operations to happen while recovering and we want to keep the translog under - * control (up to deletes, which we do not GC). Yet, we do not use flush internally to clear deletes and flush the index writer - * since we use Engine#writeIndexingBuffer for this now. - */ - verifyNotClosed(); - final long time = System.nanoTime(); - // TODO: Transition this method to async to support async flush - PlainActionFuture future = PlainActionFuture.newFuture(); - getEngine().flush(force, waitIfOngoing, future); - Engine.FlushResult flushResult = future.actionGet(); - flushMetric.inc(System.nanoTime() - time); - return flushResult.flushPerformed(); + ActionListener.run(listener, l -> { + /* + * We allow flushes while recovery since we allow operations to happen while recovering and we want to keep the translog under + * control (up to deletes, which we do not GC). Yet, we do not use flush internally to clear deletes and flush the index writer + * since we use Engine#writeIndexingBuffer for this now. + */ + verifyNotClosed(); + final long startTime = System.nanoTime(); + getEngine().flush( + force, + waitIfOngoing, + ActionListener.runBefore(l.map(Engine.FlushResult::flushPerformed), () -> flushMetric.inc(System.nanoTime() - startTime)) + ); + }); } /** @@ -2006,7 +2022,7 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) t assert currentEngineReference.get() == null : "engine is running"; verifyNotClosed(); // we must create a new engine under mutex (see IndexShard#snapshotStoreMetadata). 
- final Engine newEngine = engineFactory.newReadWriteEngine(config); + final Engine newEngine = createEngine(config); onNewEngine(newEngine); currentEngineReference.set(newEngine); // We set active because we are now writing operations to the engine; this way, @@ -2021,6 +2037,22 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) t checkAndCallWaitForEngineOrClosedShardListeners(); } + // awful hack to work around problem in CloseFollowerIndexIT + static boolean suppressCreateEngineErrors; + + private Engine createEngine(EngineConfig config) { + if (suppressCreateEngineErrors) { + try { + return engineFactory.newReadWriteEngine(config); + } catch (Error e) { + ExceptionsHelper.maybeDieOnAnotherThread(e); + throw new RuntimeException("rethrowing suppressed error", e); + } + } else { + return engineFactory.newReadWriteEngine(config); + } + } + private boolean assertSequenceNumbersInCommit() throws IOException { final SegmentInfos segmentCommitInfos = SegmentInfos.readLatestCommit(store.directory()); final Map userData = segmentCommitInfos.getUserData(); @@ -2275,25 +2307,26 @@ public void flushOnIdle(long inactiveTimeNS) { boolean wasActive = active.getAndSet(false); if (wasActive) { logger.debug("flushing shard on inactive"); - threadPool.executor(ThreadPool.Names.FLUSH).execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - if (state != IndexShardState.CLOSED) { - active.set(true); - logger.warn("failed to flush shard on inactive", e); + threadPool.executor(ThreadPool.Names.FLUSH) + .execute(() -> flush(new FlushRequest().waitIfOngoing(false).force(false), new ActionListener<>() { + @Override + public void onResponse(Boolean flushed) { + if (flushed == false) { + // In case an ongoing flush was detected, revert active flag so that a next flushOnIdle request + // will retry (#87888) + active.set(true); + } + periodicFlushMetric.inc(); } - } - @Override - protected void doRun() { - if (flush(new FlushRequest().waitIfOngoing(false).force(false)) == false) { - // In case an ongoing flush was detected, revert active flag so that a next flushOnIdle request - // will retry (#87888) - active.set(true); + @Override + public void onFailure(Exception e) { + if (state != IndexShardState.CLOSED) { + active.set(true); + logger.warn("failed to flush shard on inactive", e); + } } - periodicFlushMetric.inc(); - } - }); + })); } } } @@ -3740,27 +3773,23 @@ public void afterWriteOperation() { */ if (shouldPeriodicallyFlush()) { logger.debug("submitting async flush request"); - final AbstractRunnable flush = new AbstractRunnable() { - @Override - public void onFailure(final Exception e) { - if (state != IndexShardState.CLOSED) { - logger.warn("failed to flush index", e); + threadPool.executor(ThreadPool.Names.FLUSH).execute(() -> { + flush(new FlushRequest(), new ActionListener<>() { + @Override + public void onResponse(Boolean flushed) { + periodicFlushMetric.inc(); } - } - - @Override - protected void doRun() { - flush(new FlushRequest()); - periodicFlushMetric.inc(); - } - @Override - public void onAfter() { - flushOrRollRunning.compareAndSet(true, false); - afterWriteOperation(); - } - }; - threadPool.executor(ThreadPool.Names.FLUSH).execute(flush); + @Override + public void onFailure(Exception e) { + if (state != IndexShardState.CLOSED) { + logger.warn("failed to flush index", e); + } + } + }); + flushOrRollRunning.compareAndSet(true, false); + afterWriteOperation(); + }); } else if (shouldRollTranslogGeneration()) { logger.debug("submitting 
async roll translog generation request"); final AbstractRunnable roll = new AbstractRunnable() { @@ -3823,16 +3852,18 @@ && isSearchIdle() // lets skip this refresh since we are search idle and // don't necessarily need to refresh. the next searcher access will register a refreshListener and that will // cause the next schedule to refresh. + logger.trace("scheduledRefresh: search-idle, skipping refresh"); engine.maybePruneDeletes(); // try to prune the deletes in the engine if we accumulated some setRefreshPending(engine); l.onResponse(false); return; } else { - logger.trace("refresh with source [schedule]"); + logger.trace("scheduledRefresh: refresh with source [schedule]"); engine.maybeRefresh("schedule", l.map(Engine.RefreshResult::refreshed)); return; } } + logger.trace("scheduledRefresh: no refresh needed"); engine.maybePruneDeletes(); // try to prune the deletes in the engine if we accumulated some l.onResponse(false); }); @@ -3928,7 +3959,7 @@ public final void ensureShardSearchActive(Consumer listener) { // a refresh can be a costly operation, so we should fork to a refresh thread to be safe: threadPool.executor(ThreadPool.Names.REFRESH).execute(() -> { if (location == pendingRefreshLocation.get()) { - getEngine().maybeRefresh("ensure-shard-search-active", PlainActionFuture.newFuture()); + getEngine().maybeRefresh("ensure-shard-search-active", new PlainActionFuture<>()); } }); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index ded3ffa4ebcc0..bc5a4b02116a7 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -31,12 +31,12 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.concurrent.ListenableFuture; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException; @@ -48,6 +48,7 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; @@ -308,7 +309,7 @@ void recoverFromRepository(final IndexShard indexShard, Repository repository, A RecoverySource.Type recoveryType = indexShard.recoveryState().getRecoverySource().getType(); assert recoveryType == RecoverySource.Type.SNAPSHOT : "expected snapshot recovery type: " + recoveryType; SnapshotRecoverySource recoverySource = (SnapshotRecoverySource) indexShard.recoveryState().getRecoverySource(); - restore(indexShard, repository, recoverySource, recoveryListener(indexShard, listener)); + restore(indexShard, repository, recoverySource, recoveryListener(indexShard, listener).map(ignored -> true)); } else { listener.onResponse(false); } @@ -408,15 +409,19 @@ private ActionListener recoveryListener(IndexShard indexShard, ActionLi * Recovers the state of the shard from the store. 
*/ private void internalRecoverFromStore(IndexShard indexShard, ActionListener outerListener) { - indexShard.preRecovery(outerListener.delegateFailureAndWrap((listener, ignored) -> { - final RecoveryState recoveryState = indexShard.recoveryState(); - final boolean indexShouldExists = recoveryState.getRecoverySource().getType() != RecoverySource.Type.EMPTY_STORE; - indexShard.prepareForIndexRecovery(); - SegmentInfos si = null; - final Store store = indexShard.store(); - store.incRef(); - boolean triggeredPostRecovery = false; - try { + final List releasables = new ArrayList<>(1); + SubscribableListener + + .newForked(indexShard::preRecovery) + + .andThen((l, ignored) -> { + final RecoveryState recoveryState = indexShard.recoveryState(); + final boolean indexShouldExists = recoveryState.getRecoverySource().getType() != RecoverySource.Type.EMPTY_STORE; + indexShard.prepareForIndexRecovery(); + SegmentInfos si = null; + final Store store = indexShard.store(); + store.incRef(); + releasables.add(store::decRef); try { store.failIfCorrupted(); try { @@ -480,16 +485,16 @@ private void internalRecoverFromStore(IndexShard indexShard, ActionListener { + if (e instanceof IndexShardRecoveryException) { + l.onFailure(e); + } else { + l.onFailure(new IndexShardRecoveryException(shardId, "failed to recover from gateway", e)); } - } - })); + }), () -> Releasables.close(releasables))); } private static void writeEmptyRetentionLeasesFile(IndexShard indexShard) throws IOException { @@ -513,31 +518,24 @@ private void restore( IndexShard indexShard, Repository repository, SnapshotRecoverySource restoreSource, - ActionListener outerListener + ActionListener outerListener ) { logger.debug("restoring from {} ...", indexShard.recoveryState().getRecoverySource()); - indexShard.preRecovery(outerListener.delegateFailure((listener, ignored) -> { - final RecoveryState.Translog translogState = indexShard.recoveryState().getTranslog(); - if (restoreSource == null) { - listener.onFailure(new IndexShardRestoreFailedException(shardId, "empty restore source")); - return; - } - if (logger.isTraceEnabled()) { - logger.trace("[{}] restoring shard [{}]", restoreSource.snapshot(), shardId); - } - final ActionListener restoreListener = ActionListener.wrap(v -> { - indexShard.getIndexEventListener().afterFilesRestoredFromRepository(indexShard); - final Store store = indexShard.store(); - bootstrap(indexShard, store); - assert indexShard.shardRouting.primary() : "only primary shards can recover from store"; - writeEmptyRetentionLeasesFile(indexShard); - indexShard.openEngineAndRecoverFromTranslog(); - indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); - indexShard.finalizeRecovery(); - indexShard.postRecovery("restore done", listener.map(voidValue -> true)); - }, e -> listener.onFailure(new IndexShardRestoreFailedException(shardId, "restore failed", e))); - try { + record ShardAndIndexIds(IndexId indexId, ShardId shardId) {} + + SubscribableListener + + .newForked(indexShard::preRecovery) + + .andThen((shardAndIndexIdsListener, ignored) -> { + final RecoveryState.Translog translogState = indexShard.recoveryState().getTranslog(); + if (restoreSource == null) { + throw new IndexShardRestoreFailedException(shardId, "empty restore source"); + } + if (logger.isTraceEnabled()) { + logger.trace("[{}] restoring shard [{}]", restoreSource.snapshot(), shardId); + } translogState.totalOperations(0); translogState.totalOperationsOnStart(0); indexShard.prepareForIndexRecovery(); @@ -548,37 +546,56 @@ private void restore( 
} else { snapshotShardId = new ShardId(indexId.getName(), IndexMetadata.INDEX_UUID_NA_VALUE, shardId.id()); } - final ListenableFuture indexIdListener = new ListenableFuture<>(); - // If the index UUID was not found in the recovery source we will have to load RepositoryData and resolve it by index name if (indexId.getId().equals(IndexMetadata.INDEX_UUID_NA_VALUE)) { - // BwC path, running against an old version master that did not add the IndexId to the recovery source + // BwC path, running against an old version master that did not add the IndexId to the recovery source. If the index + // UUID was not found in the recovery source we will have to load RepositoryData and resolve it by index name repository.getRepositoryData( // TODO no need to fork back to GENERIC if using cached repo data, see #101445 EsExecutors.DIRECT_EXECUTOR_SERVICE, new ThreadedActionListener<>( indexShard.getThreadPool().generic(), - indexIdListener.map(repositoryData -> repositoryData.resolveIndexId(indexId.getName())) + shardAndIndexIdsListener.map( + repositoryData -> new ShardAndIndexIds(repositoryData.resolveIndexId(indexId.getName()), snapshotShardId) + ) ) ); } else { - indexIdListener.onResponse(indexId); + shardAndIndexIdsListener.onResponse(new ShardAndIndexIds(indexId, snapshotShardId)); } + }) + + .andThen((restoreListener, shardAndIndexId) -> { assert indexShard.getEngineOrNull() == null; - indexIdListener.addListener(restoreListener.delegateFailureAndWrap((l, idx) -> { - assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.GENERIC, ThreadPool.Names.SNAPSHOT); - repository.restoreShard( - indexShard.store(), - restoreSource.snapshot().getSnapshotId(), - idx, - snapshotShardId, - indexShard.recoveryState(), - l - ); - })); - } catch (Exception e) { - restoreListener.onFailure(e); - } - })); + assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.GENERIC, ThreadPool.Names.SNAPSHOT); + repository.restoreShard( + indexShard.store(), + restoreSource.snapshot().getSnapshotId(), + shardAndIndexId.indexId(), + shardAndIndexId.shardId(), + indexShard.recoveryState(), + restoreListener + ); + }) + + .andThen((l, ignored) -> { + indexShard.getIndexEventListener().afterFilesRestoredFromRepository(indexShard); + final Store store = indexShard.store(); + bootstrap(indexShard, store); + assert indexShard.shardRouting.primary() : "only primary shards can recover from store"; + writeEmptyRetentionLeasesFile(indexShard); + indexShard.openEngineAndRecoverFromTranslog(); + indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); + indexShard.finalizeRecovery(); + indexShard.postRecovery("restore done", l); + }) + + .addListener(outerListener.delegateResponse((l, e) -> { + if (e instanceof IndexShardRestoreFailedException) { + l.onFailure(e); + } else { + l.onFailure(new IndexShardRestoreFailedException(shardId, "restore failed", e)); + } + })); } public static void bootstrap(final IndexShard indexShard, final Store store) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java b/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java index 140c4684d1a70..451af25dfa649 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java @@ -61,7 +61,16 @@ public enum Stage { * where an abort could have occurred. 
*/ public enum AbortStatus { + /** + * The shard snapshot got past the stage where an abort or pause could have occurred, and is either complete or on its way to + * completion. + */ NO_ABORT, + + /** + * The shard snapshot stopped before completion, either because the whole snapshot was aborted or because this node is to be + * removed. + */ ABORTED } diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesFeatures.java b/server/src/main/java/org/elasticsearch/indices/IndicesFeatures.java new file mode 100644 index 0000000000000..7fb52bcd0be1c --- /dev/null +++ b/server/src/main/java/org/elasticsearch/indices/IndicesFeatures.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.indices; + +import org.elasticsearch.Version; +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; + +import java.util.Map; + +public class IndicesFeatures implements FeatureSpecification { + @Override + public Map getHistoricalFeatures() { + return Map.of(IndicesService.SUPPORTS_AUTO_PUT, Version.V_8_8_0); + } +} diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index bcd5b6015df51..0faa66a9d21da 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -17,7 +17,6 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ResourceAlreadyExistsException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.mapping.put.AutoPutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; @@ -75,6 +74,8 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; import org.elasticsearch.env.ShardLockObtainFailedException; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.gateway.MetaStateService; import org.elasticsearch.gateway.MetadataStateFormat; import org.elasticsearch.index.Index; @@ -137,7 +138,6 @@ import org.elasticsearch.search.query.QueryPhase; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; @@ -207,6 +207,8 @@ public class IndicesService extends AbstractLifecycleComponent Setting.Property.NodeScope ); + static final NodeFeature SUPPORTS_AUTO_PUT = new NodeFeature("indices.auto_put_supported"); + /** * The node's settings. 
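The SUPPORTS_AUTO_PUT node feature defined above, together with the featureService.clusterHasFeature check in the createShard change below, replaces the old minimum-node-version comparison. A minimal sketch of declaring and checking a historical feature (hypothetical feature name; the real declaration is the IndicesFeatures class above):

    import org.elasticsearch.Version;
    import org.elasticsearch.features.FeatureSpecification;
    import org.elasticsearch.features.NodeFeature;

    import java.util.Map;

    public class ExampleFeatures implements FeatureSpecification {
        public static final NodeFeature EXAMPLE_FEATURE = new NodeFeature("example.supported");

        @Override
        public Map<NodeFeature, Version> getHistoricalFeatures() {
            // every node at or above 8.8.0 is assumed to provide this feature
            return Map.of(EXAMPLE_FEATURE, Version.V_8_8_0);
        }
    }

    // at a call site, gate on the whole cluster having the feature instead of comparing node versions:
    // boolean supported = featureService.clusterHasFeature(clusterService.state(), ExampleFeatures.EXAMPLE_FEATURE);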
*/ @@ -226,6 +228,7 @@ public class IndicesService extends AbstractLifecycleComponent private final ScriptService scriptService; private final ClusterService clusterService; private final Client client; + private final FeatureService featureService; private volatile Map indices = Map.of(); private final Map> pendingDeletes = new HashMap<>(); private final AtomicInteger numUncompletedDeletes = new AtomicInteger(); @@ -268,59 +271,35 @@ protected void doStart() { } @SuppressWarnings("this-escape") - public IndicesService( - Settings settings, - PluginsService pluginsService, - NodeEnvironment nodeEnv, - NamedXContentRegistry xContentRegistry, - AnalysisRegistry analysisRegistry, - IndexNameExpressionResolver indexNameExpressionResolver, - MapperRegistry mapperRegistry, - NamedWriteableRegistry namedWriteableRegistry, - ThreadPool threadPool, - IndexScopedSettings indexScopedSettings, - CircuitBreakerService circuitBreakerService, - BigArrays bigArrays, - ScriptService scriptService, - ClusterService clusterService, - Client client, - MetaStateService metaStateService, - Collection>> engineFactoryProviders, - Map directoryFactories, - ValuesSourceRegistry valuesSourceRegistry, - Map recoveryStateFactories, - List indexFoldersDeletionListeners, - Map snapshotCommitSuppliers, - CheckedBiConsumer requestCacheKeyDifferentiator, - Supplier documentParsingObserverSupplier - ) { - this.settings = settings; - this.threadPool = threadPool; - this.pluginsService = pluginsService; - this.nodeEnv = nodeEnv; + IndicesService(IndicesServiceBuilder builder) { + this.settings = builder.settings; + this.threadPool = builder.threadPool; + this.pluginsService = builder.pluginsService; + this.nodeEnv = builder.nodeEnv; this.parserConfig = XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE) - .withRegistry(xContentRegistry); - this.valuesSourceRegistry = valuesSourceRegistry; + .withRegistry(builder.xContentRegistry); + this.valuesSourceRegistry = builder.valuesSourceRegistry; this.shardsClosedTimeout = settings.getAsTime(INDICES_SHARDS_CLOSED_TIMEOUT, new TimeValue(1, TimeUnit.DAYS)); - this.analysisRegistry = analysisRegistry; - this.indexNameExpressionResolver = indexNameExpressionResolver; + this.analysisRegistry = builder.analysisRegistry; + this.indexNameExpressionResolver = builder.indexNameExpressionResolver; this.indicesRequestCache = new IndicesRequestCache(settings); this.indicesQueryCache = new IndicesQueryCache(settings); - this.mapperRegistry = mapperRegistry; - this.namedWriteableRegistry = namedWriteableRegistry; - this.documentParsingObserverSupplier = documentParsingObserverSupplier; + this.mapperRegistry = builder.mapperRegistry; + this.namedWriteableRegistry = builder.namedWriteableRegistry; + this.documentParsingObserverSupplier = builder.documentParsingObserverSupplier; indexingMemoryController = new IndexingMemoryController( settings, threadPool, // ensure we pull an iter with new shards - flatten makes a copy () -> Iterables.flatten(this).iterator() ); - this.indexScopedSettings = indexScopedSettings; - this.circuitBreakerService = circuitBreakerService; - this.bigArrays = bigArrays; - this.scriptService = scriptService; - this.clusterService = clusterService; - this.client = client; + this.indexScopedSettings = builder.indexScopedSettings; + this.circuitBreakerService = builder.circuitBreakerService; + this.bigArrays = builder.bigArrays; + this.scriptService = builder.scriptService; + this.clusterService = builder.clusterService; + this.client = 
builder.client; + this.featureService = builder.featureService; this.idFieldDataEnabled = INDICES_ID_FIELD_DATA_ENABLED_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings().addSettingsUpdateConsumer(INDICES_ID_FIELD_DATA_ENABLED_SETTING, this::setIdFieldDataEnabled); this.indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() { @@ -336,21 +315,21 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon }); this.cleanInterval = INDICES_CACHE_CLEAN_INTERVAL_SETTING.get(settings); this.cacheCleaner = new CacheCleaner(indicesFieldDataCache, indicesRequestCache, threadPool, this.cleanInterval); - this.metaStateService = metaStateService; - this.engineFactoryProviders = engineFactoryProviders; + this.metaStateService = builder.metaStateService; + this.engineFactoryProviders = builder.engineFactoryProviders; // do not allow any plugin-provided index store type to conflict with a built-in type - for (final String indexStoreType : directoryFactories.keySet()) { + for (final String indexStoreType : builder.directoryFactories.keySet()) { if (IndexModule.isBuiltinType(indexStoreType)) { throw new IllegalStateException("registered index store type [" + indexStoreType + "] conflicts with a built-in type"); } } - this.directoryFactories = directoryFactories; - this.recoveryStateFactories = recoveryStateFactories; - this.indexFoldersDeletionListeners = new CompositeIndexFoldersDeletionListener(indexFoldersDeletionListeners); - this.snapshotCommitSuppliers = snapshotCommitSuppliers; - this.requestCacheKeyDifferentiator = requestCacheKeyDifferentiator; + this.directoryFactories = builder.directoryFactories; + this.recoveryStateFactories = builder.recoveryStateFactories; + this.indexFoldersDeletionListeners = new CompositeIndexFoldersDeletionListener(builder.indexFoldersDeletionListeners); + this.snapshotCommitSuppliers = builder.snapshotCommitSuppliers; + this.requestCacheKeyDifferentiator = builder.requestCacheKeyDifferentiator; // doClose() is called when shutting down a node, yet there might still be ongoing requests // that we need to wait for before closing some resources such as the caches. In order to // avoid closing these resources while ongoing requests are still being processed, we use a @@ -903,7 +882,7 @@ public void createShard( assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS : "mapping update consumer only required by local shards recovery"; client.execute( - clusterService.state().nodes().getMinNodeVersion().onOrAfter(Version.V_8_8_0) + featureService.clusterHasFeature(clusterService.state(), SUPPORTS_AUTO_PUT) ? AutoPutMappingAction.INSTANCE : PutMappingAction.INSTANCE, new PutMappingRequest().setConcreteIndex(shardRouting.index()) diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java b/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java new file mode 100644 index 0000000000000..a5cd00bb86094 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java @@ -0,0 +1,232 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.indices; + +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.gateway.MetaStateService; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AnalysisRegistry; +import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.index.mapper.MapperRegistry; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.plugins.EnginePlugin; +import org.elasticsearch.plugins.IndexStorePlugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.internal.DocumentParsingObserver; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; +import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.NamedXContentRegistry; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +public class IndicesServiceBuilder { + Settings settings; + PluginsService pluginsService; + NodeEnvironment nodeEnv; + NamedXContentRegistry xContentRegistry; + AnalysisRegistry analysisRegistry; + IndexNameExpressionResolver indexNameExpressionResolver; + MapperRegistry mapperRegistry; + NamedWriteableRegistry namedWriteableRegistry; + ThreadPool threadPool; + IndexScopedSettings indexScopedSettings; + CircuitBreakerService circuitBreakerService; + BigArrays bigArrays; + ScriptService scriptService; + ClusterService clusterService; + Client client; + FeatureService featureService; + MetaStateService metaStateService; + Collection>> engineFactoryProviders = List.of(); + Map directoryFactories = Map.of(); + @Nullable + ValuesSourceRegistry valuesSourceRegistry; + Map recoveryStateFactories = Map.of(); + List indexFoldersDeletionListeners = List.of(); + Map snapshotCommitSuppliers = Map.of(); + @Nullable + CheckedBiConsumer requestCacheKeyDifferentiator; + Supplier documentParsingObserverSupplier; + + public IndicesServiceBuilder settings(Settings settings) { + this.settings = settings; + return this; + } + + public IndicesServiceBuilder pluginsService(PluginsService pluginsService) { + this.pluginsService = pluginsService; + return this; + } + + public IndicesServiceBuilder nodeEnvironment(NodeEnvironment nodeEnv) { + this.nodeEnv = nodeEnv; + return this; + } + + public IndicesServiceBuilder xContentRegistry(NamedXContentRegistry xContentRegistry) { + this.xContentRegistry = xContentRegistry; + return this; + } + + public IndicesServiceBuilder analysisRegistry(AnalysisRegistry analysisRegistry) { + this.analysisRegistry = analysisRegistry; + return this; + } + + public IndicesServiceBuilder 
indexNameExpressionResolver(IndexNameExpressionResolver indexNameExpressionResolver) { + this.indexNameExpressionResolver = indexNameExpressionResolver; + return this; + } + + public IndicesServiceBuilder mapperRegistry(MapperRegistry mapperRegistry) { + this.mapperRegistry = mapperRegistry; + return this; + } + + public IndicesServiceBuilder namedWriteableRegistry(NamedWriteableRegistry namedWriteableRegistry) { + this.namedWriteableRegistry = namedWriteableRegistry; + return this; + } + + public IndicesServiceBuilder threadPool(ThreadPool threadPool) { + this.threadPool = threadPool; + return this; + } + + public IndicesServiceBuilder indexScopedSettings(IndexScopedSettings indexScopedSettings) { + this.indexScopedSettings = indexScopedSettings; + return this; + } + + public IndicesServiceBuilder circuitBreakerService(CircuitBreakerService circuitBreakerService) { + this.circuitBreakerService = circuitBreakerService; + return this; + } + + public IndicesServiceBuilder bigArrays(BigArrays bigArrays) { + this.bigArrays = bigArrays; + return this; + } + + public IndicesServiceBuilder scriptService(ScriptService scriptService) { + this.scriptService = scriptService; + return this; + } + + public IndicesServiceBuilder clusterService(ClusterService clusterService) { + this.clusterService = clusterService; + return this; + } + + public IndicesServiceBuilder client(Client client) { + this.client = client; + return this; + } + + public IndicesServiceBuilder featureService(FeatureService featureService) { + this.featureService = featureService; + return this; + } + + public IndicesServiceBuilder metaStateService(MetaStateService metaStateService) { + this.metaStateService = metaStateService; + return this; + } + + public IndicesServiceBuilder valuesSourceRegistry(ValuesSourceRegistry valuesSourceRegistry) { + this.valuesSourceRegistry = valuesSourceRegistry; + return this; + } + + public IndicesServiceBuilder requestCacheKeyDifferentiator( + CheckedBiConsumer requestCacheKeyDifferentiator + ) { + this.requestCacheKeyDifferentiator = requestCacheKeyDifferentiator; + return this; + } + + public IndicesServiceBuilder documentParsingObserverSupplier(Supplier documentParsingObserverSupplier) { + this.documentParsingObserverSupplier = documentParsingObserverSupplier; + return this; + } + + public IndicesService build() { + Objects.requireNonNull(settings); + Objects.requireNonNull(pluginsService); + Objects.requireNonNull(nodeEnv); + Objects.requireNonNull(xContentRegistry); + Objects.requireNonNull(analysisRegistry); + Objects.requireNonNull(indexNameExpressionResolver); + Objects.requireNonNull(mapperRegistry); + Objects.requireNonNull(namedWriteableRegistry); + Objects.requireNonNull(threadPool); + Objects.requireNonNull(indexScopedSettings); + Objects.requireNonNull(circuitBreakerService); + Objects.requireNonNull(bigArrays); + Objects.requireNonNull(scriptService); + Objects.requireNonNull(clusterService); + Objects.requireNonNull(client); + Objects.requireNonNull(featureService); + Objects.requireNonNull(metaStateService); + Objects.requireNonNull(engineFactoryProviders); + Objects.requireNonNull(directoryFactories); + Objects.requireNonNull(recoveryStateFactories); + Objects.requireNonNull(indexFoldersDeletionListeners); + Objects.requireNonNull(snapshotCommitSuppliers); + Objects.requireNonNull(documentParsingObserverSupplier); + + // collect engine factory providers from plugins + engineFactoryProviders = pluginsService.filterPlugins(EnginePlugin.class) + .>>map(plugin -> 
plugin::getEngineFactory) + .toList(); + + directoryFactories = pluginsService.filterPlugins(IndexStorePlugin.class) + .map(IndexStorePlugin::getDirectoryFactories) + .flatMap(m -> m.entrySet().stream()) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + recoveryStateFactories = pluginsService.filterPlugins(IndexStorePlugin.class) + .map(IndexStorePlugin::getRecoveryStateFactories) + .flatMap(m -> m.entrySet().stream()) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + indexFoldersDeletionListeners = pluginsService.filterPlugins(IndexStorePlugin.class) + .map(IndexStorePlugin::getIndexFoldersDeletionListeners) + .flatMap(List::stream) + .toList(); + + snapshotCommitSuppliers = pluginsService.filterPlugins(IndexStorePlugin.class) + .map(IndexStorePlugin::getSnapshotCommitSuppliers) + .flatMap(m -> m.entrySet().stream()) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + return new IndicesService(this); + } +} diff --git a/server/src/main/java/org/elasticsearch/indices/SystemIndices.java b/server/src/main/java/org/elasticsearch/indices/SystemIndices.java index 56b0a07fcbc71..3ff760b753886 100644 --- a/server/src/main/java/org/elasticsearch/indices/SystemIndices.java +++ b/server/src/main/java/org/elasticsearch/indices/SystemIndices.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Booleans; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; @@ -49,7 +50,6 @@ import java.util.Map.Entry; import java.util.Optional; import java.util.Set; -import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -530,7 +530,7 @@ public static IllegalArgumentException netNewSystemIndexAccessException(ThreadCo ); } else { return new IllegalArgumentException( - "Indices " + Arrays.toString(names.toArray(Strings.EMPTY_ARRAY)) + " use and access is reserved for system operations" + "Indices " + Arrays.toString(names.toArray(Strings.EMPTY_ARRAY)) + " may not be accessed by product [" + product + "]" ); } } @@ -929,7 +929,7 @@ public static void cleanUpFeature( Metadata metadata = clusterService.state().getMetadata(); final List exceptions = new ArrayList<>(); - final Consumer handleResponse = resetFeatureStateStatus -> { + final CheckedConsumer handleResponse = resetFeatureStateStatus -> { if (resetFeatureStateStatus.getStatus() == ResetFeatureStateStatus.Status.FAILURE) { synchronized (exceptions) { exceptions.add(resetFeatureStateStatus.getException()); diff --git a/server/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerMetrics.java b/server/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerMetrics.java new file mode 100644 index 0000000000000..3e018385ccc7a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerMetrics.java @@ -0,0 +1,172 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.indices.breaker; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.telemetry.TelemetryProvider; +import org.elasticsearch.telemetry.metric.LongCounter; +import org.elasticsearch.telemetry.metric.MeterRegistry; + +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +/** + * A class collecting trip counters for circuit breakers (parent, field data, request, in flight requests and custom child circuit + * breakers). + * + * The circuit breaker name is part of the (long) counter metric name instead of being an attribute because aggregating distinct circuit + * breakers' trip counter values does not make sense: consider, for instance, summing es.breaker.field_data.trip.total and + * es.breaker.in_flight_requests.trip.total. + * Those counters trip for different reasons even if the underlying reason is "too much memory usage". Aggregating them together results in + * losing the ability to understand where the underlying issue is (too much field data, too many concurrent requests, too large concurrent + * requests?). Aggregating each one of them separately to get, for instance, cluster level or cloud region level statistics is perfectly + * fine, instead. + * + * NOTE: here we have the ability to register custom trip counters too. This ability is something a few plugins take advantage of nowadays. + * At the time of writing this class it is just "Eql" and "MachineLearning" which track memory used to store "things" that are + * application/plugin specific such as eql sequence query objects and inference model objects. As a result, we just have a couple of these + * custom counters. This means we have 6 circuit breaker counter metrics per node (parent, field_data, request, in_flight_requests, + * eql_sequence and model_inference). We register them a bit differently to keep the ability for plugins to define their own circuit breaker + * trip counters.
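 *
 * For illustration only (not code in this class): a hypothetical custom breaker named "eql_sequence" maps onto the name
 * template as "es.breaker.eql_sequence.trip.total", and its counter would be registered roughly as:
 *
 *     String name = Strings.format(ES_BREAKER_CUSTOM_TRIP_COUNT_TOTAL_TEMPLATE, "eql_sequence");
 *     LongCounter tripCount = meterRegistry.registerLongCounter(name, "A custom circuit breaker [eql_sequence]", "count");
 *     tripCount.increment(); // incremented whenever the breaker trips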
+ */ +public class CircuitBreakerMetrics { + public static final CircuitBreakerMetrics NOOP = new CircuitBreakerMetrics(TelemetryProvider.NOOP, Collections.emptyMap()); + public static final String ES_BREAKER_PARENT_TRIP_COUNT_TOTAL = "es.breaker.parent.trip.total"; + public static final String ES_BREAKER_FIELD_DATA_TRIP_COUNT_TOTAL = "es.breaker.field_data.trip.total"; + public static final String ES_BREAKER_REQUEST_TRIP_COUNT_TOTAL = "es.breaker.request.trip.total"; + public static final String ES_BREAKER_IN_FLIGHT_REQUESTS_TRIP_COUNT_TOTAL = "es.breaker.in_flight_requests.trip.total"; + + private static final String ES_BREAKER_CUSTOM_TRIP_COUNT_TOTAL_TEMPLATE = "es.breaker.%s.trip.total"; + private final MeterRegistry registry; + private final LongCounter parentTripCountTotal; + private final LongCounter fielddataTripCountTotal; + private final LongCounter requestTripCountTotal; + private final LongCounter inFlightRequestsCountTotal; + private final Map customTripCountsTotal; + + private CircuitBreakerMetrics( + final MeterRegistry registry, + final LongCounter parentTripCountTotal, + final LongCounter fielddataTripCountTotal, + final LongCounter requestTripCountTotal, + final LongCounter inFlightRequestsCountTotal, + final Map customTripCountsTotal + ) { + this.registry = registry; + this.parentTripCountTotal = parentTripCountTotal; + this.fielddataTripCountTotal = fielddataTripCountTotal; + this.requestTripCountTotal = requestTripCountTotal; + this.inFlightRequestsCountTotal = inFlightRequestsCountTotal; + this.customTripCountsTotal = customTripCountsTotal; + } + + public CircuitBreakerMetrics(final TelemetryProvider telemetryProvider, final Map customTripCounters) { + this( + telemetryProvider.getMeterRegistry(), + telemetryProvider.getMeterRegistry() + .registerLongCounter(ES_BREAKER_PARENT_TRIP_COUNT_TOTAL, "Parent circuit breaker trip count", "count"), + telemetryProvider.getMeterRegistry() + .registerLongCounter(ES_BREAKER_FIELD_DATA_TRIP_COUNT_TOTAL, "Field data circuit breaker trip count", "count"), + telemetryProvider.getMeterRegistry() + .registerLongCounter(ES_BREAKER_REQUEST_TRIP_COUNT_TOTAL, "Request circuit breaker trip count", "count"), + telemetryProvider.getMeterRegistry() + .registerLongCounter( + ES_BREAKER_IN_FLIGHT_REQUESTS_TRIP_COUNT_TOTAL, + "In-flight requests circuit breaker trip count", + "count" + ), + customTripCounters + ); + } + + public LongCounter getParentTripCountTotal() { + return parentTripCountTotal; + } + + public LongCounter getFielddataTripCountTotal() { + return fielddataTripCountTotal; + } + + public LongCounter getRequestTripCountTotal() { + return requestTripCountTotal; + } + + public LongCounter getInFlightRequestsCountTotal() { + return inFlightRequestsCountTotal; + } + + public Map getCustomTripCountsTotal() { + return customTripCountsTotal; + } + + public LongCounter getCustomTripCount(final String name, final LongCounter theDefault) { + return this.customTripCountsTotal.getOrDefault(name, theDefault); + } + + public LongCounter getCustomTripCount(final String name) { + return this.customTripCountsTotal.getOrDefault(name, LongCounter.NOOP); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CircuitBreakerMetrics that = (CircuitBreakerMetrics) o; + return Objects.equals(registry, that.registry) + && Objects.equals(parentTripCountTotal, that.parentTripCountTotal) + && Objects.equals(fielddataTripCountTotal, that.fielddataTripCountTotal) + && 
Objects.equals(requestTripCountTotal, that.requestTripCountTotal) + && Objects.equals(inFlightRequestsCountTotal, that.inFlightRequestsCountTotal) + && Objects.equals(customTripCountsTotal, that.customTripCountsTotal); + } + + @Override + public int hashCode() { + return Objects.hash( + registry, + parentTripCountTotal, + fielddataTripCountTotal, + requestTripCountTotal, + inFlightRequestsCountTotal, + customTripCountsTotal + ); + } + + @Override + public String toString() { + return "CircuitBreakerMetrics{" + + "registry=" + + registry + + ", parentTripCountTotal=" + + parentTripCountTotal + + ", fielddataTripCountTotal=" + + fielddataTripCountTotal + + ", requestTripCountTotal=" + + requestTripCountTotal + + ", inFlightRequestsCountTotal=" + + inFlightRequestsCountTotal + + ", customTripCountsTotal=" + + customTripCountsTotal + + '}'; + } + + public void addCustomCircuitBreaker(final CircuitBreaker circuitBreaker) { + if (this.customTripCountsTotal.containsKey(circuitBreaker.getName())) { + throw new IllegalArgumentException("A circuit breaker named [" + circuitBreaker.getName() + "] already exists"); + } + final String canonicalName = Strings.format(ES_BREAKER_CUSTOM_TRIP_COUNT_TOTAL_TEMPLATE, circuitBreaker.getName()); + this.customTripCountsTotal.put( + canonicalName, + registry.registerLongCounter(canonicalName, "A custom circuit breaker [" + circuitBreaker.getName() + "]", "count") + ); + } + +} diff --git a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index 86b6013895263..9e995c084a555 100644 --- a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -25,6 +25,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.monitor.jvm.GcNames; import org.elasticsearch.monitor.jvm.JvmInfo; +import org.elasticsearch.telemetry.metric.LongCounter; import java.lang.management.GarbageCollectorMXBean; import java.lang.management.ManagementFactory; @@ -141,17 +142,24 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService { // Tripped count for when redistribution was attempted but wasn't successful private final AtomicLong parentTripCount = new AtomicLong(0); + private final LongCounter parentTripCountTotalMetric; private final Function overLimitStrategyFactory; private volatile OverLimitStrategy overLimitStrategy; @SuppressWarnings("this-escape") - public HierarchyCircuitBreakerService(Settings settings, List customBreakers, ClusterSettings clusterSettings) { - this(settings, customBreakers, clusterSettings, HierarchyCircuitBreakerService::createOverLimitStrategy); + public HierarchyCircuitBreakerService( + CircuitBreakerMetrics metrics, + Settings settings, + List customBreakers, + ClusterSettings clusterSettings + ) { + this(metrics, settings, customBreakers, clusterSettings, HierarchyCircuitBreakerService::createOverLimitStrategy); } @SuppressWarnings("this-escape") HierarchyCircuitBreakerService( + CircuitBreakerMetrics metrics, Settings settings, List customBreakers, ClusterSettings clusterSettings, @@ -162,6 +170,7 @@ public HierarchyCircuitBreakerService(Settings settings, List c childCircuitBreakers.put( CircuitBreaker.FIELDDATA, validateAndCreateBreaker( + metrics.getFielddataTripCountTotal(), new BreakerSettings( CircuitBreaker.FIELDDATA,
FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(), @@ -174,6 +183,7 @@ public HierarchyCircuitBreakerService(Settings settings, List c childCircuitBreakers.put( CircuitBreaker.IN_FLIGHT_REQUESTS, validateAndCreateBreaker( + metrics.getInFlightRequestsCountTotal(), new BreakerSettings( CircuitBreaker.IN_FLIGHT_REQUESTS, IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(), @@ -186,6 +196,7 @@ public HierarchyCircuitBreakerService(Settings settings, List c childCircuitBreakers.put( CircuitBreaker.REQUEST, validateAndCreateBreaker( + metrics.getRequestTripCountTotal(), new BreakerSettings( CircuitBreaker.REQUEST, REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(), @@ -203,7 +214,10 @@ public HierarchyCircuitBreakerService(Settings settings, List c + "] exists. Circuit breaker names must be unique" ); } - childCircuitBreakers.put(breakerSettings.getName(), validateAndCreateBreaker(breakerSettings)); + childCircuitBreakers.put( + breakerSettings.getName(), + validateAndCreateBreaker(metrics.getCustomTripCount(breakerSettings.getName()), breakerSettings) + ); } this.breakers = Map.copyOf(childCircuitBreakers); this.parentSettings = new BreakerSettings( @@ -247,6 +261,7 @@ public HierarchyCircuitBreakerService(Settings settings, List c this.overLimitStrategyFactory = overLimitStrategyFactory; this.overLimitStrategy = overLimitStrategyFactory.apply(this.trackRealMemoryUsage); + this.parentTripCountTotalMetric = metrics.getParentTripCountTotal(); } private void updateCircuitBreakerSettings(String name, ByteSizeValue newLimit, Double newOverhead) { @@ -399,6 +414,7 @@ public void checkParentLimit(long newBytesReserved, String label) throws Circuit long parentLimit = this.parentSettings.getLimit(); if (memoryUsed.totalUsage > parentLimit && overLimitStrategy.overLimit(memoryUsed).totalUsage > parentLimit) { this.parentTripCount.incrementAndGet(); + this.parentTripCountTotalMetric.increment(); final String messageString = buildParentTripMessage( newBytesReserved, label, @@ -474,12 +490,13 @@ static void appendBytesSafe(StringBuilder stringBuilder, long bytes) { } } - private CircuitBreaker validateAndCreateBreaker(BreakerSettings breakerSettings) { + private CircuitBreaker validateAndCreateBreaker(LongCounter trippedCountMeter, BreakerSettings breakerSettings) { // Validate the settings validateSettings(new BreakerSettings[] { breakerSettings }); return breakerSettings.getType() == CircuitBreaker.Type.NOOP ? 
new NoopCircuitBreaker(breakerSettings.getName()) : new ChildMemoryCircuitBreaker( + trippedCountMeter, breakerSettings, LogManager.getLogger(CHILD_LOGGER_PREFIX + breakerSettings.getName()), this, @@ -501,7 +518,7 @@ static OverLimitStrategy createOverLimitStrategy(boolean trackRealMemoryUsage) { HierarchyCircuitBreakerService::realMemoryUsage, createYoungGcCountSupplier(), System::currentTimeMillis, - 5000, + 500, lockTimeout ); } else { @@ -542,6 +559,8 @@ static class G1OverLimitStrategy implements OverLimitStrategy { private long blackHole; private final ReleasableLock lock = new ReleasableLock(new ReentrantLock()); + // used to throttle logging + private int attemptNo; G1OverLimitStrategy( JvmInfo jvmInfo, @@ -588,9 +607,12 @@ public MemoryUsage overLimit(MemoryUsage memoryUsed) { boolean leader = false; int allocationIndex = 0; long allocationDuration = 0; + long begin = 0; + int attemptNoCopy = 0; try (ReleasableLock locked = lock.tryAcquire(lockTimeout)) { if (locked != null) { - long begin = timeSupplier.getAsLong(); + attemptNoCopy = ++this.attemptNo; + begin = timeSupplier.getAsLong(); leader = begin >= lastCheckTime + minimumInterval; overLimitTriggered(leader); if (leader) { @@ -622,9 +644,11 @@ public MemoryUsage overLimit(MemoryUsage memoryUsed) { long now = timeSupplier.getAsLong(); this.lastCheckTime = now; allocationDuration = now - begin; + this.attemptNo = 0; } } } catch (InterruptedException e) { + logger.info("could not acquire lock when attempting to trigger G1GC due to high heap usage"); Thread.currentThread().interrupt(); // fallthrough } @@ -639,6 +663,13 @@ public MemoryUsage overLimit(MemoryUsage memoryUsed) { allocationIndex, allocationDuration ); + } else if (attemptNoCopy < 10 || Long.bitCount(attemptNoCopy) == 1) { + logger.info( + "memory usage down after [{}], before [{}], after [{}]", + begin - lastCheckTime, + memoryUsed.baseUsage, + current + ); } return new MemoryUsage( current, @@ -655,6 +686,13 @@ public MemoryUsage overLimit(MemoryUsage memoryUsed) { allocationIndex, allocationDuration ); + } else if (attemptNoCopy < 10 || Long.bitCount(attemptNoCopy) == 1) { + logger.info( + "memory usage not down after [{}], before [{}], after [{}]", + begin - lastCheckTime, + memoryUsed.baseUsage, + current + ); } // prefer original measurement when reporting if heap usage was not brought down. 
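// Note on the attemptNoCopy guards above: the first few attempts (attemptNoCopy < 10) always log, after which only
// power-of-two attempt numbers do (Long.bitCount(x) == 1 exactly when x is a power of two), so a sustained over-limit
// episode produces logarithmically many log lines rather than one per attempt.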
return memoryUsed; diff --git a/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java b/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java index cf9378aabb993..c84a2fd343e8f 100644 --- a/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java +++ b/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldDataCache; @@ -37,6 +38,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.concurrent.TimeUnit; import java.util.function.ToLongBiFunction; public final class IndicesFieldDataCache implements RemovalListener, Releasable { @@ -48,6 +50,11 @@ public final class IndicesFieldDataCache implements RemovalListener INDICES_FIELDDATA_CACHE_EXPIRE = Setting.positiveTimeSetting( + "indices.fielddata.cache.expire", + new TimeValue(1, TimeUnit.HOURS), + Property.NodeScope + ); private final IndexFieldDataCache.Listener indicesFieldDataCacheListener; private final Cache cache; @@ -58,6 +65,10 @@ public IndicesFieldDataCache(Settings settings, IndexFieldDataCache.Listener ind if (sizeInBytes > 0) { cacheBuilder.setMaximumWeight(sizeInBytes).weigher(new FieldDataWeigher()); } + final TimeValue expire = INDICES_FIELDDATA_CACHE_EXPIRE.get(settings); + if (expire != null && expire.getNanos() > 0) { + cacheBuilder.setExpireAfterAccess(expire); + } cache = cacheBuilder.build(); } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index a570c88ddaba7..e6ec6f25a71a9 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -340,7 +340,9 @@ public void onFailure(Exception e) { final var sendShardFailure = // these indicate the source shard has already failed, which will independently notify the master and fail // the target shard - false == (cause instanceof ShardNotFoundException || cause instanceof IndexNotFoundException); + false == (cause instanceof ShardNotFoundException + || cause instanceof IndexNotFoundException + || cause instanceof AlreadyClosedException); // TODO retries? 
See RecoveryResponseHandler#handleException onGoingRecoveries.failRecovery( diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java index 47405e0daa0a7..2e10a5de2d4e1 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java @@ -14,7 +14,6 @@ import org.apache.lucene.store.RateLimiter.SimpleRateLimiter; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; -import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.common.settings.ClusterSettings; @@ -47,7 +46,6 @@ import static org.elasticsearch.node.NodeRoleSettings.NODE_ROLES_SETTING; public class RecoverySettings { - public static final Version SNAPSHOT_RECOVERIES_SUPPORTED_VERSION = Version.V_7_15_0; public static final IndexVersion SNAPSHOT_RECOVERIES_SUPPORTED_INDEX_VERSION = IndexVersions.V_7_15_0; public static final TransportVersion SNAPSHOT_RECOVERIES_SUPPORTED_TRANSPORT_VERSION = TransportVersions.V_7_15_0; public static final IndexVersion SEQ_NO_SNAPSHOT_RECOVERIES_SUPPORTED_VERSION = IndexVersions.V_7_16_0; diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsService.java b/server/src/main/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsService.java index 07d62fb87fe55..e15ec4c339a94 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsService.java @@ -51,7 +51,6 @@ import java.util.stream.Collectors; import static org.elasticsearch.core.Strings.format; -import static org.elasticsearch.indices.recovery.RecoverySettings.SNAPSHOT_RECOVERIES_SUPPORTED_VERSION; public class ShardSnapshotsService { private static final Logger logger = LogManager.getLogger(ShardSnapshotsService.class); @@ -84,13 +83,8 @@ public void fetchLatestSnapshotsForShard(ShardId shardId, ActionListener fetchSnapshotFiles(ShardId shardId, GetShardSnap } } - protected boolean masterSupportsFetchingLatestSnapshots() { - return clusterService.state().nodes().getMinNodeVersion().onOrAfter(SNAPSHOT_RECOVERIES_SUPPORTED_VERSION); - } - private static final class StoreFileMetadataDirectory extends Directory { private final Map files; diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceService.java b/server/src/main/java/org/elasticsearch/inference/InferenceService.java index 2d7ee9f210e64..499cf5d5ca64f 100644 --- a/server/src/main/java/org/elasticsearch/inference/InferenceService.java +++ b/server/src/main/java/org/elasticsearch/inference/InferenceService.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.ActionListener; import java.io.Closeable; +import java.util.List; import java.util.Map; import java.util.Set; @@ -61,7 +62,7 @@ public interface InferenceService extends Closeable { * @param taskSettings Settings in the request to override the model's defaults * @param listener Inference result listener */ - void infer(Model model, String input, Map taskSettings, ActionListener listener); + void infer(Model model, List input, Map taskSettings, ActionListener listener); /** * Start or prepare the model for use. 
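The infer signature change above replaces a single String input with a List, so implementations now receive a whole batch in one call. A minimal caller-side sketch against the new signature (the model, inferenceService, and logger are assumed to exist; this is not code from the change itself):

    List<String> inputs = List.of("first passage", "second passage");
    inferenceService.infer(model, inputs, Map.of(), ActionListener.wrap(
        results -> logger.info("inference succeeded for [{}] inputs", inputs.size()),
        e -> logger.warn("inference failed", e)
    ));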
diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java b/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java new file mode 100644 index 0000000000000..37990caeec097 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.inference; + +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.xcontent.ToXContentFragment; + +import java.util.List; +import java.util.Map; + +public interface InferenceServiceResults extends NamedWriteable, ToXContentFragment { + + /** + * Transform the result to match the format required for versions prior to + * {@link org.elasticsearch.TransportVersions#INFERENCE_SERVICE_RESULTS_ADDED} + */ + List transformToLegacyFormat(); + + /** + * Convert the result to a map to aid with test assertions. + */ + Map asMap(); +} diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 3adaab078ad4a..3a2a810dc61b5 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -206,6 +206,22 @@ public IngestService( this.taskQueue = clusterService.createTaskQueue("ingest-pipelines", Priority.NORMAL, PIPELINE_TASK_EXECUTOR); } + /** + * This copy constructor returns a copy of the given ingestService, using all of the same internal state. The returned copy is not + * registered to listen to any cluster state changes. + * @param ingestService the ingest service to copy + */ + IngestService(IngestService ingestService) { + this.clusterService = ingestService.clusterService; + this.scriptService = ingestService.scriptService; + this.documentParsingObserverSupplier = ingestService.documentParsingObserverSupplier; + this.processorFactories = ingestService.processorFactories; + this.threadPool = ingestService.threadPool; + this.taskQueue = ingestService.taskQueue; + this.pipelines = ingestService.pipelines; + this.state = ingestService.state; + } + private static Map processorFactories(List ingestPlugins, Processor.Parameters parameters) { Map processorFactories = new TreeMap<>(); for (IngestPlugin ingestPlugin : ingestPlugins) { diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestStats.java b/server/src/main/java/org/elasticsearch/ingest/IngestStats.java index 51bbed8f7b09f..e197af5fbb46a 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestStats.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestStats.java @@ -28,6 +28,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; public record IngestStats(Stats totalStats, List pipelineStats, Map> processorStats) implements @@ -266,6 +267,12 @@ public record ProcessorStat(String name, String type, Stats stats) { // both lists using a common index iterator.
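// For example (hypothetical stats), the positional merge pairs entries by index:
//   first  = [("set", set, statsA), ("grok", grok, statsB)]
//   second = [("set", set, statsC), ("grok", grok, statsD)]
//   merged = [("set", set, statsA+statsC), ("grok", grok, statsB+statsD)]
// The assert in merge() below documents the invariant that both lists describe the same processors in the same order;
// positional merging is meaningless once the two sides drift apart (e.g. a pipeline changed between samples).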
private static List merge(List first, List second) { var merged = new ArrayList(); + assert first.size() == second.size() + : "stats size mismatch [" + + first.stream().map(ps -> ps.name + ":" + ps.type).collect(Collectors.joining(",")) + + "] [" + + second.stream().map(ps -> ps.name + ":" + ps.type).collect(Collectors.joining(",")) + + "]"; for (var i = 0; i < first.size(); i++) { merged.add(new ProcessorStat(first.get(i).name, first.get(i).type, Stats.merge(first.get(i).stats, second.get(i).stats))); } diff --git a/server/src/main/java/org/elasticsearch/ingest/SimulateIngestService.java b/server/src/main/java/org/elasticsearch/ingest/SimulateIngestService.java new file mode 100644 index 0000000000000..2f9da248b2afb --- /dev/null +++ b/server/src/main/java/org/elasticsearch/ingest/SimulateIngestService.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest; + +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.SimulateBulkRequest; + +import java.util.HashMap; +import java.util.Map; + +/** + * This is an implementation of IngestService that allows us to substitute pipeline definitions so that users can simulate ingest using + * pipelines that they define on the fly. + */ +public class SimulateIngestService extends IngestService { + private final Map pipelineSubstitutions; + + public SimulateIngestService(IngestService ingestService, BulkRequest request) { + super(ingestService); + if (request instanceof SimulateBulkRequest simulateBulkRequest) { + try { + pipelineSubstitutions = getPipelineSubstitutions(simulateBulkRequest.getPipelineSubstitutions(), ingestService); + } catch (Exception e) { + throw new RuntimeException(e); + } + } else { + throw new IllegalArgumentException("Expecting a SimulateBulkRequest but got " + request.getClass()); + } + } + + /** + * This transforms the pipeline substitutions from a SimulateBulkRequest into a new map, where the key is the pipelineId and the + * value is the Pipeline instance. The Pipeline is created using the Processor.Factories and the ScriptService of the given + * ingestService. + * @param rawPipelineSubstitutions The pipeline substitutions map received from a SimulateBulkRequest + * @param ingestService The ingestService being used + * @return A transformed version of rawPipelineSubstitutions, where the values are Pipeline objects + * @throws Exception if a substituted pipeline definition cannot be parsed + */ + private Map getPipelineSubstitutions( + Map> rawPipelineSubstitutions, + IngestService ingestService + ) throws Exception { + Map parsedPipelineSubstitutions = new HashMap<>(); + if (rawPipelineSubstitutions != null) { + for (Map.Entry> entry : rawPipelineSubstitutions.entrySet()) { + String pipelineId = entry.getKey(); + Pipeline pipeline = Pipeline.create( + pipelineId, + entry.getValue(), + ingestService.getProcessorFactories(), + ingestService.getScriptService() + ); + parsedPipelineSubstitutions.put(pipelineId, pipeline); + } + } + return parsedPipelineSubstitutions; + } + + /** + * This method returns the Pipeline for the given pipelineId. If a substitute definition of the pipeline has been defined for the + * current simulate, then that pipeline is returned.
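 * (For illustration, with hypothetical ids: a substitute supplied for "my-pipeline" wins over the stored "my-pipeline",
 * while "other-pipeline" is resolved as usual.)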
Otherwise, the pipeline stored in the cluster state is returned. + */ + @Override + public Pipeline getPipeline(String pipelineId) { + Pipeline pipeline = pipelineSubstitutions.get(pipelineId); + if (pipeline == null) { + pipeline = super.getPipeline(pipelineId); + } + return pipeline; + } +} diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index bd33a747b36fd..ca254e20e8b37 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -37,12 +37,9 @@ import org.elasticsearch.cluster.coordination.CoordinationDiagnosticsService; import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.coordination.MasterHistoryService; -import org.elasticsearch.cluster.coordination.Reconfigurator; import org.elasticsearch.cluster.coordination.StableMasterHealthIndicatorService; -import org.elasticsearch.cluster.desirednodes.DesiredNodesSettingsValidator; import org.elasticsearch.cluster.metadata.IndexMetadataVerifier; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.MetadataDataStreamsService; @@ -64,6 +61,7 @@ import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Injector; import org.elasticsearch.common.inject.Key; +import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.inject.ModulesBuilder; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.logging.DeprecationCategory; @@ -79,6 +77,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Tuple; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; @@ -101,18 +100,17 @@ import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.index.IndexSettingProvider; import org.elasticsearch.index.IndexSettingProviders; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexingPressure; import org.elasticsearch.index.analysis.AnalysisRegistry; -import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.indices.ExecutorSelector; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.IndicesServiceBuilder; import org.elasticsearch.indices.ShardLimitValidator; import org.elasticsearch.indices.SystemIndexMappingUpdateService; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.indices.analysis.AnalysisModule; -import org.elasticsearch.indices.breaker.BreakerSettings; +import org.elasticsearch.indices.breaker.CircuitBreakerMetrics; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -140,9 +138,7 @@ import org.elasticsearch.plugins.ClusterCoordinationPlugin; import org.elasticsearch.plugins.ClusterPlugin; import 
org.elasticsearch.plugins.DiscoveryPlugin; -import org.elasticsearch.plugins.EnginePlugin; import org.elasticsearch.plugins.HealthPlugin; -import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.plugins.InferenceServicePlugin; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.MapperPlugin; @@ -171,7 +167,6 @@ import org.elasticsearch.reservedstate.ReservedClusterStateHandlerProvider; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; import org.elasticsearch.reservedstate.service.FileSettingsService; -import org.elasticsearch.rest.RestController; import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchModule; @@ -188,6 +183,7 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.telemetry.TelemetryProvider; +import org.elasticsearch.telemetry.metric.LongCounter; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; @@ -204,17 +200,16 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.TreeMap; import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.function.Supplier; -import java.util.function.UnaryOperator; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -245,11 +240,26 @@ static NodeConstruction prepareConstruction( List closeables = new ArrayList<>(); try { NodeConstruction constructor = new NodeConstruction(closeables); + Settings settings = constructor.createEnvironment(initialEnvironment, serviceProvider); + ThreadPool threadPool = constructor.createThreadPool(settings); SettingsModule settingsModule = constructor.validateSettings(initialEnvironment.settings(), settings, threadPool); - constructor.construct(threadPool, settingsModule, serviceProvider, forbidPrivateIndexSettings); + SearchModule searchModule = constructor.createSearchModule(settingsModule.getSettings(), threadPool); + constructor.createClientAndRegistries(settingsModule.getSettings(), threadPool, searchModule); + + ScriptService scriptService = constructor.createScriptService(settingsModule, threadPool, serviceProvider); + + constructor.construct( + threadPool, + settingsModule, + searchModule, + scriptService, + constructor.createAnalysisRegistry(), + serviceProvider, + forbidPrivateIndexSettings + ); return constructor; } catch (IOException e) { @@ -268,6 +278,7 @@ static NodeConstruction prepareConstruction( private final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(Node.class); private final List resourcesToClose; + private final ModulesBuilder modules = new ModulesBuilder(); /* * References for storing in a Node */ @@ -335,7 +346,7 @@ private Optional getSinglePlugin(Class pluginClass) { return getSinglePlugin(pluginsService.filterPlugins(pluginClass), pluginClass); } - private Optional getSinglePlugin(Stream plugins, Class pluginClass) { + private static Optional getSinglePlugin(Stream plugins, Class pluginClass) { var it = plugins.iterator(); if (it.hasNext() == false) { return Optional.empty(); @@ -345,7 +356,7 @@ private Optional getSinglePlugin(Stream plugins, Class pluginClass) List allPlugins = new 
ArrayList<>(); allPlugins.add(plugin); it.forEachRemaining(allPlugins::add); - throw new IllegalStateException("A single " + pluginClass.getName() + " was expected but got :" + allPlugins); + throw new IllegalStateException("A single " + pluginClass.getName() + " was expected but got " + allPlugins); } return Optional.of(plugin); } @@ -423,6 +434,7 @@ private Settings createEnvironment(Environment initialEnvironment, NodeServicePr ); pluginsService = serviceProvider.newPluginService(initialEnvironment, envSettings); + modules.bindToInstance(PluginsService.class, pluginsService); Settings settings = Node.mergePluginSettings(pluginsService.pluginMap(), envSettings); /* @@ -431,6 +443,7 @@ private Settings createEnvironment(Environment initialEnvironment, NodeServicePr */ environment = new Environment(settings, initialEnvironment.configFile()); Environment.assertEquivalent(initialEnvironment, environment); + modules.bindToInstance(Environment.class, environment); return settings; } @@ -441,6 +454,7 @@ private ThreadPool createThreadPool(Settings settings) throws IOException { pluginsService.flatMap(p -> p.getExecutorBuilders(settings)).toArray(ExecutorBuilder[]::new) ); resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS)); + modules.bindToInstance(ThreadPool.class, threadPool); // adds the context to the DeprecationLogger so that it does not need to be injected everywhere HeaderWarning.setThreadContext(threadPool.getThreadContext()); @@ -457,13 +471,14 @@ private SettingsModule validateSettings(Settings envSettings, Settings settings, } SettingsExtension.load().forEach(e -> additionalSettings.addAll(e.getSettings())); - // this is as early as we can validate settings at this point. we already pass them to ScriptModule as well as ThreadPool + // this is as early as we can validate settings at this point. we already pass them to ThreadPool // so we might be late here already SettingsModule settingsModule = new SettingsModule( settings, additionalSettings, pluginsService.flatMap(Plugin::getSettingsFilter).toList() ); + modules.add(settingsModule); // creating `NodeEnvironment` breaks the ability to rollback to 7.x on an 8.0 upgrade (`upgradeLegacyNodeFolders`) so do this // after settings validation. @@ -479,91 +494,140 @@ private SettingsModule validateSettings(Settings envSettings, Settings settings, .collect(Collectors.toCollection(LinkedHashSet::new)) ); resourcesToClose.add(nodeEnvironment); + modules.bindToInstance(NodeEnvironment.class, nodeEnvironment); return settingsModule; } - private void construct( - ThreadPool threadPool, - SettingsModule settingsModule, - NodeServiceProvider serviceProvider, - boolean forbidPrivateIndexSettings - ) throws IOException { - - Settings settings = settingsModule.getSettings(); - - final ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, threadPool); - resourcesToClose.add(resourceWatcherService); + private SearchModule createSearchModule(Settings settings, ThreadPool threadPool) { + IndexSearcher.setMaxClauseCount(SearchUtils.calculateMaxClauseValue(threadPool)); + return new SearchModule(settings, pluginsService.filterPlugins(SearchPlugin.class).toList()); + } - final Set taskHeaders = Stream.concat( - pluginsService.filterPlugins(ActionPlugin.class).flatMap(p -> p.getTaskHeaders().stream()), - Task.HEADERS_TO_COPY.stream() - ).collect(Collectors.toSet()); + /** + * Create various objects that are stored as member variables. This is so they are accessible as soon as possible. 
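A recurring move in this NodeConstruction refactor is visible in the hunks above: instead of collecting every service and binding it in one monolithic block at the end, each construction step registers what it just built on the shared ModulesBuilder (`modules.bindToInstance(...)`) the moment the object exists. A toy sketch of that accumulate-as-you-construct idea, using an invented type-keyed registry rather than Elasticsearch's ModulesBuilder:

```java
import java.util.HashMap;
import java.util.Map;

// Toy stand-in for ModulesBuilder.bindToInstance: a type-safe instance registry.
class Binder {
    private final Map<Class<?>, Object> instances = new HashMap<>();

    <T> void bindToInstance(Class<T> type, T instance) {
        Object previous = instances.put(type, instance);
        if (previous != null) {
            throw new IllegalStateException("duplicate binding for " + type.getName());
        }
    }

    <T> T get(Class<T> type) {
        return type.cast(instances.get(type));
    }
}

class NodeConstructionDemo {
    record EnvironmentIsh(String home) {}
    record ThreadPoolIsh(int size) {}

    public static void main(String[] args) {
        Binder modules = new Binder();

        // Each step binds what it creates as soon as it exists...
        EnvironmentIsh env = new EnvironmentIsh("/tmp/es");
        modules.bindToInstance(EnvironmentIsh.class, env);

        ThreadPoolIsh threadPool = new ThreadPoolIsh(8);
        modules.bindToInstance(ThreadPoolIsh.class, threadPool);

        // ...so later steps (and ultimately the injector) can look them up.
        System.out.println(modules.get(ThreadPoolIsh.class));
    }
}
```

Binding eagerly keeps construction order explicit and surfaces a duplicate or missing dependency at the point it is introduced rather than at injector-creation time.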
+ */ + private void createClientAndRegistries(Settings settings, ThreadPool threadPool, SearchModule searchModule) { + client = new NodeClient(settings, threadPool); + modules.add(b -> { + b.bind(Client.class).toInstance(client); + b.bind(NodeClient.class).toInstance(client); + }); - final TelemetryProvider telemetryProvider = getSinglePlugin(TelemetryPlugin.class).map(p -> p.getTelemetryProvider(settings)) - .orElse(TelemetryProvider.NOOP); + localNodeFactory = new Node.LocalNodeFactory(settings, nodeEnvironment.nodeId()); - final Tracer tracer = telemetryProvider.getTracer(); + InferenceServiceRegistry inferenceServiceRegistry = new InferenceServiceRegistry( + pluginsService.filterPlugins(InferenceServicePlugin.class).toList(), + new InferenceServicePlugin.InferenceServiceFactoryContext(client) + ); + resourcesToClose.add(inferenceServiceRegistry); + modules.bindToInstance(InferenceServiceRegistry.class, inferenceServiceRegistry); - final TaskManager taskManager = new TaskManager(settings, threadPool, taskHeaders, tracer); + namedWriteableRegistry = new NamedWriteableRegistry( + Stream.of( + NetworkModule.getNamedWriteables().stream(), + IndicesModule.getNamedWriteables().stream(), + searchModule.getNamedWriteables().stream(), + pluginsService.flatMap(Plugin::getNamedWriteables), + ClusterModule.getNamedWriteables().stream(), + SystemIndexMigrationExecutor.getNamedWriteables().stream(), + inferenceServiceRegistry.getNamedWriteables().stream() + ).flatMap(Function.identity()).toList() + ); + xContentRegistry = new NamedXContentRegistry( + Stream.of( + NetworkModule.getNamedXContents().stream(), + IndicesModule.getNamedXContents().stream(), + searchModule.getNamedXContents().stream(), + pluginsService.flatMap(Plugin::getNamedXContent), + ClusterModule.getNamedXWriteables().stream(), + SystemIndexMigrationExecutor.getNamedXContentParsers().stream(), + HealthNodeTaskExecutor.getNamedXContentParsers().stream() + ).flatMap(Function.identity()).toList() + ); + modules.add(b -> { + b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry); + b.bind(NamedXContentRegistry.class).toInstance(xContentRegistry); + }); + } - client = new NodeClient(settings, threadPool); + private ScriptService createScriptService(SettingsModule settingsModule, ThreadPool threadPool, NodeServiceProvider serviceProvider) { + Settings settings = settingsModule.getSettings(); + ScriptModule scriptModule = new ScriptModule(settings, pluginsService.filterPlugins(ScriptPlugin.class).toList()); - final ScriptModule scriptModule = new ScriptModule(settings, pluginsService.filterPlugins(ScriptPlugin.class).toList()); - final ScriptService scriptService = serviceProvider.newScriptService( + ScriptService scriptService = serviceProvider.newScriptService( pluginsService, settings, scriptModule.engines, scriptModule.contexts, threadPool::absoluteTimeInMillis ); - AnalysisModule analysisModule = new AnalysisModule( + ScriptModule.registerClusterSettingsListeners(scriptService, settingsModule.getClusterSettings()); + modules.add(b -> { + b.bind(ScriptService.class).toInstance(scriptService); + b.bind(UpdateHelper.class).toInstance(new UpdateHelper(scriptService)); + }); + + return scriptService; + } + + private AnalysisRegistry createAnalysisRegistry() throws IOException { + AnalysisRegistry registry = new AnalysisModule( environment, pluginsService.filterPlugins(AnalysisPlugin.class).toList(), pluginsService.getStablePluginRegistry() - ); - localNodeFactory = new Node.LocalNodeFactory(settings, 
nodeEnvironment.nodeId()); + ).getAnalysisRegistry(); + modules.bindToInstance(AnalysisRegistry.class, registry); + return registry; + } - ScriptModule.registerClusterSettingsListeners(scriptService, settingsModule.getClusterSettings()); - final NetworkService networkService = new NetworkService( - pluginsService.filterPlugins(DiscoveryPlugin.class) - .map(d -> d.getCustomNameResolver(environment.settings())) - .filter(Objects::nonNull) - .toList() + private void construct( + ThreadPool threadPool, + SettingsModule settingsModule, + SearchModule searchModule, + ScriptService scriptService, + AnalysisRegistry analysisRegistry, + NodeServiceProvider serviceProvider, + boolean forbidPrivateIndexSettings + ) throws IOException { + + Settings settings = settingsModule.getSettings(); + + TelemetryProvider telemetryProvider = getSinglePlugin(TelemetryPlugin.class).map(p -> p.getTelemetryProvider(settings)) + .orElse(TelemetryProvider.NOOP); + modules.bindToInstance(Tracer.class, telemetryProvider.getTracer()); + + TaskManager taskManager = new TaskManager( + settings, + threadPool, + Stream.concat( + pluginsService.filterPlugins(ActionPlugin.class).flatMap(p -> p.getTaskHeaders().stream()), + Task.HEADERS_TO_COPY.stream() + ).collect(Collectors.toSet()), + telemetryProvider.getTracer() ); - List clusterPlugins = pluginsService.filterPlugins(ClusterPlugin.class).toList(); - final ClusterService clusterService = new ClusterService(settings, settingsModule.getClusterSettings(), threadPool, taskManager); + ClusterService clusterService = createClusterService(settingsModule, threadPool, taskManager); clusterService.addStateApplier(scriptService); - resourcesToClose.add(clusterService); - - final Set> consistentSettings = settingsModule.getConsistentSettings(); - if (consistentSettings.isEmpty() == false) { - clusterService.addLocalNodeMasterListener( - new ConsistentSettingsService(settings, clusterService, consistentSettings).newHashPublisher() - ); - } Supplier documentParsingObserverSupplier = getDocumentParsingObserverSupplier(); - var factoryContext = new InferenceServicePlugin.InferenceServiceFactoryContext(client); - final InferenceServiceRegistry inferenceServiceRegistry = new InferenceServiceRegistry( - pluginsService.filterPlugins(InferenceServicePlugin.class).toList(), - factoryContext - ); - final IngestService ingestService = new IngestService( clusterService, threadPool, environment, scriptService, - analysisModule.getAnalysisRegistry(), + analysisRegistry, pluginsService.filterPlugins(IngestPlugin.class).toList(), client, IngestService.createGrokThreadWatchdog(environment, threadPool), documentParsingObserverSupplier ); + + SystemIndices systemIndices = createSystemIndices(settings); + final SetOnce repositoriesServiceReference = new SetOnce<>(); + final SetOnce rerouteServiceReference = new SetOnce<>(); final ClusterInfoService clusterInfoService = serviceProvider.newClusterInfoService( pluginsService, settings, @@ -571,162 +635,91 @@ private void construct( threadPool, client ); - final UsageService usageService = new UsageService(); - - SearchModule searchModule = new SearchModule(settings, pluginsService.filterPlugins(SearchPlugin.class).toList()); - IndexSearcher.setMaxClauseCount(SearchUtils.calculateMaxClauseValue(threadPool)); - final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry( - Stream.of( - NetworkModule.getNamedWriteables().stream(), - IndicesModule.getNamedWriteables().stream(), - searchModule.getNamedWriteables().stream(), - 
pluginsService.flatMap(Plugin::getNamedWriteables), - ClusterModule.getNamedWriteables().stream(), - SystemIndexMigrationExecutor.getNamedWriteables().stream(), - inferenceServiceRegistry.getNamedWriteables().stream() - ).flatMap(Function.identity()).toList() - ); - NamedXContentRegistry xContentRegistry = new NamedXContentRegistry( - Stream.of( - NetworkModule.getNamedXContents().stream(), - IndicesModule.getNamedXContents().stream(), - searchModule.getNamedXContents().stream(), - pluginsService.flatMap(Plugin::getNamedXContent), - ClusterModule.getNamedXWriteables().stream(), - SystemIndexMigrationExecutor.getNamedXContentParsers().stream(), - HealthNodeTaskExecutor.getNamedXContentParsers().stream() - ).flatMap(Function.identity()).toList() - ); - final List features = pluginsService.filterPlugins(SystemIndexPlugin.class).map(plugin -> { - SystemIndices.validateFeatureName(plugin.getFeatureName(), plugin.getClass().getCanonicalName()); - return SystemIndices.Feature.fromSystemIndexPlugin(plugin, settings); - }).toList(); - final SystemIndices systemIndices = new SystemIndices(features); - final ExecutorSelector executorSelector = systemIndices.getExecutorSelector(); - - ModulesBuilder modules = new ModulesBuilder(); - final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool); - final FsHealthService fsHealthService = new FsHealthService( - settings, - clusterService.getClusterSettings(), - threadPool, - nodeEnvironment - ); - final SetOnce rerouteServiceReference = new SetOnce<>(); final InternalSnapshotsInfoService snapshotsInfoService = new InternalSnapshotsInfoService( settings, clusterService, repositoriesServiceReference::get, rerouteServiceReference::get ); - final WriteLoadForecaster writeLoadForecaster = getWriteLoadForecaster(threadPool, settings, clusterService.getClusterSettings()); final ClusterModule clusterModule = new ClusterModule( settings, clusterService, - clusterPlugins, + pluginsService.filterPlugins(ClusterPlugin.class).toList(), clusterInfoService, snapshotsInfoService, threadPool, systemIndices, - writeLoadForecaster + getWriteLoadForecaster(threadPool, settings, clusterService.getClusterSettings()), + telemetryProvider ); modules.add(clusterModule); + + RerouteService rerouteService = new BatchedRerouteService(clusterService, clusterModule.getAllocationService()::reroute); + rerouteServiceReference.set(rerouteService); + clusterService.setRerouteService(rerouteService); + + clusterInfoService.addListener( + new DiskThresholdMonitor( + settings, + clusterService::state, + clusterService.getClusterSettings(), + client, + threadPool::relativeTimeInMillis, + rerouteService + )::onNewInfo + ); + IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class).toList()); modules.add(indicesModule); - List pluginCircuitBreakers = pluginsService.filterPlugins(CircuitBreakerPlugin.class) - .map(plugin -> plugin.getCircuitBreaker(settings)) - .toList(); - final CircuitBreakerService circuitBreakerService = createCircuitBreakerService( + final Map customTripCounters = new TreeMap<>(); + CircuitBreakerService circuitBreakerService = createCircuitBreakerService( + new CircuitBreakerMetrics(telemetryProvider, customTripCounters), settingsModule.getSettings(), - pluginCircuitBreakers, settingsModule.getClusterSettings() ); - pluginsService.filterPlugins(CircuitBreakerPlugin.class).forEach(plugin -> { - CircuitBreaker breaker = circuitBreakerService.getBreaker(plugin.getCircuitBreaker(settings).getName()); - 
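Both the NamedWriteableRegistry and NamedXContentRegistry moved above are assembled the same way: each subsystem (network, indices, search, plugins, cluster, and so on) contributes a stream of entries, the streams are concatenated with `Stream.of(...).flatMap(Function.identity())`, and the flattened list seeds the registry. A minimal sketch of that assembly shape, with plain strings standing in for the named entries:

```java
import java.util.List;
import java.util.function.Function;
import java.util.stream.Stream;

class RegistryAssemblyDemo {
    public static void main(String[] args) {
        // Each module or plugin contributes its own entries.
        Stream<String> networkEntries = Stream.of("network:ping", "network:join");
        Stream<String> indicesEntries = Stream.of("indices:mapping");
        Stream<String> pluginEntries = Stream.of("plugin-a:custom");

        // Same shape as the registry construction above: a stream of streams,
        // flattened once, then materialized into the registry's backing list.
        List<String> entries = Stream.of(networkEntries, indicesEntries, pluginEntries)
            .flatMap(Function.identity())
            .toList();

        System.out.println(entries);
    }
}
```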
plugin.setCircuitBreaker(breaker); - }); - resourcesToClose.add(circuitBreakerService); modules.add(new GatewayModule()); CompatibilityVersions compatibilityVersions = new CompatibilityVersions( TransportVersion.current(), systemIndices.getMappingsVersions() ); + modules.add(loadPersistedClusterStateService(clusterService.getClusterSettings(), threadPool, compatibilityVersions)); + PageCacheRecycler pageCacheRecycler = serviceProvider.newPageCacheRecycler(pluginsService, settings); BigArrays bigArrays = serviceProvider.newBigArrays(pluginsService, pageCacheRecycler, circuitBreakerService); - modules.add(settingsModule); final MetaStateService metaStateService = new MetaStateService(nodeEnvironment, xContentRegistry); - final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService( - xContentRegistry, - clusterService.getClusterSettings(), - threadPool, - compatibilityVersions - ); - // collect engine factory providers from plugins - final Collection>> engineFactoryProviders = pluginsService.filterPlugins( - EnginePlugin.class - ).>>map(plugin -> plugin::getEngineFactory).toList(); - - final Map indexStoreFactories = pluginsService.filterPlugins(IndexStorePlugin.class) - .map(IndexStorePlugin::getDirectoryFactories) - .flatMap(m -> m.entrySet().stream()) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - - final Map recoveryStateFactories = pluginsService.filterPlugins( - IndexStorePlugin.class - ) - .map(IndexStorePlugin::getRecoveryStateFactories) - .flatMap(m -> m.entrySet().stream()) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - - final List indexFoldersDeletionListeners = pluginsService.filterPlugins( - IndexStorePlugin.class - ).map(IndexStorePlugin::getIndexFoldersDeletionListeners).flatMap(List::stream).toList(); - - final Map snapshotCommitSuppliers = pluginsService.filterPlugins( - IndexStorePlugin.class - ) - .map(IndexStorePlugin::getSnapshotCommitSuppliers) - .flatMap(m -> m.entrySet().stream()) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + FeatureService featureService = new FeatureService(pluginsService.loadServiceProviders(FeatureSpecification.class)); if (DiscoveryNode.isMasterNode(settings)) { clusterService.addListener(new SystemIndexMappingUpdateService(systemIndices, client)); - clusterService.addListener(new TransportVersionsFixupListener(clusterService, client.admin().cluster(), threadPool)); + clusterService.addListener( + new TransportVersionsFixupListener(clusterService, client.admin().cluster(), featureService, threadPool) + ); } - final RerouteService rerouteService = new BatchedRerouteService(clusterService, clusterModule.getAllocationService()::reroute); - rerouteServiceReference.set(rerouteService); - clusterService.setRerouteService(rerouteService); - - final IndicesService indicesService = new IndicesService( - settings, - pluginsService, - nodeEnvironment, - xContentRegistry, - analysisModule.getAnalysisRegistry(), - clusterModule.getIndexNameExpressionResolver(), - indicesModule.getMapperRegistry(), - namedWriteableRegistry, - threadPool, - settingsModule.getIndexScopedSettings(), - circuitBreakerService, - bigArrays, - scriptService, - clusterService, - client, - metaStateService, - engineFactoryProviders, - indexStoreFactories, - searchModule.getValuesSourceRegistry(), - recoveryStateFactories, - indexFoldersDeletionListeners, - snapshotCommitSuppliers, - searchModule.getRequestCacheKeyDifferentiator(), - documentParsingObserverSupplier - ); + 
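The SetOnce references threaded through this section exist to break construction-order cycles: InternalSnapshotsInfoService is created before the RepositoriesService and RerouteService it needs, so it is handed `repositoriesServiceReference::get` and `rerouteServiceReference::get`, and the references are populated once the real services are built. A minimal sketch of the same deferred-reference wiring, using AtomicReference in place of Lucene's SetOnce (which additionally enforces set-exactly-once semantics):

```java
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

class DeferredWiringDemo {
    record RerouteService(String name) {}

    // The consumer is built first; it captures only a supplier, not the service itself.
    record SnapshotsInfoService(Supplier<RerouteService> reroute) {
        void onNewSnapshot() {
            // By the time this runs, the reference must have been populated.
            System.out.println("rerouting via " + reroute.get().name());
        }
    }

    public static void main(String[] args) {
        AtomicReference<RerouteService> rerouteRef = new AtomicReference<>();
        SnapshotsInfoService infoService = new SnapshotsInfoService(rerouteRef::get);

        // Later in construction the real service appears and the loop is closed.
        rerouteRef.set(new RerouteService("batched-reroute"));
        infoService.onNewSnapshot();
    }
}
```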
IndicesService indicesService = new IndicesServiceBuilder().settings(settings) + .pluginsService(pluginsService) + .nodeEnvironment(nodeEnvironment) + .xContentRegistry(xContentRegistry) + .analysisRegistry(analysisRegistry) + .indexNameExpressionResolver(clusterModule.getIndexNameExpressionResolver()) + .mapperRegistry(indicesModule.getMapperRegistry()) + .namedWriteableRegistry(namedWriteableRegistry) + .threadPool(threadPool) + .indexScopedSettings(settingsModule.getIndexScopedSettings()) + .circuitBreakerService(circuitBreakerService) + .bigArrays(bigArrays) + .scriptService(scriptService) + .clusterService(clusterService) + .client(client) + .featureService(featureService) + .metaStateService(metaStateService) + .valuesSourceRegistry(searchModule.getValuesSourceRegistry()) + .requestCacheKeyDifferentiator(searchModule.getRequestCacheKeyDifferentiator()) + .documentParsingObserverSupplier(documentParsingObserverSupplier) + .build(); final var parameters = new IndexSettingProvider.Parameters(indicesService::createIndexMapperServiceForValidation); IndexSettingProviders indexSettingProviders = new IndexSettingProviders( @@ -749,12 +742,11 @@ private void construct( indexSettingProviders ); - final MetadataCreateDataStreamService metadataCreateDataStreamService = new MetadataCreateDataStreamService( - threadPool, - clusterService, - metadataCreateIndexService + modules.bindToInstance( + MetadataCreateDataStreamService.class, + new MetadataCreateDataStreamService(threadPool, clusterService, metadataCreateIndexService) ); - final MetadataDataStreamsService metadataDataStreamsService = new MetadataDataStreamsService(clusterService, indicesService); + modules.bindToInstance(MetadataDataStreamsService.class, new MetadataDataStreamsService(clusterService, indicesService)); final MetadataUpdateSettingsService metadataUpdateSettingsService = new MetadataUpdateSettingsService( clusterService, @@ -765,8 +757,6 @@ private void construct( threadPool ); - FeatureService featureService = new FeatureService(pluginsService.loadServiceProviders(FeatureSpecification.class)); - record PluginServiceInstances( Client client, ClusterService clusterService, @@ -789,7 +779,7 @@ record PluginServiceInstances( client, clusterService, threadPool, - resourceWatcherService, + createResourceWatcherService(settings, threadPool), scriptService, xContentRegistry, environment, @@ -806,27 +796,6 @@ record PluginServiceInstances( Collection pluginComponents = pluginsService.flatMap(p -> p.createComponents(pluginServices)).toList(); - List> reservedStateHandlers = new ArrayList<>(); - - // add all reserved state handlers from server - reservedStateHandlers.add(new ReservedClusterSettingsAction(settingsModule.getClusterSettings())); - - var templateService = new MetadataIndexTemplateService( - clusterService, - metadataCreateIndexService, - indicesService, - settingsModule.getIndexScopedSettings(), - xContentRegistry, - systemIndices, - indexSettingProviders - ); - - reservedStateHandlers.add(new ReservedComposableIndexTemplateAction(templateService, settingsModule.getIndexScopedSettings())); - - // add all reserved state handlers from plugins - pluginsService.loadServiceProviders(ReservedClusterStateHandlerProvider.class) - .forEach(h -> reservedStateHandlers.addAll(h.handlers())); - var terminationHandlers = pluginsService.loadServiceProviders(TerminationHandlerProvider.class) .stream() .map(TerminationHandlerProvider::handler); @@ -842,16 +811,28 @@ record PluginServiceInstances( 
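The switch just above from IndicesService's long positional constructor to IndicesServiceBuilder is a plain fluent-builder refactor: every dependency is named at the call site, and optional collaborators can default. A reduced sketch of the pattern with invented fields (not the real builder's API):

```java
class IndicesServiceBuilderDemo {
    // Invented stand-in: two required dependencies, one optional.
    static class Service {
        final String settings;
        final String threadPool;
        final String cache;

        private Service(Builder b) {
            this.settings = b.settings;
            this.threadPool = b.threadPool;
            this.cache = b.cache;
        }
    }

    static class Builder {
        private String settings;
        private String threadPool;
        private String cache = "default-cache"; // optional, defaulted

        Builder settings(String s) { this.settings = s; return this; }
        Builder threadPool(String t) { this.threadPool = t; return this; }
        Builder cache(String c) { this.cache = c; return this; }

        Service build() {
            // Fail fast on missing required dependencies instead of NPE-ing later.
            if (settings == null || threadPool == null) {
                throw new IllegalStateException("settings and threadPool are required");
            }
            return new Service(this);
        }
    }

    public static void main(String[] args) {
        Service s = new Builder().settings("node.settings").threadPool("search-pool").build();
        System.out.println(s.settings + " / " + s.threadPool + " / " + s.cache);
    }
}
```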
pluginsService.filterPlugins(ActionPlugin.class).toList(), client, circuitBreakerService, - usageService, + createUsageService(), systemIndices, - tracer, + telemetryProvider.getTracer(), clusterService, - reservedStateHandlers, + buildReservedStateHandlers( + settingsModule, + clusterService, + indicesService, + systemIndices, + indexSettingProviders, + metadataCreateIndexService + ), pluginsService.loadSingletonServiceProvider(RestExtension.class, RestExtension::allowAll) ); modules.add(actionModule); - final RestController restController = actionModule.getRestController(); + final NetworkService networkService = new NetworkService( + pluginsService.filterPlugins(DiscoveryPlugin.class) + .map(d -> d.getCustomNameResolver(environment.settings())) + .filter(Objects::nonNull) + .toList() + ); final NetworkModule networkModule = new NetworkModule( settings, pluginsService.filterPlugins(NetworkPlugin.class).toList(), @@ -862,15 +843,15 @@ record PluginServiceInstances( namedWriteableRegistry, xContentRegistry, networkService, - restController, + actionModule.getRestController(), actionModule::copyRequestHeadersToThreadContext, clusterService.getClusterSettings(), - tracer + telemetryProvider.getTracer() ); - Collection>> indexTemplateMetadataUpgraders = pluginsService.map( - Plugin::getIndexTemplateMetadataUpgrader - ).toList(); - final MetadataUpgrader metadataUpgrader = new MetadataUpgrader(indexTemplateMetadataUpgraders); + + var indexTemplateMetadataUpgraders = pluginsService.map(Plugin::getIndexTemplateMetadataUpgrader).toList(); + modules.bindToInstance(MetadataUpgrader.class, new MetadataUpgrader(indexTemplateMetadataUpgraders)); + final IndexMetadataVerifier indexMetadataVerifier = new IndexMetadataVerifier( settings, clusterService, @@ -893,9 +874,8 @@ record PluginServiceInstances( localNodeFactory, settingsModule.getClusterSettings(), taskManager, - tracer + telemetryProvider.getTracer() ); - final GatewayMetaState gatewayMetaState = new GatewayMetaState(); final ResponseCollectorService responseCollectorService = new ResponseCollectorService(clusterService); final SearchTransportService searchTransportService = new SearchTransportService( transportService, @@ -956,40 +936,23 @@ record PluginServiceInstances( fileSettingsService, threadPool ); - final DiskThresholdMonitor diskThresholdMonitor = new DiskThresholdMonitor( - settings, - clusterService::state, - clusterService.getClusterSettings(), - client, - threadPool::relativeTimeInMillis, - rerouteService - ); - clusterInfoService.addListener(diskThresholdMonitor::onNewInfo); - final DiscoveryModule discoveryModule = new DiscoveryModule( + DiscoveryModule discoveryModule = createDiscoveryModule( settings, + threadPool, transportService, - client, - namedWriteableRegistry, networkService, - clusterService.getMasterService(), - clusterService.getClusterApplierService(), - clusterService.getClusterSettings(), - pluginsService.filterPlugins(DiscoveryPlugin.class).toList(), - pluginsService.filterPlugins(ClusterCoordinationPlugin.class).toList(), + clusterService, clusterModule.getAllocationService(), - environment.configFile(), - gatewayMetaState, rerouteService, - fsHealthService, circuitBreakerService, compatibilityVersions, - featureService.getNodeFeatures() + featureService ); - this.nodeService = new NodeService( + nodeService = new NodeService( settings, threadPool, - monitorService, + new MonitorService(settings, nodeEnvironment, threadPool), discoveryModule.getCoordinator(), transportService, indicesService, @@ -1017,109 
+980,66 @@ record PluginServiceInstances( searchModule.getFetchPhase(), responseCollectorService, circuitBreakerService, - executorSelector, - tracer + systemIndices.getExecutorSelector(), + telemetryProvider.getTracer() ); - final PersistentTasksService persistentTasksService = new PersistentTasksService(clusterService, threadPool, client); - final SystemIndexMigrationExecutor systemIndexMigrationExecutor = new SystemIndexMigrationExecutor( - client, - clusterService, - systemIndices, - metadataUpdateSettingsService, - metadataCreateIndexService, - settingsModule.getIndexScopedSettings() - ); - final HealthNodeTaskExecutor healthNodeTaskExecutor = HealthNodeTaskExecutor.create( - clusterService, - persistentTasksService, - featureService, - settings, - clusterService.getClusterSettings() - ); - final Stream> builtinTaskExecutors = Stream.of(systemIndexMigrationExecutor, healthNodeTaskExecutor); - final Stream> pluginTaskExecutors = pluginsService.filterPlugins(PersistentTaskPlugin.class) - .map( - p -> p.getPersistentTasksExecutor( - clusterService, - threadPool, - client, - settingsModule, - clusterModule.getIndexNameExpressionResolver() - ) + modules.add( + loadPersistentTasksService( + settingsModule, + clusterService, + threadPool, + systemIndices, + featureService, + clusterModule.getIndexNameExpressionResolver(), + metadataUpdateSettingsService, + metadataCreateIndexService ) - .flatMap(List::stream); - final PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry( - Stream.concat(pluginTaskExecutors, builtinTaskExecutors).toList() - ); - final PersistentTasksClusterService persistentTasksClusterService = new PersistentTasksClusterService( - settings, - registry, - clusterService, - threadPool ); - resourcesToClose.add(persistentTasksClusterService); - PluginShutdownService pluginShutdownService = new PluginShutdownService( - pluginsService.filterPlugins(ShutdownAwarePlugin.class).toList() + modules.add( + loadPluginShutdownService(clusterService), + loadDiagnosticServices(settings, discoveryModule.getCoordinator(), clusterService, transportService, featureService, threadPool) ); - clusterService.addListener(pluginShutdownService); - final RecoveryPlannerService recoveryPlannerService = getRecoveryPlannerService(threadPool, clusterService, repositoryService); - final DesiredNodesSettingsValidator desiredNodesSettingsValidator = new DesiredNodesSettingsValidator(); - - final MasterHistoryService masterHistoryService = new MasterHistoryService(transportService, threadPool, clusterService); - final CoordinationDiagnosticsService coordinationDiagnosticsService = new CoordinationDiagnosticsService( - clusterService, - transportService, - discoveryModule.getCoordinator(), - masterHistoryService - ); - final HealthService healthService = createHealthService(clusterService, coordinationDiagnosticsService, threadPool); - HealthPeriodicLogger healthPeriodicLogger = createHealthPeriodicLogger(clusterService, settings, client, healthService); - healthPeriodicLogger.init(); - HealthMetadataService healthMetadataService = HealthMetadataService.create(clusterService, featureService, settings); - LocalHealthMonitor localHealthMonitor = LocalHealthMonitor.create( - settings, - clusterService, - nodeService, - threadPool, - client, - featureService - ); - HealthInfoCache nodeHealthOverview = HealthInfoCache.create(clusterService); - HealthApiStats healthApiStats = new HealthApiStats(); + RecoveryPlannerService recoveryPlannerService = getRecoveryPlannerService(threadPool, 
clusterService, repositoryService); + modules.add(b -> { + serviceProvider.processRecoverySettings(pluginsService, settingsModule.getClusterSettings(), recoverySettings); + SnapshotFilesProvider snapshotFilesProvider = new SnapshotFilesProvider(repositoryService); + var peerRecovery = new PeerRecoverySourceService( + transportService, + indicesService, + clusterService, + recoverySettings, + recoveryPlannerService + ); + resourcesToClose.add(peerRecovery); + b.bind(PeerRecoverySourceService.class).toInstance(peerRecovery); + b.bind(PeerRecoveryTargetService.class) + .toInstance( + new PeerRecoveryTargetService( + client, + threadPool, + transportService, + recoverySettings, + clusterService, + snapshotFilesProvider + ) + ); + }); - List reloadablePlugins = pluginsService.filterPlugins(ReloadablePlugin.class).toList(); - pluginsService.filterPlugins(ReloadAwarePlugin.class).forEach(p -> p.setReloadCallback(wrapPlugins(reloadablePlugins))); + modules.add(loadPluginComponents(pluginComponents)); modules.add(b -> { b.bind(NodeService.class).toInstance(nodeService); - b.bind(NamedXContentRegistry.class).toInstance(xContentRegistry); - b.bind(PluginsService.class).toInstance(pluginsService); - b.bind(Client.class).toInstance(client); - b.bind(NodeClient.class).toInstance(client); - b.bind(Environment.class).toInstance(environment); - b.bind(ThreadPool.class).toInstance(threadPool); - b.bind(NodeEnvironment.class).toInstance(nodeEnvironment); - b.bind(ResourceWatcherService.class).toInstance(resourceWatcherService); - b.bind(CircuitBreakerService.class).toInstance(circuitBreakerService); b.bind(BigArrays.class).toInstance(bigArrays); b.bind(PageCacheRecycler.class).toInstance(pageCacheRecycler); - b.bind(ScriptService.class).toInstance(scriptService); - b.bind(AnalysisRegistry.class).toInstance(analysisModule.getAnalysisRegistry()); b.bind(IngestService.class).toInstance(ingestService); b.bind(IndexingPressure.class).toInstance(indexingLimits); - b.bind(UsageService.class).toInstance(usageService); b.bind(AggregationUsageService.class).toInstance(searchModule.getValuesSourceRegistry().getUsageService()); - b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry); - b.bind(MetadataUpgrader.class).toInstance(metadataUpgrader); b.bind(MetaStateService.class).toInstance(metaStateService); - b.bind(PersistedClusterStateService.class).toInstance(persistedClusterStateService); b.bind(IndicesService.class).toInstance(indicesService); b.bind(MetadataCreateIndexService.class).toInstance(metadataCreateIndexService); - b.bind(MetadataCreateDataStreamService.class).toInstance(metadataCreateDataStreamService); - b.bind(MetadataDataStreamsService.class).toInstance(metadataDataStreamsService); b.bind(MetadataUpdateSettingsService.class).toInstance(metadataUpdateSettingsService); b.bind(SearchService.class).toInstance(searchService); b.bind(SearchTransportService.class).toInstance(searchTransportService); @@ -1127,99 +1047,143 @@ record PluginServiceInstances( b.bind(Transport.class).toInstance(transport); b.bind(TransportService.class).toInstance(transportService); b.bind(NetworkService.class).toInstance(networkService); - b.bind(UpdateHelper.class).toInstance(new UpdateHelper(scriptService)); b.bind(IndexMetadataVerifier.class).toInstance(indexMetadataVerifier); b.bind(ClusterInfoService.class).toInstance(clusterInfoService); b.bind(SnapshotsInfoService.class).toInstance(snapshotsInfoService); - b.bind(GatewayMetaState.class).toInstance(gatewayMetaState); 
b.bind(FeatureService.class).toInstance(featureService); - b.bind(Coordinator.class).toInstance(discoveryModule.getCoordinator()); - b.bind(Reconfigurator.class).toInstance(discoveryModule.getReconfigurator()); - { - serviceProvider.processRecoverySettings(pluginsService, settingsModule.getClusterSettings(), recoverySettings); - final SnapshotFilesProvider snapshotFilesProvider = new SnapshotFilesProvider(repositoryService); - b.bind(PeerRecoverySourceService.class) - .toInstance( - new PeerRecoverySourceService( - transportService, - indicesService, - clusterService, - recoverySettings, - recoveryPlannerService - ) - ); - b.bind(PeerRecoveryTargetService.class) - .toInstance( - new PeerRecoveryTargetService( - client, - threadPool, - transportService, - recoverySettings, - clusterService, - snapshotFilesProvider - ) - ); - } b.bind(HttpServerTransport.class).toInstance(httpServerTransport); - pluginComponents.forEach(p -> { - if (p instanceof PluginComponentBinding pcb) { - @SuppressWarnings("unchecked") - Class clazz = (Class) pcb.inter(); - b.bind(clazz).toInstance(pcb.impl()); - - } else { - @SuppressWarnings("unchecked") - Class clazz = (Class) p.getClass(); - b.bind(clazz).toInstance(p); - } - }); - b.bind(PersistentTasksService.class).toInstance(persistentTasksService); - b.bind(PersistentTasksClusterService.class).toInstance(persistentTasksClusterService); - b.bind(PersistentTasksExecutorRegistry.class).toInstance(registry); b.bind(RepositoriesService.class).toInstance(repositoryService); b.bind(SnapshotsService.class).toInstance(snapshotsService); b.bind(SnapshotShardsService.class).toInstance(snapshotShardsService); b.bind(RestoreService.class).toInstance(restoreService); b.bind(RerouteService.class).toInstance(rerouteService); b.bind(ShardLimitValidator.class).toInstance(shardLimitValidator); - b.bind(FsHealthService.class).toInstance(fsHealthService); - b.bind(SystemIndices.class).toInstance(systemIndices); - b.bind(PluginShutdownService.class).toInstance(pluginShutdownService); - b.bind(ExecutorSelector.class).toInstance(executorSelector); b.bind(IndexSettingProviders.class).toInstance(indexSettingProviders); - b.bind(DesiredNodesSettingsValidator.class).toInstance(desiredNodesSettingsValidator); - b.bind(HealthService.class).toInstance(healthService); - b.bind(MasterHistoryService.class).toInstance(masterHistoryService); - b.bind(CoordinationDiagnosticsService.class).toInstance(coordinationDiagnosticsService); - b.bind(HealthNodeTaskExecutor.class).toInstance(healthNodeTaskExecutor); - b.bind(HealthMetadataService.class).toInstance(healthMetadataService); - b.bind(LocalHealthMonitor.class).toInstance(localHealthMonitor); - b.bind(HealthInfoCache.class).toInstance(nodeHealthOverview); - b.bind(HealthApiStats.class).toInstance(healthApiStats); - b.bind(Tracer.class).toInstance(tracer); b.bind(FileSettingsService.class).toInstance(fileSettingsService); - b.bind(WriteLoadForecaster.class).toInstance(writeLoadForecaster); - b.bind(HealthPeriodicLogger.class).toInstance(healthPeriodicLogger); b.bind(CompatibilityVersions.class).toInstance(compatibilityVersions); - b.bind(InferenceServiceRegistry.class).toInstance(inferenceServiceRegistry); }); if (ReadinessService.enabled(environment)) { - modules.add( - b -> b.bind(ReadinessService.class) - .toInstance(serviceProvider.newReadinessService(pluginsService, clusterService, environment)) + modules.bindToInstance( + ReadinessService.class, + serviceProvider.newReadinessService(pluginsService, clusterService, environment) ); } injector = 
modules.createInjector(); - // We allocate copies of existing shards by looking for a viable copy of the shard in the cluster and assigning the shard there. - // The search for viable copies is triggered by an allocation attempt (i.e. a reroute) and is performed asynchronously. When it - // completes we trigger another reroute to try the allocation again. This means there is a circular dependency: the allocation - // service needs access to the existing shards allocators (e.g. the GatewayAllocator) which need to be able to trigger a - // reroute, which needs to call into the allocation service. We close the loop here: - clusterModule.setExistingShardsAllocators(injector.getInstance(GatewayAllocator.class)); + postInjection(clusterModule, actionModule, clusterService, transportService, featureService); + } + + private ClusterService createClusterService(SettingsModule settingsModule, ThreadPool threadPool, TaskManager taskManager) { + ClusterService clusterService = new ClusterService( + settingsModule.getSettings(), + settingsModule.getClusterSettings(), + threadPool, + taskManager + ); + resourcesToClose.add(clusterService); + + Set> consistentSettings = settingsModule.getConsistentSettings(); + if (consistentSettings.isEmpty() == false) { + clusterService.addLocalNodeMasterListener( + new ConsistentSettingsService(settingsModule.getSettings(), clusterService, consistentSettings).newHashPublisher() + ); + } + return clusterService; + } + + private UsageService createUsageService() { + UsageService usageService = new UsageService(); + modules.bindToInstance(UsageService.class, usageService); + return usageService; + } + + private SystemIndices createSystemIndices(Settings settings) { + List features = pluginsService.filterPlugins(SystemIndexPlugin.class).map(plugin -> { + SystemIndices.validateFeatureName(plugin.getFeatureName(), plugin.getClass().getCanonicalName()); + return SystemIndices.Feature.fromSystemIndexPlugin(plugin, settings); + }).toList(); + + SystemIndices systemIndices = new SystemIndices(features); + modules.add(b -> { + b.bind(SystemIndices.class).toInstance(systemIndices); + b.bind(ExecutorSelector.class).toInstance(systemIndices.getExecutorSelector()); + }); + return systemIndices; + } + + private ResourceWatcherService createResourceWatcherService(Settings settings, ThreadPool threadPool) { + ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, threadPool); + resourcesToClose.add(resourceWatcherService); + modules.bindToInstance(ResourceWatcherService.class, resourceWatcherService); + return resourceWatcherService; + } + + private Module loadPluginShutdownService(ClusterService clusterService) { + PluginShutdownService pluginShutdownService = new PluginShutdownService( + pluginsService.filterPlugins(ShutdownAwarePlugin.class).toList() + ); + clusterService.addListener(pluginShutdownService); + + return b -> b.bind(PluginShutdownService.class).toInstance(pluginShutdownService); + } + + private Module loadDiagnosticServices( + Settings settings, + Coordinator coordinator, + ClusterService clusterService, + TransportService transportService, + FeatureService featureService, + ThreadPool threadPool + ) { + + MasterHistoryService masterHistoryService = new MasterHistoryService(transportService, threadPool, clusterService); + CoordinationDiagnosticsService coordinationDiagnosticsService = new CoordinationDiagnosticsService( + clusterService, + transportService, + coordinator, + masterHistoryService + ); + + var serverHealthIndicatorServices = 
Stream.of( + new StableMasterHealthIndicatorService(coordinationDiagnosticsService, clusterService), + new RepositoryIntegrityHealthIndicatorService(clusterService), + new DiskHealthIndicatorService(clusterService), + new ShardsCapacityHealthIndicatorService(clusterService) + ); + var pluginHealthIndicatorServices = pluginsService.filterPlugins(HealthPlugin.class) + .flatMap(plugin -> plugin.getHealthIndicatorServices().stream()); + HealthService healthService = new HealthService( + Stream.concat(serverHealthIndicatorServices, pluginHealthIndicatorServices).toList(), + threadPool + ); + HealthPeriodicLogger healthPeriodicLogger = HealthPeriodicLogger.create(settings, clusterService, client, healthService); + HealthMetadataService healthMetadataService = HealthMetadataService.create(clusterService, featureService, settings); + LocalHealthMonitor localHealthMonitor = LocalHealthMonitor.create( + settings, + clusterService, + nodeService, + threadPool, + client, + featureService + ); + HealthInfoCache nodeHealthOverview = HealthInfoCache.create(clusterService); + + return b -> { + b.bind(HealthService.class).toInstance(healthService); + b.bind(MasterHistoryService.class).toInstance(masterHistoryService); + b.bind(CoordinationDiagnosticsService.class).toInstance(coordinationDiagnosticsService); + b.bind(HealthMetadataService.class).toInstance(healthMetadataService); + b.bind(LocalHealthMonitor.class).toInstance(localHealthMonitor); + b.bind(HealthInfoCache.class).toInstance(nodeHealthOverview); + b.bind(HealthApiStats.class).toInstance(new HealthApiStats()); + b.bind(HealthPeriodicLogger.class).toInstance(healthPeriodicLogger); + }; + } + + private Module loadPluginComponents(Collection pluginComponents) { List pluginLifecycleComponents = pluginComponents.stream().map(p -> { if (p instanceof PluginComponentBinding pcb) { return pcb.impl(); @@ -1227,8 +1191,37 @@ record PluginServiceInstances( return p; }).filter(p -> p instanceof LifecycleComponent).map(p -> (LifecycleComponent) p).toList(); resourcesToClose.addAll(pluginLifecycleComponents); - resourcesToClose.add(injector.getInstance(PeerRecoverySourceService.class)); - this.pluginLifecycleComponents = Collections.unmodifiableList(pluginLifecycleComponents); + this.pluginLifecycleComponents = pluginLifecycleComponents; + + List reloadablePlugins = pluginsService.filterPlugins(ReloadablePlugin.class).toList(); + pluginsService.filterPlugins(ReloadAwarePlugin.class).forEach(p -> p.setReloadCallback(wrapPlugins(reloadablePlugins))); + + return b -> pluginComponents.forEach(p -> { + if (p instanceof PluginComponentBinding pcb) { + @SuppressWarnings("unchecked") + Class clazz = (Class) pcb.inter(); + b.bind(clazz).toInstance(pcb.impl()); + } else { + @SuppressWarnings("unchecked") + Class clazz = (Class) p.getClass(); + b.bind(clazz).toInstance(p); + } + }); + } + + private void postInjection( + ClusterModule clusterModule, + ActionModule actionModule, + ClusterService clusterService, + TransportService transportService, + FeatureService featureService + ) { + // We allocate copies of existing shards by looking for a viable copy of the shard in the cluster and assigning the shard there. + // The search for viable copies is triggered by an allocation attempt (i.e. a reroute) and is performed asynchronously. When it + // completes we trigger another reroute to try the allocation again. This means there is a circular dependency: the allocation + // service needs access to the existing shards allocators (e.g. 
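Plugin components arrive in loadPluginComponents as bare Objects, so the binding code keys each one by its runtime class unless the plugin wrapped it in a PluginComponentBinding to choose the bound interface explicitly; that runtime-only type information is what forces the `@SuppressWarnings` casts in the method below. A reduced sketch of that dispatch, with a plain map in place of the Guice binder and invented component types:

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class PluginComponentDemo {
    // Toy binding target, keyed by class.
    static final Map<Class<?>, Object> BINDINGS = new HashMap<>();

    // Mirrors PluginComponentBinding: bind impl under an explicit interface.
    record ComponentBinding<T>(Class<T> inter, T impl) {}

    interface Warmer {}
    static class DiskWarmer implements Warmer {}
    static class Telemetry {}

    public static void main(String[] args) {
        List<Object> pluginComponents = List.of(
            new ComponentBinding<>(Warmer.class, new DiskWarmer()), // explicit interface
            new Telemetry()                                         // bound by runtime class
        );

        for (Object p : pluginComponents) {
            if (p instanceof ComponentBinding<?> cb) {
                BINDINGS.put(cb.inter(), cb.impl());
            } else {
                BINDINGS.put(p.getClass(), p);
            }
        }
        System.out.println(BINDINGS.keySet());
    }
}
```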
the GatewayAllocator) which need to be able to trigger a + // reroute, which needs to call into the allocation service. We close the loop here: + clusterModule.setExistingShardsAllocators(injector.getInstance(GatewayAllocator.class)); // Due to Java's type erasure with generics, the injector can't give us exactly what we need, and we have // to resort to some evil casting. @@ -1245,8 +1238,6 @@ record PluginServiceInstances( transportService.getRemoteClusterService(), namedWriteableRegistry ); - this.namedWriteableRegistry = namedWriteableRegistry; - this.xContentRegistry = xContentRegistry; logger.debug("initializing HTTP handlers ..."); actionModule.initRestHandlers(() -> clusterService.state().nodesIfRecovered(), f -> { @@ -1262,21 +1253,40 @@ private Supplier getDocumentParsingObserverSupplier() { } /** - * Creates a new {@link CircuitBreakerService} based on the settings provided. + * Create and initialize a new {@link CircuitBreakerService} based on the settings provided. * * @see Node#BREAKER_TYPE_KEY */ - private static CircuitBreakerService createCircuitBreakerService( + private CircuitBreakerService createCircuitBreakerService( + CircuitBreakerMetrics metrics, Settings settings, - List breakerSettings, ClusterSettings clusterSettings ) { + var pluginBreakers = pluginsService.filterPlugins(CircuitBreakerPlugin.class) + .map(p -> Tuple.tuple(p, p.getCircuitBreaker(settings))) + .toList(); + String type = Node.BREAKER_TYPE_KEY.get(settings); - return switch (type) { - case "hierarchy" -> new HierarchyCircuitBreakerService(settings, breakerSettings, clusterSettings); + CircuitBreakerService circuitBreakerService = switch (type) { + case "hierarchy" -> new HierarchyCircuitBreakerService( + metrics, + settings, + pluginBreakers.stream().map(Tuple::v2).toList(), + clusterSettings + ); case "none" -> new NoneCircuitBreakerService(); default -> throw new IllegalArgumentException("Unknown circuit breaker type [" + type + "]"); }; + resourcesToClose.add(circuitBreakerService); + modules.bindToInstance(CircuitBreakerService.class, circuitBreakerService); + + pluginBreakers.forEach(t -> { + final CircuitBreaker circuitBreaker = circuitBreakerService.getBreaker(t.v2().getName()); + t.v1().setCircuitBreaker(circuitBreaker); + metrics.addCustomCircuitBreaker(circuitBreaker); + }); + + return circuitBreakerService; } /** @@ -1296,31 +1306,6 @@ private static ReloadablePlugin wrapPlugins(List reloadablePlu }; } - private HealthService createHealthService( - ClusterService clusterService, - CoordinationDiagnosticsService coordinationDiagnosticsService, - ThreadPool threadPool - ) { - var serverHealthIndicatorServices = Stream.of( - new StableMasterHealthIndicatorService(coordinationDiagnosticsService, clusterService), - new RepositoryIntegrityHealthIndicatorService(clusterService), - new DiskHealthIndicatorService(clusterService), - new ShardsCapacityHealthIndicatorService(clusterService) - ); - var pluginHealthIndicatorServices = pluginsService.filterPlugins(HealthPlugin.class) - .flatMap(plugin -> plugin.getHealthIndicatorServices().stream()); - return new HealthService(Stream.concat(serverHealthIndicatorServices, pluginHealthIndicatorServices).toList(), threadPool); - } - - private static HealthPeriodicLogger createHealthPeriodicLogger( - ClusterService clusterService, - Settings settings, - NodeClient client, - HealthService healthService - ) { - return new HealthPeriodicLogger(settings, clusterService, client, healthService); - } - private RecoveryPlannerService getRecoveryPlannerService( 
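createCircuitBreakerService, shown above, picks the implementation from the node's breaker-type setting: "hierarchy" gets the real accounting service, "none" a no-op, and anything else is rejected. A trimmed sketch of that dispatch with stand-in implementations (the real constructors take metrics, settings, and plugin breakers):

```java
class BreakerFactoryDemo {
    interface CircuitBreakerService {}
    record HierarchyService() implements CircuitBreakerService {}
    record NoneService() implements CircuitBreakerService {}

    static CircuitBreakerService create(String type) {
        // Same switch shape as createCircuitBreakerService.
        return switch (type) {
            case "hierarchy" -> new HierarchyService();
            case "none" -> new NoneService();
            default -> throw new IllegalArgumentException("Unknown circuit breaker type [" + type + "]");
        };
    }

    public static void main(String[] args) {
        System.out.println(create("hierarchy"));
        System.out.println(create("none"));
    }
}
```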
ThreadPool threadPool, ClusterService clusterService, @@ -1340,11 +1325,14 @@ private WriteLoadForecaster getWriteLoadForecaster(ThreadPool threadPool, Settin var writeLoadForecasters = pluginsService.filterPlugins(ClusterPlugin.class) .flatMap(clusterPlugin -> clusterPlugin.createWriteLoadForecasters(threadPool, settings, clusterSettings).stream()); - return getSinglePlugin(writeLoadForecasters, WriteLoadForecaster.class).orElse(WriteLoadForecaster.DEFAULT); + WriteLoadForecaster forecaster = getSinglePlugin(writeLoadForecasters, WriteLoadForecaster.class).orElse( + WriteLoadForecaster.DEFAULT + ); + modules.bindToInstance(WriteLoadForecaster.class, forecaster); + return forecaster; } - private PersistedClusterStateService newPersistedClusterStateService( - NamedXContentRegistry xContentRegistry, + private Module loadPersistedClusterStateService( ClusterSettings clusterSettings, ThreadPool threadPool, CompatibilityVersions compatibilityVersions @@ -1353,18 +1341,140 @@ private PersistedClusterStateService newPersistedClusterStateService( .map(ClusterCoordinationPlugin::getPersistedClusterStateServiceFactory) .flatMap(Optional::stream); - return getSinglePlugin(persistedClusterStateServiceFactories, ClusterCoordinationPlugin.PersistedClusterStateServiceFactory.class) - .map( - f -> f.newPersistedClusterStateService( - nodeEnvironment, - xContentRegistry, - clusterSettings, - threadPool, - compatibilityVersions - ) - ) + var service = getSinglePlugin( + persistedClusterStateServiceFactories, + ClusterCoordinationPlugin.PersistedClusterStateServiceFactory.class + ).map(f -> f.newPersistedClusterStateService(nodeEnvironment, xContentRegistry, clusterSettings, threadPool, compatibilityVersions)) .orElseGet( () -> new PersistedClusterStateService(nodeEnvironment, xContentRegistry, clusterSettings, threadPool::relativeTimeInMillis) ); + + return b -> b.bind(PersistedClusterStateService.class).toInstance(service); + } + + private List> buildReservedStateHandlers( + SettingsModule settingsModule, + ClusterService clusterService, + IndicesService indicesService, + SystemIndices systemIndices, + IndexSettingProviders indexSettingProviders, + MetadataCreateIndexService metadataCreateIndexService + ) { + List> reservedStateHandlers = new ArrayList<>(); + + // add all reserved state handlers from server + reservedStateHandlers.add(new ReservedClusterSettingsAction(settingsModule.getClusterSettings())); + + var templateService = new MetadataIndexTemplateService( + clusterService, + metadataCreateIndexService, + indicesService, + settingsModule.getIndexScopedSettings(), + xContentRegistry, + systemIndices, + indexSettingProviders + ); + reservedStateHandlers.add(new ReservedComposableIndexTemplateAction(templateService, settingsModule.getIndexScopedSettings())); + + // add all reserved state handlers from plugins + pluginsService.loadServiceProviders(ReservedClusterStateHandlerProvider.class) + .forEach(h -> reservedStateHandlers.addAll(h.handlers())); + + return reservedStateHandlers; + } + + private DiscoveryModule createDiscoveryModule( + Settings settings, + ThreadPool threadPool, + TransportService transportService, + NetworkService networkService, + ClusterService clusterService, + AllocationService allocationService, + RerouteService rerouteService, + CircuitBreakerService circuitBreakerService, + CompatibilityVersions compatibilityVersions, + FeatureService featureService + ) { + GatewayMetaState gatewayMetaState = new GatewayMetaState(); + FsHealthService fsHealthService = new 
FsHealthService(settings, clusterService.getClusterSettings(), threadPool, nodeEnvironment); + + DiscoveryModule module = new DiscoveryModule( + settings, + transportService, + client, + namedWriteableRegistry, + networkService, + clusterService.getMasterService(), + clusterService.getClusterApplierService(), + clusterService.getClusterSettings(), + pluginsService.filterPlugins(DiscoveryPlugin.class).toList(), + pluginsService.filterPlugins(ClusterCoordinationPlugin.class).toList(), + allocationService, + environment.configFile(), + gatewayMetaState, + rerouteService, + fsHealthService, + circuitBreakerService, + compatibilityVersions, + featureService + ); + + modules.add(module, b -> { + b.bind(GatewayMetaState.class).toInstance(gatewayMetaState); + b.bind(FsHealthService.class).toInstance(fsHealthService); + }); + + return module; + } + + private Module loadPersistentTasksService( + SettingsModule settingsModule, + ClusterService clusterService, + ThreadPool threadPool, + SystemIndices systemIndices, + FeatureService featureService, + IndexNameExpressionResolver indexNameExpressionResolver, + MetadataUpdateSettingsService metadataUpdateSettingsService, + MetadataCreateIndexService metadataCreateIndexService + ) { + PersistentTasksService persistentTasksService = new PersistentTasksService(clusterService, threadPool, client); + SystemIndexMigrationExecutor systemIndexMigrationExecutor = new SystemIndexMigrationExecutor( + client, + clusterService, + systemIndices, + metadataUpdateSettingsService, + metadataCreateIndexService, + settingsModule.getIndexScopedSettings() + ); + HealthNodeTaskExecutor healthNodeTaskExecutor = HealthNodeTaskExecutor.create( + clusterService, + persistentTasksService, + featureService, + settingsModule.getSettings(), + clusterService.getClusterSettings() + ); + Stream> builtinTaskExecutors = Stream.of(systemIndexMigrationExecutor, healthNodeTaskExecutor); + + Stream> pluginTaskExecutors = pluginsService.filterPlugins(PersistentTaskPlugin.class) + .map(p -> p.getPersistentTasksExecutor(clusterService, threadPool, client, settingsModule, indexNameExpressionResolver)) + .flatMap(List::stream); + + PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry( + Stream.concat(pluginTaskExecutors, builtinTaskExecutors).toList() + ); + PersistentTasksClusterService persistentTasksClusterService = new PersistentTasksClusterService( + settingsModule.getSettings(), + registry, + clusterService, + threadPool + ); + resourcesToClose.add(persistentTasksClusterService); + + return b -> { + b.bind(PersistentTasksService.class).toInstance(persistentTasksService); + b.bind(HealthNodeTaskExecutor.class).toInstance(healthNodeTaskExecutor); + b.bind(PersistentTasksExecutorRegistry.class).toInstance(registry); + b.bind(PersistentTasksClusterService.class).toInstance(persistentTasksClusterService); + }; } } diff --git a/server/src/main/java/org/elasticsearch/node/NodeService.java b/server/src/main/java/org/elasticsearch/node/NodeService.java index 9b6e55383eea0..e2283ea9851d7 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeService.java +++ b/server/src/main/java/org/elasticsearch/node/NodeService.java @@ -118,7 +118,8 @@ public NodeInfo info( boolean indices ) { return new NodeInfo( - Version.CURRENT, + // TODO: revert to Build.current().version() when Kibana is updated + Version.CURRENT.toString(), TransportVersion.current(), IndexVersion.current(), findComponentVersions(), diff --git 
a/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java b/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java index cb7652bdc7b03..7af206a12ecc9 100644 --- a/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java @@ -11,10 +11,8 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -113,16 +111,6 @@ public int hashCode() { } } - public static class RequestBuilder extends MasterNodeOperationRequestBuilder< - CompletionPersistentTaskAction.Request, - PersistentTaskResponse, - CompletionPersistentTaskAction.RequestBuilder> { - - protected RequestBuilder(ElasticsearchClient client, CompletionPersistentTaskAction action) { - super(client, action, new Request()); - } - } - public static class TransportAction extends TransportMasterNodeAction { private final PersistentTasksClusterService persistentTasksClusterService; diff --git a/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java b/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java index 7fac04a63993e..8e0ee8f87422e 100644 --- a/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java @@ -11,10 +11,8 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -83,22 +81,6 @@ public int hashCode() { } } - public static class RequestBuilder extends MasterNodeOperationRequestBuilder< - RemovePersistentTaskAction.Request, - PersistentTaskResponse, - RemovePersistentTaskAction.RequestBuilder> { - - protected RequestBuilder(ElasticsearchClient client, RemovePersistentTaskAction action) { - super(client, action, new Request()); - } - - public final RequestBuilder setTaskId(String taskId) { - request.setTaskId(taskId); - return this; - } - - } - public static class TransportAction extends TransportMasterNodeAction { private final PersistentTasksClusterService persistentTasksClusterService; diff --git a/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java b/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java index c719eb318d571..d98abdffaf463 100644 --- 
a/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java @@ -11,10 +11,8 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -138,32 +136,6 @@ public void setParams(PersistentTaskParams params) { } - public static class RequestBuilder extends MasterNodeOperationRequestBuilder< - StartPersistentTaskAction.Request, - PersistentTaskResponse, - StartPersistentTaskAction.RequestBuilder> { - - protected RequestBuilder(ElasticsearchClient client, StartPersistentTaskAction action) { - super(client, action, new Request()); - } - - public RequestBuilder setTaskId(String taskId) { - request.setTaskId(taskId); - return this; - } - - public RequestBuilder setAction(String action) { - request.setTaskName(action); - return this; - } - - public RequestBuilder setRequest(PersistentTaskParams params) { - request.setParams(params); - return this; - } - - } - public static class TransportAction extends TransportMasterNodeAction<Request, PersistentTaskResponse> { private final PersistentTasksClusterService persistentTasksClusterService; diff --git a/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java b/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java index 6074cc0e4ea35..f961a9fffec27 100644 --- a/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java @@ -11,10 +11,8 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -66,14 +64,26 @@ public void setTaskId(String taskId) { this.taskId = taskId; } + public String getTaskId() { + return taskId; + } + public void setAllocationId(long allocationId) { this.allocationId = allocationId; } + public long getAllocationId() { + return allocationId; + } + public void setState(PersistentTaskState state) { this.state = state; } + public PersistentTaskState getState() { + return state; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -110,26 +120,6 @@ public int hashCode() { } } - public static class RequestBuilder extends MasterNodeOperationRequestBuilder< - UpdatePersistentTaskStatusAction.Request, - PersistentTaskResponse, - UpdatePersistentTaskStatusAction.RequestBuilder> { - - protected RequestBuilder(ElasticsearchClient
client, UpdatePersistentTaskStatusAction action) { - super(client, action, new Request()); - } - - public final RequestBuilder setTaskId(String taskId) { - request.setTaskId(taskId); - return this; - } - - public final RequestBuilder setState(PersistentTaskState state) { - request.setState(state); - return this; - } - } - public static class TransportAction extends TransportMasterNodeAction<Request, PersistentTaskResponse> { private final PersistentTasksClusterService persistentTasksClusterService; diff --git a/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java b/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java index 774d47b583686..7f7a55762bf08 100644 --- a/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java +++ b/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java @@ -242,8 +242,11 @@ public void clusterChanged(ClusterChangedEvent event) { this.shuttingDown = shutdownNodeIds.contains(clusterState.nodes().getLocalNodeId()); if (shuttingDown) { - setReady(false); - logger.info("marking node as not ready because it's shutting down"); + // only disable the probe and log if the probe is running + if (ready()) { + setReady(false); + logger.info("marking node as not ready because it's shutting down"); + } } else { if (clusterState.nodes().getLocalNodeId().equals(clusterState.nodes().getMasterNodeId())) { setReady(fileSettingsApplied); diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java index 32c32369a5fae..b066b4c5a329e 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java @@ -35,7 +35,14 @@ */ public final class RepositoriesModule { - public static final String METRIC_REQUESTS_COUNT = "repositories.requests.count"; + public static final String METRIC_REQUESTS_COUNT = "es.repositories.requests.count"; + public static final String METRIC_EXCEPTIONS_COUNT = "es.repositories.exceptions.count"; + public static final String METRIC_THROTTLES_COUNT = "es.repositories.throttles.count"; + public static final String METRIC_OPERATIONS_COUNT = "es.repositories.operations.count"; + public static final String METRIC_UNSUCCESSFUL_OPERATIONS_COUNT = "es.repositories.operations.unsuccessful.count"; + public static final String METRIC_EXCEPTIONS_HISTOGRAM = "es.repositories.exceptions.histogram"; + public static final String METRIC_THROTTLES_HISTOGRAM = "es.repositories.throttles.histogram"; + private final RepositoriesService repositoriesService; public RepositoriesModule( @@ -49,6 +56,16 @@ public RepositoriesModule( TelemetryProvider telemetryProvider ) { telemetryProvider.getMeterRegistry().registerLongCounter(METRIC_REQUESTS_COUNT, "repository request counter", "unit"); + telemetryProvider.getMeterRegistry().registerLongCounter(METRIC_EXCEPTIONS_COUNT, "repository request exception counter", "unit"); + telemetryProvider.getMeterRegistry().registerLongCounter(METRIC_THROTTLES_COUNT, "repository request throttle counter", "unit"); + telemetryProvider.getMeterRegistry() + .registerLongCounter(METRIC_OPERATIONS_COUNT, "repository operation counter", "unit"); + telemetryProvider.getMeterRegistry() + .registerLongCounter(METRIC_UNSUCCESSFUL_OPERATIONS_COUNT, "repository unsuccessful operation counter", "unit"); + telemetryProvider.getMeterRegistry() + .registerLongHistogram(METRIC_EXCEPTIONS_HISTOGRAM, "repository request exception
histogram", "unit"); + telemetryProvider.getMeterRegistry() + .registerLongHistogram(METRIC_THROTTLES_HISTOGRAM, "repository request throttle histogram", "unit"); Map factories = new HashMap<>(); factories.put( FsRepository.TYPE, diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 4167717e09006..cd2b8c73fe90b 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -70,14 +70,15 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.common.util.concurrent.ThrottledIterator; import org.elasticsearch.common.util.concurrent.ThrottledTaskRunner; import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.Tuple; @@ -176,6 +177,10 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp protected final ThreadPool threadPool; + public static final String STATELESS_SHARD_THREAD_NAME = "stateless_shard"; + public static final String STATELESS_TRANSLOG_THREAD_NAME = "stateless_translog"; + public static final String STATELESS_UPLOAD_THREAD_NAME = "stateless_upload"; + public static final String SNAPSHOT_PREFIX = "snap-"; public static final String INDEX_FILE_PREFIX = "index-"; @@ -455,6 +460,7 @@ protected void doStop() {} @Override protected void doClose() { + activityRefs.decRef(); BlobStore store; // to close blobStore if blobStore initialization is started during close synchronized (lock) { @@ -469,28 +475,14 @@ protected void doClose() { } } - // listeners to invoke when a restore completes and there are no more restores running - @Nullable - private List> emptyListeners; + private final SubscribableListener closedAndIdleListeners = new SubscribableListener<>(); - // Set of shard ids that this repository is currently restoring - private final Set ongoingRestores = new HashSet<>(); + private final RefCounted activityRefs = AbstractRefCounted.of(() -> closedAndIdleListeners.onResponse(null)); @Override public void awaitIdle() { - assert lifecycle.stoppedOrClosed(); - final PlainActionFuture future; - synchronized (ongoingRestores) { - if (ongoingRestores.isEmpty()) { - return; - } - future = new PlainActionFuture<>(); - if (emptyListeners == null) { - emptyListeners = new ArrayList<>(); - } - emptyListeners.add(future); - } - FutureUtils.get(future); + assert lifecycle.closed(); + PlainActionFuture.get(closedAndIdleListeners::addListener); } @SuppressForbidden(reason = "legacy usage of unbatched task") // TODO add support for batching here @@ -1987,7 +1979,15 @@ public long getRestoreThrottleTimeInNanos() { } protected void assertSnapshotOrGenericThread() { - assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SNAPSHOT, 
ThreadPool.Names.SNAPSHOT_META, ThreadPool.Names.GENERIC); + // The Stateless plugin adds custom thread pools for object store operations + assert ThreadPool.assertCurrentThreadPool( + ThreadPool.Names.SNAPSHOT, + ThreadPool.Names.SNAPSHOT_META, + ThreadPool.Names.GENERIC, + STATELESS_SHARD_THREAD_NAME, + STATELESS_TRANSLOG_THREAD_NAME, + STATELESS_UPLOAD_THREAD_NAME + ); } @Override @@ -3305,30 +3305,19 @@ public void restoreShard( ); final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); final BlobContainer container = shardContainer(indexId, snapshotShardId); - synchronized (ongoingRestores) { - if (store.isClosing()) { - restoreListener.onFailure(new AlreadyClosedException("store is closing")); - return; - } - if (lifecycle.started() == false) { - restoreListener.onFailure(new AlreadyClosedException("repository [" + metadata.name() + "] closed")); - return; - } - final boolean added = ongoingRestores.add(shardId); - assert added : "add restore for [" + shardId + "] that already has an existing restore"; + if (store.isClosing()) { + restoreListener.onFailure(new AlreadyClosedException("store is closing")); + return; } - executor.execute(ActionRunnable.wrap(ActionListener.runBefore(restoreListener, () -> { - final List<ActionListener<Void>> onEmptyListeners; - synchronized (ongoingRestores) { - if (ongoingRestores.remove(shardId) && ongoingRestores.isEmpty() && emptyListeners != null) { - onEmptyListeners = emptyListeners; - emptyListeners = null; - } else { - return; - } - } - ActionListener.onResponse(onEmptyListeners, null); - }), l -> { + if (lifecycle.started() == false) { + restoreListener.onFailure(new AlreadyClosedException("repository [" + metadata.name() + "] closed")); + return; + } + if (activityRefs.tryIncRef() == false) { + restoreListener.onFailure(new AlreadyClosedException("repository [" + metadata.name() + "] closing")); + return; + } + executor.execute(ActionRunnable.wrap(ActionListener.runBefore(restoreListener, activityRefs::decRef), l -> { final BlobStoreIndexShardSnapshot snapshot = loadShardSnapshot(container, snapshotId); final SnapshotFiles snapshotFiles = new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles(), null); new FileRestoreContext(metadata.name(), shardId, snapshotId, recoveryState) { diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java index 83676925a3ae7..56c975e148ab5 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java @@ -113,7 +113,7 @@ protected boolean shouldRefreshFileState(ClusterState clusterState) { */ @Override protected void processFileChanges() throws ExecutionException, InterruptedException, IOException { - PlainActionFuture<Void> completion = PlainActionFuture.newFuture(); + PlainActionFuture<Void> completion = new PlainActionFuture<>(); logger.info("processing path [{}] for [{}]", watchedFile(), NAMESPACE); try ( var fis = Files.newInputStream(watchedFile()); diff --git a/server/src/main/java/org/elasticsearch/rest/ChunkedRestResponseBody.java b/server/src/main/java/org/elasticsearch/rest/ChunkedRestResponseBody.java index 9cfe7b84577db..ae267573b4cab 100644 --- a/server/src/main/java/org/elasticsearch/rest/ChunkedRestResponseBody.java +++ b/server/src/main/java/org/elasticsearch/rest/ChunkedRestResponseBody.java @@ -20,6 +20,8 @@ import org.elasticsearch.core.Releasables;
import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.core.Streams; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -36,6 +38,8 @@ */ public interface ChunkedRestResponseBody extends Releasable { + Logger logger = LogManager.getLogger(ChunkedRestResponseBody.class); + /** * @return true once this response has been written fully. */ @@ -126,6 +130,9 @@ public ReleasableBytesReference encodeChunk(int sizeHint, Recycler rec ); target = null; return result; + } catch (Exception e) { + logger.error("failure encoding chunk", e); + throw e; } finally { if (target != null) { assert false : "failure encoding chunk"; @@ -212,6 +219,9 @@ public ReleasableBytesReference encodeChunk(int sizeHint, Recycler rec ); currentOutput = null; return result; + } catch (Exception e) { + logger.error("failure encoding text chunk", e); + throw e; } finally { if (currentOutput != null) { assert false : "failure encoding text chunk"; diff --git a/server/src/main/java/org/elasticsearch/rest/RestController.java b/server/src/main/java/org/elasticsearch/rest/RestController.java index b51468edff63b..6a5d6f99df64b 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestController.java +++ b/server/src/main/java/org/elasticsearch/rest/RestController.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.path.PathTrie; import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.common.util.concurrent.RunOnce; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RestApiVersion; @@ -820,12 +821,12 @@ private void close() { private static class EncodedLengthTrackingChunkedRestResponseBody implements ChunkedRestResponseBody { private final ChunkedRestResponseBody delegate; - private final MethodHandlers methodHandlers; + private final RunOnce onCompletion; private long encodedLength = 0; private EncodedLengthTrackingChunkedRestResponseBody(ChunkedRestResponseBody delegate, MethodHandlers methodHandlers) { this.delegate = delegate; - this.methodHandlers = methodHandlers; + this.onCompletion = new RunOnce(() -> methodHandlers.addResponseStats(encodedLength)); } @Override @@ -837,6 +838,9 @@ public boolean isDone() { public ReleasableBytesReference encodeChunk(int sizeHint, Recycler<BytesRef> recycler) throws IOException { final ReleasableBytesReference bytesReference = delegate.encodeChunk(sizeHint, recycler); encodedLength += bytesReference.length(); + if (isDone()) { + onCompletion.run(); + } return bytesReference; } @@ -848,7 +852,9 @@ public String getResponseContentTypeString() { @Override public void close() { delegate.close(); - methodHandlers.addResponseStats(encodedLength); + // the client might close the connection before we send the last chunk, in which case we won't have recorded the response in the + // stats yet, so we do it now: + onCompletion.run(); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/RestChunkedToXContentListener.java b/server/src/main/java/org/elasticsearch/rest/action/RestChunkedToXContentListener.java index a3bb1ed9d94dc..2edb042ea23e8 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/RestChunkedToXContentListener.java +++ b/server/src/main/java/org/elasticsearch/rest/action/RestChunkedToXContentListener.java @@ -9,6 +9,7 @@ package org.elasticsearch.rest.action; import
org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.core.Releasable; import org.elasticsearch.rest.ChunkedRestResponseBody; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestResponse; @@ -37,10 +38,17 @@ public RestChunkedToXContentListener(RestChannel channel, ToXContent.Params para @Override protected void processResponse(Response response) throws IOException { channel.sendResponse( - RestResponse.chunked(getRestStatus(response), ChunkedRestResponseBody.fromXContent(response, params, channel, null)) + RestResponse.chunked( + getRestStatus(response), + ChunkedRestResponseBody.fromXContent(response, params, channel, releasableFromResponse(response)) + ) ); } + protected Releasable releasableFromResponse(Response response) { + return null; + } + protected RestStatus getRestStatus(Response response) { return RestStatus.OK; } diff --git a/server/src/main/java/org/elasticsearch/rest/action/RestRefCountedChunkedToXContentListener.java b/server/src/main/java/org/elasticsearch/rest/action/RestRefCountedChunkedToXContentListener.java new file mode 100644 index 0000000000000..dfd9c40e0e107 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/rest/action/RestRefCountedChunkedToXContentListener.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.rest.action; + +import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.core.RefCounted; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.rest.RestChannel; + +/** + * Same as {@link RestChunkedToXContentListener} but decrements the ref count on the response it receives by one after serialization of the + * response. 
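+ * For example, the search endpoints later in this patch hand this listener straight to the client so that the ref-counted + * search response is released once it has been fully serialized: + * {@code cancelClient.execute(TransportSearchAction.TYPE, searchRequest, new RestRefCountedChunkedToXContentListener<>(channel));}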
+ */ +public class RestRefCountedChunkedToXContentListener<Response extends ChunkedToXContent & RefCounted> extends RestChunkedToXContentListener< + Response> { + public RestRefCountedChunkedToXContentListener(RestChannel channel) { + super(channel); + } + + @Override + protected Releasable releasableFromResponse(Response response) { + response.mustIncRef(); + return Releasables.assertOnce(response::decRef); + } +} diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java index 0191428e7ca82..fef7dc0cbdd37 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.NodeStatsLevel; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; import org.elasticsearch.client.internal.node.NodeClient; @@ -56,7 +57,7 @@ public List<Route> routes() { static { Map<String, Consumer<NodesStatsRequest>> map = new HashMap<>(); - for (NodesStatsRequest.Metric metric : NodesStatsRequest.Metric.values()) { + for (NodesStatsRequestParameters.Metric metric : NodesStatsRequestParameters.Metric.values()) { map.put(metric.metricName(), request -> request.addMetric(metric.metricName())); } map.put("indices", request -> request.indices(true)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java index ead9412334b84..630b8c9c40509 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; @@ -43,6 +44,7 @@ public String getName() { } @Override + @UpdateForV9 public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { CloseIndexRequest closeIndexRequest = new CloseIndexRequest(Strings.splitStringByCommaToArray(request.param("index"))); closeIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", closeIndexRequest.masterNodeTimeout())); @@ -55,12 +57,11 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC "close-index-wait_for_active_shards-index-setting", "?wait_for_active_shards=index-setting is now the default behaviour; the 'index-setting' value for this parameter " + "should no longer be used since it will become unsupported in version " - + (Version.V_7_0_0.major + 2) + + (Version.V_8_0_0.major + 1) ); // TODO in v9: // - throw an IllegalArgumentException here - // - record the removal of support for this value as a breaking change.
- // - mention Version.V_8_0_0 in the code to ensure that we revisit this in v10 // TODO in v10: // - remove the IllegalArgumentException here } else if (waitForActiveShards != null) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java index a04e23f289379..4c9ac8fcb9a3c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java @@ -13,7 +13,7 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.ListenableActionFuture; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; @@ -65,9 +65,9 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC if (validationException != null) { throw validationException; } - final var responseFuture = new ListenableActionFuture<ForceMergeResponse>(); - final var task = client.executeLocally(ForceMergeAction.INSTANCE, mergeRequest, responseFuture); - responseFuture.addListener(new LoggingTaskListener<>(task)); + final var responseListener = new SubscribableListener<ForceMergeResponse>(); + final var task = client.executeLocally(ForceMergeAction.INSTANCE, mergeRequest, responseListener); + responseListener.addListener(new LoggingTaskListener<>(task)); return sendTask(client.getLocalNodeId(), task); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java index b6e1240a3f85a..a8f6fa325b468 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.RestApiVersion; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; @@ -50,6 +51,7 @@ @ServerlessScope(Scope.PUBLIC) public class RestGetAliasesAction extends BaseRestHandler { + @UpdateForV9 // reject the deprecated ?local parameter private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RestGetAliasesAction.class); @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java index 2dc657582a0a1..5e9b2c8452579 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; +import
org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -63,8 +64,9 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli @Override public void processResponse(final ClusterStateResponse state) { NodesStatsRequest statsRequest = new NodesStatsRequest(nodes); + statsRequest.setIncludeShardsStats(false); statsRequest.clear() - .addMetric(NodesStatsRequest.Metric.FS.metricName()) + .addMetric(NodesStatsRequestParameters.Metric.FS.metricName()) .indices(new CommonStatsFlags(CommonStatsFlags.Flag.Store)); client.admin().cluster().nodesStats(statsRequest, new RestResponseListener<NodesStatsResponse>(channel) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestFielddataAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestFielddataAction.java index 0afc010bd4b9d..cef831f06dfa1 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestFielddataAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestFielddataAction.java @@ -43,6 +43,7 @@ public String getName() { @Override protected RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { final NodesStatsRequest nodesStatsRequest = new NodesStatsRequest("data:true"); + nodesStatsRequest.setIncludeShardsStats(false); nodesStatsRequest.clear(); nodesStatsRequest.indices(true); String[] fields = request.paramAsStringArray("fields", null); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java index e8395710ede03..39045a99aa4a2 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -98,14 +99,15 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli ); final NodesStatsRequest nodesStatsRequest = new NodesStatsRequest(); + nodesStatsRequest.setIncludeShardsStats(false); nodesStatsRequest.clear() .indices(true) .addMetrics( - NodesStatsRequest.Metric.JVM.metricName(), - NodesStatsRequest.Metric.OS.metricName(), - NodesStatsRequest.Metric.FS.metricName(), - NodesStatsRequest.Metric.PROCESS.metricName(), - NodesStatsRequest.Metric.SCRIPT.metricName() + NodesStatsRequestParameters.Metric.JVM.metricName(), + NodesStatsRequestParameters.Metric.OS.metricName(), + NodesStatsRequestParameters.Metric.FS.metricName(), + NodesStatsRequestParameters.Metric.PROCESS.metricName(), + NodesStatsRequestParameters.Metric.SCRIPT.metricName() ); nodesStatsRequest.indices().includeUnloadedSegments(request.paramAsBoolean("include_unloaded_segments", false)); diff --git
a/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java index c94f40b83856e..9ca0dae8c8740 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -83,7 +84,8 @@ public void processResponse(final ClusterStateResponse clusterStateResponse) { @Override public void processResponse(final NodesInfoResponse nodesInfoResponse) { NodesStatsRequest nodesStatsRequest = new NodesStatsRequest(); - nodesStatsRequest.clear().addMetric(NodesStatsRequest.Metric.THREAD_POOL.metricName()); + nodesStatsRequest.setIncludeShardsStats(false); + nodesStatsRequest.clear().addMetric(NodesStatsRequestParameters.Metric.THREAD_POOL.metricName()); client.admin().cluster().nodesStats(nodesStatsRequest, new RestResponseListener<NodesStatsResponse>(channel) { @Override public RestResponse buildResponse(NodesStatsResponse nodesStatsResponse) throws Exception { diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java index e5c70fa4fe188..fed7d8606ba01 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java @@ -8,7 +8,6 @@ package org.elasticsearch.rest.action.document; -import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.index.IndexRequest; @@ -110,10 +109,8 @@ public List<Route> routes() { @Override public RestChannelConsumer prepareRequest(RestRequest request, final NodeClient client) throws IOException { assert request.params().get("id") == null : "non-null id: " + request.params().get("id"); - if (request.params().get("op_type") == null && nodesInCluster.get().getMinNodeVersion().onOrAfter(Version.V_7_5_0)) { - // default to op_type create - request.params().put("op_type", "create"); - } + // default to op_type create + request.params().putIfAbsent("op_type", "create"); return super.prepareRequest(request, client); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/info/RestClusterInfoAction.java b/server/src/main/java/org/elasticsearch/rest/action/info/RestClusterInfoAction.java index 7bcc19cb17fa9..4300293a1336e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/info/RestClusterInfoAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/info/RestClusterInfoAction.java @@ -40,10 +40,10 @@ import java.util.function.Predicate; import java.util.stream.Collectors; -import static org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.HTTP; -import static org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.INGEST; -import static
org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.SCRIPT; -import static org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.THREAD_POOL; +import static org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters.Metric.HTTP; +import static org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters.Metric.INGEST; +import static org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters.Metric.SCRIPT; +import static org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters.Metric.THREAD_POOL; import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; @ServerlessScope(Scope.PUBLIC) @@ -86,6 +86,7 @@ public List routes() { @Override public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { var nodesStatsRequest = new NodesStatsRequest().clear(); + nodesStatsRequest.setIncludeShardsStats(false); var targets = Strings.tokenizeByCommaToSet(request.param("target")); if (targets.size() == 1 && targets.contains("_all")) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestAction.java b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestAction.java new file mode 100644 index 0000000000000..e0d9dd95206cf --- /dev/null +++ b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestAction.java @@ -0,0 +1,179 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.rest.action.ingest; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.bulk.SimulateBulkAction; +import org.elasticsearch.action.bulk.SimulateBulkRequest; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.ingest.ConfigurationUtils; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestRequest.Method.POST; + +/** + * This is the REST endpoint for the simulate ingest API. This API executes all pipelines for a document (or documents) that would be + * executed if that document were sent to the given index. 
The JSON that would be indexed is returned to the user, along with the list of + pipelines that were executed. The API allows the user to optionally send in substitute definitions for pipelines so that changes can be + tried out without actually modifying the cluster state. + */ +@ServerlessScope(Scope.PUBLIC) +public class RestSimulateIngestAction extends BaseRestHandler { + + @Override + public List<Route> routes() { + return List.of( + new Route(GET, "/_ingest/_simulate"), + new Route(POST, "/_ingest/_simulate"), + new Route(GET, "/_ingest/{index}/_simulate"), + new Route(POST, "/_ingest/{index}/_simulate") + ); + } + + @Override + public String getName() { + return "ingest_simulate_ingest_action"; + } + + @Override + @SuppressWarnings("unchecked") + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + String defaultIndex = request.param("index"); + FetchSourceContext defaultFetchSourceContext = FetchSourceContext.parseFromRestRequest(request); + String defaultPipeline = request.param("pipeline"); + Tuple<XContentType, BytesReference> sourceTuple = request.contentOrSourceParam(); + Map<String, Object> sourceMap = XContentHelper.convertToMap(sourceTuple.v2(), false, sourceTuple.v1()).v2(); + SimulateBulkRequest bulkRequest = new SimulateBulkRequest( + (Map<String, Map<String, Object>>) sourceMap.remove("pipeline_substitutions") + ); + BytesReference transformedData = convertToBulkRequestXContentBytes(sourceMap); + bulkRequest.add( + transformedData, + defaultIndex, + null, + defaultFetchSourceContext, + defaultPipeline, + null, + true, + true, + request.getXContentType(), + request.getRestApiVersion() + ); + return channel -> client.execute(SimulateBulkAction.INSTANCE, bulkRequest, new SimulateIngestRestToXContentListener(channel)); + } + + /* + * The simulate ingest API is intended to have inputs and outputs that are formatted similarly to the simulate pipeline API for the + * sake of consistency. But internally it uses the same code as the _bulk API, so that we have confidence that we are simulating what + * really happens on ingest. This method transforms simulate-style inputs into an input that the bulk API can accept.
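+ * For example, a docs entry of the form {"_index": "my-index", "_id": "1", "_source": {"foo": "bar"}} (index, id and field + * values here are illustrative only) is rewritten as the bulk-style action line {"index": {"_index": "my-index", "_id": "1"}} + * followed by the source object {"foo": "bar"} on its own line.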
+ * Non-private for unit testing + */ + static BytesReference convertToBulkRequestXContentBytes(Map<String, Object> sourceMap) throws IOException { + List<Map<String, Object>> docs = ConfigurationUtils.readList(null, null, sourceMap, "docs"); + if (docs.isEmpty()) { + throw new IllegalArgumentException("must specify at least one document in [docs]"); + } + ByteBuffer[] buffers = new ByteBuffer[2 * docs.size()]; + int bufferCount = 0; + for (Map<String, Object> doc : docs) { + if ((doc != null) == false) { + throw new IllegalArgumentException("malformed [docs] section, should include an inner object"); + } + Map<String, Object> document = ConfigurationUtils.readMap(null, null, doc, "_source"); + String index = ConfigurationUtils.readOptionalStringProperty(null, null, doc, IngestDocument.Metadata.INDEX.getFieldName()); + String id = ConfigurationUtils.readOptionalStringProperty(null, null, doc, IngestDocument.Metadata.ID.getFieldName()); + XContentBuilder actionXContentBuilder = XContentFactory.contentBuilder(XContentType.JSON).lfAtEnd(); + actionXContentBuilder.startObject().field("index").startObject(); + if (index != null) { + actionXContentBuilder.field("_index", index); + } + if (id != null) { + actionXContentBuilder.field("_id", id); + } + actionXContentBuilder.endObject().endObject(); + buffers[bufferCount++] = ByteBuffer.wrap(BytesReference.bytes(actionXContentBuilder).toBytesRef().bytes); + XContentBuilder dataXContentBuilder = XContentFactory.contentBuilder(XContentType.JSON).lfAtEnd(); + dataXContentBuilder.startObject(); + for (String key : document.keySet()) { + dataXContentBuilder.field(key, document.get(key)); + } + dataXContentBuilder.endObject(); + buffers[bufferCount++] = ByteBuffer.wrap(BytesReference.bytes(dataXContentBuilder).toBytesRef().bytes); + } + return BytesReference.fromByteBuffers(buffers); + } + + /* + * The simulate ingest API is intended to have inputs and outputs that are formatted similarly to the simulate pipeline API for the + * sake of consistency. But internally it uses the same code as the _bulk API, so that we have confidence that we are simulating what + * really happens on ingest. This class is used in place of RestToXContentListener to transform the bulk-style output into a + * simulate-style xcontent.
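+ * For example, rather than the flat _bulk response items, the body built below has the shape {"docs": [{"doc": {...}}, ...]}, + * where each "doc" object carries either the simulated document fields or an "error" object.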
+ * Non-private for unit testing + */ + static class SimulateIngestRestToXContentListener extends RestToXContentListener<BulkResponse> { + + SimulateIngestRestToXContentListener(RestChannel channel) { + super(channel); + } + + @Override + public RestResponse buildResponse(BulkResponse response, XContentBuilder builder) throws Exception { + assert response.isFragment() == false; + toXContent(response, builder, channel.request()); + RestStatus restStatus = statusFunction.apply(response); + return new RestResponse(restStatus, builder); + } + + private static void toXContent(BulkResponse response, XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.startArray("docs"); + for (BulkItemResponse item : response) { + builder.startObject(); + builder.startObject("doc"); + if (item.isFailed()) { + builder.field("_id", item.getFailure().getId()); + builder.field("_index", item.getFailure().getIndex()); + builder.startObject("error"); + ElasticsearchException.generateThrowableXContent(builder, params, item.getFailure().getCause()); + builder.endObject(); + } else { + item.getResponse().innerToXContent(builder, params); + } + builder.endObject(); + builder.endObject(); + } + builder.endArray(); + builder.endObject(); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java index c9bcaf6b5ff4d..c232e1a30c553 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java @@ -8,9 +8,9 @@ package org.elasticsearch.rest.action.search; -import org.elasticsearch.action.search.MultiSearchAction; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.TransportMultiSearchAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.CheckedBiConsumer; @@ -26,7 +26,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.usage.SearchUsageHolder; import org.elasticsearch.xcontent.XContent; @@ -82,7 +82,11 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC ); return channel -> { final RestCancellableNodeClient cancellableClient = new RestCancellableNodeClient(client, request.getHttpChannel()); - cancellableClient.execute(MultiSearchAction.INSTANCE, multiSearchRequest, new RestChunkedToXContentListener<>(channel)); + cancellableClient.execute( + TransportMultiSearchAction.TYPE, + multiSearchRequest, + new RestRefCountedChunkedToXContentListener<>(channel) + ); }; } diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index 9a5aef4996209..41102a3568e30 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -11,9 +11,9 @@ import
org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchContextId; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; @@ -29,7 +29,7 @@ import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestActions; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -121,7 +121,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC return channel -> { RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); - cancelClient.execute(SearchAction.INSTANCE, searchRequest, new RestChunkedToXContentListener<>(channel)); + cancelClient.execute(TransportSearchAction.TYPE, searchRequest, new RestRefCountedChunkedToXContentListener<>(channel)); }; } diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index a4f641fd6f071..a8721503c7454 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -8,6 +8,8 @@ package org.elasticsearch.search; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.OrdinalMap; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; @@ -27,7 +29,12 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.fielddata.FieldDataContext; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData; +import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.IdLoader; +import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.index.query.AbstractQueryBuilder; @@ -37,6 +44,7 @@ import org.elasticsearch.index.search.NestedHelper; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.search.aggregations.SearchContextAggregations; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.collapse.CollapseContext; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.fetch.FetchPhase; @@ -68,7 +76,9 @@ import java.util.List; import java.util.Map; import java.util.concurrent.Executor; +import java.util.concurrent.ThreadPoolExecutor; import java.util.function.LongSupplier; +import java.util.function.ToLongFunction; final class DefaultSearchContext extends SearchContext { @@ -123,7 +133,6 @@ final class DefaultSearchContext extends 
SearchContext { private Query query; private ParsedQuery postFilter; private Query aliasFilter; - private int[] docIdsToLoad; private SearchContextAggregations aggregations; private SearchHighlightContext highlight; private SuggestionSearchContext suggest; @@ -143,57 +152,148 @@ final class DefaultSearchContext extends SearchContext { FetchPhase fetchPhase, boolean lowLevelCancellation, Executor executor, - int maximumNumberOfSlices, + SearchService.ResultsType resultsType, + boolean enableQueryPhaseParallelCollection, int minimumDocsPerSlice ) throws IOException { this.readerContext = readerContext; this.request = request; this.fetchPhase = fetchPhase; - this.searchType = request.searchType(); - this.shardTarget = shardTarget; - this.indexService = readerContext.indexService(); - this.indexShard = readerContext.indexShard(); - - Engine.Searcher engineSearcher = readerContext.acquireSearcher("search"); - if (executor == null) { - this.searcher = new ContextIndexSearcher( - engineSearcher.getIndexReader(), - engineSearcher.getSimilarity(), - engineSearcher.getQueryCache(), - engineSearcher.getQueryCachingPolicy(), - lowLevelCancellation - ); - } else { - this.searcher = new ContextIndexSearcher( - engineSearcher.getIndexReader(), - engineSearcher.getSimilarity(), - engineSearcher.getQueryCache(), - engineSearcher.getQueryCachingPolicy(), - lowLevelCancellation, - executor, - maximumNumberOfSlices, - minimumDocsPerSlice + boolean success = false; + try { + this.searchType = request.searchType(); + this.shardTarget = shardTarget; + this.indexService = readerContext.indexService(); + this.indexShard = readerContext.indexShard(); + + Engine.Searcher engineSearcher = readerContext.acquireSearcher("search"); + int maximumNumberOfSlices; + if (hasSyntheticSource(indexService)) { + // accessing synthetic source is not thread safe + maximumNumberOfSlices = 1; + } else { + maximumNumberOfSlices = determineMaximumNumberOfSlices( + executor, + request, + resultsType, + enableQueryPhaseParallelCollection, + field -> getFieldCardinality(field, readerContext.indexService(), engineSearcher.getDirectoryReader()) + ); + + } + if (executor == null) { + this.searcher = new ContextIndexSearcher( + engineSearcher.getIndexReader(), + engineSearcher.getSimilarity(), + engineSearcher.getQueryCache(), + engineSearcher.getQueryCachingPolicy(), + lowLevelCancellation + ); + } else { + this.searcher = new ContextIndexSearcher( + engineSearcher.getIndexReader(), + engineSearcher.getSimilarity(), + engineSearcher.getQueryCache(), + engineSearcher.getQueryCachingPolicy(), + lowLevelCancellation, + executor, + maximumNumberOfSlices, + minimumDocsPerSlice + ); + } + releasables.addAll(List.of(engineSearcher, searcher)); + this.relativeTimeSupplier = relativeTimeSupplier; + this.timeout = timeout; + searchExecutionContext = indexService.newSearchExecutionContext( + request.shardId().id(), + request.shardRequestIndex(), + searcher, + request::nowInMillis, + shardTarget.getClusterAlias(), + request.getRuntimeMappings() ); + queryBoost = request.indexBoost(); + this.lowLevelCancellation = lowLevelCancellation; + success = true; + } finally { + if (success == false) { + close(); + } } - releasables.addAll(List.of(engineSearcher, searcher)); + } - this.relativeTimeSupplier = relativeTimeSupplier; - this.timeout = timeout; - searchExecutionContext = indexService.newSearchExecutionContext( - request.shardId().id(), - request.shardRequestIndex(), - searcher, - request::nowInMillis, - shardTarget.getClusterAlias(), - 
request.getRuntimeMappings() - ); - queryBoost = request.indexBoost(); - this.lowLevelCancellation = lowLevelCancellation; + private static boolean hasSyntheticSource(IndexService indexService) { + DocumentMapper documentMapper = indexService.mapperService().documentMapper(); + if (documentMapper != null) { + return documentMapper.sourceMapper().isSynthetic(); + } + return false; + } + + static long getFieldCardinality(String field, IndexService indexService, DirectoryReader directoryReader) { + MappedFieldType mappedFieldType = indexService.mapperService().fieldType(field); + if (mappedFieldType == null) { + return -1; + } + IndexFieldData<?> indexFieldData; + try { + indexFieldData = indexService.loadFielddata(mappedFieldType, FieldDataContext.noRuntimeFields("field cardinality")); + } catch (Exception e) { + // loading fielddata for runtime fields will fail, that's ok + return -1; + } + return getFieldCardinality(indexFieldData, directoryReader); + } + + static long getFieldCardinality(IndexFieldData<?> indexFieldData, DirectoryReader directoryReader) { + if (indexFieldData instanceof IndexOrdinalsFieldData indexOrdinalsFieldData) { + if (indexOrdinalsFieldData.supportsGlobalOrdinalsMapping()) { + IndexOrdinalsFieldData global = indexOrdinalsFieldData.loadGlobal(directoryReader); + OrdinalMap ordinalMap = global.getOrdinalMap(); + if (ordinalMap != null) { + return ordinalMap.getValueCount(); + } + if (directoryReader.leaves().size() == 0) { + return 0; + } + return global.load(directoryReader.leaves().get(0)).getOrdinalsValues().getValueCount(); + } + } + return -1L; + } + + static int determineMaximumNumberOfSlices( + Executor executor, + ShardSearchRequest request, + SearchService.ResultsType resultsType, + boolean enableQueryPhaseParallelCollection, + ToLongFunction<String> fieldCardinality + ) { + return executor instanceof ThreadPoolExecutor tpe + && isParallelCollectionSupportedForResults(resultsType, request.source(), fieldCardinality, enableQueryPhaseParallelCollection) + ?
tpe.getMaximumPoolSize() + : 1; + } + + static boolean isParallelCollectionSupportedForResults( + SearchService.ResultsType resultsType, + SearchSourceBuilder source, + ToLongFunction<String> fieldCardinality, + boolean isQueryPhaseParallelismEnabled + ) { + if (resultsType == SearchService.ResultsType.DFS) { + return true; + } + if (resultsType == SearchService.ResultsType.QUERY && isQueryPhaseParallelismEnabled) { + return source == null || source.supportsParallelCollection(fieldCardinality); + } + return false; } @Override public void addFetchResult() { this.fetchResult = new FetchSearchResult(this.readerContext.id(), this.shardTarget); + addReleasable(fetchResult::decRef); } @Override @@ -719,17 +819,6 @@ public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { this.seqAndPrimaryTerm = seqNoAndPrimaryTerm; } - @Override - public int[] docIdsToLoad() { - return docIdsToLoad; - } - - @Override - public SearchContext docIdsToLoad(int[] docIdsToLoad) { - this.docIdsToLoad = docIdsToLoad; - return this; - } - @Override public DfsSearchResult dfsResult() { return dfsResult; diff --git a/server/src/main/java/org/elasticsearch/search/MultiValueMode.java b/server/src/main/java/org/elasticsearch/search/MultiValueMode.java index 2b5d9cb17b4f4..ad314a97a3a67 100644 --- a/server/src/main/java/org/elasticsearch/search/MultiValueMode.java +++ b/server/src/main/java/org/elasticsearch/search/MultiValueMode.java @@ -681,7 +681,6 @@ public NumericDoubleValues select( final double missingValue, final BitSet parentDocs, final DocIdSetIterator childDocs, - int maxDoc, int maxChildren ) throws IOException { if (parentDocs == null || childDocs == null) { diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index 6c04f6feddc96..7e1699307c5ee 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -51,7 +51,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; -import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -72,7 +71,7 @@ * * @see SearchHits */ -public final class SearchHit implements Writeable, ToXContentObject, Iterable<DocumentField> { +public final class SearchHit implements Writeable, ToXContentObject { private final transient int docId; @@ -156,18 +155,8 @@ public SearchHit(StreamInput in) throws IOException { if (in.readBoolean()) { explanation = readExplanation(in); } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_8_0)) { - documentFields.putAll(in.readMap(DocumentField::new)); - metaFields.putAll(in.readMap(DocumentField::new)); - } else { - Map<String, DocumentField> fields = readFields(in); - fields.forEach( - (fieldName, docField) -> (MapperService.isMetadataFieldStatic(fieldName) ?
metaFields : documentFields).put( - fieldName, - docField - ) - ); - } + documentFields.putAll(in.readMap(DocumentField::new)); + metaFields.putAll(in.readMap(DocumentField::new)); int size = in.readVInt(); if (size == 0) { @@ -213,33 +202,6 @@ public SearchHit(StreamInput in) throws IOException { private static final Text SINGLE_MAPPING_TYPE = new Text(MapperService.SINGLE_MAPPING_NAME); - private static Map readFields(StreamInput in) throws IOException { - Map fields; - int size = in.readVInt(); - if (size == 0) { - fields = emptyMap(); - } else if (size == 1) { - DocumentField hitField = new DocumentField(in); - fields = singletonMap(hitField.getName(), hitField); - } else { - fields = Maps.newMapWithExpectedSize(size); - for (int i = 0; i < size; i++) { - DocumentField field = new DocumentField(in); - fields.put(field.getName(), field); - } - fields = unmodifiableMap(fields); - } - return fields; - } - - private static void writeFields(StreamOutput out, Map fields) throws IOException { - if (fields == null) { - out.writeVInt(0); - } else { - out.writeCollection(fields.values()); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeFloat(score); @@ -263,12 +225,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(true); writeExplanation(out, explanation); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_8_0)) { - out.writeMap(documentFields, StreamOutput::writeWriteable); - out.writeMap(metaFields, StreamOutput::writeWriteable); - } else { - writeFields(out, this.getFields()); - } + out.writeMap(documentFields, StreamOutput::writeWriteable); + out.writeMap(metaFields, StreamOutput::writeWriteable); if (highlightFields == null) { out.writeVInt(0); } else { @@ -429,13 +387,6 @@ public Map getSourceAsMap() { return sourceAsMap; } - @Override - public Iterator iterator() { - // need to join the fields and metadata fields - Map allFields = this.getFields(); - return allFields.values().iterator(); - } - /** * The hit field matching the given field name. 
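* <p>A minimal illustration (hypothetical field name, not part of the original javadoc): with the
* {@code Iterable} implementation removed above, callers iterate the joined fields map instead:
* <pre>{@code
* for (DocumentField field : hit.getFields().values()) {
*     // getFields() joins document and metadata fields, as the removed iterator() did
* }
* DocumentField userId = hit.field("user.id"); // direct lookup by field name
* }</pre>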
*/ diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 6ee02fa9425c0..548e3fea9d91c 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -42,6 +42,7 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; @@ -134,7 +135,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; -import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -544,7 +544,7 @@ public void executeQueryPhase(ShardSearchRequest request, SearchShardTask task, })); } - private <T> void ensureAfterSeqNoRefreshed( + private <T extends RefCounted> void ensureAfterSeqNoRefreshed( IndexShard shard, ShardSearchRequest request, CheckedSupplier<T, Exception> executable, @@ -648,8 +648,12 @@ private IndexShard getShard(ShardSearchRequest request) { return indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id()); } - private static <T> void runAsync(Executor executor, CheckedSupplier<T, Exception> executable, ActionListener<T> listener) { - executor.execute(ActionRunnable.supply(listener, executable)); + private static <T extends RefCounted> void runAsync( + Executor executor, + CheckedSupplier<T, Exception> executable, + ActionListener<T> listener + ) { + executor.execute(ActionRunnable.supplyAndDecRef(listener, executable)); } /** @@ -686,6 +690,7 @@ private SearchPhaseResult executeQueryPhase(ShardSearchRequest request, SearchShardTask task, final RescoreDocIds rescoreDocIds = context.rescoreDocIds(); context.queryResult().setRescoreDocIds(rescoreDocIds); readerContext.setRescoreDocIds(rescoreDocIds); + // inc-ref query result because we close the SearchContext that references it in this try-with-resources block context.queryResult().incRef(); return context.queryResult(); } @@ -707,15 +712,14 @@ private QueryFetchSearchResult executeFetchPhase(ReaderContext reader, SearchContext context, Releasable scope = tracer.withScope(SpanId.forTask(context.getTask())); SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context, true, afterQueryTime) ) { - shortcutDocIdsToLoad(context); - fetchPhase.execute(context); + fetchPhase.execute(context, shortcutDocIdsToLoad(context)); if (reader.singleSession()) { freeReaderContext(reader.id()); } executor.success(); } // This will incRef the QuerySearchResult when it gets created - return new QueryFetchSearchResult(context.queryResult(), context.fetchResult()); + return QueryFetchSearchResult.of(context.queryResult(), context.fetchResult()); } public void executeQueryPhase( @@ -772,7 +776,8 @@ public void executeQueryPhase(QuerySearchRequest request, SearchShardTask task, ) { searchContext.searcher().setAggregatedDfs(request.dfs()); QueryPhase.execute(searchContext); - if (searchContext.queryResult().hasSearchContext() == false && readerContext.singleSession()) { + final QuerySearchResult queryResult = searchContext.queryResult(); + if (queryResult.hasSearchContext() == false && readerContext.singleSession()) { // no hits, we can release the context since there will
be no fetch phase freeReaderContext(readerContext.id()); } @@ -781,10 +786,11 @@ public void executeQueryPhase(QuerySearchRequest request, SearchShardTask task, // and receive them back in the fetch phase. // We also pass the rescoreDocIds to the LegacyReaderContext in case the search state needs to stay in the data node. final RescoreDocIds rescoreDocIds = searchContext.rescoreDocIds(); - searchContext.queryResult().setRescoreDocIds(rescoreDocIds); + queryResult.setRescoreDocIds(rescoreDocIds); readerContext.setRescoreDocIds(rescoreDocIds); - searchContext.queryResult().incRef(); - return searchContext.queryResult(); + // inc-ref query result because we close the SearchContext that references it in this try-with-resources block + queryResult.incRef(); + return queryResult; } catch (Exception e) { assert TransportActions.isShardNotAvailableException(e) == false : new AssertionError(e); logger.trace("Query phase failed", e); @@ -856,17 +862,19 @@ public void executeFetchPhase(ShardFetchRequest request, SearchShardTask task, A } searchContext.assignRescoreDocIds(readerContext.getRescoreDocIds(request.getRescoreDocIds())); searchContext.searcher().setAggregatedDfs(readerContext.getAggregatedDfs(request.getAggregatedDfs())); - searchContext.docIdsToLoad(request.docIds()); try ( SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(searchContext, true, System.nanoTime()) ) { - fetchPhase.execute(searchContext); + fetchPhase.execute(searchContext, request.docIds()); if (readerContext.singleSession()) { freeReaderContext(request.contextId()); } executor.success(); } - return searchContext.fetchResult(); + var fetchResult = searchContext.fetchResult(); + // inc-ref fetch result because we close the SearchContext that references it in this try-with-resources block + fetchResult.incRef(); + return fetchResult; } catch (Exception e) { assert TransportActions.isShardNotAvailableException(e) == false : new AssertionError(e); // we handle the failure in the failure listener below @@ -1078,7 +1086,6 @@ private DefaultSearchContext createSearchContext( request.getClusterAlias() ); ExecutorService executor = this.enableSearchWorkerThreads ? threadPool.executor(Names.SEARCH_WORKER) : null; - int maximumNumberOfSlices = determineMaximumNumberOfSlices(executor, request, resultsType); searchContext = new DefaultSearchContext( reader, request, @@ -1088,7 +1095,8 @@ private DefaultSearchContext createSearchContext( fetchPhase, lowLevelCancellation, executor, - maximumNumberOfSlices, + resultsType, + enableQueryPhaseParallelCollection, minimumDocsPerSlice ); // we clone the query shard context here just for rewriting otherwise we @@ -1109,27 +1117,6 @@ private DefaultSearchContext createSearchContext( return searchContext; } - int determineMaximumNumberOfSlices(ExecutorService executor, ShardSearchRequest request, ResultsType resultsType) { - return executor instanceof ThreadPoolExecutor tpe - && isParallelCollectionSupportedForResults(resultsType, request.source(), this.enableQueryPhaseParallelCollection) - ? 
tpe.getMaximumPoolSize() - : 1; - } - - static boolean isParallelCollectionSupportedForResults( - ResultsType resultsType, - SearchSourceBuilder source, - boolean isQueryPhaseParallelismEnabled - ) { - if (resultsType == ResultsType.DFS) { - return true; - } - if (resultsType == ResultsType.QUERY && isQueryPhaseParallelismEnabled) { - return source == null || source.supportsParallelCollection(); - } - return false; - } - private void freeAllContextForIndex(Index index) { assert index != null; for (ReaderContext ctx : activeReaders.values()) { @@ -1321,11 +1308,9 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc context.addQuerySearchResultReleasable(aggContext); try { final AggregatorFactories factories = source.aggregations().build(aggContext, null); - final Supplier supplier = () -> aggReduceContextBuilder( - context::isCancelled, - source.aggregations() + context.aggregations( + new SearchContextAggregations(factories, () -> aggReduceContextBuilder(context::isCancelled, source.aggregations())) ); - context.aggregations(new SearchContextAggregations(factories, supplier)); } catch (IOException e) { throw new AggregationInitializationException("Failed to create aggregators", e); } @@ -1465,7 +1450,7 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc * Shortcut ids to load, we load only "from" and up to "size". The phase controller * handles this as well since the result is always size * shards for Q_T_F */ - private static void shortcutDocIdsToLoad(SearchContext context) { + private static int[] shortcutDocIdsToLoad(SearchContext context) { final int[] docIdsToLoad; int docsOffset = 0; final Suggest suggest = context.queryResult().suggest(); @@ -1503,7 +1488,7 @@ private static void shortcutDocIdsToLoad(SearchContext context) { docIdsToLoad[docsOffset++] = option.getDoc().doc; } } - context.docIdsToLoad(docIdsToLoad); + return docIdsToLoad; } private static void processScroll(InternalScrollSearchRequest request, SearchContext context) { @@ -1584,14 +1569,6 @@ public AliasFilter buildAliasFilter(ClusterState state, String index, Set listener) { - try { - listener.onResponse(canMatch(request)); - } catch (IOException e) { - listener.onFailure(e); - } - } - public void canMatch(CanMatchNodeRequest request, ActionListener listener) { final List shardSearchRequests = request.createShardSearchRequests(); final List responses = new ArrayList<>(shardSearchRequests.size()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java index 6aa545c981fa3..defbb0849bb47 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java @@ -25,6 +25,7 @@ import java.util.Optional; import java.util.Set; import java.util.function.Consumer; +import java.util.function.ToLongFunction; /** * A factory that knows how to create an {@link Aggregator} of a specific type. @@ -223,12 +224,12 @@ public boolean isInSortOrderExecutionRequired() { * Return false if this aggregation or any of the child aggregations does not support parallel collection. * As a result, a request including such aggregation is always executed sequentially despite concurrency is enabled for the query phase. 
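* <p>A sketch of how a subclass might consult the resolver (hypothetical builder with a
* {@code field()} getter and an illustrative threshold of 50, modeled on the terms sources in this
* change; the resolver yields -1 when the cardinality cannot be determined):
* <pre>{@code
* public boolean supportsParallelCollection(ToLongFunction<String> fieldCardinalityResolver) {
*     long cardinality = fieldCardinalityResolver.applyAsLong(field());
*     return cardinality != -1 && cardinality <= 50 && super.supportsParallelCollection(fieldCardinalityResolver);
* }
* }</pre>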
*/ - public boolean supportsParallelCollection() { + public boolean supportsParallelCollection(ToLongFunction<String> fieldCardinalityResolver) { if (isInSortOrderExecutionRequired()) { return false; } for (AggregationBuilder builder : factoriesBuilder.getAggregatorFactories()) { - if (builder.supportsParallelCollection() == false) { + if (builder.supportsParallelCollection(fieldCardinalityResolver) == false) { return false; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java index be109b2909bcc..795f51a729ed6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java @@ -142,8 +142,9 @@ public final Function<byte[], Number> pointReaderIfAvailable(ValuesSourceConfig config) * @return the cumulative size in bytes allocated by this aggregator to service this request */ protected long addRequestCircuitBreakerBytes(long bytes) { - // Only use the potential to circuit break if bytes are being incremented - if (bytes > 0) { + // Only use the potential to circuit break if bytes are being incremented. In the case of 0 + // bytes, it will trigger the parent circuit breaker. + if (bytes >= 0) { context.breaker().addEstimateBytesAndMaybeBreak(bytes, ""); } else { context.breaker().addWithoutBreaking(bytes); @@ -267,8 +268,8 @@ public Aggregator[] subAggregators() { public Aggregator subAggregator(String aggName) { if (subAggregatorbyName == null) { subAggregatorbyName = Maps.newMapWithExpectedSize(subAggregators.length); - for (int i = 0; i < subAggregators.length; i++) { - subAggregatorbyName.put(subAggregators[i].name(), subAggregators[i]); + for (Aggregator subAggregator : subAggregators) { + subAggregatorbyName.put(subAggregator.name(), subAggregator); } } return subAggregatorbyName.get(aggName); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index 0738303020de5..7b7c41165b51e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -42,6 +42,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.function.ToLongFunction; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -338,9 +339,9 @@ public boolean isInSortOrderExecutionRequired() { * As a result, a request including such aggregation is always executed sequentially despite concurrency is enabled for the query * phase.
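* <p>For example (hypothetical composition), a single declining sub-aggregation such as
* {@code significant_terms} forces the whole tree to be collected sequentially:
* <pre>{@code
* AggregationBuilder agg = AggregationBuilders.terms("a").field("k")
*     .subAggregation(AggregationBuilders.significantTerms("b").field("k"));
* // agg.supportsParallelCollection(resolver) is false regardless of what the resolver returns
* }</pre>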
*/ - public boolean supportsParallelCollection() { + public boolean supportsParallelCollection(ToLongFunction<String> fieldCardinalityResolver) { for (AggregationBuilder builder : aggregationBuilders) { - if (builder.supportsParallelCollection() == false) { + if (builder.supportsParallelCollection(fieldCardinalityResolver) == false) { return false; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java index c9f937b489a73..ff1ca58d351e3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java @@ -26,6 +26,18 @@ public abstract class InternalMultiBucketAggregation< A extends InternalMultiBucketAggregation, B extends InternalMultiBucketAggregation.InternalBucket> extends InternalAggregation implements MultiBucketsAggregation { + /** + * When we pre-count the empty buckets we report them periodically + * because you can configure the date_histogram to create an astounding + * number of buckets. It'd take a while to count that high only to abort. + * So we report every couple thousand buckets. It'd be simpler to report + * every single bucket we plan to allocate one at a time but that'd cause + * needless overhead on the circuit breakers. Counting a couple thousand + * buckets is plenty fast to fail this quickly in pathological cases and + * plenty large to keep the overhead minimal. + */ + protected static final int REPORT_EMPTY_EVERY = 10_000; + public InternalMultiBucketAggregation(String name, Map<String, Object> metadata) { super(name, metadata); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java index d950706b46b82..15d4a03be81f2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java @@ -289,7 +289,7 @@ byte id() { @Override public Comparator partiallyBuiltBucketComparator(ToLongFunction ordinalReader, Aggregator aggregator) { Comparator comparator = comparator(); - return (lhs, rhs) -> comparator.compare(lhs, rhs); + return comparator::compare; } @Override @@ -388,7 +388,6 @@ private static Comparator comparingKeys() { /** * @return compare by {@link Bucket#getKey()} that will be in the bucket once it is reduced */ - @SuppressWarnings("unchecked") private static Comparator> comparingDelayedKeys() { return DelayedBucket::compareKey; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java index e98762f462243..b956658f1226d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java @@ -183,17 +183,16 @@ public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx) ); } } - switch (leafCollectors.size()) { - case 0: + return switch (leafCollectors.size()) { + case 0 -> { if (terminateIfNoop) { throw new CollectionTerminatedException(); } - return LeafBucketCollector.NO_OP_COLLECTOR; - case 1: - return leafCollectors.get(0); - default: - return new MultiLeafBucketCollector(leafCollectors, cacheScores); -
} + yield LeafBucketCollector.NO_OP_COLLECTOR; + } + case 1 -> leafCollectors.get(0); + default -> new MultiLeafBucketCollector(leafCollectors, cacheScores); + }; } private static class MultiLeafBucketCollector extends LeafBucketCollector { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/TopBucketBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/TopBucketBuilder.java index 9980918badfd5..61427b446cb6d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/TopBucketBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/TopBucketBuilder.java @@ -102,7 +102,7 @@ static class PriorityQueueTopBucketBuilder= ArrayUtil.MAX_ARRAY_LENGTH) { throw new IllegalArgumentException("can't reduce more than [" + ArrayUtil.MAX_ARRAY_LENGTH + "] buckets"); } - queue = new PriorityQueue>(size) { + queue = new PriorityQueue<>(size) { private final Comparator> comparator = order.delayedBucketComparator(); @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java index b33abb0f95824..7c3c6f8397979 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java @@ -40,11 +40,7 @@ * this collector. */ public class BestBucketsDeferringCollector extends DeferringBucketCollector { - static class Entry { - final AggregationExecutionContext aggCtx; - final PackedLongValues docDeltas; - final PackedLongValues buckets; - + record Entry(AggregationExecutionContext aggCtx, PackedLongValues docDeltas, PackedLongValues buckets) { Entry(AggregationExecutionContext aggCtx, PackedLongValues docDeltas, PackedLongValues buckets) { this.aggCtx = Objects.requireNonNull(aggCtx); this.docDeltas = Objects.requireNonNull(docDeltas); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java index 06456b2396522..ec8117cf03135 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java @@ -223,9 +223,9 @@ protected final void buildSubAggsForAllBuckets( } InternalAggregations[] results = buildSubAggsForBuckets(bucketOrdsToCollect); s = 0; - for (int r = 0; r < buckets.length; r++) { - for (int b = 0; b < buckets[r].length; b++) { - setAggs.accept(buckets[r][b], results[s++]); + for (B[] bucket : buckets) { + for (int b = 0; b < bucket.length; b++) { + setAggs.accept(bucket[b], results[s++]); } } } @@ -330,8 +330,8 @@ protected final InternalAggregation[] buildAggregationsForVariableBuckets( } long[] bucketOrdsToCollect = new long[(int) totalOrdsToCollect]; int b = 0; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); + for (long owningBucketOrd : owningBucketOrds) { + LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd); while (ordsEnum.next()) { bucketOrdsToCollect[b++] = ordsEnum.ord(); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java index ca88d50898763..59fec0dd1540a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java @@ -33,6 +33,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.function.ToLongFunction; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; @@ -302,4 +303,14 @@ public boolean equals(Object obj) { public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; } + + @Override + public boolean supportsParallelCollection(ToLongFunction fieldCardinalityResolver) { + for (CompositeValuesSourceBuilder source : sources) { + if (source.supportsParallelCollection(fieldCardinalityResolver) == false) { + return false; + } + } + return super.supportsParallelCollection(fieldCardinalityResolver); + } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java index dff95332d3f16..cee90f55597b2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -256,22 +256,19 @@ private void finishLeaf() { /** Return true if the provided field may have multiple values per document in the leaf **/ private static boolean isMaybeMultivalued(LeafReaderContext context, SortField sortField) throws IOException { SortField.Type type = IndexSortConfig.getSortFieldType(sortField); - switch (type) { - case STRING: + return switch (type) { + case STRING -> { final SortedSetDocValues v1 = context.reader().getSortedSetDocValues(sortField.getField()); - return v1 != null && DocValues.unwrapSingleton(v1) == null; - - case DOUBLE: - case FLOAT: - case LONG: - case INT: + yield v1 != null && DocValues.unwrapSingleton(v1) == null; + } + case DOUBLE, FLOAT, LONG, INT -> { final SortedNumericDocValues v2 = context.reader().getSortedNumericDocValues(sortField.getField()); - return v2 != null && DocValues.unwrapSingleton(v2) == null; - - default: + yield v2 != null && DocValues.unwrapSingleton(v2) == null; + } + default -> // we have no clue whether the field is multi-valued or not so we assume it is. 
- return true; - } + true; + }; } /** @@ -631,13 +628,5 @@ public void collectDebugInfo(BiConsumer add) { } } - private static class Entry { - final AggregationExecutionContext aggCtx; - final DocIdSet docIdSet; - - Entry(AggregationExecutionContext aggCtx, DocIdSet docIdSet) { - this.aggCtx = aggCtx; - this.docIdSet = docIdSet; - } - } + private record Entry(AggregationExecutionContext aggCtx, DocIdSet docIdSet) {} } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java index 2c4eb02dfa6d6..0a7f6a26f580b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java @@ -29,7 +29,7 @@ */ final class CompositeValuesCollectorQueue extends PriorityQueue implements Releasable { private class Slot { - int value; + final int value; Slot(int initial) { this.value = initial; @@ -83,7 +83,7 @@ private interface CompetitiveBoundsChangedListener { // tracking the highest competitive value. if (arrays[0] instanceof GlobalOrdinalValuesSource globalOrdinalValuesSource) { if (shouldApplyGlobalOrdinalDynamicPruningForLeadingSource(sources, size, indexReader)) { - competitiveBoundsChangedListener = topSlot -> globalOrdinalValuesSource.updateHighestCompetitiveValue(topSlot); + competitiveBoundsChangedListener = globalOrdinalValuesSource::updateHighestCompetitiveValue; } else { competitiveBoundsChangedListener = null; } @@ -207,8 +207,8 @@ long getDocCount(int slot) { * Copies the current value in slot. */ private void copyCurrent(int slot, long value) { - for (int i = 0; i < arrays.length; i++) { - arrays[i].copyCurrent(slot); + for (SingleDimensionValuesSource array : arrays) { + array.copyCurrent(slot); } docCounts = bigArrays.grow(docCounts, slot + 1); docCounts.set(slot, value); @@ -238,12 +238,12 @@ int compare(int slot1, int slot2) { */ boolean equals(int slot1, int slot2) { assert slot2 != CANDIDATE_SLOT; - for (int i = 0; i < arrays.length; i++) { + for (SingleDimensionValuesSource array : arrays) { final int cmp; if (slot1 == CANDIDATE_SLOT) { - cmp = arrays[i].compareCurrent(slot2); + cmp = array.compareCurrent(slot2); } else { - cmp = arrays[i].compare(slot1, slot2); + cmp = array.compare(slot1, slot2); } if (cmp != 0) { return false; @@ -257,8 +257,8 @@ boolean equals(int slot1, int slot2) { */ int hashCode(int slot) { int result = 1; - for (int i = 0; i < arrays.length; i++) { - result = 31 * result + (slot == CANDIDATE_SLOT ? arrays[i].hashCodeCurrent() : arrays[i].hashCode(slot)); + for (SingleDimensionValuesSource array : arrays) { + result = 31 * result + (slot == CANDIDATE_SLOT ? 
array.hashCodeCurrent() : array.hashCode(slot)); } return result; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java index 6bbebc0ec9e5e..af7e450ac8bda 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java @@ -26,6 +26,7 @@ import java.io.IOException; import java.time.ZoneId; import java.util.Objects; +import java.util.function.ToLongFunction; /** * A {@link ValuesSource} builder for {@link CompositeAggregationBuilder} @@ -325,4 +326,12 @@ public final CompositeValuesSourceConfig build(AggregationContext context) throw protected ZoneId timeZone() { return null; } + + /** + * Return false if this composite source does not support parallel collection. + * As a result, a request including such aggregation is always executed sequentially even if concurrency is enabled for the query phase. + */ + public boolean supportsParallelCollection(ToLongFunction<String> fieldCardinalityResolver) { + return true; + } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java index 927104a92deb2..f2c601e412f92 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java @@ -628,10 +628,10 @@ public Object get(Object key) { @Override public Set<Map.Entry<String, Object>> entrySet() { - return new AbstractSet<Map.Entry<String, Object>>() { + return new AbstractSet<>() { @Override public Iterator<Map.Entry<String, Object>> iterator() { - return new Iterator<Map.Entry<String, Object>>() { + return new Iterator<>() { int pos = 0; @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java index 3d79509ad9377..ca9968834e611 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java @@ -43,7 +43,7 @@ class LongValuesSource extends SingleDimensionValuesSource<Long> { private final CheckedFunction<LeafReaderContext, SortedNumericDocValues, IOException> docValuesFunc; private final LongUnaryOperator rounding; - private BitArray bits; + private final BitArray bits; private LongArray values; private long currentValue; private boolean missingCurrentValue; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/ParsedComposite.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/ParsedComposite.java index 0c3ec9a521b8e..847e35cf0d4ea 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/ParsedComposite.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/ParsedComposite.java @@ -32,7 +32,7 @@ public class ParsedComposite extends ParsedMultiBucketAggregation<ParsedComposite.ParsedBucket> { - declareMultiBucketAggregationFields(PARSER, parser -> ParsedComposite.ParsedBucket.fromXContent(parser), parser -> null); + declareMultiBucketAggregationFields(PARSER, ParsedBucket::fromXContent, parser -> null); } private Map<String, Object> afterKey; diff --git
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java index e5e89d94c803b..18591b1f3719b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.script.Script; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; @@ -29,6 +30,7 @@ import java.util.List; import java.util.function.LongConsumer; import java.util.function.LongUnaryOperator; +import java.util.function.ToLongFunction; /** * A {@link CompositeValuesSourceBuilder} that builds a {@link ValuesSource} from a {@link Script} or @@ -215,4 +217,13 @@ protected CompositeValuesSourceConfig innerBuild(ValuesSourceRegistry registry, return registry.getAggregator(REGISTRY_KEY, config) .apply(config, name, script() != null, format(), missingBucket(), missingOrder(), order()); } + + @Override + public boolean supportsParallelCollection(ToLongFunction fieldCardinalityResolver) { + if (script() == null) { + long cardinality = fieldCardinalityResolver.applyAsLong(field()); + return cardinality != -1 && cardinality <= TermsAggregationBuilder.KEY_ORDER_CONCURRENCY_THRESHOLD; + } + return false; + } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java index e0792fca6c28f..69dcc8d3da117 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java @@ -140,7 +140,7 @@ public static FiltersAggregator build( Map metadata ) throws IOException { FilterByFilterAggregator.AdapterBuilder filterByFilterBuilder = - new FilterByFilterAggregator.AdapterBuilder( + new FilterByFilterAggregator.AdapterBuilder<>( name, keyed, keyedBucket, @@ -214,7 +214,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I (offsetInOwningOrd, docCount, subAggregationResults) -> { if (offsetInOwningOrd < filters.size()) { return new InternalFilters.InternalBucket( - filters.get(offsetInOwningOrd).key().toString(), + filters.get(offsetInOwningOrd).key(), docCount, subAggregationResults, keyed, @@ -232,13 +232,7 @@ public InternalAggregation buildEmptyAggregation() { InternalAggregations subAggs = buildEmptySubAggregations(); List buckets = new ArrayList<>(filters.size() + (otherBucketKey == null ? 
0 : 1)); for (QueryToFilterAdapter filter : filters) { - InternalFilters.InternalBucket bucket = new InternalFilters.InternalBucket( - filter.key().toString(), - 0, - subAggs, - keyed, - keyedBucket - ); + InternalFilters.InternalBucket bucket = new InternalFilters.InternalBucket(filter.key(), 0, subAggs, keyed, keyedBucket); buckets.add(bucket); } @@ -300,11 +294,17 @@ protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCt final int numFilters = filters().size(); List filterWrappers = new ArrayList<>(); long totalCost = 0; + // trigger the parent circuit breaker to make sure we have enough heap to build the first scorer. + // note we might still fail if the scorer is huge. + addRequestCircuitBreakerBytes(0L); for (int filterOrd = 0; filterOrd < numFilters; filterOrd++) { Scorer randomAccessScorer = filters().get(filterOrd).randomAccessScorer(aggCtx.getLeafReaderContext()); if (randomAccessScorer == null) { continue; } + // scorer can take a fair amount of heap, and we have no means to estimate the size, so + // we trigger the parent circuit breaker to at least fail if we are running out of heap + addRequestCircuitBreakerBytes(0L); totalCost += randomAccessScorer.iterator().cost(); filterWrappers.add( randomAccessScorer.twoPhaseIterator() == null diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java index ff9495ca4d825..8bc1e3d17642a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java @@ -64,7 +64,7 @@ public static ObjectParser crea parser.declareInt(GeoGridAggregationBuilder::size, FIELD_SIZE); parser.declareInt(GeoGridAggregationBuilder::shardSize, FIELD_SHARD_SIZE); parser.declareField( - (p, builder, context) -> { builder.setGeoBoundingBox(GeoBoundingBox.parseBoundingBox(p)); }, + (p, builder, context) -> builder.setGeoBoundingBox(GeoBoundingBox.parseBoundingBox(p)), GeoBoundingBox.BOUNDS_FIELD, org.elasticsearch.xcontent.ObjectParser.ValueType.OBJECT ); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java index e5fd0aa10ced2..52f63bf24be11 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java @@ -203,7 +203,7 @@ public static int[] parseHash(String hashAsString) { public static String stringEncode(long hash) { final int[] res = parseHash(hash); validateZXY(res[0], res[1], res[2]); - return "" + res[0] + "/" + res[1] + "/" + res[2]; + return res[0] + "/" + res[1] + "/" + res[2]; } /** diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java index 2371506082f1b..48b361592519c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java @@ -79,7 +79,7 @@ public String getPreferredName() { return preferredName; } - 
private String preferredName; + private final String preferredName; IntervalTypeEnum(String preferredName) { this.preferredName = preferredName; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index ed883a4b04d6b..4ffc9abdc2202 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -373,18 +373,6 @@ protected Bucket reduceBucket(List buckets, AggregationReduceContext con return createBucket(buckets.get(0).key, docCount, aggs); } - /** - * When we pre-count the empty buckets we report them periodically - * because you can configure the date_histogram to create an astounding - * number of buckets. It'd take a while to count that high only to abort. - * So we report every couple thousand buckets. It's be simpler to report - * every single bucket we plan to allocate one at a time but that'd cause - * needless overhead on the circuit breakers. Counting a couple thousand - * buckets is plenty fast to fail this quickly in pathological cases and - * plenty large to keep the overhead minimal. - */ - private static final int REPORT_EMPTY_EVERY = 10_000; - private void addEmptyBuckets(List list, AggregationReduceContext reduceContext) { /* * Make sure we have space for the empty buckets we're going to add by diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index caef13221b0f3..6ce723d12db26 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -291,10 +291,11 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent(histogram.buckets.iterator())); + pq.add(new IteratorAndCurrent<>(histogram.buckets.iterator())); } } + int consumeBucketCount = 0; List reducedBuckets = new ArrayList<>(); if (pq.size() > 0) { // list of buckets coming from different shards that have the same key @@ -310,6 +311,10 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent= minDocCount || reduceContext.isFinalReduce() == false) { reducedBuckets.add(reduced); + if (consumeBucketCount++ >= REPORT_EMPTY_EVERY) { + reduceContext.consumeBucketsAndMaybeBreak(consumeBucketCount); + consumeBucketCount = 0; + } } currentBuckets.clear(); key = top.current().key; @@ -330,10 +335,15 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent= minDocCount || reduceContext.isFinalReduce() == false) { reducedBuckets.add(reduced); + if (consumeBucketCount++ >= REPORT_EMPTY_EVERY) { + reduceContext.consumeBucketsAndMaybeBreak(consumeBucketCount); + consumeBucketCount = 0; + } } } } + reduceContext.consumeBucketsAndMaybeBreak(consumeBucketCount); return reducedBuckets; } @@ -358,18 +368,6 @@ private double round(double key) { return Math.floor((key - emptyBucketInfo.offset) / emptyBucketInfo.interval) * emptyBucketInfo.interval + emptyBucketInfo.offset; } - /** - * When we pre-count the empty buckets we report them periodically - * because you can configure the histogram to create more buckets than - * there are atoms in the universe. 
It'd take a while to count that high - * only to abort. So we report every couple thousand buckets. It's be - * simpler to report every single bucket we plan to allocate one at a time - * but that'd cause needless overhead on the circuit breakers. Counting a - * couple thousand buckets is plenty fast to fail this quickly in - * pathological cases and plenty large to keep the overhead minimal. - */ - private static final int REPORT_EMPTY_EVERY = 10_000; - private void addEmptyBuckets(List list, AggregationReduceContext reduceContext) { /* * Make sure we have space for the empty buckets we're going to add by @@ -377,7 +375,7 @@ private void addEmptyBuckets(List list, AggregationReduceContext reduceC * consumeBucketsAndMaybeBreak. */ class Counter implements DoubleConsumer { - private int size = list.size(); + private int size = 0; @Override public void accept(double key) { @@ -456,11 +454,9 @@ private void iterateEmptyBuckets(List list, ListIterator iter, D @Override public InternalAggregation reduce(List aggregations, AggregationReduceContext reduceContext) { List reducedBuckets = reduceBuckets(aggregations, reduceContext); - boolean alreadyAccountedForBuckets = false; if (reduceContext.isFinalReduce()) { if (minDocCount == 0) { addEmptyBuckets(reducedBuckets, reduceContext); - alreadyAccountedForBuckets = true; } if (InternalOrder.isKeyDesc(order)) { // we just need to reverse here... @@ -474,9 +470,6 @@ public InternalAggregation reduce(List aggregations, Aggreg CollectionUtil.introSort(reducedBuckets, order.comparator()); } } - if (false == alreadyAccountedForBuckets) { - reduceContext.consumeBucketsAndMaybeBreak(reducedBuckets.size()); - } return new InternalHistogram(getName(), reducedBuckets, order, minDocCount, emptyBucketInfo, format, keyed, getMetadata()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedVariableWidthHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedVariableWidthHistogram.java index ba33373354f3e..de7f29d785c75 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedVariableWidthHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedVariableWidthHistogram.java @@ -36,7 +36,7 @@ public List getBuckets() { return buckets; } - private static ObjectParser PARSER = new ObjectParser<>( + private static final ObjectParser PARSER = new ObjectParser<>( ParsedVariableWidthHistogram.class.getSimpleName(), true, ParsedVariableWidthHistogram::new diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java index 516c9d91a7b65..945ecd7424de3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java @@ -82,7 +82,7 @@ private class BufferValuesPhase extends CollectionPhase { private DoubleArray buffer; private int bufferSize; - private int bufferLimit; + private final int bufferLimit; private MergeBucketsPhase mergeBucketsPhase; BufferValuesPhase(int bufferLimit) { @@ -97,7 +97,7 @@ public CollectionPhase collectValue(LeafBucketCollector sub, int doc, double val if (bufferSize < bufferLimit) { // Add to the buffer i.e store the doc in a 
new bucket buffer = bigArrays().grow(buffer, bufferSize + 1); - buffer.set((long) bufferSize, val); + buffer.set(bufferSize, val); collectBucket(sub, doc, bufferSize); bufferSize += 1; } @@ -432,7 +432,6 @@ public void close() { // Aggregation parameters private final int numBuckets; private final int shardSize; - private final int bufferLimit; private CollectionPhase collector; @@ -455,9 +454,8 @@ public void close() { this.valuesSource = (ValuesSource.Numeric) valuesSourceConfig.getValuesSource(); this.formatter = valuesSourceConfig.format(); this.shardSize = shardSize; - this.bufferLimit = initialBuffer; - collector = new BufferValuesPhase(this.bufferLimit); + collector = new BufferValuesPhase(initialBuffer); String scoringAgg = subAggsNeedScore(); String nestedAgg = descendsFromNestedAggregator(parent); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java index dc0b42f507d84..cdb2ae4517a22 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java @@ -117,11 +117,7 @@ private static String key(String key, Double from, Double to) { if (key != null) { return key; } - StringBuilder sb = new StringBuilder(); - sb.append((from == null || from == 0) ? "*" : from); - sb.append("-"); - sb.append((to == null || Double.isInfinite(to)) ? "*" : to); - return sb.toString(); + return ((from == null || from == 0) ? "*" : from) + "-" + ((to == null || Double.isInfinite(to)) ? "*" : to); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java index 23105bbe2d4f3..c8588136c1d33 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java @@ -63,10 +63,7 @@ public Bucket( } private static String generateKey(BytesRef from, BytesRef to, DocValueFormat format) { - StringBuilder builder = new StringBuilder().append(from == null ? "*" : format.format(from)) - .append("-") - .append(to == null ? "*" : format.format(to)); - return builder.toString(); + return (from == null ? "*" : format.format(from)) + "-" + (to == null ? "*" : format.format(to)); } private static Bucket createFromStream(StreamInput in, DocValueFormat format, boolean keyed) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java index cb970fc87fd33..046d5efb97ece 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java @@ -144,10 +144,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } private static String generateKey(double from, double to, DocValueFormat format) { - StringBuilder builder = new StringBuilder().append(Double.isInfinite(from) ? "*" : format.format(from)) - .append("-") - .append(Double.isInfinite(to) ? 
"*" : format.format(to)); - return builder.toString(); + return (Double.isInfinite(from) ? "*" : format.format(from)) + "-" + (Double.isInfinite(to) ? "*" : format.format(to)); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ParsedRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ParsedRange.java index 499b8c3e4f039..a12c126fb73d8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ParsedRange.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ParsedRange.java @@ -41,7 +41,7 @@ protected static void declareParsedRangeFields( final CheckedFunction bucketParser, final CheckedFunction keyedBucketParser ) { - declareMultiBucketAggregationFields(objectParser, bucketParser::apply, keyedBucketParser::apply); + declareMultiBucketAggregationFields(objectParser, bucketParser, keyedBucketParser); } private static final ObjectParser PARSER = new ObjectParser<>( diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java index 7c89061ea32f2..7d7e1a1a03bc4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java @@ -383,8 +383,16 @@ public static FromFilters adaptIntoFiltersOrNull( return null; } boolean wholeNumbersOnly = false == ((ValuesSource.Numeric) valuesSourceConfig.getValuesSource()).isFloatingPoint(); - FilterByFilterAggregator.AdapterBuilder> filterByFilterBuilder = new FilterByFilterAggregator.AdapterBuilder< - FromFilters>(name, false, false, null, context, parent, cardinality, metadata) { + FilterByFilterAggregator.AdapterBuilder> filterByFilterBuilder = new FilterByFilterAggregator.AdapterBuilder<>( + name, + false, + false, + null, + context, + parent, + cardinality, + metadata + ) { @Override protected FromFilters adapt(CheckedFunction delegate) throws IOException { @@ -547,8 +555,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I public InternalAggregation buildEmptyAggregation() { InternalAggregations subAggs = buildEmptySubAggregations(); List buckets = new ArrayList<>(ranges.length); - for (int i = 0; i < ranges.length; i++) { - Range range = ranges[i]; + for (Range range : ranges) { org.elasticsearch.search.aggregations.bucket.range.Range.Bucket bucket = rangeFactory.createBucket( range.key, range.originalFrom, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java index 5bca7718c9e2a..1344604a8d39c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java @@ -45,7 +45,7 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme private final List entries = new ArrayList<>(); private BucketCollector deferred; private ObjectArray perBucketSamples; - private int shardSize; + private final int shardSize; private PerSegmentCollects perSegCollector; private final BigArrays bigArrays; private final Consumer circuitBreakerConsumer; @@ -210,7 +210,7 @@ public int 
getDocCount() { }

 class PerSegmentCollects extends Scorable {
-        private AggregationExecutionContext aggCtx;
+        private final AggregationExecutionContext aggCtx;
         int maxDocId = Integer.MIN_VALUE;
         private float currentScore;
         private int currentDocId = -1;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java
index 542fcc84a6411..aed2119dec483 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java
@@ -204,7 +204,7 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) {
             if (lastBucket != null && cmp.compare(top.current(), lastBucket) != 0) {
                 // the key changed so bundle up the last key's worth of buckets
                 boolean shouldContinue = sink.apply(
-                    new DelayedBucket(AbstractInternalTerms.this::reduceBucket, reduceContext, sameTermBuckets)
+                    new DelayedBucket<>(AbstractInternalTerms.this::reduceBucket, reduceContext, sameTermBuckets)
                 );
                 if (false == shouldContinue) {
                     return;
@@ -228,7 +228,7 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) {
         }
         if (sameTermBuckets.isEmpty() == false) {
-            sink.apply(new DelayedBucket(AbstractInternalTerms.this::reduceBucket, reduceContext, sameTermBuckets));
+            sink.apply(new DelayedBucket<>(AbstractInternalTerms.this::reduceBucket, reduceContext, sameTermBuckets));
         }
     }
@@ -249,7 +249,7 @@ private void reduceLegacy(
         }
         for (List sameTermBuckets : bucketMap.values()) {
             boolean shouldContinue = sink.apply(
-                new DelayedBucket(AbstractInternalTerms.this::reduceBucket, reduceContext, sameTermBuckets)
+                new DelayedBucket<>(AbstractInternalTerms.this::reduceBucket, reduceContext, sameTermBuckets)
             );
             if (false == shouldContinue) {
                 return;
@@ -300,7 +300,7 @@ public InternalAggregation reduce(List aggregations, Aggreg
         TopBucketBuilder top = TopBucketBuilder.build(
             getRequiredSize(),
             getOrder(),
-            removed -> { otherDocCount[0] += removed.getDocCount(); }
+            removed -> otherDocCount[0] += removed.getDocCount()
         );
         thisReduceOrder = reduceBuckets(aggregations, reduceContext, bucket -> {
             if (bucket.getDocCount() >= getMinDocCount()) {
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java
index 70f258e523527..524c648215345 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java
@@ -170,7 +170,7 @@ public int hashCode() {
         private Set valids;
         private Set invalids;
-        private Long spare = new Long(0);
+        private final Long spare = new Long(0);

         private SetBackedLongFilter(int numValids, int numInvalids) {
             if (numValids > 0) {
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java
index aaa9857fc1562..b41e402c029f4 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java
@@ -34,7 +34,6 @@ public abstract class InternalMappedRareTerms,
     protected DocValueFormat format;
     protected List buckets;
-    protected Map bucketMap;

     final SetBackedScalingCuckooFilter filter;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java
index cffe11c5729eb..07aa318e9c487 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java
@@ -213,11 +213,7 @@ public InternalAggregation reduce(List aggregations, Aggreg
             @SuppressWarnings("unchecked")
             InternalSignificantTerms terms = (InternalSignificantTerms) aggregation;
             for (B bucket : terms.getBuckets()) {
-                List existingBuckets = buckets.get(bucket.getKeyAsString());
-                if (existingBuckets == null) {
-                    existingBuckets = new ArrayList<>(aggregations.size());
-                    buckets.put(bucket.getKeyAsString(), existingBuckets);
-                }
+                List existingBuckets = buckets.computeIfAbsent(bucket.getKeyAsString(), k -> new ArrayList<>(aggregations.size()));
                 // Adjust the buckets with the global stats representing the
                 // total size of the pots from which the stats are drawn
                 existingBuckets.add(
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
index 85307a903a3eb..1d32251ffc33a 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
@@ -93,6 +93,18 @@ public long getDocCount() {
         return docCount;
     }

+    public void setDocCount(long docCount) {
+        this.docCount = docCount;
+    }
+
+    public long getBucketOrd() {
+        return bucketOrd;
+    }
+
+    public void setBucketOrd(long bucketOrd) {
+        this.bucketOrd = bucketOrd;
+    }
+
     @Override
     public long getDocCountError() {
         if (showDocCountError == false) {
@@ -102,7 +114,7 @@ public long getDocCountError() {
     }

     @Override
-    protected void setDocCountError(long docCountError) {
+    public void setDocCountError(long docCountError) {
         this.docCountError = docCountError;
     }

@@ -121,6 +133,10 @@ public Aggregations getAggregations() {
         return aggregations;
     }

+    public void setAggregations(InternalAggregations aggregations) {
+        this.aggregations = aggregations;
+    }
+
     @Override
     public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java
index 279625654e734..6b21b11db5015 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java
@@ -130,8 +130,8 @@ public Iterator keyOrderedIterator(long owningBucketOrd) {
             }
         }
         Iterator toReturn = new Iterator<>() {
-            Iterator wrapped = keySet.iterator();
-            long filterOrd = owningBucketOrd;
+            final Iterator wrapped = keySet.iterator();
+            final long filterOrd = owningBucketOrd;
             long next;
             boolean hasNext = true;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java
index 056a8a00dd72f..ce911379d9ddb 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java
@@ -34,6 +34,7 @@
 import java.io.IOException;
 import java.util.Map;
 import java.util.Objects;
+import java.util.function.ToLongFunction;

 import static org.elasticsearch.index.query.AbstractQueryBuilder.parseTopLevelQuery;

@@ -137,7 +138,7 @@ public boolean supportsSampling() {
     }

     @Override
-    public boolean supportsParallelCollection() {
+    public boolean supportsParallelCollection(ToLongFunction fieldCardinalityResolver) {
         return false;
     }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java
index fe27738fe7589..fa05ffbd58295 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java
@@ -59,6 +59,14 @@ public Object getKey() {
         return getKeyAsString();
     }

+    public BytesRef getTermBytes() {
+        return termBytes;
+    }
+
+    public void setTermBytes(BytesRef termBytes) {
+        this.termBytes = termBytes;
+    }
+
     // this method is needed for scripted numeric aggs
     @Override
     public Number getKeyAsNumber() {
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregatorFromFilters.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregatorFromFilters.java
index 0e0db3ab5054f..d1458b04f17a1 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregatorFromFilters.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregatorFromFilters.java
@@ -69,16 +69,7 @@ static StringTermsAggregatorFromFilters adaptIntoFiltersOrNull(
             return null;
         }
         FilterByFilterAggregator.AdapterBuilder filterByFilterBuilder =
-            new FilterByFilterAggregator.AdapterBuilder(
-                name,
-                false,
-                false,
-                null,
-                context,
-                parent,
-                cardinality,
-                metadata
-            ) {
+            new FilterByFilterAggregator.AdapterBuilder<>(name, false, false, null, context, parent, cardinality, metadata) {
                 @Override
                 protected StringTermsAggregatorFromFilters adapt(
                     CheckedFunction delegate
@@ -164,7 +155,7 @@ protected InternalAggregation adapt(InternalAggregation delegateResult) throws I
         }
         TermsEnum terms = valuesSupplier.get().termsEnum();
         if (filters.getBuckets().size() > bucketCountThresholds.getShardSize()) {
-            PriorityQueue queue = new PriorityQueue(bucketCountThresholds.getShardSize()) {
+            PriorityQueue queue = new PriorityQueue<>(bucketCountThresholds.getShardSize()) {
                 private final Comparator comparator = order.comparator();

                 @Override
@@ -195,7 +186,7 @@ protected boolean lessThan(OrdBucket a, OrdBucket b) {
             for (OrdBucket b : queue) {
                 buckets.add(buildBucket(b, terms));
             }
-            Collections.sort(buckets, reduceOrder.comparator());
+            buckets.sort(reduceOrder.comparator());
         } else {
             /*
              * Note for the curious: you can just use a for loop to iterate
@@ -217,7 +208,7 @@ protected boolean lessThan(OrdBucket a, OrdBucket b) {
             }
                 buckets.add(buildBucket(b, terms));
             }
-            Collections.sort(buckets, reduceOrder.comparator());
+            buckets.sort(reduceOrder.comparator());
         }
         return new StringTerms(
             filters.getName(),
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java
index ebc6b2c1cc70c..68263e2d72b9c 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java
@@ -35,8 +35,11 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
+import java.util.function.ToLongFunction;

 public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder {
+    public static final int KEY_ORDER_CONCURRENCY_THRESHOLD = 50;
+
     public static final String NAME = "terms";
     public static final ValuesSourceRegistry.RegistryKey REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey<>(
         NAME,
@@ -106,13 +109,13 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) {
     private IncludeExclude includeExclude = null;
     private String executionHint = null;
     private SubAggCollectionMode collectMode = null;
-    private TermsAggregator.BucketCountThresholds bucketCountThresholds = new TermsAggregator.BucketCountThresholds(
-        DEFAULT_BUCKET_COUNT_THRESHOLDS
-    );
+    private final TermsAggregator.BucketCountThresholds bucketCountThresholds;
+
     private boolean showTermDocCountError = false;

     public TermsAggregationBuilder(String name) {
         super(name);
+        this.bucketCountThresholds = new TermsAggregator.BucketCountThresholds(DEFAULT_BUCKET_COUNT_THRESHOLDS);
     }

     protected TermsAggregationBuilder(
@@ -135,7 +138,34 @@ public boolean supportsSampling() {
     }

     @Override
-    public boolean supportsParallelCollection() {
+    public boolean supportsParallelCollection(ToLongFunction fieldCardinalityResolver) {
+        /*
+         * We parallelize only if the cardinality of the field is lower than the shard size; this is to minimize precision issues.
+         * When ordered by term, we still take cardinality into account to avoid overhead that concurrency may cause against
+         * high cardinality fields.
+         */
+        if (script() == null
+            && (executionHint == null || executionHint.equals(TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS.toString()))) {
+            long cardinality = fieldCardinalityResolver.applyAsLong(field());
+            if (supportsParallelCollection(cardinality, order, bucketCountThresholds)) {
+                return super.supportsParallelCollection(fieldCardinalityResolver);
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Whether a terms aggregation with the provided order and bucket count thresholds against a field
+     * with the given cardinality should be executed concurrently.
+     */
+    public static boolean supportsParallelCollection(long cardinality, BucketOrder order, BucketCountThresholds bucketCountThresholds) {
+        if (cardinality != -1) {
+            if (InternalOrder.isKeyOrder(order)) {
+                return cardinality <= KEY_ORDER_CONCURRENCY_THRESHOLD;
+            }
+            BucketCountThresholds adjusted = TermsAggregatorFactory.adjustBucketCountThresholds(bucketCountThresholds, order);
+            return cardinality <= adjusted.getShardSize();
+        }
         return false;
     }
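An editorial aside on the hunk above, before the factory changes that follow: the new gate decides, per field, whether a terms aggregation may collect concurrently. A compact standalone restatement of the rule, with illustrative names rather than the Elasticsearch API, and with the -1 unknown-cardinality sentinel and the threshold of 50 taken from the hunk itself:

```java
/** Minimal sketch of the concurrency gate added to TermsAggregationBuilder above. */
final class TermsConcurrencyGate {

    /** Mirrors KEY_ORDER_CONCURRENCY_THRESHOLD from the patch. */
    static final int KEY_ORDER_CONCURRENCY_THRESHOLD = 50;

    /**
     * @param cardinality        field cardinality, or -1 when unknown
     * @param orderedByKey       true when buckets are ordered by term
     * @param effectiveShardSize shard size after the default over-request heuristic
     */
    static boolean supportsParallelCollection(long cardinality, boolean orderedByKey, int effectiveShardSize) {
        if (cardinality == -1) {
            return false; // unknown cardinality: collect sequentially to be safe
        }
        if (orderedByKey) {
            // key order merges exactly, but concurrency still has overhead,
            // so only very low cardinality fields qualify
            return cardinality <= KEY_ORDER_CONCURRENCY_THRESHOLD;
        }
        // doc-count order: parallelize only when every distinct term fits in the
        // shard-size queue, so per-slice truncation cannot drop a global winner
        return cardinality <= effectiveShardSize;
    }
}
```

The motivation stated in the comment is precision: if each collector slice can hold all distinct terms, slicing the segments cannot change which terms survive the shard-level top-N cut.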
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java
index 31c6a4a7e0430..e17cd828a24d0 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java
@@ -277,39 +277,45 @@ private static boolean isAggregationSort(BucketOrder order) {
         }
     }

-    @Override
-    protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound cardinality, Map metadata)
-        throws IOException {
-        BucketCountThresholds bucketCountThresholds = new BucketCountThresholds(this.bucketCountThresholds);
+    public static BucketCountThresholds adjustBucketCountThresholds(BucketCountThresholds bucketCountThresholds, BucketOrder order) {
+        BucketCountThresholds newBucketCountThresholds = new BucketCountThresholds(bucketCountThresholds);
         if (InternalOrder.isKeyOrder(order) == false
-            && bucketCountThresholds.getShardSize() == TermsAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.shardSize()) {
+            && newBucketCountThresholds.getShardSize() == TermsAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.shardSize()) {
             // The user has not made a shardSize selection. Use default
             // heuristic to avoid any wrong-ranking caused by distributed
             // counting
-            bucketCountThresholds.setShardSize(BucketUtils.suggestShardSideQueueSize(bucketCountThresholds.getRequiredSize()));
+            newBucketCountThresholds.setShardSize(BucketUtils.suggestShardSideQueueSize(newBucketCountThresholds.getRequiredSize()));
         }
+        newBucketCountThresholds.ensureValidity();
+        return newBucketCountThresholds;
+    }
+
+    @Override
+    protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound cardinality, Map metadata)
+        throws IOException {
+
+        BucketCountThresholds adjusted = adjustBucketCountThresholds(this.bucketCountThresholds, order);
         // If min_doc_count and shard_min_doc_count is provided, we do not support them being larger than 1
         // This is because we cannot be sure about their relative scale when sampled
         if (getSamplingContext().map(SamplingContext::isSampled).orElse(false)) {
-            if (bucketCountThresholds.getMinDocCount() > 1 || bucketCountThresholds.getShardMinDocCount() > 1) {
+            if (adjusted.getMinDocCount() > 1 || adjusted.getShardMinDocCount() > 1) {
                 throw new ElasticsearchStatusException(
                     "aggregation [{}] is within a sampling context; "
                         + "min_doc_count, provided [{}], and min_shard_doc_count, provided [{}], cannot be greater than 1",
                     RestStatus.BAD_REQUEST,
                     name(),
-                    bucketCountThresholds.getMinDocCount(),
-                    bucketCountThresholds.getShardMinDocCount()
+                    adjusted.getMinDocCount(),
+                    adjusted.getShardMinDocCount()
                 );
             }
         }
-        bucketCountThresholds.ensureValidity();
         return aggregatorSupplier.build(
             name,
             factories,
             config,
             order,
-            bucketCountThresholds,
+            adjusted,
             includeExclude,
             executionHint,
             context,
@@ -468,7 +474,7 @@ Aggregator create(
             && ordinalsValuesSource.supportsGlobalOrdinalsMapping() &&
             // we use the static COLLECT_SEGMENT_ORDS to allow tests to force specific optimizations
-            (COLLECT_SEGMENT_ORDS != null ? COLLECT_SEGMENT_ORDS.booleanValue() : ratio <= 0.5 && maxOrd <= 2048)) {
+            (COLLECT_SEGMENT_ORDS != null ? COLLECT_SEGMENT_ORDS : ratio <= 0.5 && maxOrd <= 2048)) {
             /*
              * We can use the low cardinality execution mode iff this aggregator:
              *  - has no sub-aggregator AND
@@ -505,7 +511,7 @@ Aggregator create(
              * is only possible if we're collecting from a single
              * bucket.
              */
-            remapGlobalOrds = REMAP_GLOBAL_ORDS.booleanValue();
+            remapGlobalOrds = REMAP_GLOBAL_ORDS;
         } else {
             remapGlobalOrds = true;
             if (includeExclude == null
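Another aside: adjustBucketCountThresholds, now factored out as a static method, owns the long-standing shard-size heuristic, which is what lets the concurrency gate above ask what shard size would actually be used without building an aggregator. A hedged model of the idea follows; the 1.5x-plus-10 formula is how I recall BucketUtils.suggestShardSideQueueSize behaving, so treat the constants as an assumption rather than something visible in this diff:

```java
/** Illustrative model of the shard-size adjustment factored out above. */
record BucketThresholds(int requiredSize, int shardSize) {

    /** Assumed sentinel meaning "the user made no explicit shard_size choice". */
    static final int DEFAULT_SHARD_SIZE = -1;

    BucketThresholds adjust(boolean orderedByKey) {
        if (orderedByKey == false && shardSize == DEFAULT_SHARD_SIZE) {
            // Over-request on each shard (about 1.5x the final size plus a cushion)
            // so shard-local truncation is less likely to drop a global top bucket.
            int suggested = (int) Math.min(Integer.MAX_VALUE, (long) requiredSize * 3 / 2 + 10);
            return new BucketThresholds(requiredSize, suggested);
        }
        return this; // key order ranks deterministically, so no cushion is needed
    }
}
```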
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java
index edd1b42668697..abe4987573cb9 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java
@@ -81,12 +81,7 @@ private Object getComparableData(long bucketOrd) {
             AbstractHyperLogLog.RunLenIterator iterator = getHyperLogLog(bucketOrd);
             while (iterator.next()) {
                 byte runLength = iterator.value();
-                Integer numOccurances = values.get(runLength);
-                if (numOccurances == null) {
-                    values.put(runLength, 1);
-                } else {
-                    values.put(runLength, numOccurances + 1);
-                }
+                values.merge(runLength, 1, Integer::sum);
             }
             return values;
         }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java
index 9b4656ee7cf7e..a77cd495f87db 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java
@@ -121,7 +121,7 @@ public double value(String name) {

     @Override
     public Iterable valueNames() {
-        return Arrays.stream(getKeys()).mapToObj(d -> String.valueOf(d)).toList();
+        return Arrays.stream(getKeys()).mapToObj(String::valueOf).toList();
     }

     public DocValueFormat formatter() {
@@ -210,9 +210,9 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th
         DoubleHistogram state = getState();
         if (keyed) {
             builder.startObject(CommonFields.VALUES.getPreferredName());
-            for (int i = 0; i < keys.length; ++i) {
-                String key = String.valueOf(keys[i]);
-                double value = value(keys[i]);
+            for (double v : keys) {
+                String key = String.valueOf(v);
+                double value = value(v);
                 builder.field(key, state.getTotalCount() == 0 ? null : value);
                 if (format != DocValueFormat.RAW && state.getTotalCount() > 0) {
                     builder.field(key + "_as_string", format.format(value).toString());
@@ -221,10 +221,10 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th
             builder.endObject();
         } else {
             builder.startArray(CommonFields.VALUES.getPreferredName());
-            for (int i = 0; i < keys.length; i++) {
-                double value = value(keys[i]);
+            for (double key : keys) {
+                double value = value(key);
                 builder.startObject();
-                builder.field(CommonFields.KEY.getPreferredName(), keys[i]);
+                builder.field(CommonFields.KEY.getPreferredName(), key);
                 builder.field(CommonFields.VALUE.getPreferredName(), state.getTotalCount() == 0 ? null : value);
                 if (format != DocValueFormat.RAW && state.getTotalCount() > 0) {
                     builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value).toString());
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java
index 08588473c61d1..3ae609689ed7a 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java
@@ -101,7 +101,7 @@ public double value(String name) {

     @Override
     public Iterable valueNames() {
-        return Arrays.stream(getKeys()).mapToObj(d -> String.valueOf(d)).toList();
+        return Arrays.stream(getKeys()).mapToObj(String::valueOf).toList();
     }

     public abstract double value(double key);
@@ -188,9 +188,9 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th
         TDigestState state = getState();
         if (keyed) {
             builder.startObject(CommonFields.VALUES.getPreferredName());
-            for (int i = 0; i < keys.length; ++i) {
-                String key = String.valueOf(keys[i]);
-                double value = value(keys[i]);
+            for (double v : keys) {
+                String key = String.valueOf(v);
+                double value = value(v);
                 builder.field(key, state.size() == 0 ? null : value);
                 if (format != DocValueFormat.RAW && state.size() > 0) {
                     builder.field(key + "_as_string", format.format(value).toString());
@@ -199,10 +199,10 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th
             builder.endObject();
         } else {
             builder.startArray(CommonFields.VALUES.getPreferredName());
-            for (int i = 0; i < keys.length; i++) {
-                double value = value(keys[i]);
+            for (double key : keys) {
+                double value = value(key);
                 builder.startObject();
-                builder.field(CommonFields.KEY.getPreferredName(), keys[i]);
+                builder.field(CommonFields.KEY.getPreferredName(), key);
                 builder.field(CommonFields.VALUE.getPreferredName(), state.size() == 0 ? null : value);
                 if (format != DocValueFormat.RAW && state.size() > 0) {
                     builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value).toString());
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorFactory.java
index f27efafaf64cf..bbeebf858073a 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorFactory.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorFactory.java
@@ -111,7 +111,7 @@ public static ExecutionMode fromString(String value) {
             }
         }

-        boolean isHeuristicBased;
+        final boolean isHeuristicBased;

         ExecutionMode(boolean isHeuristicBased) {
             this.isHeuristicBased = isHeuristicBased;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregator.java
index b469c24175715..cecd75941bcab 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregator.java
@@ -140,20 +140,11 @@ public boolean hasMetric(String name) {
     public double metric(String name, long owningBucketOrd) {
         if (owningBucketOrd >= counts.size()) {
             return switch (InternalExtendedStats.Metrics.resolve(name)) {
-                case count -> 0;
-                case sum -> 0;
+                case count, sum_of_squares, sum -> 0;
                 case min -> Double.POSITIVE_INFINITY;
                 case max -> Double.NEGATIVE_INFINITY;
-                case avg -> Double.NaN;
-                case sum_of_squares -> 0;
-                case variance -> Double.NaN;
-                case variance_population -> Double.NaN;
-                case variance_sampling -> Double.NaN;
-                case std_deviation -> Double.NaN;
-                case std_deviation_population -> Double.NaN;
-                case std_deviation_sampling -> Double.NaN;
-                case std_upper -> Double.NaN;
-                case std_lower -> Double.NaN;
+                case avg, variance, variance_population, variance_sampling, std_deviation, std_deviation_population, std_deviation_sampling,
+                    std_upper, std_lower -> Double.NaN;
                 default -> throw new IllegalArgumentException("Unknown value [" + name + "] in common stats aggregation");
             };
         }
@@ -167,9 +158,8 @@ public double metric(String name, long owningBucketOrd) {
             case variance -> variance(owningBucketOrd);
             case variance_population -> variancePopulation(owningBucketOrd);
             case variance_sampling -> varianceSampling(owningBucketOrd);
-            case std_deviation -> Math.sqrt(variance(owningBucketOrd));
-            case std_deviation_population -> Math.sqrt(variance(owningBucketOrd));
-            case std_deviation_sampling -> Math.sqrt(varianceSampling(owningBucketOrd));
+            case std_deviation, std_deviation_population -> Math.sqrt(variance(owningBucketOrd));
+            case std_deviation_sampling -> Math.sqrt(varianceSampling(owningBucketOrd));
             case std_upper -> (sums.get(owningBucketOrd) / counts.get(owningBucketOrd)) + (Math.sqrt(variance(owningBucketOrd)) * this.sigma);
             case std_lower -> (sums.get(owningBucketOrd) / counts.get(owningBucketOrd)) - (Math.sqrt(variance(owningBucketOrd))
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregator.java
index b44bc69ae68e6..d04a3744df4ff 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregator.java
@@ -54,7 +54,7 @@ public double metric(String name, long bucketOrd) {
         if (state == null) {
             return Double.NaN;
         } else {
-            return InternalHDRPercentileRanks.percentileRank(state, Double.valueOf(name));
+            return InternalHDRPercentileRanks.percentileRank(state, Double.parseDouble(name));
         }
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java
index 08d0907c2a1bd..30225263eb8b4 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java
@@ -244,7 +244,7 @@ private static class HyperLogLogIterator implements AbstractHyperLogLog.RunLenIt
         private final HyperLogLog hll;
         int pos;
-        long start;
+        final long start;
         private byte value;

         HyperLogLogIterator(HyperLogLog hll, long bucket) {
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java
index 18b1f44ce5d7f..77cb482edd8b4 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java
@@ -98,7 +98,6 @@ int getSize() {

     @Override
     public InternalAggregation reduce(List aggregations, AggregationReduceContext reduceContext) {
-        final SearchHits[] shardHits = new SearchHits[aggregations.size()];
         final int from;
         final int size;
         if (reduceContext.isFinalReduce()) {
@@ -113,65 +112,66 @@ public InternalAggregation reduce(List aggregations, Aggreg

         final TopDocs reducedTopDocs;
         final TopDocs[] shardDocs;
-
-        if (topDocs.topDocs instanceof TopFieldDocs) {
-            Sort sort = new Sort(((TopFieldDocs) topDocs.topDocs).fields);
+        final float maxScore;
+        if (topDocs.topDocs instanceof TopFieldDocs topFieldDocs) {
             shardDocs = new TopFieldDocs[aggregations.size()];
-            for (int i = 0; i < shardDocs.length; i++) {
-                InternalTopHits topHitsAgg = (InternalTopHits) aggregations.get(i);
-                shardDocs[i] = topHitsAgg.topDocs.topDocs;
-                shardHits[i] = topHitsAgg.searchHits;
-                for (ScoreDoc doc : shardDocs[i].scoreDocs) {
-                    doc.shardIndex = i;
-                }
-            }
-            reducedTopDocs = TopDocs.merge(sort, from, size, (TopFieldDocs[]) shardDocs);
+            maxScore = reduceAndFindMaxScore(aggregations, shardDocs);
+            reducedTopDocs = TopDocs.merge(new Sort(topFieldDocs.fields), from, size, (TopFieldDocs[]) shardDocs);
         } else {
             shardDocs = new TopDocs[aggregations.size()];
-            for (int i = 0; i < shardDocs.length; i++) {
-                InternalTopHits topHitsAgg = (InternalTopHits) aggregations.get(i);
-                shardDocs[i] = topHitsAgg.topDocs.topDocs;
-                shardHits[i] = topHitsAgg.searchHits;
-                for (ScoreDoc doc : shardDocs[i].scoreDocs) {
-                    doc.shardIndex = i;
-                }
-            }
+            maxScore = reduceAndFindMaxScore(aggregations, shardDocs);
             reducedTopDocs = TopDocs.merge(from, size, shardDocs);
         }
-
-        float maxScore = Float.NaN;
-        for (InternalAggregation agg : aggregations) {
-            InternalTopHits topHitsAgg = (InternalTopHits) agg;
-            if (Float.isNaN(topHitsAgg.topDocs.maxScore) == false) {
-                if (Float.isNaN(maxScore)) {
-                    maxScore = topHitsAgg.topDocs.maxScore;
-                } else {
-                    maxScore = Math.max(maxScore, topHitsAgg.topDocs.maxScore);
-                }
-            }
-        }
-
-        final int[] tracker = new int[shardHits.length];
-        SearchHit[] hits = new SearchHit[reducedTopDocs.scoreDocs.length];
-        for (int i = 0; i < reducedTopDocs.scoreDocs.length; i++) {
-            ScoreDoc scoreDoc = reducedTopDocs.scoreDocs[i];
-            int position;
-            do {
-                position = tracker[scoreDoc.shardIndex]++;
-            } while (shardDocs[scoreDoc.shardIndex].scoreDocs[position] != scoreDoc);
-            hits[i] = shardHits[scoreDoc.shardIndex].getAt(position);
-        }
         assert reducedTopDocs.totalHits.relation == Relation.EQUAL_TO;
+
         return new InternalTopHits(
             name,
             this.from,
             this.size,
             new TopDocsAndMaxScore(reducedTopDocs, maxScore),
-            new SearchHits(hits, reducedTopDocs.totalHits, maxScore),
+            extractSearchHits(aggregations, reducedTopDocs, shardDocs, maxScore),
             getMetadata()
         );
     }

+    private static SearchHits extractSearchHits(
+        List aggregations,
+        TopDocs reducedTopDocs,
+        TopDocs[] shardDocs,
+        float maxScore
+    ) {
+        final int[] tracker = new int[aggregations.size()];
+        ScoreDoc[] scoreDocs = reducedTopDocs.scoreDocs;
+        SearchHit[] hits = new SearchHit[scoreDocs.length];
+        for (int i = 0; i < scoreDocs.length; i++) {
+            ScoreDoc scoreDoc = scoreDocs[i];
+            int shardIndex = scoreDoc.shardIndex;
+            TopDocs topDocsForShard = shardDocs[shardIndex];
+            int position;
+            do {
+                position = tracker[shardIndex]++;
+            } while (topDocsForShard.scoreDocs[position] != scoreDoc);
+            hits[i] = ((InternalTopHits) aggregations.get(shardIndex)).searchHits.getAt(position);
+        }
+        return new SearchHits(hits, reducedTopDocs.totalHits, maxScore);
+    }
+
+    private static float reduceAndFindMaxScore(List aggregations, TopDocs[] shardDocs) {
+        float maxScore = Float.NaN;
+        for (int i = 0; i < shardDocs.length; i++) {
+            InternalTopHits topHitsAgg = (InternalTopHits) aggregations.get(i);
+            shardDocs[i] = topHitsAgg.topDocs.topDocs;
+            for (ScoreDoc doc : shardDocs[i].scoreDocs) {
+                doc.shardIndex = i;
+            }
+            final float max = topHitsAgg.topDocs.maxScore;
+            if (Float.isNaN(max) == false) {
+                maxScore = Float.isNaN(maxScore) ? max : Math.max(maxScore, max);
+            }
+        }
+        return maxScore;
+    }
+
     @Override
     public InternalAggregation finalizeSampling(SamplingContext samplingContext) {
         return this;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedGeoBounds.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedGeoBounds.java
index fed48ec7640e3..24f68d87802bf 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedGeoBounds.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedGeoBounds.java
@@ -80,11 +80,7 @@ public GeoPoint bottomRight() {
     static {
         declareAggregationFields(PARSER);
-        PARSER.declareObject(
-            (agg, bbox) -> { agg.geoBoundingBox = new GeoBoundingBox(bbox.v1(), bbox.v2()); },
-            BOUNDS_PARSER,
-            BOUNDS_FIELD
-        );
+        PARSER.declareObject((agg, bbox) -> agg.geoBoundingBox = new GeoBoundingBox(bbox.v1(), bbox.v2()), BOUNDS_PARSER, BOUNDS_FIELD);
         BOUNDS_PARSER.declareObject(constructorArg(), GEO_POINT_PARSER, TOP_LEFT_FIELD);
         BOUNDS_PARSER.declareObject(constructorArg(), GEO_POINT_PARSER, BOTTOM_RIGHT_FIELD);
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedHDRPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedHDRPercentiles.java
index 59deb90c7e5a2..2ee56bf648dcc 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedHDRPercentiles.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedHDRPercentiles.java
@@ -52,6 +52,6 @@ public double value(String name) {

     @Override
     public Iterable valueNames() {
-        return percentiles.keySet().stream().map(d -> d.toString()).toList();
+        return percentiles.keySet().stream().map(Object::toString).toList();
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentileRanks.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentileRanks.java
index 0bf317c36be16..44ecf5cf69b4c 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentileRanks.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentileRanks.java
@@ -31,7 +31,7 @@ public double value(String name) {

     @Override
     public Iterable valueNames() {
-        return percentiles.keySet().stream().map(d -> d.toString()).toList();
+        return percentiles.keySet().stream().map(Object::toString).toList();
     }

     @Override
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentiles.java
index d1b0f03904ef9..3af30aa16f094 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentiles.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentiles.java
@@ -111,7 +111,7 @@ protected static void declarePercentilesFields(ObjectParser
                 if (i > 0) {
-                    double key = Double.valueOf(parser.currentName().substring(0, i));
+                    double key = Double.parseDouble(parser.currentName().substring(0, i));
                     aggregation.addPercentileAsString(key, parser.text());
                 } else {
                     aggregation.addPercentile(Double.valueOf(parser.currentName()), Double.valueOf(parser.text()));
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedTDigestPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedTDigestPercentiles.java
index 78001e3c65534..b5ab17ba335c3 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedTDigestPercentiles.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedTDigestPercentiles.java
@@ -37,7 +37,7 @@ public double value(String name) {

     @Override
     public Iterable valueNames() {
-        return percentiles.keySet().stream().map(d -> d.toString()).toList();
+        return percentiles.keySet().stream().map(Object::toString).toList();
     }

     private static final ObjectParser PARSER = new ObjectParser<>(
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java
index 0596af8cbb51d..487cc2bd11bd3 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java
@@ -284,9 +284,4 @@ public boolean equals(Object obj) {
             && Objects.equals(reduceScript, other.reduceScript)
             && Objects.equals(params, other.params);
     }
-
-    @Override
-    public boolean supportsParallelCollection() {
-        return false;
-    }
 }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java
index 5290aac3e055d..7e749b06442f6 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java
@@ -158,7 +158,6 @@ public void doClose() {

     private class State {
         private final ScriptedMetricAggContexts.MapScript.LeafFactory mapScript;
-        private final Map mapScriptParamsForState;
         private final Map combineScriptParamsForState;
         private final Map aggState;
         private MapScript leafMapScript;
@@ -166,7 +165,7 @@ private class State {
         State() {
             // Its possible for building the initial state to mutate the parameters as a side effect
             Map aggParamsForState = ScriptedMetricAggregatorFactory.deepCopyParams(aggParams);
-            mapScriptParamsForState = ScriptedMetricAggregatorFactory.mergeParams(aggParamsForState, mapScriptParams);
+            Map mapScriptParamsForState = ScriptedMetricAggregatorFactory.mergeParams(aggParamsForState, mapScriptParams);
             combineScriptParamsForState = ScriptedMetricAggregatorFactory.mergeParams(aggParamsForState, combineScriptParams);
             aggState = newInitialState(ScriptedMetricAggregatorFactory.mergeParams(aggParamsForState, initScriptParams));
             mapScript = mapScriptFactory.newFactory(
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregator.java
index 02a8325abe7b8..7a5861eb97fe2 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregator.java
@@ -112,8 +112,7 @@ public boolean hasMetric(String name) {
     public double metric(String name, long owningBucketOrd) {
         if (owningBucketOrd >= counts.size()) {
             return switch (InternalStats.Metrics.resolve(name)) {
-                case count -> 0;
-                case sum -> 0;
+                case count, sum -> 0;
                 case min -> Double.POSITIVE_INFINITY;
                 case max -> Double.NEGATIVE_INFINITY;
                 case avg -> Double.NaN;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregator.java
index 71082d7abc29c..8328f25a5cab0 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregator.java
@@ -54,7 +54,7 @@ public double metric(String name, long bucketOrd) {
         if (state == null) {
             return Double.NaN;
         } else {
-            return InternalTDigestPercentileRanks.percentileRank(state, Double.valueOf(name));
+            return InternalTDigestPercentileRanks.percentileRank(state, Double.parseDouble(name));
         }
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java
index d7113fc6ec798..00db45e2d06b4 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java
@@ -272,9 +272,7 @@ public TopHitsAggregationBuilder sorts(List> sorts) {
         if (this.sorts == null) {
             this.sorts = new ArrayList<>();
         }
-        for (SortBuilder sort : sorts) {
-            this.sorts.add(sort);
-        }
+        this.sorts.addAll(sorts);
         return this;
     }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java
index 55cd1efa40e0d..75f5c472c6665 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java
@@ -191,8 +191,7 @@ public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOE
             for (int i = 0; i < topDocs.scoreDocs.length; i++) {
                 docIdsToLoad[i] = topDocs.scoreDocs[i].doc;
             }
-            subSearchContext.docIdsToLoad(docIdsToLoad);
-            subSearchContext.fetchPhase().execute(subSearchContext);
+            subSearchContext.fetchPhase().execute(subSearchContext, docIdsToLoad);
             FetchSearchResult fetchResult = subSearchContext.fetchResult();
             if (fetchProfiles != null) {
                 fetchProfiles.add(fetchResult.profileResult());
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalPercentilesBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalPercentilesBucket.java
index bef9b64c6e95b..c763ea5cf2bd3 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalPercentilesBucket.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalPercentilesBucket.java
@@ -126,7 +126,7 @@ public double value(String name) {

     @Override
     public Iterable valueNames() {
-        return Arrays.stream(percents).mapToObj(d -> String.valueOf(d)).toList();
+        return Arrays.stream(percents).mapToObj(String::valueOf).toList();
     }

     @Override
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java
index c174dd5458685..c31acfcdd20f2 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java
@@ -37,7 +37,7 @@ public class MovAvgPipelineAggregationBuilder extends AbstractPipelineAggregatio
     public static final String MOVING_AVG_AGG_DEPRECATION_MSG = "Moving Average aggregation usage is not supported. "
         + "Use the [moving_fn] aggregation instead.";

-    public static ParseField NAME_V7 = new ParseField("moving_avg").withAllDeprecated(MOVING_AVG_AGG_DEPRECATION_MSG)
+    public static final ParseField NAME_V7 = new ParseField("moving_avg").withAllDeprecated(MOVING_AVG_AGG_DEPRECATION_MSG)
         .forRestApiVersion(RestApiVersion.equalTo(RestApiVersion.V_7));

     public static final ContextParser PARSER = (parser, name) -> {
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovingFunctions.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovingFunctions.java
index 0b982f8f2e586..53bf09329c57b 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovingFunctions.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovingFunctions.java
@@ -174,7 +174,7 @@ public static double holt(double[] values, double alpha, double beta) {

         int counter = 0;

-        Double last;
+        double last;
         for (double v : values) {
             if (Double.isNaN(v) == false) {
                 last = v;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedPercentilesBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedPercentilesBucket.java
index f9e037247bf2c..7da76d2d4c2eb 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedPercentilesBucket.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedPercentilesBucket.java
@@ -57,7 +57,7 @@ public double value(String name) {

     @Override
     public Iterable valueNames() {
-        return percentiles.keySet().stream().map(d -> d.toString()).toList();
+        return percentiles.keySet().stream().map(Object::toString).toList();
     }

     @Override
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregator.java
index 4c2f9a825c1fa..9bd27a9931bd0 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregator.java
@@ -92,9 +92,9 @@ public String toString() {
         }
     }

-    private String name;
-    private String[] bucketsPaths;
-    private Map metadata;
+    private final String name;
+    private final String[] bucketsPaths;
+    private final Map metadata;

     protected PipelineAggregator(String name, String[] bucketsPaths, Map metadata) {
         this.name = name;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregator.java
index 500c107065520..7225d7652b3b8 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregator.java
@@ -28,9 +28,9 @@
 import static org.elasticsearch.search.aggregations.pipeline.BucketHelpers.resolveBucketValue;

 public class SerialDiffPipelineAggregator extends PipelineAggregator {
-    private DocValueFormat formatter;
-    private GapPolicy gapPolicy;
-    private int lag;
+    private final DocValueFormat formatter;
+    private final GapPolicy gapPolicy;
+    private final int lag;

     SerialDiffPipelineAggregator(
         String name,
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/CoreValuesSourceType.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/CoreValuesSourceType.java
index 35b8230a48554..24cceabf2388d 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/support/CoreValuesSourceType.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/CoreValuesSourceType.java
@@ -338,14 +338,10 @@ public Function roundingPreparer(AggregationContext
             @Override
             public QueryVisitor getSubVisitor(BooleanClause.Occur occur, Query parent) {
                 // Only extract bounds queries that must filter the results
-                switch (occur) {
-                    case MUST:
-                    case FILTER:
-                        return this;
-
-                    default:
-                        return QueryVisitor.EMPTY_VISITOR;
-                }
+                return switch (occur) {
+                    case MUST, FILTER -> this;
+                    default -> QueryVisitor.EMPTY_VISITOR;
+                };
             };

             @Override
@@ -450,5 +446,5 @@ public String typeName() {
     }

     /** List containing all members of the enumeration. */
-    public static List ALL_CORE = Arrays.asList(CoreValuesSourceType.values());
+    public static final List ALL_CORE = Arrays.asList(CoreValuesSourceType.values());
 }
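The next hunk turns FieldContext into a record, so a quick note on the pattern it applies: the record header replaces the fields, canonical constructor, and accessors; a compact constructor survives as a javadoc and validation hook; and only accessors that carry extra documentation or behavior need to stay explicit. In miniature, with hypothetical types rather than the Elasticsearch ones:

```java
import java.util.Objects;

// Before: a final class with two fields, a constructor, and two getters.
// After: this record; the field() and typeName() accessors are generated.
record FieldHolder(String field, String typeName) {

    // Compact canonical constructor: runs before the implicit field assignments,
    // which makes it the natural home for argument validation.
    FieldHolder {
        Objects.requireNonNull(field, "field");
    }

    // Derived behavior sits alongside the generated accessors unchanged.
    boolean isKeyword() {
        return "keyword".equals(typeName);
    }
}
```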
*/ -public class FieldContext { - - private final String field; - private final IndexFieldData indexFieldData; - private final MappedFieldType fieldType; +public record FieldContext(String field, IndexFieldData indexFieldData, MappedFieldType fieldType) { /** * Constructs a field data context for the given field and its index field data * - * @param field The name of the field - * @param indexFieldData The index field data of the field + * @param field The name of the field + * @param indexFieldData The index field data of the field */ - public FieldContext(String field, IndexFieldData indexFieldData, MappedFieldType fieldType) { - this.field = field; - this.indexFieldData = indexFieldData; - this.fieldType = fieldType; - } - - public String field() { - return field; - } + public FieldContext {} /** * @return The index field datas in this context */ + @Override public IndexFieldData indexFieldData() { return indexFieldData; } - public MappedFieldType fieldType() { - return fieldType; - } - public String getTypeName() { return fieldType.typeName(); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java index 4472083060d6e..7e0c235ee4fb3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java @@ -96,7 +96,6 @@ protected MultiValuesSourceAggregationBuilder(StreamInput in) throws IOException /** * Read from a stream. */ - @SuppressWarnings("unchecked") private void read(StreamInput in) throws IOException { fields = in.readMap(MultiValuesSourceFieldConfig::new); userValueTypeHint = in.readOptionalWriteable(ValueType::readFromStream); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/SamplingContext.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/SamplingContext.java index c1681a2070078..57ea138f63268 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/SamplingContext.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/SamplingContext.java @@ -21,7 +21,7 @@ * This provides information around the current sampling context for aggregations */ public record SamplingContext(double probability, int seed) { - public static SamplingContext NONE = new SamplingContext(1.0, 0); + public static final SamplingContext NONE = new SamplingContext(1.0, 0); public boolean isSampled() { return probability < 1.0; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java index bc83a5b5cd3b1..91bc2d12ac575 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java @@ -69,7 +69,7 @@ public ValuesSourceType getValuesSourceType() { return valuesSourceType; } - private static Set numericValueTypes = Set.of( + private static final Set numericValueTypes = Set.of( ValueType.DOUBLE, ValueType.DATE, ValueType.LONG, @@ -77,7 +77,7 @@ public ValuesSourceType getValuesSourceType() { ValueType.NUMERIC, ValueType.BOOLEAN ); - private static Set stringValueTypes = Set.of(ValueType.STRING, ValueType.IP); + private static final Set 
stringValueTypes = Set.of(ValueType.STRING, ValueType.IP); /** * This is a bit of a hack to mirror the old {@link ValueType} behavior, which would allow a rough compatibility between types. This diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java index 85788c1964b40..af75a8495afba 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java @@ -182,7 +182,6 @@ private static ValuesSourceConfig internalResolve( aggregationScript, scriptValueType, missing, - timeZone, docValueFormat, context::nowInMillis ); @@ -258,14 +257,14 @@ private static DocValueFormat resolveFormat( public static ValuesSourceConfig resolveFieldOnly(MappedFieldType fieldType, AggregationContext context) { FieldContext fieldContext = context.buildFieldContext(fieldType); ValuesSourceType vstype = fieldContext.indexFieldData().getValuesSourceType(); - return new ValuesSourceConfig(vstype, fieldContext, false, null, null, null, null, null, context::nowInMillis); + return new ValuesSourceConfig(vstype, fieldContext, false, null, null, null, null, context::nowInMillis); } /** * Convenience method for creating unmapped configs */ public static ValuesSourceConfig resolveUnmapped(ValuesSourceType valuesSourceType, AggregationContext context) { - return new ValuesSourceConfig(valuesSourceType, null, true, null, null, null, null, null, context::nowInMillis); + return new ValuesSourceConfig(valuesSourceType, null, true, null, null, null, null, context::nowInMillis); } private final ValuesSourceType valuesSourceType; @@ -275,7 +274,6 @@ public static ValuesSourceConfig resolveUnmapped(ValuesSourceType valuesSourceTy private final boolean unmapped; private final DocValueFormat format; private final Object missing; - private final ZoneId timeZone; private final ValuesSource valuesSource; @SuppressWarnings("this-escape") @@ -286,7 +284,6 @@ public ValuesSourceConfig( AggregationScript.LeafFactory script, ValueType scriptValueType, Object missing, - ZoneId timeZone, DocValueFormat format, LongSupplier nowInMillis ) { @@ -299,7 +296,6 @@ public ValuesSourceConfig( this.script = script; this.scriptValueType = scriptValueType; this.missing = missing; - this.timeZone = timeZone; this.format = format == null ? 
DocValueFormat.RAW : format; if (valid() == false) { @@ -383,10 +379,6 @@ public Object missing() { return this.missing; } - public ZoneId timezone() { - return this.timeZone; - } - public DocValueFormat format() { return format; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceRegistry.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceRegistry.java index c33ad5266d4e2..44e66d98f0258 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceRegistry.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceRegistry.java @@ -53,12 +53,9 @@ public int hashCode() { } } - @SuppressWarnings("rawtypes") - public static final RegistryKey UNREGISTERED_KEY = new RegistryKey<>("unregistered", RegistryKey.class); - public static class Builder { private final AggregationUsageService.Builder usageServiceBuilder; - private Map, List>> aggregatorRegistry = new HashMap<>(); + private final Map, List>> aggregatorRegistry = new HashMap<>(); public Builder() { this.usageServiceBuilder = new AggregationUsageService.Builder(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java index 0e122162e5e87..32f84612fb887 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java @@ -81,7 +81,7 @@ private static double toDoubleValue(Object o) { // that scripts return the same internal representation as regular fields, so boolean // values in scripts need to be converted to a number, and the value formatter will // make sure of using true/false in the key_as_string field - return ((Boolean) o).booleanValue() ? 1.0 : 0.0; + return (Boolean) o ? 1.0 : 0.0; } else { throw AggregationErrors.unsupportedScriptValue(o == null ? "null" : o.toString()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java index f702be71c49f3..66a8513e7c118 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java @@ -83,7 +83,7 @@ private static long toLongValue(Object o) { // that scripts return the same internal representation as regular fields, so boolean // values in scripts need to be converted to a number, and the value formatter will // make sure of using true/false in the key_as_string field - return ((Boolean) o).booleanValue() ? 1L : 0L; + return (Boolean) o ? 1L : 0L; } else { throw AggregationErrors.unsupportedScriptValue(o == null ? 
"null" : o.toString()); } diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 40d46a71405dd..069aa6ff41ae1 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -63,6 +63,7 @@ import java.util.Map; import java.util.Objects; import java.util.function.Consumer; +import java.util.function.ToLongFunction; import static java.util.Collections.emptyMap; import static org.elasticsearch.index.query.AbstractQueryBuilder.parseTopLevelQuery; @@ -2103,7 +2104,7 @@ public String toString(Params params) { } } - public boolean supportsParallelCollection() { + public boolean supportsParallelCollection(ToLongFunction fieldCardinality) { if (profile) return false; if (sorts != null) { @@ -2113,6 +2114,6 @@ public boolean supportsParallelCollection() { } } - return collapse == null && (aggregations == null || aggregations.supportsParallelCollection()); + return collapse == null && (aggregations == null || aggregations.supportsParallelCollection(fieldCardinality)); } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 91ac7356a9670..5c98808c9c169 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -56,7 +56,7 @@ public FetchPhase(List fetchSubPhases) { this.fetchSubPhases[fetchSubPhases.size()] = new InnerHitsPhase(this); } - public void execute(SearchContext context) { + public void execute(SearchContext context, int[] docIdsToLoad) { if (LOGGER.isTraceEnabled()) { LOGGER.trace("{}", new SearchContextSourcePrinter(context)); } @@ -65,7 +65,7 @@ public void execute(SearchContext context) { throw new TaskCancelledException("cancelled"); } - if (context.docIdsToLoad() == null || context.docIdsToLoad().length == 0) { + if (docIdsToLoad == null || docIdsToLoad.length == 0) { // no individual hits to process, so we shortcut SearchHits hits = new SearchHits(new SearchHit[0], context.queryResult().getTotalHits(), context.queryResult().getMaxScore()); context.fetchResult().shardResult(hits, null); @@ -75,7 +75,7 @@ public void execute(SearchContext context) { Profiler profiler = context.getProfilers() == null ? 
Profiler.NOOP : Profilers.startProfilingFetchPhase(); SearchHits hits = null; try { - hits = buildSearchHits(context, profiler); + hits = buildSearchHits(context, docIdsToLoad, profiler); } finally { // Always finish profiling ProfileResult profileResult = profiler.finish(); @@ -96,7 +96,7 @@ public Source getSource(LeafReaderContext ctx, int doc) { } } - private SearchHits buildSearchHits(SearchContext context, Profiler profiler) { + private SearchHits buildSearchHits(SearchContext context, int[] docIdsToLoad, Profiler profiler) { FetchContext fetchContext = new FetchContext(context); SourceLoader sourceLoader = context.newSourceLoader(); @@ -166,7 +166,7 @@ protected SearchHit nextDoc(int doc) throws IOException { } }; - SearchHit[] hits = docsIterator.iterate(context.shardTarget(), context.searcher().getIndexReader(), context.docIdsToLoad()); + SearchHit[] hits = docsIterator.iterate(context.shardTarget(), context.searcher().getIndexReader(), docIdsToLoad); if (context.isCancelled()) { throw new TaskCancelledException("cancelled"); diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java index c25c3575a8c4b..725b723b5155f 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java @@ -11,12 +11,15 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.profile.ProfileResult; +import org.elasticsearch.transport.LeakTracker; import java.io.IOException; @@ -28,6 +31,8 @@ public final class FetchSearchResult extends SearchPhaseResult { private ProfileResult profileResult; + private final RefCounted refCounted = LeakTracker.wrap(AbstractRefCounted.of(() -> hits = null)); + public FetchSearchResult() {} public FetchSearchResult(ShardSearchContextId id, SearchShardTarget shardTarget) { @@ -90,4 +95,24 @@ public int counterGetAndIncrement() { public ProfileResult profileResult() { return profileResult; } + + @Override + public void incRef() { + refCounted.incRef(); + } + + @Override + public boolean tryIncRef() { + return refCounted.tryIncRef(); + } + + @Override + public boolean decRef() { + return refCounted.decRef(); + } + + @Override + public boolean hasReferences() { + return refCounted.hasReferences(); + } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java b/server/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java index 78d6882472ebd..bb838c29ff54c 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java @@ -16,6 +16,7 @@ import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.transport.LeakTracker; import java.io.IOException; @@ -25,27 +26,25 @@ public final class 
QueryFetchSearchResult extends SearchPhaseResult { private final FetchSearchResult fetchResult; private final RefCounted refCounted; + public static QueryFetchSearchResult of(QuerySearchResult queryResult, FetchSearchResult fetchResult) { + // We're acquiring a copy, we should incRef it + queryResult.incRef(); + fetchResult.incRef(); + return new QueryFetchSearchResult(queryResult, fetchResult); + } + public QueryFetchSearchResult(StreamInput in) throws IOException { - super(in); // These get a ref count of 1 when we create them, so we don't need to incRef here - queryResult = new QuerySearchResult(in); - fetchResult = new FetchSearchResult(in); - refCounted = AbstractRefCounted.of(() -> { - queryResult.decRef(); - fetchResult.decRef(); - }); + this(new QuerySearchResult(in), new FetchSearchResult(in)); } - public QueryFetchSearchResult(QuerySearchResult queryResult, FetchSearchResult fetchResult) { + private QueryFetchSearchResult(QuerySearchResult queryResult, FetchSearchResult fetchResult) { this.queryResult = queryResult; this.fetchResult = fetchResult; - // We're acquiring a copy, we should incRef it - this.queryResult.incRef(); - this.fetchResult.incRef(); - refCounted = AbstractRefCounted.of(() -> { + refCounted = LeakTracker.wrap(AbstractRefCounted.of(() -> { queryResult.decRef(); fetchResult.decRef(); - }); + })); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java b/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java index 9ce93a825f849..86f6db0b681d7 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java @@ -37,7 +37,8 @@ public class ShardFetchRequest extends TransportRequest { private final int[] docIds; - private ScoreDoc lastEmittedDoc; + @Nullable + private final ScoreDoc lastEmittedDoc; public ShardFetchRequest(ShardSearchContextId contextId, List docIds, ScoreDoc lastEmittedDoc) { this.contextId = contextId; @@ -60,6 +61,8 @@ public ShardFetchRequest(StreamInput in) throws IOException { lastEmittedDoc = Lucene.readScoreDoc(in); } else if (flag != 0) { throw new IOException("Unknown flag: " + flag); + } else { + lastEmittedDoc = null; } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsSpec.java b/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsSpec.java index 48aea98887ff0..87cbf9b1d6b85 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsSpec.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsSpec.java @@ -38,6 +38,9 @@ public boolean noRequirements() { * Combine these stored field requirements with those from another StoredFieldsSpec */ public StoredFieldsSpec merge(StoredFieldsSpec other) { + if (this == other) { + return this; + } Set mergedFields = new HashSet<>(this.requiredStoredFields); mergedFields.addAll(other.requiredStoredFields); return new StoredFieldsSpec( diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java index 44e9a2a6e5193..feb0547a32536 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java @@ -89,11 +89,10 @@ private void hitExecute(Map innerHi for (int j = 0; j < topDoc.topDocs.scoreDocs.length; j++) { docIdsToLoad[j] = 
topDoc.topDocs.scoreDocs[j].doc; } - innerHitsContext.docIdsToLoad(docIdsToLoad); innerHitsContext.setRootId(hit.getId()); innerHitsContext.setRootLookup(rootSource); - fetchPhase.execute(innerHitsContext); + fetchPhase.execute(innerHitsContext, docIdsToLoad); FetchSearchResult fetchResult = innerHitsContext.fetchResult(); SearchHit[] internalHits = fetchResult.fetchResult().hits().getHits(); for (int j = 0; j < internalHits.length; j++) { diff --git a/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index 8bd91c9b9cfe7..c02a959231a61 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -359,16 +359,6 @@ public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { in.seqNoAndPrimaryTerm(seqNoAndPrimaryTerm); } - @Override - public int[] docIdsToLoad() { - return in.docIdsToLoad(); - } - - @Override - public SearchContext docIdsToLoad(int[] docIdsToLoad) { - return in.docIdsToLoad(docIdsToLoad); - } - @Override public DfsSearchResult dfsResult() { return in.dfsResult(); diff --git a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java index ef67d3d19e42f..512df4d15dcb0 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -12,6 +12,7 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.search.SearchShardTask; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.core.Assertions; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -44,6 +45,7 @@ import org.elasticsearch.search.rescore.RescoreContext; import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.search.suggest.SuggestionSearchContext; +import org.elasticsearch.transport.LeakTracker; import java.io.IOException; import java.util.HashMap; @@ -66,7 +68,14 @@ public abstract class SearchContext implements Releasable { public static final int DEFAULT_TRACK_TOTAL_HITS_UP_TO = 10000; protected final List releasables = new CopyOnWriteArrayList<>(); + private final AtomicBoolean closed = new AtomicBoolean(false); + + { + if (Assertions.ENABLED) { + releasables.add(LeakTracker.wrap(() -> { assert closed.get(); })); + } + } private InnerHitsContext innerHitsContext; private Query rewriteQuery; @@ -313,10 +322,6 @@ public Query rewrittenQuery() { /** controls whether the sequence number and primary term of the last modification to each hit should be returned */ public abstract void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm); - public abstract int[] docIdsToLoad(); - - public abstract SearchContext docIdsToLoad(int[] docIdsToLoad); - public abstract DfsSearchResult dfsResult(); /** diff --git a/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java index 8b4824e42cbf4..8567677aca30a 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java @@ -41,8 +41,6 @@ public class SubSearchContext extends FilteredSearchContext { private 
final FetchSearchResult fetchSearchResult; private final QuerySearchResult querySearchResult; - private int[] docIdsToLoad; - private StoredFieldsContext storedFields; private ScriptFieldsContext scriptFields; private FetchSourceContext fetchSourceContext; @@ -55,9 +53,12 @@ public class SubSearchContext extends FilteredSearchContext { private boolean version; private boolean seqNoAndPrimaryTerm; + @SuppressWarnings("this-escape") public SubSearchContext(SearchContext context) { super(context); + context.addReleasable(this); this.fetchSearchResult = new FetchSearchResult(); + addReleasable(fetchSearchResult::decRef); this.querySearchResult = new QuerySearchResult(); } @@ -274,17 +275,6 @@ public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { this.seqNoAndPrimaryTerm = seqNoAndPrimaryTerm; } - @Override - public int[] docIdsToLoad() { - return docIdsToLoad; - } - - @Override - public SearchContext docIdsToLoad(int[] docIdsToLoad) { - this.docIdsToLoad = docIdsToLoad; - return this; - } - @Override public CollapseContext collapse() { return null; diff --git a/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java b/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java index 06f71fbf2514d..f88441b32d08b 100644 --- a/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java +++ b/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java @@ -37,8 +37,8 @@ public class SearchLookup implements SourceProvider { * The chain of fields for which this lookup was created, used for detecting * loops caused by runtime fields referring to other runtime fields. The chain is empty * for the "top level" lookup created for the entire search. When a lookup is used to load - * fielddata for a field, we fork it and make sure the field name name isn't in the chain, - * then add it to the end. So the lookup for the a field named {@code a} will be {@code ["a"]}. If + * fielddata for a field, we fork it and make sure the field name isn't in the chain, + * then add it to the end. So the lookup for a field named {@code a} will be {@code ["a"]}. If * that field looks up the values of a field named {@code b} then * {@code b}'s chain will contain {@code ["a", "b"]}. 
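 * (Editor's illustration, not part of the patch: with this chain, a runtime field that ultimately referred back to itself, e.g. loading {@code a} again while the chain is already {@code ["a", "b"]}, is detected as a loop and rejected instead of recursing forever.)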
*/ diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java index 3044d15ab8552..01015ec8cc78e 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -94,13 +94,14 @@ static void executeRank(SearchContext searchContext) throws QueryPhaseExecutionE if (searchTimedOut) { break; } - RankSearchContext rankSearchContext = new RankSearchContext(searchContext, rankQuery, rankShardContext.windowSize()); - QueryPhase.addCollectorsAndSearch(rankSearchContext); - QuerySearchResult rrfQuerySearchResult = rankSearchContext.queryResult(); - rrfRankResults.add(rrfQuerySearchResult.topDocs().topDocs); - serviceTimeEWMA += rrfQuerySearchResult.serviceTimeEWMA(); - nodeQueueSize = Math.max(nodeQueueSize, rrfQuerySearchResult.nodeQueueSize()); - searchTimedOut = rrfQuerySearchResult.searchTimedOut(); + try (RankSearchContext rankSearchContext = new RankSearchContext(searchContext, rankQuery, rankShardContext.windowSize())) { + QueryPhase.addCollectorsAndSearch(rankSearchContext); + QuerySearchResult rrfQuerySearchResult = rankSearchContext.queryResult(); + rrfRankResults.add(rrfQuerySearchResult.topDocs().topDocs); + serviceTimeEWMA += rrfQuerySearchResult.serviceTimeEWMA(); + nodeQueueSize = Math.max(nodeQueueSize, rrfQuerySearchResult.nodeQueueSize()); + searchTimedOut = rrfQuerySearchResult.searchTimedOut(); + } } querySearchResult.setRankShardResult(rankShardContext.combine(rrfRankResults)); diff --git a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java index edebf602af188..301d7fb219ca7 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java @@ -31,6 +31,7 @@ import org.elasticsearch.search.profile.SearchProfileQueryPhaseResult; import org.elasticsearch.search.rank.RankShardResult; import org.elasticsearch.search.suggest.Suggest; +import org.elasticsearch.transport.LeakTracker; import java.io.IOException; import java.util.ArrayList; @@ -104,8 +105,8 @@ public QuerySearchResult(ShardSearchContextId contextId, SearchShardTarget shard setSearchShardTarget(shardTarget); isNull = false; setShardSearchRequest(shardSearchRequest); - this.refCounted = AbstractRefCounted.of(this::close); this.toRelease = new ArrayList<>(); + this.refCounted = LeakTracker.wrap(AbstractRefCounted.of(() -> Releasables.close(toRelease))); } private QuerySearchResult(boolean isNull) { @@ -245,10 +246,6 @@ public void releaseAggs() { } } - private void close() { - Releasables.close(toRelease); - } - public void addReleasable(Releasable releasable) { toRelease.add(releasable); } diff --git a/server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java b/server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java index 84f04283d64e8..86f7566683d21 100644 --- a/server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java @@ -59,11 +59,13 @@ public class RankSearchContext extends SearchContext { private final int windowSize; private final QuerySearchResult querySearchResult; + @SuppressWarnings("this-escape") public RankSearchContext(SearchContext parent, Query rankQuery, int windowSize) { this.parent = parent; 
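// Editor's sketch (illustrative, not part of the patch): the statement added just below registers
// querySearchResult::decRef as a releasable, so each RankSearchContext must now be closed by its
// caller. That is exactly what the QueryPhase.executeRank change above provides:
//
//     try (RankSearchContext ctx = new RankSearchContext(searchContext, rankQuery, windowSize)) {
//         QueryPhase.addCollectorsAndSearch(ctx);
//         // consume ctx.queryResult() before close() releases its resources
//     }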
this.rankQuery = parent.buildFilteredQuery(rankQuery); this.windowSize = windowSize; this.querySearchResult = new QuerySearchResult(parent.readerContext().id(), parent.shardTarget(), parent.request()); + this.addReleasable(querySearchResult::decRef); } @Override @@ -480,16 +482,6 @@ public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { throw new UnsupportedOperationException(); } - @Override - public int[] docIdsToLoad() { - throw new UnsupportedOperationException(); - } - - @Override - public SearchContext docIdsToLoad(int[] docIdsToLoad) { - throw new UnsupportedOperationException(); - } - @Override public DfsSearchResult dfsResult() { throw new UnsupportedOperationException(); diff --git a/server/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java b/server/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java index 68ee36f5c0883..249f2c95ddc7f 100644 --- a/server/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java @@ -152,24 +152,18 @@ static Object convertValueFromSortField(Object value, SortField sortField, DocVa private static Object convertValueFromSortType(String fieldName, SortField.Type sortType, Object value, DocValueFormat format) { try { switch (sortType) { - case DOC: + case DOC, INT: if (value instanceof Number) { return ((Number) value).intValue(); } return Integer.parseInt(value.toString()); - case SCORE: + case SCORE, FLOAT: if (value instanceof Number) { return ((Number) value).floatValue(); } return Float.parseFloat(value.toString()); - case INT: - if (value instanceof Number) { - return ((Number) value).intValue(); - } - return Integer.parseInt(value.toString()); - case DOUBLE: if (value instanceof Number) { return ((Number) value).doubleValue(); @@ -187,12 +181,6 @@ private static Object convertValueFromSortType(String fieldName, SortField.Type () -> { throw new IllegalStateException("now() is not allowed in [search_after] key"); } ); - case FLOAT: - if (value instanceof Number) { - return ((Number) value).floatValue(); - } - return Float.parseFloat(value.toString()); - case STRING_VAL: case STRING: if (value instanceof BytesRef bytesRef) { diff --git a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index 2dceca2e9ad65..8b07a9e48a660 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -651,14 +651,7 @@ private NumericDoubleValues getNumericDoubleValues(LeafReaderContext context) th final BitSet rootDocs = nested.rootDocs(context); final DocIdSetIterator innerDocs = nested.innerDocs(context); final int maxChildren = nested.getNestedSort() != null ? 
nested.getNestedSort().getMaxChildren() : Integer.MAX_VALUE; - return localSortMode.select( - distanceValues, - Double.POSITIVE_INFINITY, - rootDocs, - innerDocs, - context.reader().maxDoc(), - maxChildren - ); + return localSortMode.select(distanceValues, Double.POSITIVE_INFINITY, rootDocs, innerDocs, maxChildren); } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index a2a4c1bd444a5..61923dcff2d78 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -715,7 +715,9 @@ static DataStream updateDataStream(DataStream dataStream, Metadata.Builder metad dataStream.isSystem(), dataStream.isAllowCustomRouting(), dataStream.getIndexMode(), - dataStream.getLifecycle() + dataStream.getLifecycle(), + dataStream.isFailureStore(), + dataStream.getFailureIndices() ); } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 134e76c57ed4c..cc390618e5ebb 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -249,7 +249,7 @@ private void handleUpdatedSnapshotsInProgressEntry(String localNodeId, Snapshots // due to CS batching we might have missed the INIT state and straight went into ABORTED // notify master that abort has completed by moving to FAILED if (shard.getValue().state() == ShardState.ABORTED && localNodeId.equals(shard.getValue().nodeId())) { - notifyFailedSnapshotShard(snapshot, sid, shard.getValue().reason(), shard.getValue().generation()); + notifyUnsuccessfulSnapshotShard(snapshot, sid, shard.getValue().reason(), shard.getValue().generation()); } } else { snapshotStatus.abortIfNotCompleted("snapshot has been aborted", notifyOnAbortTaskRunner::enqueueTask); @@ -343,7 +343,7 @@ public void onFailure(Exception e) { logger.warn(() -> format("[%s][%s] failed to snapshot shard", shardId, snapshot), e); } snapshotStatus.moveToFailed(threadPool.absoluteTimeInMillis(), failure); - notifyFailedSnapshotShard(snapshot, shardId, failure, snapshotStatus.generation()); + notifyUnsuccessfulSnapshotShard(snapshot, shardId, failure, snapshotStatus.generation()); } }); } @@ -540,7 +540,7 @@ private void syncShardStatsOnNewMaster(List entries) snapshot.snapshot(), shardId ); - notifyFailedSnapshotShard( + notifyUnsuccessfulSnapshotShard( snapshot.snapshot(), shardId, indexShardSnapshotStatus.getFailure(), @@ -554,15 +554,19 @@ private void syncShardStatsOnNewMaster(List entries) } } - /** Notify the master node that the given shard has been successfully snapshotted **/ + /** + * Notify the master node that the given shard snapshot completed successfully. 
+ */ private void notifySuccessfulSnapshotShard(final Snapshot snapshot, final ShardId shardId, ShardSnapshotResult shardSnapshotResult) { assert shardSnapshotResult != null; assert shardSnapshotResult.getGeneration() != null; sendSnapshotShardUpdate(snapshot, shardId, ShardSnapshotStatus.success(clusterService.localNode().getId(), shardSnapshotResult)); } - /** Notify the master node that the given shard failed to be snapshotted **/ - private void notifyFailedSnapshotShard( + /** + * Notify the master node that the given shard snapshot has completed but did not succeed. + */ + private void notifyUnsuccessfulSnapshotShard( final Snapshot snapshot, final ShardId shardId, final String failure, diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index e6b140a3e70b8..499ac7022403e 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -827,23 +827,34 @@ public void applyClusterState(ClusterChangedEvent event) { event.routingTableChanged() && waitingShardsStartedOrUnassigned(snapshotsInProgress, event) ); } else { - if (snapshotCompletionListeners.isEmpty() == false) { - // We have snapshot listeners but are not the master any more. Fail all waiting listeners except for those that already - // have their snapshots finalizing (those that are already finalizing will fail on their own from to update the cluster - // state). - for (Snapshot snapshot : Set.copyOf(snapshotCompletionListeners.keySet())) { + final List<Runnable> readyToResolveListeners = new ArrayList<>(); + // line up mutating concurrent operations, which come in the form of clusterApplierService and masterService tasks, + // with the completion and deletion listeners, see #failAllListenersOnMasterFailOver + synchronized (currentlyFinalizing) { + // We have snapshot listeners but are not the master anymore. Fail all waiting listeners except for those that + // already have their snapshots finalizing (those that are already finalizing will fail on their own when trying to update + // the cluster state).
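// Editor's sketch (illustrative; the mutex, listener and exception names are assumed): the
// collect-then-resolve pattern introduced here queues listener failures while the lock is
// held and runs them only after it is released, so listener callbacks cannot re-enter
// code that needs the same lock:
//
//     final List<Runnable> deferred = new ArrayList<>();
//     synchronized (mutex) {
//         deferred.add(() -> listener.onFailure(exception)); // collect under lock
//     }
//     deferred.forEach(Runnable::run); // resolve outside the mutex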
+ for (final Snapshot snapshot : snapshotCompletionListeners.keySet()) { if (endingSnapshots.add(snapshot)) { - failSnapshotCompletionListeners(snapshot, new SnapshotException(snapshot, "no longer master")); + failSnapshotCompletionListeners( + snapshot, + new SnapshotException(snapshot, "no longer master"), + readyToResolveListeners::add + ); assert endingSnapshots.contains(snapshot) == false : snapshot; } } - } - if (snapshotDeletionListeners.isEmpty() == false) { - final Exception e = new NotMasterException("no longer master"); - for (String delete : Set.copyOf(snapshotDeletionListeners.keySet())) { - failListenersIgnoringException(snapshotDeletionListeners.remove(delete), e); + if (snapshotDeletionListeners.isEmpty() == false) { + final Exception cause = new NotMasterException("no longer master"); + for (final Iterator>> it = snapshotDeletionListeners.values().iterator(); it.hasNext();) { + final List> listeners = it.next(); + readyToResolveListeners.add(() -> failListenersIgnoringException(listeners, cause)); + it.remove(); + } } } + // fail snapshot listeners outside mutex + readyToResolveListeners.forEach(Runnable::run); } } catch (Exception e) { assert false : new AssertionError(e); @@ -918,17 +929,17 @@ private static boolean assertNoDanglingSnapshots(ClusterState state) { * disconnect of a data node that was executing a snapshot) or a routing change that started shards whose snapshot state is * {@link SnapshotsInProgress.ShardState#WAITING}. * - * @param changedNodes true iff either a master fail-over occurred or a data node that was doing snapshot work got removed from the + * @param changedNodes true if either a master fail-over occurred or a data node that was doing snapshot work was removed from the * cluster - * @param startShards true iff any waiting shards were started due to a routing change + * @param changedShards true if any waiting shards changed state in the routing table */ - private void processExternalChanges(boolean changedNodes, boolean startShards) { - if (changedNodes == false && startShards == false) { + private void processExternalChanges(boolean changedNodes, boolean changedShards) { + if (changedNodes == false && changedShards == false) { // nothing to do, no relevant external change happened return; } - final String source = "update snapshot after shards started [" - + startShards + final String source = "update snapshot after shards changed [" + + changedShards + "] or node configuration changed [" + changedNodes + "]"; @@ -1528,7 +1539,8 @@ private void handleFinalizationFailure(Exception e, Snapshot snapshot, Repositor logger.debug(() -> "[" + snapshot + "] failed to update cluster state during snapshot finalization", e); failSnapshotCompletionListeners( snapshot, - new SnapshotException(snapshot, "Failed to update cluster state during snapshot finalization", e) + new SnapshotException(snapshot, "Failed to update cluster state during snapshot finalization", e), + Runnable::run ); failAllListenersOnMasterFailOver(e); } else { @@ -1847,14 +1859,15 @@ public void onFailure(Exception e) { ); failSnapshotCompletionListeners( snapshot, - new SnapshotException(snapshot, "Failed to remove snapshot from cluster state", e) + new SnapshotException(snapshot, "Failed to remove snapshot from cluster state", e), + Runnable::run ); failAllListenersOnMasterFailOver(e); } @Override public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - failSnapshotCompletionListeners(snapshot, failure); + failSnapshotCompletionListeners(snapshot, failure, 
Runnable::run); if (repositoryData != null) { runNextQueuedOperation(repositoryData, snapshot.getRepository(), true); } @@ -1897,8 +1910,9 @@ private static SnapshotDeletionsInProgress deletionsWithoutSnapshots( return changed ? SnapshotDeletionsInProgress.of(updatedEntries) : null; } - private void failSnapshotCompletionListeners(Snapshot snapshot, Exception e) { - failListenersIgnoringException(endAndGetListenersToResolve(snapshot), e); + private void failSnapshotCompletionListeners(Snapshot snapshot, Exception e, Consumer failingListenersConsumer) { + final List> listeners = endAndGetListenersToResolve(snapshot); + failingListenersConsumer.accept(() -> failListenersIgnoringException(listeners, e)); assert repositoryOperations.assertNotQueued(snapshot); } @@ -2093,7 +2107,11 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) logger.info("snapshots {} aborted", completedNoCleanup); } for (Snapshot snapshot : completedNoCleanup) { - failSnapshotCompletionListeners(snapshot, new SnapshotException(snapshot, SnapshotsInProgress.ABORTED_FAILURE_TEXT)); + failSnapshotCompletionListeners( + snapshot, + new SnapshotException(snapshot, SnapshotsInProgress.ABORTED_FAILURE_TEXT), + Runnable::run + ); } if (newDelete == null) { listener.onResponse(null); @@ -2466,17 +2484,22 @@ protected void handleListeners(List> deleteListeners) { */ private void failAllListenersOnMasterFailOver(Exception e) { logger.debug("Failing all snapshot operation listeners because this node is not master any longer", e); + final List readyToResolveListeners = new ArrayList<>(); synchronized (currentlyFinalizing) { if (ExceptionsHelper.unwrap(e, NotMasterException.class, FailedToCommitClusterStateException.class) != null) { repositoryOperations.clear(); - for (Snapshot snapshot : Set.copyOf(snapshotCompletionListeners.keySet())) { - failSnapshotCompletionListeners(snapshot, new SnapshotException(snapshot, "no longer master")); + for (final Snapshot snapshot : snapshotCompletionListeners.keySet()) { + failSnapshotCompletionListeners( + snapshot, + new SnapshotException(snapshot, "no longer master"), + readyToResolveListeners::add + ); } final Exception wrapped = new RepositoryException("_all", "Failed to update cluster state during repository operation", e); - for (Iterator>> iterator = snapshotDeletionListeners.values().iterator(); iterator.hasNext();) { - final List> listeners = iterator.next(); - iterator.remove(); - failListenersIgnoringException(listeners, wrapped); + for (final Iterator>> it = snapshotDeletionListeners.values().iterator(); it.hasNext();) { + final List> listeners = it.next(); + readyToResolveListeners.add(() -> failListenersIgnoringException(listeners, wrapped)); + it.remove(); } assert snapshotDeletionListeners.isEmpty() : "No new listeners should have been added but saw " + snapshotDeletionListeners; } else { @@ -2486,6 +2509,8 @@ private void failAllListenersOnMasterFailOver(Exception e) { } currentlyFinalizing.clear(); } + // fail snapshot listeners outside mutex + readyToResolveListeners.forEach(Runnable::run); } /** @@ -2539,8 +2564,13 @@ protected SnapshotDeletionsInProgress filterDeletions(SnapshotDeletionsInProgres @Override public final void clusterStateProcessed(ClusterState oldState, ClusterState newState) { repositoryOperations.finishDeletion(deleteEntry.uuid()); - final List> deleteListeners = snapshotDeletionListeners.remove(deleteEntry.uuid()); - handleListeners(deleteListeners); + final List readyToResolveListeners = new ArrayList<>(); + synchronized 
(currentlyFinalizing) { + final List> deleteListeners = snapshotDeletionListeners.remove(deleteEntry.uuid()); + readyToResolveListeners.add(() -> handleListeners(deleteListeners)); + } + // resolve listeners outside mutex + readyToResolveListeners.forEach(Runnable::run); if (newFinalizations.isEmpty()) { if (readyDeletions.isEmpty()) { leaveRepoLoop(deleteEntry.repository()); @@ -3546,6 +3576,7 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) () -> format("Removed all snapshot tasks for repository [%s] from cluster state, now failing listeners", repository), failure ); + final List readyToResolveListeners = new ArrayList<>(); synchronized (currentlyFinalizing) { Tuple finalization; while ((finalization = repositoryOperations.pollFinalization(repository)) != null) { @@ -3554,13 +3585,16 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) } leaveRepoLoop(repository); for (Snapshot snapshot : snapshotsToFail) { - failSnapshotCompletionListeners(snapshot, failure); + failSnapshotCompletionListeners(snapshot, failure, readyToResolveListeners::add); } for (String delete : deletionsToFail) { - failListenersIgnoringException(snapshotDeletionListeners.remove(delete), failure); + final List> listeners = snapshotDeletionListeners.remove(delete); + readyToResolveListeners.add(() -> failListenersIgnoringException(listeners, failure)); repositoryOperations.finishDeletion(delete); } } + // fail snapshot listeners outside mutex + readyToResolveListeners.forEach(Runnable::run); } } diff --git a/server/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java b/server/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java index c99194d933131..63e17bd62f8ee 100644 --- a/server/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java +++ b/server/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java @@ -31,7 +31,6 @@ public LoggingTaskListener(Task task) { @Override public void onResponse(Response response) { logger.info("{} finished with response {}", task.getId(), response); - } @Override diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java index 620e0e44f95e9..9b4c2a6b026e9 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java @@ -20,7 +20,6 @@ import org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -67,7 +66,7 @@ public class TaskManager implements ClusterStateApplier { private static final Logger logger = LogManager.getLogger(TaskManager.class); /** Rest headers that are copied to the task */ - private final String[] taskHeaders; + private final Set taskHeaders; private final ThreadPool threadPool; private final Map tasks = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(); @@ -97,7 +96,7 @@ public TaskManager(Settings settings, ThreadPool threadPool, Set taskHea public TaskManager(Settings settings, ThreadPool threadPool, Set taskHeaders, Tracer tracer) { this.threadPool = threadPool; - this.taskHeaders = taskHeaders.toArray(Strings.EMPTY_ARRAY); + this.taskHeaders = Set.copyOf(taskHeaders); this.maxHeaderSize = 
SETTING_HTTP_MAX_HEADER_SIZE.get(settings); this.tracer = tracer; } @@ -658,10 +657,6 @@ private void notifyListeners(List listeners) { ExceptionsHelper.reThrowIfNotNull(rootException); } - public boolean hasParent(TaskId parentTaskId) { - return task.getParentTaskId().equals(parentTaskId); - } - public CancellableTask getTask() { return task; } @@ -817,7 +812,7 @@ public void cancelTaskAndDescendants(CancellableTask task, String reason, boolea getCancellationService().cancelTaskAndDescendants(task, reason, waitForCompletion, listener); } - public List getTaskHeaders() { - return List.of(taskHeaders); + public Set getTaskHeaders() { + return taskHeaders; } } diff --git a/server/src/main/java/org/elasticsearch/telemetry/metric/DoubleAsyncCounter.java b/server/src/main/java/org/elasticsearch/telemetry/metric/DoubleAsyncCounter.java new file mode 100644 index 0000000000000..5d6acf3cf82dd --- /dev/null +++ b/server/src/main/java/org/elasticsearch/telemetry/metric/DoubleAsyncCounter.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.telemetry.metric; + +/** + * A monotonically increasing double based on a callback. + */ +public interface DoubleAsyncCounter extends Instrument, AutoCloseable { + + /** + * Noop counter for use in tests. + */ + DoubleAsyncCounter NOOP = new DoubleAsyncCounter() { + @Override + public void close() throws Exception { + + } + + @Override + public String getName() { + return "noop"; + } + + }; +} diff --git a/server/src/main/java/org/elasticsearch/telemetry/metric/DoubleWithAttributes.java b/server/src/main/java/org/elasticsearch/telemetry/metric/DoubleWithAttributes.java index e342b6128998d..ac0a6eec8a6fb 100644 --- a/server/src/main/java/org/elasticsearch/telemetry/metric/DoubleWithAttributes.java +++ b/server/src/main/java/org/elasticsearch/telemetry/metric/DoubleWithAttributes.java @@ -12,4 +12,7 @@ public record DoubleWithAttributes(double value, Map attributes) { + public DoubleWithAttributes(double value) { + this(value, Map.of()); + } } diff --git a/server/src/main/java/org/elasticsearch/telemetry/metric/LongAsyncCounter.java b/server/src/main/java/org/elasticsearch/telemetry/metric/LongAsyncCounter.java new file mode 100644 index 0000000000000..33a33747a4c19 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/telemetry/metric/LongAsyncCounter.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.telemetry.metric; + +/** + * A monotonically increasing long metric based on a callback. + */ +public interface LongAsyncCounter extends Instrument, AutoCloseable { + + /** + * Noop counter for use in tests. 
+ */ + LongAsyncCounter NOOP = new LongAsyncCounter() { + @Override + public void close() throws Exception { + + } + + @Override + public String getName() { + return "noop"; + } + }; +} diff --git a/server/src/main/java/org/elasticsearch/telemetry/metric/LongGaugeMetric.java b/server/src/main/java/org/elasticsearch/telemetry/metric/LongGaugeMetric.java new file mode 100644 index 0000000000000..b5287fb18d346 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/telemetry/metric/LongGaugeMetric.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.telemetry.metric; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * This wrapper allows us to record a metric with APM (via {@link LongGauge}) while also accessing its current state via {@link AtomicLong} + */ +public record LongGaugeMetric(AtomicLong value, LongGauge gauge) { + + public static LongGaugeMetric create(MeterRegistry meterRegistry, String name, String description, String unit) { + final AtomicLong value = new AtomicLong(); + return new LongGaugeMetric( + value, + meterRegistry.registerLongGauge(name, description, unit, () -> new LongWithAttributes(value.get())) + ); + } + + public void set(long l) { + value.set(l); + } + + public long get() { + return value.get(); + } +} diff --git a/server/src/main/java/org/elasticsearch/telemetry/metric/LongWithAttributes.java b/server/src/main/java/org/elasticsearch/telemetry/metric/LongWithAttributes.java index eef880431fb83..8ef4dd1f4476d 100644 --- a/server/src/main/java/org/elasticsearch/telemetry/metric/LongWithAttributes.java +++ b/server/src/main/java/org/elasticsearch/telemetry/metric/LongWithAttributes.java @@ -12,4 +12,7 @@ public record LongWithAttributes(long value, Map<String, Object> attributes) { + public LongWithAttributes(long value) { + this(value, Map.of()); + } } diff --git a/server/src/main/java/org/elasticsearch/telemetry/metric/MeterRegistry.java b/server/src/main/java/org/elasticsearch/telemetry/metric/MeterRegistry.java index 6940795213603..0f690558361e4 100644 --- a/server/src/main/java/org/elasticsearch/telemetry/metric/MeterRegistry.java +++ b/server/src/main/java/org/elasticsearch/telemetry/metric/MeterRegistry.java @@ -91,6 +91,38 @@ public interface MeterRegistry { */ LongCounter registerLongCounter(String name, String description, String unit); + /** + * Register a {@link LongAsyncCounter} with an asynchronous callback. The returned object may be reused. + * @param name name of the counter + * @param description description of purpose + * @param unit the unit (bytes, sec, hour) + * @param observer a callback to provide a metric value upon observation (metric interval) + */ + LongAsyncCounter registerLongAsyncCounter(String name, String description, String unit, Supplier<LongWithAttributes> observer); + + /** + * Retrieved a previously registered {@link LongAsyncCounter}. + * @param name name of the counter + * @return the registered meter. + */ + LongAsyncCounter getLongAsyncCounter(String name); + + /** + * Register a {@link DoubleAsyncCounter} with an asynchronous callback. The returned object may be reused. 
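 * <p>Editor's illustration (the registry handle, metric name and callback are assumed, not part of the patch): a caller might write
 * {@code registry.registerDoubleAsyncCounter("es.example.time.total", "total time spent", "s", () -> new DoubleWithAttributes(totalSeconds()))},
 * using the single-argument {@link DoubleWithAttributes} constructor added above.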
+ * @param name name of the counter + * @param description description of purpose + * @param unit the unit (bytes, sec, hour) + * @param observer a callback to provide a metric value upon observation (metric interval) + */ + DoubleAsyncCounter registerDoubleAsyncCounter(String name, String description, String unit, Supplier observer); + + /** + * Retrieved a previously registered {@link DoubleAsyncCounter}. + * @param name name of the counter + * @return the registered meter. + */ + DoubleAsyncCounter getDoubleAsyncCounter(String name); + /** * Retrieved a previously registered {@link LongCounter}. * @param name name of the counter @@ -196,6 +228,36 @@ public LongCounter registerLongCounter(String name, String description, String u return LongCounter.NOOP; } + @Override + public LongAsyncCounter registerLongAsyncCounter( + String name, + String description, + String unit, + Supplier observer + ) { + return LongAsyncCounter.NOOP; + } + + @Override + public LongAsyncCounter getLongAsyncCounter(String name) { + return LongAsyncCounter.NOOP; + } + + @Override + public DoubleAsyncCounter registerDoubleAsyncCounter( + String name, + String description, + String unit, + Supplier observer + ) { + return DoubleAsyncCounter.NOOP; + } + + @Override + public DoubleAsyncCounter getDoubleAsyncCounter(String name) { + return DoubleAsyncCounter.NOOP; + } + @Override public LongCounter getLongCounter(String name) { return LongCounter.NOOP; diff --git a/server/src/main/java/org/elasticsearch/transport/ClusterConnectionManager.java b/server/src/main/java/org/elasticsearch/transport/ClusterConnectionManager.java index 419cbf97dfa09..4d6a66b6ec075 100644 --- a/server/src/main/java/org/elasticsearch/transport/ClusterConnectionManager.java +++ b/server/src/main/java/org/elasticsearch/transport/ClusterConnectionManager.java @@ -223,7 +223,7 @@ private void connectToNodeOrRetry( IOUtils.closeWhileHandlingException(conn); } else { logger.debug("connected to node [{}]", node); - managerRefs.incRef(); + managerRefs.mustIncRef(); try { connectionListener.onNodeConnected(node, conn); } finally { diff --git a/server/src/main/java/org/elasticsearch/transport/InboundHandler.java b/server/src/main/java/org/elasticsearch/transport/InboundHandler.java index 5d47c79abfd61..1686213139722 100644 --- a/server/src/main/java/org/elasticsearch/transport/InboundHandler.java +++ b/server/src/main/java/org/elasticsearch/transport/InboundHandler.java @@ -293,7 +293,7 @@ private static void doHandleRequest(RequestHandlerR private void handleRequestForking(T request, RequestHandlerRegistry reg, TransportChannel channel) { boolean success = false; - request.incRef(); + request.mustIncRef(); try { reg.getExecutor().execute(threadPool.getThreadContext().preserveContextWithTracing(new AbstractRunnable() { @Override @@ -381,7 +381,7 @@ private void handleResponse( // no need to provide a buffer release here, we never escape the buffer when handling directly doHandleResponse(handler, remoteAddress, stream, inboundMessage.getHeader(), () -> {}); } else { - inboundMessage.incRef(); + inboundMessage.mustIncRef(); // release buffer once we deserialize the message, but have a fail-safe in #onAfter below in case that didn't work out final Releasable releaseBuffer = Releasables.releaseOnce(inboundMessage::decRef); executor.execute(new ForkingResponseHandlerRunnable(handler, null, threadPool) { diff --git a/test/framework/src/main/java/org/elasticsearch/transport/LeakTracker.java b/server/src/main/java/org/elasticsearch/transport/LeakTracker.java 
similarity index 85% rename from test/framework/src/main/java/org/elasticsearch/transport/LeakTracker.java rename to server/src/main/java/org/elasticsearch/transport/LeakTracker.java index ce82e62df698a..ea12953e7df12 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/LeakTracker.java +++ b/server/src/main/java/org/elasticsearch/transport/LeakTracker.java @@ -13,6 +13,9 @@ import org.elasticsearch.common.Randomness; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.Assertions; +import org.elasticsearch.core.RefCounted; +import org.elasticsearch.core.Releasable; import java.lang.ref.ReferenceQueue; import java.lang.ref.WeakReference; @@ -69,6 +72,55 @@ public void reportLeak() { } } + public static Releasable wrap(Releasable releasable) { + if (Assertions.ENABLED == false) { + return releasable; + } + var leak = INSTANCE.track(releasable); + return () -> { + try { + releasable.close(); + } finally { + leak.close(releasable); + } + }; + } + + public static RefCounted wrap(RefCounted refCounted) { + if (Assertions.ENABLED == false) { + return refCounted; + } + var leak = INSTANCE.track(refCounted); + return new RefCounted() { + @Override + public void incRef() { + leak.record(); + refCounted.incRef(); + } + + @Override + public boolean tryIncRef() { + leak.record(); + return refCounted.tryIncRef(); + } + + @Override + public boolean decRef() { + if (refCounted.decRef()) { + leak.close(refCounted); + return true; + } + leak.record(); + return false; + } + + @Override + public boolean hasReferences() { + return refCounted.hasReferences(); + } + }; + } + public static final class Leak extends WeakReference { @SuppressWarnings({ "unchecked", "rawtypes" }) diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionManager.java b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionManager.java index e026df96fe6ca..b16734b273376 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionManager.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionManager.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.util.CollectionUtils; @@ -256,11 +255,6 @@ public boolean isClosed() { return connection.isClosed(); } - @Override - public Version getVersion() { - return connection.getVersion(); - } - @Override public TransportVersion getTransportVersion() { return connection.getTransportVersion(); @@ -346,11 +340,6 @@ public boolean isClosed() { return connection.isClosed(); } - @Override - public Version getVersion() { - return connection.getVersion(); - } - @Override public TransportVersion getTransportVersion() { return connection.getTransportVersion(); diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 8612b5221c77b..4346431102d94 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -13,7 +13,6 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; -import 
org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -264,13 +263,6 @@ public final class NodeChannels extends CloseableConnection { compressionScheme = connectionProfile.getCompressionScheme(); } - @Override - public Version getVersion() { - // TODO: this should be the below, but in some cases the node version does not match the passed-in version. - // return node.getVersion(); - return Version.fromId(version.id()); - } - @Override public TransportVersion getTransportVersion() { return version; } diff --git a/server/src/main/java/org/elasticsearch/transport/Transport.java b/server/src/main/java/org/elasticsearch/transport/Transport.java index 3461f57503d7d..5b396daa5d51f 100644 --- a/server/src/main/java/org/elasticsearch/transport/Transport.java +++ b/server/src/main/java/org/elasticsearch/transport/Transport.java @@ -9,7 +9,6 @@ package org.elasticsearch.transport; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.LifecycleComponent; @@ -107,7 +106,7 @@ interface Connection extends Closeable, RefCounted { /** * Sends the request to the node this connection is associated with - * @param requestId see {@link ResponseHandlers#add(ResponseContext)} for details + * @param requestId see {@link ResponseHandlers#add(TransportResponseHandler, Connection, String)} for details * @param action the action to execute * @param request the request to send * @param options request options to apply @@ -127,13 +126,6 @@ void sendRequest(long requestId, String action, TransportRequest request, Transp boolean isClosed(); - /** - * Returns the version of the node on the other side of this channel. - */ - default Version getVersion() { - return getNode().getVersion(); - } - /** * Returns the version of the data to communicate in this channel. */ @@ -163,35 +155,15 @@ default Object getCacheKey() { } /** - * This class represents a response context that encapsulates the actual response handler, the action and the connection it was - * executed on. + * This class represents a response context that encapsulates the actual response handler, the action, the connection it was + * executed on, and the request ID. */ - final class ResponseContext<T extends TransportResponse> { - - private final TransportResponseHandler<T> handler; - - private final Connection connection; - - private final String action; - - ResponseContext(TransportResponseHandler<T> handler, Connection connection, String action) { - this.handler = handler; - this.connection = connection; - this.action = action; - } - - public TransportResponseHandler<T> handler() { - return handler; - } - - public Connection connection() { - return this.connection; - } - - public String action() { - return this.action; - } - } + record ResponseContext<T extends TransportResponse>( + TransportResponseHandler<T> handler, + Connection connection, + String action, + long requestId + ) {} /** * This class is a registry that allows @@ -218,14 +190,19 @@ public ResponseContext<? extends TransportResponse> remove(long requestId) { /** * Adds a new response context and associates it with a new request ID. 
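 * (Editor's illustration of the new call shape, quoted from the TransportService change later in this patch:
 * {@code final long requestId = responseHandlers.add(responseHandler, connection, action).requestId();}
 * so the fresh ID now travels inside the returned context instead of being the return value.)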
- * @return the new request ID + * @return the new response context * @see Connection#sendRequest(long, String, TransportRequest, TransportRequestOptions) */ - public long add(ResponseContext<? extends TransportResponse> holder) { + public <T extends TransportResponse> ResponseContext<T> add( + TransportResponseHandler<T> handler, + Connection connection, + String action + ) { long requestId = newRequestId(); + ResponseContext<T> holder = new ResponseContext<>(handler, connection, action, requestId); ResponseContext<? extends TransportResponse> existing = handlers.put(requestId, holder); assert existing == null : "request ID already in use: " + requestId; - return requestId; + return holder; } /** diff --git a/server/src/main/java/org/elasticsearch/transport/TransportActionProxy.java b/server/src/main/java/org/elasticsearch/transport/TransportActionProxy.java index aa28f8a76b58e..ecd4ec6e4fc1b 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportActionProxy.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportActionProxy.java @@ -65,7 +65,7 @@ public Executor executor(ThreadPool threadPool) { @Override public void handleResponse(TransportResponse response) { try { - response.incRef(); + response.mustIncRef(); channel.sendResponse(response); } catch (IOException e) { throw new UncheckedIOException(e); } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportInfo.java b/server/src/main/java/org/elasticsearch/transport/TransportInfo.java index e12dc599d5bf0..91520a6223a83 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportInfo.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportInfo.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.node.ReportingService; import org.elasticsearch.xcontent.XContentBuilder; @@ -30,6 +31,7 @@ public class TransportInfo implements ReportingService.Info { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(TransportInfo.class); /** Whether to add hostname to publish host field when serializing. 
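 * (Editor's note: this javadoc describes the {@code es.transport.cname_in_publish_address} system property read just below, which the new {@code @UpdateForV9} marker schedules for removal in 9.0.0.)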
*/ + @UpdateForV9 // Remove es.transport.cname_in_publish_address property from TransportInfo in 9.0.0 private static final boolean CNAME_IN_PUBLISH_ADDRESS = parseBoolean( System.getProperty("es.transport.cname_in_publish_address"), false diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index 8ef6ff3c9d8ef..5ce44c74a7a69 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -38,6 +38,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.node.ReportingService; import org.elasticsearch.tasks.Task; @@ -358,6 +359,10 @@ protected void doStop() { try { final TransportResponseHandler handler = holderToNotify.handler(); final var targetNode = holderToNotify.connection().getNode(); + final long requestId = holderToNotify.requestId(); + if (tracerLog.isTraceEnabled() && shouldTraceAction(holderToNotify.action())) { + tracerLog.trace("[{}][{}] pruning request for node [{}]", requestId, holderToNotify.action(), targetNode); + } assert transport instanceof TcpTransport == false /* other transports (used in tests) may not implement the proper close-connection behaviour. TODO fix this. */ @@ -921,7 +926,7 @@ private void sendRequestInternal( Supplier storedContextSupplier = threadPool.getThreadContext().newRestorableContext(true); ContextRestoreResponseHandler responseHandler = new ContextRestoreResponseHandler<>(storedContextSupplier, handler); // TODO we can probably fold this entire request ID dance into connection.sendRequest but it will be a bigger refactoring - final long requestId = responseHandlers.add(new Transport.ResponseContext<>(responseHandler, connection, action)); + final long requestId = responseHandlers.add(responseHandler, connection, action).requestId(); request.setRequestId(requestId); final TimeoutHandler timeoutHandler; if (options.timeout() != null) { @@ -950,7 +955,7 @@ private void sendRequestInternal( } } - private void handleInternalSendException( + protected void handleInternalSendException( String action, DiscoveryNode node, long requestId, @@ -985,6 +990,9 @@ public void onFailure(Exception e) { @Override protected void doRun() { + if (tracerLog.isTraceEnabled() && shouldTraceAction(action)) { + tracerLog.trace("[{}][{}] failed to send request to node [{}]", requestId, action, node); + } contextToNotify.handler().handleException(sendRequestException); } }); @@ -1012,7 +1020,7 @@ private void sendLocalRequest(long requestId, final String action, final Transpo } } else { boolean success = false; - request.incRef(); + request.mustIncRef(); try { executor.execute(threadPool.getThreadContext().preserveContextWithTracing(new AbstractRunnable() { @Override @@ -1293,15 +1301,21 @@ public void onConnectionClosed(Transport.Connection connection) { return; } - // Callback that an exception happened, but on a different thread since we don't - // want handlers to worry about stack overflows. - // Execute on the current thread in the special case of a node shut down to notify the listener even when the threadpool has - // already been shut down. - final String executor = lifecycle.stoppedOrClosed() ? 
ThreadPool.Names.SAME : ThreadPool.Names.GENERIC; - threadPool.executor(executor).execute(new AbstractRunnable() { + // Callback that an exception happened, but on a different thread since we don't want handlers to worry about stack overflows. + final var executor = threadPool.generic(); + assert executor.isShutdown() == false : "connections should all be closed before threadpool shuts down"; + executor.execute(new AbstractRunnable() { @Override public void doRun() { for (Transport.ResponseContext holderToNotify : pruned) { + if (tracerLog.isTraceEnabled() && shouldTraceAction(holderToNotify.action())) { + tracerLog.trace( + "[{}][{}] pruning request because connection to node [{}] closed", + holderToNotify.requestId(), + holderToNotify.action(), + connection.getNode() + ); + } holderToNotify.handler().handleException(new NodeDisconnectedException(connection.getNode(), holderToNotify.action())); } } @@ -1480,7 +1494,7 @@ public void sendResponse(TransportResponse response) throws IOException { if (executor == EsExecutors.DIRECT_EXECUTOR_SERVICE) { processResponse(handler, response); } else { - response.incRef(); + response.mustIncRef(); executor.execute(new ForkingResponseHandlerRunnable(handler, null, threadPool) { @Override protected void doRun() { @@ -1673,11 +1687,11 @@ Releasable withRef() { static { // Ensure that this property, introduced and immediately deprecated in 7.11, is not used in 8.x + @UpdateForV9 // we can remove this whole block in v9 final String PERMIT_HANDSHAKES_FROM_INCOMPATIBLE_BUILDS_KEY = "es.unsafely_permit_handshake_from_incompatible_builds"; if (System.getProperty(PERMIT_HANDSHAKES_FROM_INCOMPATIBLE_BUILDS_KEY) != null) { throw new IllegalArgumentException("system property [" + PERMIT_HANDSHAKES_FROM_INCOMPATIBLE_BUILDS_KEY + "] must not be set"); } - assert Version.CURRENT.major == Version.V_7_0_0.major + 1; // we can remove this whole block in v9 } private record UnregisterChildTransportResponseHandler( diff --git a/server/src/main/java/org/elasticsearch/transport/TransportStats.java b/server/src/main/java/org/elasticsearch/transport/TransportStats.java index 96c5a89256008..13cce6328b84e 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportStats.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportStats.java @@ -9,7 +9,6 @@ package org.elasticsearch.transport; import org.elasticsearch.TransportVersions; -import org.elasticsearch.Version; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -18,6 +17,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -165,11 +165,14 @@ public Map getTransportActionStats() { return transportActionStats; } + @UpdateForV9 // Review and simplify the if-else blocks containing this symbol once v9 is released + private static final boolean IMPOSSIBLE_IN_V9 = true; + private boolean assertHistogramsConsistent() { assert inboundHandlingTimeBucketFrequencies.length == outboundHandlingTimeBucketFrequencies.length; if (inboundHandlingTimeBucketFrequencies.length == 0) { // Stats came from before v8.1 - assert Version.CURRENT.major == Version.V_8_0_0.major; + assert IMPOSSIBLE_IN_V9; } else { assert inboundHandlingTimeBucketFrequencies.length == 
HandlingTimeTracker.BUCKET_COUNT; } @@ -177,6 +180,7 @@ private boolean assertHistogramsConsistent() { } @Override + @UpdateForV9 // review the "if" blocks checking for non-empty once we have public Iterator toXContentChunked(ToXContent.Params outerParams) { return Iterators.concat(Iterators.single((builder, params) -> { builder.startObject(Fields.TRANSPORT); @@ -191,13 +195,13 @@ public Iterator toXContentChunked(ToXContent.Params outerP histogramToXContent(builder, outboundHandlingTimeBucketFrequencies, Fields.OUTBOUND_HANDLING_TIME_HISTOGRAM); } else { // Stats came from before v8.1 - assert Version.CURRENT.major == Version.V_7_0_0.major + 1; + assert IMPOSSIBLE_IN_V9; } if (transportActionStats.isEmpty() == false) { builder.startObject(Fields.ACTIONS); } else { // Stats came from before v8.8 - assert Version.CURRENT.major == Version.V_7_0_0.major + 1; + assert IMPOSSIBLE_IN_V9; } return builder; }), diff --git a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification index c94606f289c9e..a42c1f7192d49 100644 --- a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification +++ b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification @@ -8,5 +8,7 @@ org.elasticsearch.features.FeatureInfrastructureFeatures org.elasticsearch.health.HealthFeatures +org.elasticsearch.cluster.service.TransportFeatures org.elasticsearch.cluster.metadata.MetadataFeatures org.elasticsearch.rest.RestFeatures +org.elasticsearch.indices.IndicesFeatures diff --git a/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java b/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java index 09a24f6b76a8e..2d5d7b7b522d1 100644 --- a/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java +++ b/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java @@ -29,6 +29,7 @@ import java.util.function.Consumer; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -579,6 +580,9 @@ public String toString() { l.onResponse(null); } catch (Exception e) { // ok + } catch (AssertionError e) { + // ensure this was only thrown by ActionListener#assertOnce + assertThat(e.getMessage(), endsWith("must handle its own exceptions")); } } else { l.onFailure(new RuntimeException("supplied")); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponseTests.java index 3378ff0063bb0..7b452beac0938 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponseTests.java @@ -12,7 +12,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.allocator.ClusterBalanceStats; -import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceStats; +import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceStatsTests; import org.elasticsearch.common.io.stream.Writeable; import 
org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.test.AbstractChunkedSerializingTestCase; @@ -30,6 +30,7 @@ import static java.util.function.Function.identity; import static java.util.stream.Collectors.toMap; +import static org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceStatsTests.randomDesiredBalanceStats; import static org.hamcrest.Matchers.containsInAnyOrder; public class DesiredBalanceResponseTests extends AbstractWireSerializingTestCase { @@ -49,20 +50,6 @@ protected DesiredBalanceResponse createTestInstance() { ); } - private DesiredBalanceStats randomDesiredBalanceStats() { - return new DesiredBalanceStats( - randomNonNegativeLong(), - randomBoolean(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong() - ); - } - private ClusterBalanceStats randomClusterBalanceStats() { return new ClusterBalanceStats( randomNonNegativeInt(), @@ -156,7 +143,7 @@ private Map> randomRo protected DesiredBalanceResponse mutateInstance(DesiredBalanceResponse instance) { return switch (randomInt(4)) { case 0 -> new DesiredBalanceResponse( - randomValueOtherThan(instance.getStats(), this::randomDesiredBalanceStats), + randomValueOtherThan(instance.getStats(), DesiredBalanceStatsTests::randomDesiredBalanceStats), instance.getClusterBalanceStats(), instance.getRoutingTable(), instance.getClusterInfo() diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java index cbe4acd137b2a..e702446406238 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java @@ -37,6 +37,7 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.tasks.Task; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.MockUtils; import org.elasticsearch.test.gateway.TestGatewayAllocator; @@ -114,7 +115,14 @@ public DesiredBalance compute( return super.compute(previousDesiredBalance, desiredBalanceInput, pendingDesiredBalanceMoves, isFresh); } }; - var allocator = new DesiredBalanceShardsAllocator(delegate, threadPool, clusterService, computer, (state, action) -> state); + var allocator = new DesiredBalanceShardsAllocator( + delegate, + threadPool, + clusterService, + computer, + (state, action) -> state, + TelemetryProvider.NOOP + ); var allocationService = new MockAllocationService( randomAllocationDeciders(settings, clusterSettings), new TestGatewayAllocator(), diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceActionTests.java index 1d80454fcea12..a98d7662b8983 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceActionTests.java @@ -59,6 +59,7 @@ import static 
org.elasticsearch.cluster.ClusterModule.BALANCED_ALLOCATOR; import static org.elasticsearch.cluster.ClusterModule.DESIRED_BALANCE_ALLOCATOR; import static org.elasticsearch.cluster.ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING; +import static org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceStatsTests.randomDesiredBalanceStats; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; import static org.mockito.Mockito.mock; @@ -219,17 +220,7 @@ public void testGetDesiredBalance() throws Exception { } when(desiredBalanceShardsAllocator.getDesiredBalance()).thenReturn(new DesiredBalance(randomInt(1024), shardAssignments)); - DesiredBalanceStats desiredBalanceStats = new DesiredBalanceStats( - randomInt(Integer.MAX_VALUE), - randomBoolean(), - randomInt(Integer.MAX_VALUE), - randomInt(Integer.MAX_VALUE), - randomInt(Integer.MAX_VALUE), - randomInt(Integer.MAX_VALUE), - randomInt(Integer.MAX_VALUE), - randomInt(Integer.MAX_VALUE), - randomInt(Integer.MAX_VALUE) - ); + DesiredBalanceStats desiredBalanceStats = randomDesiredBalanceStats(); when(desiredBalanceShardsAllocator.getStats()).thenReturn(desiredBalanceStats); ClusterInfo clusterInfo = ClusterInfo.EMPTY; when(clusterInfoService.getClusterInfo()).thenReturn(clusterInfo); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesActionTests.java index b018b6c47ed4d..4e2948eafc1d7 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesActionTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.coordination.NoMasterBlockService; -import org.elasticsearch.cluster.desirednodes.DesiredNodesSettingsValidator; import org.elasticsearch.cluster.desirednodes.VersionConflictException; import org.elasticsearch.cluster.metadata.DesiredNode; import org.elasticsearch.cluster.metadata.DesiredNodeWithStatus; @@ -45,11 +44,6 @@ public class TransportUpdateDesiredNodesActionTests extends DesiredNodesTestCase { - public static final DesiredNodesSettingsValidator NO_OP_SETTINGS_VALIDATOR = new DesiredNodesSettingsValidator() { - @Override - public void validate(List desiredNodes) {} - }; - public void testWriteBlocks() { ThreadPool threadPool = mock(ThreadPool.class); TransportService transportService = MockUtils.setupTransportServiceWithThreadpoolExecutor(threadPool); @@ -60,7 +54,7 @@ public void testWriteBlocks() { threadPool, mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), - NO_OP_SETTINGS_VALIDATOR, + l -> {}, mock(AllocationService.class) ); @@ -88,7 +82,7 @@ public void testNoBlocks() { threadPool, mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), - NO_OP_SETTINGS_VALIDATOR, + l -> {}, mock(AllocationService.class) ); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java index 2348602487518..e3dcc7cca5bbd 100644 --- 
a/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java @@ -8,11 +8,11 @@ package org.elasticsearch.action.admin.cluster.migration; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.indices.SystemIndexDescriptorUtils; @@ -84,9 +84,7 @@ private static ClusterState getClusterState() { .numberOfReplicas(0) .build(); - // Once we start testing 9.x, we should update this test to use a 7.x "version created" - assert Version.CURRENT.major < 9; - + @UpdateForV9 // Once we start testing 9.x, we should update this test to use a 7.x "version created" IndexMetadata indexMetadata2 = IndexMetadata.builder(".test-index-2") .settings(Settings.builder().put("index.version.created", TEST_OLD_VERSION).build()) .numberOfShards(1) diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java index 3e0460565dd61..8618ae516768f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.Build; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -39,7 +38,7 @@ public class NodeInfoTests extends ESTestCase { */ public void testGetInfo() { NodeInfo nodeInfo = new NodeInfo( - Version.CURRENT, + Build.current().version(), TransportVersion.current(), IndexVersion.current(), Map.of(), diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java index 217cb8e3334e3..e65d99c64ae5e 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -1078,7 +1078,4 @@ private static TimeSeries randomTimeSeries() { } } - private IngestStats.Stats getPipelineStats(List pipelineStats, String id) { - return pipelineStats.stream().filter(p1 -> p1.pipelineId().equals(id)).findFirst().map(p2 -> p2.stats()).orElse(null); - } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestTests.java index 9cb47791d4f98..ad5f1e5034dd6 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestTests.java @@ -34,7 +34,7 @@ public class NodesStatsRequestTests extends ESTestCase { public void testAddMetrics() throws Exception { NodesStatsRequest request = new 
NodesStatsRequest(randomAlphaOfLength(8)); request.indices(randomFrom(CommonStatsFlags.ALL)); - String[] metrics = randomSubsetOf(NodesStatsRequest.Metric.allMetrics()).toArray(String[]::new); + String[] metrics = randomSubsetOf(NodesStatsRequestParameters.Metric.allMetrics()).toArray(String[]::new); request.addMetrics(metrics); NodesStatsRequest deserializedRequest = roundTripRequest(request); assertRequestsEqual(request, deserializedRequest); @@ -45,7 +45,7 @@ public void testAddMetrics() throws Exception { */ public void testAddSingleMetric() throws Exception { NodesStatsRequest request = new NodesStatsRequest(); - request.addMetric(randomFrom(NodesStatsRequest.Metric.allMetrics())); + request.addMetric(randomFrom(NodesStatsRequestParameters.Metric.allMetrics())); NodesStatsRequest deserializedRequest = roundTripRequest(request); assertRequestsEqual(request, deserializedRequest); } @@ -56,7 +56,7 @@ public void testAddSingleMetric() throws Exception { public void testRemoveSingleMetric() throws Exception { NodesStatsRequest request = new NodesStatsRequest(); request.all(); - String metric = randomFrom(NodesStatsRequest.Metric.allMetrics()); + String metric = randomFrom(NodesStatsRequestParameters.Metric.allMetrics()); request.removeMetric(metric); NodesStatsRequest deserializedRequest = roundTripRequest(request); assertThat(request.requestedMetrics(), equalTo(deserializedRequest.requestedMetrics())); @@ -83,7 +83,7 @@ public void testNodesInfoRequestAll() throws Exception { request.all(); assertThat(request.indices().getFlags(), equalTo(CommonStatsFlags.ALL.getFlags())); - assertThat(request.requestedMetrics(), equalTo(NodesStatsRequest.Metric.allMetrics())); + assertThat(request.requestedMetrics(), equalTo(NodesStatsRequestParameters.Metric.allMetrics())); } /** @@ -105,7 +105,7 @@ public void testUnknownMetricsRejected() { String unknownMetric2 = "unknown_metric2"; Set unknownMetrics = new HashSet<>(); unknownMetrics.add(unknownMetric1); - unknownMetrics.addAll(randomSubsetOf(NodesStatsRequest.Metric.allMetrics())); + unknownMetrics.addAll(randomSubsetOf(NodesStatsRequestParameters.Metric.allMetrics())); NodesStatsRequest request = new NodesStatsRequest(); @@ -145,7 +145,7 @@ private static void assertRequestsEqual(NodesStatsRequest request1, NodesStatsRe public void testGetDescription() { final var request = new NodesStatsRequest("nodeid1", "nodeid2"); request.clear(); - request.addMetrics(NodesStatsRequest.Metric.OS.metricName(), NodesStatsRequest.Metric.TRANSPORT.metricName()); + request.addMetrics(NodesStatsRequestParameters.Metric.OS.metricName(), NodesStatsRequestParameters.Metric.TRANSPORT.metricName()); request.indices(new CommonStatsFlags(CommonStatsFlags.Flag.Store, CommonStatsFlags.Flag.Flush)); final var description = request.getDescription(); @@ -154,9 +154,9 @@ public void testGetDescription() { allOf( containsString("nodeid1"), containsString("nodeid2"), - containsString(NodesStatsRequest.Metric.OS.metricName()), - containsString(NodesStatsRequest.Metric.TRANSPORT.metricName()), - not(containsString(NodesStatsRequest.Metric.SCRIPT.metricName())), + containsString(NodesStatsRequestParameters.Metric.OS.metricName()), + containsString(NodesStatsRequestParameters.Metric.TRANSPORT.metricName()), + not(containsString(NodesStatsRequestParameters.Metric.SCRIPT.metricName())), containsString(CommonStatsFlags.Flag.Store.toString()), containsString(CommonStatsFlags.Flag.Flush.toString()), not(containsString(CommonStatsFlags.Flag.FieldData.toString())) diff --git 
a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index 30f6c2fda8dc1..8334c98e5fca0 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -67,7 +67,6 @@ import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.stream.Collectors; -import static org.elasticsearch.action.support.PlainActionFuture.newFuture; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; @@ -286,7 +285,7 @@ private ActionFuture startBlockingTestNodesAction(CountDownLatch } private ActionFuture startBlockingTestNodesAction(CountDownLatch checkLatch, NodesRequest request) throws Exception { - PlainActionFuture future = newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); startBlockingTestNodesAction(checkLatch, request, future); return future; } @@ -677,7 +676,7 @@ protected void taskOperation( TestTasksRequest testTasksRequest = new TestTasksRequest(); testTasksRequest.setActions("internal:testAction[n]"); // pick all test actions testTasksRequest.setNodes(testNodes[0].getNodeId(), testNodes[1].getNodeId()); // only first two nodes - PlainActionFuture taskFuture = newFuture(); + PlainActionFuture taskFuture = new PlainActionFuture<>(); CancellableTask task = (CancellableTask) testNodes[0].transportService.getTaskManager() .registerAndExecute( "direct", @@ -690,7 +689,7 @@ protected void taskOperation( taskExecutesLatch.await(); logger.info("All test tasks are now executing"); - PlainActionFuture cancellationFuture = newFuture(); + PlainActionFuture cancellationFuture = new PlainActionFuture<>(); logger.info("Cancelling tasks"); testNodes[0].transportService.getTaskManager().cancelTaskAndDescendants(task, "test case", false, cancellationFuture); @@ -734,7 +733,7 @@ protected void taskOperation( TestTasksRequest testTasksRequest = new TestTasksRequest(); testTasksRequest.setNodes(testNodes[0].getNodeId()); // only local node - PlainActionFuture taskFuture = newFuture(); + PlainActionFuture taskFuture = new PlainActionFuture<>(); CancellableTask task = (CancellableTask) testNodes[0].transportService.getTaskManager() .registerAndExecute( "direct", @@ -808,7 +807,7 @@ protected void taskOperation( TestTasksRequest testTasksRequest = new TestTasksRequest(); testTasksRequest.setActions("internal:testTasksAction[n]"); - PlainActionFuture taskFuture = newFuture(); + PlainActionFuture taskFuture = new PlainActionFuture<>(); CancellableTask task = (CancellableTask) testNodes[0].transportService.getTaskManager() .registerAndExecute( "direct", diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java index 91af3383f0670..a56eb50290ed9 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java @@ -8,8 +8,8 @@ package org.elasticsearch.action.admin.cluster.remote; +import org.elasticsearch.Build; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; 
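The TransportTasksActionTests hunks above apply one mechanical substitution: the PlainActionFuture.newFuture() static factory is replaced with direct construction. A minimal sketch of the pattern, with NodesResponse standing in for whichever result type a given call site uses (the generic parameters are not visible in this diff, so the types here are assumptions):

    // Before: the type argument was inferred through the static factory.
    // PlainActionFuture<NodesResponse> future = PlainActionFuture.newFuture();
    // After: the diamond operator gives the same inference, so the factory adds nothing.
    PlainActionFuture<NodesResponse> future = new PlainActionFuture<>();
    startBlockingTestNodesAction(checkLatch, request, future); // completes the future asynchronously
    NodesResponse response = future.actionGet();               // block the test thread until then
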
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -76,7 +76,7 @@ public void testDoExecuteForRemoteServerNodes() { } nodeInfos.add( new NodeInfo( - Version.CURRENT, + Build.current().version(), TransportVersion.current(), IndexVersion.current(), Map.of(), @@ -154,7 +154,7 @@ public void testDoExecuteForRemoteNodes() { expectedRemoteNodes.add(node); nodeInfos.add( new NodeInfo( - Version.CURRENT, + Build.current().version(), TransportVersion.current(), IndexVersion.current(), Map.of(), diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodesTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodesTests.java index f2b1cb28ce8b6..0ee3b244ecf45 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodesTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodesTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.Build; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodeStatsTests; @@ -322,7 +321,7 @@ private static NodeInfo createNodeInfo(String nodeId, String transportType, Stri settings.put(randomFrom(NetworkModule.HTTP_TYPE_KEY, NetworkModule.HTTP_TYPE_DEFAULT_KEY), httpType); } return new NodeInfo( - Version.CURRENT, + Build.current().version(), TransportVersion.current(), IndexVersion.current(), Map.of(), diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponseTests.java index 6fde4bed97a17..433563b99ef64 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponseTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Tuple; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.test.AbstractWireSerializingTestCase; import java.util.ArrayList; @@ -24,6 +25,7 @@ import java.util.Map; import java.util.function.Predicate; +@UpdateForV9 // no need to round-trip these objects over the wire any more, we only need a checkEqualsAndHashCode test public class GetAliasesResponseTests extends AbstractWireSerializingTestCase { @Override @@ -33,9 +35,8 @@ protected GetAliasesResponse createTestInstance() { /** * NB prior to 8.12 get-aliases was a TransportMasterNodeReadAction so for BwC we must remain able to write these responses so that - * older nodes can read them until we no longer need to support {@link org.elasticsearch.TransportVersions#CLUSTER_FEATURES_ADDED} and - * earlier. The reader implementation below is the production implementation from earlier versions, but moved here because it is unused - * in production now. + * older nodes can read them until we no longer need to support calling this action remotely. The reader implementation below is the + * production implementation from earlier versions, but moved here because it is unused in production now. 
*/ @Override protected Writeable.Reader instanceReader() { diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java index 908dbb48383a4..9c71b654c8a56 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java @@ -154,7 +154,7 @@ private void executeOnPrimaryOrReplica(boolean phase1) throws Throwable { phase1, taskId ); - final PlainActionFuture res = PlainActionFuture.newFuture(); + final PlainActionFuture res = new PlainActionFuture<>(); action.shardOperationOnPrimary(request, indexShard, res.delegateFailureAndWrap((l, r) -> { assertNotNull(r); l.onResponse(null); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexActionTests.java index c5639d5989d01..966ac50dfab37 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexActionTests.java @@ -67,7 +67,7 @@ public class TransportCreateIndexActionTests extends ESTestCase { ) .build() ) - .compatibilityVersions( + .nodeIdsToCompatibilityVersions( Map.of( "node-1", new CompatibilityVersions( diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java index 7190b7ec216c1..065dea4be743c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java @@ -39,7 +39,7 @@ public void setupIndex() { int numDocs = scaledRandomIntBetween(100, 1000); for (int j = 0; j < numDocs; ++j) { String id = Integer.toString(j); - client().prepareIndex("test").setId(id).setSource("text", "sometext").get(); + prepareIndex("test").setId(id).setSource("text", "sometext").get(); } client().admin().indices().prepareFlush("test").get(); client().admin().indices().prepareRefresh().get(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java index de28a7c7bd99d..661727d832747 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java @@ -72,7 +72,7 @@ public void testSegmentStats() throws Exception { .setSettings(Settings.builder().put("index.store.type", storeType.getSettingsKey())) ); ensureGreen("test"); - client().prepareIndex("test").setId("1").setSource("foo", "bar", "bar", "baz", "baz", 42).get(); + prepareIndex("test").setId("1").setSource("foo", "bar", "bar", "baz", "baz", 42).get(); indicesAdmin().prepareRefresh("test").get(); IndicesStatsResponse rsp = indicesAdmin().prepareStats("test").get(); @@ -80,7 +80,7 @@ public void testSegmentStats() throws Exception { assertThat(stats.getCount(), greaterThan(0L)); 
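The client().prepareIndex("test") to prepareIndex("test") rewrites in these stats tests point at a convenience method on the shared test base class. Its definition is not part of this diff, so the following is only a plausible sketch of such a helper:

    // Hypothetical helper on the test base class: forwards to the client so that
    // call sites drop the repeated client(). prefix.
    protected IndexRequestBuilder prepareIndex(String index) {
        return client().prepareIndex(index);
    }
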
// now check multiple segments stats are merged together - client().prepareIndex("test").setId("2").setSource("foo", "bar", "bar", "baz", "baz", 43).get(); + prepareIndex("test").setId("2").setSource("foo", "bar", "bar", "baz", "baz", 43).get(); indicesAdmin().prepareRefresh("test").get(); rsp = indicesAdmin().prepareStats("test").get(); @@ -107,8 +107,7 @@ public void testRefreshListeners() throws Exception { createIndex("test", Settings.builder().put("refresh_interval", -1).build()); // Index a document asynchronously so the request will only return when document is refreshed - ActionFuture index = client().prepareIndex("test") - .setId("test") + ActionFuture index = prepareIndex("test").setId("test") .setSource("test", "test") .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) .execute(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateActionTests.java index 28476a0d8b839..173cb4c66d18f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateActionTests.java @@ -875,7 +875,13 @@ public void testTemplatesWithReservedPrefix() throws Exception { .indexTemplates( Map.of( reservedComposableIndexName(conflictingTemplateName), - new ComposableIndexTemplate(singletonList("foo*"), null, Collections.emptyList(), 1L, 1L, Collections.emptyMap()) + ComposableIndexTemplate.builder() + .indexPatterns(singletonList("foo*")) + .componentTemplates(Collections.emptyList()) + .priority(1L) + .version(1L) + .metadata(Collections.emptyMap()) + .build() ) ) .build(); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/Retry2Tests.java b/server/src/test/java/org/elasticsearch/action/bulk/Retry2Tests.java index 5075c98421af0..4179a869441e5 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/Retry2Tests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/Retry2Tests.java @@ -81,7 +81,7 @@ private BulkRequest createBulkRequest() { public void testRetryBacksOff() throws Exception { BulkRequest bulkRequest = createBulkRequest(); Retry2 retry2 = new Retry2(CALLS_TO_FAIL); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); retry2.consumeRequestWithRetries(bulkClient::bulk, bulkRequest, future); BulkResponse response = future.actionGet(); @@ -93,7 +93,7 @@ public void testRetryFailsAfterRetry() throws Exception { BulkRequest bulkRequest = createBulkRequest(); try { Retry2 retry2 = new Retry2(CALLS_TO_FAIL - 1); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); retry2.consumeRequestWithRetries(bulkClient::bulk, bulkRequest, future); BulkResponse response = future.actionGet(); /* diff --git a/server/src/test/java/org/elasticsearch/action/bulk/SimulateBulkRequestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/SimulateBulkRequestTests.java new file mode 100644 index 0000000000000..7fe036f97596e --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/bulk/SimulateBulkRequestTests.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.bulk; + +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class SimulateBulkRequestTests extends ESTestCase { + + public void testSerialization() throws Exception { + testSerialization(getTestPipelineSubstitutions()); + testSerialization(null); + testSerialization(Map.of()); + } + + private void testSerialization(Map<String, Map<String, Object>> pipelineSubstitutions) throws IOException { + SimulateBulkRequest simulateBulkRequest = new SimulateBulkRequest(pipelineSubstitutions); + /* + * Note: SimulateBulkRequest does not implement equals or hashCode, so we can't test serialization in the usual way for a + * Writeable + */ + SimulateBulkRequest copy = copyWriteable(simulateBulkRequest, null, SimulateBulkRequest::new); + assertThat(copy.getPipelineSubstitutions(), equalTo(simulateBulkRequest.getPipelineSubstitutions())); + } + + private Map<String, Map<String, Object>> getTestPipelineSubstitutions() { + return new HashMap<>() { + { + put("pipeline1", new HashMap<>() { + { + put("processors", List.of(new HashMap<>() { + { + put("processor2", new HashMap<>()); + } + }, new HashMap<>() { + { + put("processor3", new HashMap<>()); + } + })); + } + }); + put("pipeline2", new HashMap<>() { + { + put("processors", List.of(new HashMap<>() { + { + put("processor3", new HashMap<>()); + } + })); + } + }); + } + }; + } +} diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java index e097b83fb9d35..1276f6c2db58b 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.delete.DeleteRequest; @@ -21,7 +20,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -34,7 +32,6 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockUtils; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -101,10 +98,6 @@ private void indicesThatCannotBeCreatedTestCase( when(clusterService.state()).thenReturn(state); when(clusterService.getSettings()).thenReturn(Settings.EMPTY); - DiscoveryNodes discoveryNodes = mock(DiscoveryNodes.class); - when(state.getNodes()).thenReturn(discoveryNodes); -
when(discoveryNodes.getMinNodeVersion()).thenReturn(VersionUtils.randomCompatibleVersion(random(), Version.CURRENT)); - DiscoveryNode localNode = mock(DiscoveryNode.class); when(clusterService.localNode()).thenReturn(localNode); when(localNode.isIngestNode()).thenReturn(randomBoolean()); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java index 0168eb0488a5b..95039f6fb0de1 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; @@ -46,7 +45,6 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockUtils; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.transport.TransportResponseHandler; @@ -194,7 +192,6 @@ public void setupAction() { nodes = mock(DiscoveryNodes.class); Map ingestNodes = Map.of("node1", remoteNode1, "node2", remoteNode2); when(nodes.getIngestNodes()).thenReturn(ingestNodes); - when(nodes.getMinNodeVersion()).thenReturn(VersionUtils.randomCompatibleVersion(random(), Version.CURRENT)); ClusterState state = mock(ClusterState.class); when(state.getNodes()).thenReturn(nodes); Metadata metadata = Metadata.builder() @@ -679,16 +676,10 @@ public void testFindDefaultPipelineFromTemplateMatch() { public void testFindDefaultPipelineFromV2TemplateMatch() { Exception exception = new Exception("fake exception"); - ComposableIndexTemplate t1 = new ComposableIndexTemplate( - Collections.singletonList("missing_*"), - new Template(Settings.builder().put(IndexSettings.DEFAULT_PIPELINE.getKey(), "pipeline2").build(), null, null), - null, - null, - null, - null, - null, - null - ); + ComposableIndexTemplate t1 = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("missing_*")) + .template(new Template(Settings.builder().put(IndexSettings.DEFAULT_PIPELINE.getKey(), "pipeline2").build(), null, null)) + .build(); ClusterState state = clusterService.state(); Metadata metadata = Metadata.builder().put("my-template", t1).build(); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java index e2c71f3b20084..7a87cf29bb526 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java @@ -142,7 +142,7 @@ public void tearDown() throws Exception { public void testDeleteNonExistingDocDoesNotCreateIndex() throws Exception { BulkRequest bulkRequest = new BulkRequest().add(new DeleteRequest("index").id("id")); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); ActionTestUtils.execute(bulkAction, null, bulkRequest, future); BulkResponse response = future.actionGet(); @@ -157,7 +157,7 @@ public void testDeleteNonExistingDocDoesNotCreateIndex() throws Exception { public void 
testDeleteNonExistingDocExternalVersionCreatesIndex() throws Exception { BulkRequest bulkRequest = new BulkRequest().add(new DeleteRequest("index").id("id").versionType(VersionType.EXTERNAL).version(0)); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); ActionTestUtils.execute(bulkAction, null, bulkRequest, future); future.actionGet(); assertTrue(bulkAction.indexCreated); @@ -168,7 +168,7 @@ public void testDeleteNonExistingDocExternalGteVersionCreatesIndex() throws Exce new DeleteRequest("index2").id("id").versionType(VersionType.EXTERNAL_GTE).version(0) ); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); ActionTestUtils.execute(bulkAction, null, bulkRequest, future); future.actionGet(); assertTrue(bulkAction.indexCreated); @@ -313,7 +313,7 @@ public void testRejectCoordination() throws Exception { try { threadPool.startForcingRejections(); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); ActionTestUtils.execute(bulkAction, null, bulkRequest, future); expectThrows(EsRejectedExecutionException.class, future::actionGet); } finally { @@ -327,7 +327,7 @@ public void testRejectionAfterCreateIndexIsPropagated() throws Exception { bulkAction.failIndexCreation = randomBoolean(); try { bulkAction.beforeIndexCreation = threadPool::startForcingRejections; - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); ActionTestUtils.execute(bulkAction, null, bulkRequest, future); expectThrows(EsRejectedExecutionException.class, future::actionGet); assertTrue(bulkAction.indexCreated); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java new file mode 100644 index 0000000000000..647eafb5f3cdd --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java @@ -0,0 +1,206 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.bulk; + +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.SimulateIndexResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.IndexingPressure; +import org.elasticsearch.indices.EmptySystemIndices; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.test.index.IndexVersionUtils; +import org.elasticsearch.test.transport.CapturingTransport; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; + +import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; + +public class TransportSimulateBulkActionTests extends ESTestCase { + + /** Services needed by bulk action */ + private TransportService transportService; + private ClusterService clusterService; + private TestThreadPool threadPool; + + private TestTransportSimulateBulkAction bulkAction; + + class TestTransportSimulateBulkAction extends TransportSimulateBulkAction { + + volatile boolean failIndexCreation = false; + boolean indexCreated = false; // set when the "real" index is created + Runnable beforeIndexCreation = null; + + TestTransportSimulateBulkAction() { + super( + TransportSimulateBulkActionTests.this.threadPool, + transportService, + clusterService, + null, + null, + new ActionFilters(Collections.emptySet()), + new TransportBulkActionTookTests.Resolver(), + new IndexingPressure(Settings.EMPTY), + EmptySystemIndices.INSTANCE + ); + } + + @Override + void createIndex(String index, TimeValue timeout, ActionListener listener) { + indexCreated = true; + if (beforeIndexCreation != null) { + beforeIndexCreation.run(); + } + if (failIndexCreation) { + listener.onFailure(new ResourceAlreadyExistsException("index already exists")); + } else { + listener.onResponse(null); + } + } + } + + @Before + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool(getClass().getName()); + DiscoveryNode discoveryNode = DiscoveryNodeUtils.builder("node") + .version( + VersionUtils.randomCompatibleVersion(random(), Version.CURRENT), + IndexVersions.MINIMUM_COMPATIBLE, + IndexVersionUtils.randomCompatibleVersion(random()) + ) + .build(); + clusterService = createClusterService(threadPool, 
discoveryNode); + CapturingTransport capturingTransport = new CapturingTransport(); + transportService = capturingTransport.createTransportService( + clusterService.getSettings(), + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + boundAddress -> clusterService.localNode(), + null, + Collections.emptySet() + ); + transportService.start(); + transportService.acceptIncomingRequests(); + bulkAction = new TestTransportSimulateBulkAction(); + } + + @After + public void tearDown() throws Exception { + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + threadPool = null; + clusterService.close(); + super.tearDown(); + } + + public void testIndexData() { + Task task = mock(Task.class); // unused + BulkRequest bulkRequest = new SimulateBulkRequest((Map<String, Map<String, Object>>) null); + int bulkItemCount = randomIntBetween(0, 200); + for (int i = 0; i < bulkItemCount; i++) { + Map<String, ?> source = Map.of(randomAlphaOfLength(10), randomAlphaOfLength(5)); + IndexRequest indexRequest = new IndexRequest(randomAlphaOfLength(10)).id(randomAlphaOfLength(10)).source(source); + for (int j = 0; j < randomIntBetween(0, 10); j++) { + indexRequest.addPipeline(randomAlphaOfLength(12)); + } + bulkRequest.add(indexRequest); + } + AtomicBoolean onResponseCalled = new AtomicBoolean(false); + ActionListener<BulkResponse> listener = new ActionListener<>() { + @Override + public void onResponse(BulkResponse response) { + onResponseCalled.set(true); + BulkItemResponse[] responseItems = response.getItems(); + assertThat(responseItems.length, equalTo(bulkRequest.requests().size())); + for (int i = 0; i < responseItems.length; i++) { + BulkItemResponse responseItem = responseItems[i]; + IndexRequest indexRequest = (IndexRequest) bulkRequest.requests().get(i); + assertNull(responseItem.getFailure()); + assertThat(responseItem.getResponse(), instanceOf(SimulateIndexResponse.class)); + SimulateIndexResponse simulateIndexResponse = responseItem.getResponse(); + assertThat(simulateIndexResponse.getIndex(), equalTo(indexRequest.index())); + /* + * SimulateIndexResponse doesn't have an equals() method, and most of its state is private. So we check that + * its toXContent method produces the expected output.
+ */ + String output = Strings.toString(simulateIndexResponse); + try { + assertEquals( + XContentHelper.stripWhitespace( + Strings.format( + """ + { + "_index": "%s", + "_source": %s, + "executed_pipelines": [%s] + }""", + indexRequest.index(), + indexRequest.source(), + indexRequest.getExecutedPipelines() + .stream() + .map(pipeline -> "\"" + pipeline + "\"") + .collect(Collectors.joining(",")) + ) + ), + output + ); + } catch (IOException e) { + fail(e); + } + } + } + + @Override + public void onFailure(Exception e) { + fail(e, "Unexpected error"); + } + }; + Set autoCreateIndices = Set.of(); // unused + Map indicesThatCannotBeCreated = Map.of(); // unused + long startTime = 0; + bulkAction.createMissingIndicesAndIndexData( + task, + bulkRequest, + randomAlphaOfLength(10), + listener, + autoCreateIndices, + indicesThatCannotBeCreated, + startTime + ); + assertThat(onResponseCalled.get(), equalTo(true)); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java b/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java index e02f5f66e6c80..b3ca89c9dc0bd 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.Build; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.client.internal.Client; @@ -100,7 +99,7 @@ public void setup() { DiscoveryNode discoveryNode = DiscoveryNodeUtils.builder("_node_id").roles(emptySet()).build(); NodeInfo nodeInfo = new NodeInfo( - Version.CURRENT, + Build.current().version(), TransportVersion.current(), IndexVersion.current(), Map.of(), diff --git a/server/src/test/java/org/elasticsearch/action/ingest/SimulateIndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulateIndexResponseTests.java new file mode 100644 index 0000000000000..7ce3b411e978f --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/ingest/SimulateIndexResponseTests.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.ingest; + +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.RandomObjects; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; + +public class SimulateIndexResponseTests extends ESTestCase { + + public void testToXContent() throws IOException { + String id = randomAlphaOfLength(10); + String index = randomAlphaOfLength(5); + long version = randomLongBetween(0, 500); + final List<String> pipelines = new ArrayList<>(); + for (int i = 0; i < randomIntBetween(0, 20); i++) { + pipelines.add(randomAlphaOfLength(20)); + } + String source = """ + {"doc": {"key1": "val1", "key2": "val2"}}"""; + BytesReference sourceBytes = BytesReference.fromByteBuffer(ByteBuffer.wrap(source.getBytes(StandardCharsets.UTF_8))); + SimulateIndexResponse indexResponse = new SimulateIndexResponse(id, index, version, sourceBytes, XContentType.JSON, pipelines); + String output = Strings.toString(indexResponse); + assertEquals( + XContentHelper.stripWhitespace( + Strings.format( + """ + { + "_id": "%s", + "_index": "%s", + "_version": %d, + "_source": %s, + "executed_pipelines": [%s] + }""", + id, + index, + version, + source, + pipelines.stream().map(pipeline -> "\"" + pipeline + "\"").collect(Collectors.joining(",")) + ) + ), + output + ); + } + + public void testSerialization() throws IOException { + // Note: SimulateIndexResponse does not implement equals or hashCode, so we can't test serialization in the usual way for a Writeable + SimulateIndexResponse response = randomIndexResponse(); + IndexResponse copy = copyWriteable(response, null, SimulateIndexResponse::new); + assertThat(Strings.toString(response), equalTo(Strings.toString(copy))); + } + + /** + * Returns a {@link SimulateIndexResponse} populated with a random id, index, version, source and pipeline list. + */ + private static SimulateIndexResponse randomIndexResponse() { + String id = randomAlphaOfLength(10); + String index = randomAlphaOfLength(5); + long version = randomLongBetween(0, 500); + final List<String> pipelines = new ArrayList<>(); + for (int i = 0; i < randomIntBetween(0, 20); i++) { + pipelines.add(randomAlphaOfLength(20)); + } + XContentType xContentType = randomFrom(XContentType.values()); + BytesReference sourceBytes = RandomObjects.randomSource(random(), xContentType); + return new SimulateIndexResponse(id, index, version, sourceBytes, xContentType, pipelines); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java index 5cbb7ab228c0c..7f5b5f7716f3e 100644 --- a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java @@ -134,47 +134,45 @@ public void testTookWithRealClock() { private void runTestTook(final boolean controlled) { final AtomicLong expected = new AtomicLong(); - AbstractSearchAsyncAction<SearchPhaseResult> action = createAction( - new SearchRequest(), - new ArraySearchPhaseResults<>(10), - null, - controlled, - expected - ); - final long actual = action.buildTookInMillis(); - if (controlled) { - // with a controlled clock, we can assert the exact took time - assertThat(actual, equalTo(TimeUnit.NANOSECONDS.toMillis(expected.get()))); - } else { - // with a real clock, the best we can say is that it took as long as we spun for - assertThat(actual, greaterThanOrEqualTo(TimeUnit.NANOSECONDS.toMillis(expected.get()))); + var result = new ArraySearchPhaseResults<SearchPhaseResult>(10); + try { + AbstractSearchAsyncAction<SearchPhaseResult> action = createAction(new SearchRequest(), result, null, controlled, expected); + final long actual = action.buildTookInMillis(); + if (controlled) { + // with a controlled clock, we can assert the exact took time + assertThat(actual, equalTo(TimeUnit.NANOSECONDS.toMillis(expected.get()))); + } else { + // with a real clock, the best we can say is that it took as long as we spun for + assertThat(actual, greaterThanOrEqualTo(TimeUnit.NANOSECONDS.toMillis(expected.get()))); + } + } finally { + result.decRef(); } } public void testBuildShardSearchTransportRequest() { SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(randomBoolean()); final AtomicLong expected = new AtomicLong(); - AbstractSearchAsyncAction<SearchPhaseResult> action = createAction( - searchRequest, - new ArraySearchPhaseResults<>(10), - null, - false, - expected - ); - String clusterAlias = randomBoolean() ?
null : randomAlphaOfLengthBetween(5, 10); - SearchShardIterator iterator = new SearchShardIterator( - clusterAlias, - new ShardId(new Index("name", "foo"), 1), - Collections.emptyList(), - new OriginalIndices(new String[] { "name", "name1" }, IndicesOptions.strictExpand()) - ); - ShardSearchRequest shardSearchTransportRequest = action.buildShardSearchRequest(iterator, 10); - assertEquals(IndicesOptions.strictExpand(), shardSearchTransportRequest.indicesOptions()); - assertArrayEquals(new String[] { "name", "name1" }, shardSearchTransportRequest.indices()); - assertEquals(new MatchAllQueryBuilder(), shardSearchTransportRequest.getAliasFilter().getQueryBuilder()); - assertEquals(2.0f, shardSearchTransportRequest.indexBoost(), 0.0f); - assertArrayEquals(new String[] { "name", "name1" }, shardSearchTransportRequest.indices()); - assertEquals(clusterAlias, shardSearchTransportRequest.getClusterAlias()); + var result = new ArraySearchPhaseResults<SearchPhaseResult>(10); + try { + AbstractSearchAsyncAction<SearchPhaseResult> action = createAction(searchRequest, result, null, false, expected); + String clusterAlias = randomBoolean() ? null : randomAlphaOfLengthBetween(5, 10); + SearchShardIterator iterator = new SearchShardIterator( + clusterAlias, + new ShardId(new Index("name", "foo"), 1), + Collections.emptyList(), + new OriginalIndices(new String[] { "name", "name1" }, IndicesOptions.strictExpand()) + ); + ShardSearchRequest shardSearchTransportRequest = action.buildShardSearchRequest(iterator, 10); + assertEquals(IndicesOptions.strictExpand(), shardSearchTransportRequest.indicesOptions()); + assertArrayEquals(new String[] { "name", "name1" }, shardSearchTransportRequest.indices()); + assertEquals(new MatchAllQueryBuilder(), shardSearchTransportRequest.getAliasFilter().getQueryBuilder()); + assertEquals(2.0f, shardSearchTransportRequest.indexBoost(), 0.0f); + assertArrayEquals(new String[] { "name", "name1" }, shardSearchTransportRequest.indices()); + assertEquals(clusterAlias, shardSearchTransportRequest.getClusterAlias()); + } finally { + result.decRef(); + } } public void testSendSearchResponseDisallowPartialFailures() { diff --git a/server/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java b/server/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java index 659d4de8552c3..838e13d6026c7 100644 --- a/server/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java @@ -24,73 +24,77 @@ public class CountedCollectorTests extends ESTestCase { public void testCollect() throws InterruptedException { ArraySearchPhaseResults<SearchPhaseResult> consumer = new ArraySearchPhaseResults<>(randomIntBetween(1, 100)); - List<Integer> state = new ArrayList<>(); - int numResultsExpected = randomIntBetween(1, consumer.getAtomicArray().length()); - MockSearchPhaseContext context = new MockSearchPhaseContext(consumer.getAtomicArray().length()); - CountDownLatch latch = new CountDownLatch(1); - boolean maybeFork = randomBoolean(); - Executor executor = (runnable) -> { - if (randomBoolean() && maybeFork) { - new Thread(runnable).start(); + try { + List<Integer> state = new ArrayList<>(); + int numResultsExpected = randomIntBetween(1, consumer.getAtomicArray().length()); + MockSearchPhaseContext context = new MockSearchPhaseContext(consumer.getAtomicArray().length()); + CountDownLatch latch = new CountDownLatch(1); + boolean maybeFork = randomBoolean(); + Executor executor = (runnable) -> { + if (randomBoolean() && maybeFork) { + new
Thread(runnable).start(); - } else { - runnable.run(); - } - }; - CountedCollector collector = new CountedCollector<>(consumer, numResultsExpected, latch::countDown, context); - for (int i = 0; i < numResultsExpected; i++) { - int shardID = i; - switch (randomIntBetween(0, 2)) { - case 0 -> { - state.add(0); - executor.execute(() -> collector.countDown()); + } else { + runnable.run(); } - case 1 -> { - state.add(1); - executor.execute(() -> { - DfsSearchResult dfsSearchResult = new DfsSearchResult( - new ShardSearchContextId(UUIDs.randomBase64UUID(), shardID), - null, - null + }; + CountedCollector collector = new CountedCollector<>(consumer, numResultsExpected, latch::countDown, context); + for (int i = 0; i < numResultsExpected; i++) { + int shardID = i; + switch (randomIntBetween(0, 2)) { + case 0 -> { + state.add(0); + executor.execute(() -> collector.countDown()); + } + case 1 -> { + state.add(1); + executor.execute(() -> { + DfsSearchResult dfsSearchResult = new DfsSearchResult( + new ShardSearchContextId(UUIDs.randomBase64UUID(), shardID), + null, + null + ); + dfsSearchResult.setShardIndex(shardID); + dfsSearchResult.setSearchShardTarget(new SearchShardTarget("foo", new ShardId("bar", "baz", shardID), null)); + collector.onResult(dfsSearchResult); + }); + } + case 2 -> { + state.add(2); + executor.execute( + () -> collector.onFailure( + shardID, + new SearchShardTarget("foo", new ShardId("bar", "baz", shardID), null), + new RuntimeException("boom") + ) ); - dfsSearchResult.setShardIndex(shardID); - dfsSearchResult.setSearchShardTarget(new SearchShardTarget("foo", new ShardId("bar", "baz", shardID), null)); - collector.onResult(dfsSearchResult); - }); - } - case 2 -> { - state.add(2); - executor.execute( - () -> collector.onFailure( - shardID, - new SearchShardTarget("foo", new ShardId("bar", "baz", shardID), null), - new RuntimeException("boom") - ) - ); + } + default -> fail("unknown state"); } - default -> fail("unknown state"); } - } - latch.await(); - assertEquals(numResultsExpected, state.size()); - AtomicArray results = consumer.getAtomicArray(); - for (int i = 0; i < numResultsExpected; i++) { - switch (state.get(i)) { - case 0 -> assertNull(results.get(i)); - case 1 -> { - assertNotNull(results.get(i)); - assertEquals(i, results.get(i).getContextId().getId()); + latch.await(); + assertEquals(numResultsExpected, state.size()); + AtomicArray results = consumer.getAtomicArray(); + for (int i = 0; i < numResultsExpected; i++) { + switch (state.get(i)) { + case 0 -> assertNull(results.get(i)); + case 1 -> { + assertNotNull(results.get(i)); + assertEquals(i, results.get(i).getContextId().getId()); + } + case 2 -> { + final int shardId = i; + assertEquals(1, context.failures.stream().filter(f -> f.shardId() == shardId).count()); + } + default -> fail("unknown state"); } - case 2 -> { - final int shardId = i; - assertEquals(1, context.failures.stream().filter(f -> f.shardId() == shardId).count()); - } - default -> fail("unknown state"); } - } - for (int i = numResultsExpected; i < results.length(); i++) { - assertNull("index: " + i, results.get(i)); + for (int i = numResultsExpected; i < results.length(); i++) { + assertNull("index: " + i, results.get(i)); + } + } finally { + consumer.decRef(); } } } diff --git a/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java index 65a0950d05b4d..21c1e9b0470b5 100644 --- 
a/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java @@ -44,6 +44,10 @@ import java.util.List; import java.util.concurrent.atomic.AtomicReference; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; + public class DfsQueryPhaseTests extends ESTestCase { private static DfsSearchResult newSearchResult(int shardIndex, ShardSearchContextId contextId, SearchShardTarget target) { @@ -81,30 +85,38 @@ public void sendExecuteQuery( new SearchShardTarget("node1", new ShardId("test", "na", 0), null), null ); - queryResult.topDocs( - new TopDocsAndMaxScore( - new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(42, 1.0F) }), - 2.0F - ), - new DocValueFormat[0] - ); - queryResult.size(2); // the size of the result set - listener.onResponse(queryResult); + try { + queryResult.topDocs( + new TopDocsAndMaxScore( + new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(42, 1.0F) }), + 2.0F + ), + new DocValueFormat[0] + ); + queryResult.size(2); // the size of the result set + listener.onResponse(queryResult); + } finally { + queryResult.decRef(); + } } else if (request.contextId().getId() == 2) { QuerySearchResult queryResult = new QuerySearchResult( new ShardSearchContextId("", 123), new SearchShardTarget("node2", new ShardId("test", "na", 0), null), null ); - queryResult.topDocs( - new TopDocsAndMaxScore( - new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(84, 2.0F) }), - 2.0F - ), - new DocValueFormat[0] - ); - queryResult.size(2); // the size of the result set - listener.onResponse(queryResult); + try { + queryResult.topDocs( + new TopDocsAndMaxScore( + new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(84, 2.0F) }), + 2.0F + ), + new DocValueFormat[0] + ); + queryResult.size(2); // the size of the result set + listener.onResponse(queryResult); + } finally { + queryResult.decRef(); + } } else { fail("no such request ID: " + request.contextId()); } @@ -122,26 +134,30 @@ public void sendExecuteQuery( results.length(), exc -> {} ); - DfsQueryPhase phase = new DfsQueryPhase(results.asList(), null, null, consumer, (response) -> new SearchPhase("test") { - @Override - public void run() throws IOException { - responseRef.set(response.results); - } - }, mockSearchPhaseContext); - assertEquals("dfs_query", phase.getName()); - phase.run(); - mockSearchPhaseContext.assertNoFailure(); - assertNotNull(responseRef.get()); - assertNotNull(responseRef.get().get(0)); - assertNull(responseRef.get().get(0).fetchResult()); - assertEquals(1, responseRef.get().get(0).queryResult().topDocs().topDocs.totalHits.value); - assertEquals(42, responseRef.get().get(0).queryResult().topDocs().topDocs.scoreDocs[0].doc); - assertNotNull(responseRef.get().get(1)); - assertNull(responseRef.get().get(1).fetchResult()); - assertEquals(1, responseRef.get().get(1).queryResult().topDocs().topDocs.totalHits.value); - assertEquals(84, responseRef.get().get(1).queryResult().topDocs().topDocs.scoreDocs[0].doc); - assertTrue(mockSearchPhaseContext.releasedSearchContexts.isEmpty()); - assertEquals(2, mockSearchPhaseContext.numSuccess.get()); + try { + DfsQueryPhase phase = new DfsQueryPhase(results.asList(), null, null, consumer, (response) -> new SearchPhase("test") { + @Override + public void run() throws 
IOException { + responseRef.set(response.results); + } + }, mockSearchPhaseContext); + assertEquals("dfs_query", phase.getName()); + phase.run(); + mockSearchPhaseContext.assertNoFailure(); + assertNotNull(responseRef.get()); + assertNotNull(responseRef.get().get(0)); + assertNull(responseRef.get().get(0).fetchResult()); + assertEquals(1, responseRef.get().get(0).queryResult().topDocs().topDocs.totalHits.value); + assertEquals(42, responseRef.get().get(0).queryResult().topDocs().topDocs.scoreDocs[0].doc); + assertNotNull(responseRef.get().get(1)); + assertNull(responseRef.get().get(1).fetchResult()); + assertEquals(1, responseRef.get().get(1).queryResult().topDocs().topDocs.totalHits.value); + assertEquals(84, responseRef.get().get(1).queryResult().topDocs().topDocs.scoreDocs[0].doc); + assertTrue(mockSearchPhaseContext.releasedSearchContexts.isEmpty()); + assertEquals(2, mockSearchPhaseContext.numSuccess.get()); + } finally { + consumer.decRef(); + } } public void testDfsWith1ShardFailed() throws IOException { @@ -172,15 +188,19 @@ public void sendExecuteQuery( new SearchShardTarget("node1", new ShardId("test", "na", 0), null), null ); - queryResult.topDocs( - new TopDocsAndMaxScore( - new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(42, 1.0F) }), - 2.0F - ), - new DocValueFormat[0] - ); - queryResult.size(2); // the size of the result set - listener.onResponse(queryResult); + try { + queryResult.topDocs( + new TopDocsAndMaxScore( + new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(42, 1.0F) }), + 2.0F + ), + new DocValueFormat[0] + ); + queryResult.size(2); // the size of the result set + listener.onResponse(queryResult); + } finally { + queryResult.decRef(); + } } else if (request.contextId().getId() == 2) { listener.onFailure(new MockDirectoryWrapper.FakeIOException()); } else { @@ -200,28 +220,32 @@ public void sendExecuteQuery( results.length(), exc -> {} ); - DfsQueryPhase phase = new DfsQueryPhase(results.asList(), null, null, consumer, (response) -> new SearchPhase("test") { - @Override - public void run() throws IOException { - responseRef.set(response.results); - } - }, mockSearchPhaseContext); - assertEquals("dfs_query", phase.getName()); - phase.run(); - mockSearchPhaseContext.assertNoFailure(); - assertNotNull(responseRef.get()); - assertNotNull(responseRef.get().get(0)); - assertNull(responseRef.get().get(0).fetchResult()); - assertEquals(1, responseRef.get().get(0).queryResult().topDocs().topDocs.totalHits.value); - assertEquals(42, responseRef.get().get(0).queryResult().topDocs().topDocs.scoreDocs[0].doc); - assertNull(responseRef.get().get(1)); + try { + DfsQueryPhase phase = new DfsQueryPhase(results.asList(), null, null, consumer, (response) -> new SearchPhase("test") { + @Override + public void run() throws IOException { + responseRef.set(response.results); + } + }, mockSearchPhaseContext); + assertEquals("dfs_query", phase.getName()); + phase.run(); + mockSearchPhaseContext.assertNoFailure(); + assertNotNull(responseRef.get()); + assertNotNull(responseRef.get().get(0)); + assertNull(responseRef.get().get(0).fetchResult()); + assertEquals(1, responseRef.get().get(0).queryResult().topDocs().topDocs.totalHits.value); + assertEquals(42, responseRef.get().get(0).queryResult().topDocs().topDocs.scoreDocs[0].doc); + assertNull(responseRef.get().get(1)); - assertEquals(1, mockSearchPhaseContext.numSuccess.get()); - assertEquals(1, mockSearchPhaseContext.failures.size()); - 
assertTrue(mockSearchPhaseContext.failures.get(0).getCause() instanceof MockDirectoryWrapper.FakeIOException); - assertEquals(1, mockSearchPhaseContext.releasedSearchContexts.size()); - assertTrue(mockSearchPhaseContext.releasedSearchContexts.contains(new ShardSearchContextId("", 2L))); - assertNull(responseRef.get().get(1)); + assertEquals(1, mockSearchPhaseContext.numSuccess.get()); + assertEquals(1, mockSearchPhaseContext.failures.size()); + assertTrue(mockSearchPhaseContext.failures.get(0).getCause() instanceof MockDirectoryWrapper.FakeIOException); + assertEquals(1, mockSearchPhaseContext.releasedSearchContexts.size()); + assertTrue(mockSearchPhaseContext.releasedSearchContexts.contains(new ShardSearchContextId("", 2L))); + assertNull(responseRef.get().get(1)); + } finally { + consumer.decRef(); + } } public void testFailPhaseOnException() throws IOException { @@ -252,17 +276,21 @@ public void sendExecuteQuery( new SearchShardTarget("node1", new ShardId("test", "na", 0), null), null ); - queryResult.topDocs( - new TopDocsAndMaxScore( - new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(42, 1.0F) }), - 2.0F - ), - new DocValueFormat[0] - ); - queryResult.size(2); // the size of the result set - listener.onResponse(queryResult); + try { + queryResult.topDocs( + new TopDocsAndMaxScore( + new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(42, 1.0F) }), + 2.0F + ), + new DocValueFormat[0] + ); + queryResult.size(2); // the size of the result set + listener.onResponse(queryResult); + } finally { + queryResult.decRef(); + } } else if (request.contextId().getId() == 2) { - throw new UncheckedIOException(new MockDirectoryWrapper.FakeIOException()); + listener.onFailure(new UncheckedIOException(new MockDirectoryWrapper.FakeIOException())); } else { fail("no such request ID: " + request.contextId()); } @@ -280,15 +308,21 @@ public void sendExecuteQuery( results.length(), exc -> {} ); - DfsQueryPhase phase = new DfsQueryPhase(results.asList(), null, null, consumer, (response) -> new SearchPhase("test") { - @Override - public void run() throws IOException { - responseRef.set(response.results); - } - }, mockSearchPhaseContext); - assertEquals("dfs_query", phase.getName()); - expectThrows(UncheckedIOException.class, phase::run); - assertTrue(mockSearchPhaseContext.releasedSearchContexts.isEmpty()); // phase execution will clean up on the contexts + try { + DfsQueryPhase phase = new DfsQueryPhase(results.asList(), null, null, consumer, (response) -> new SearchPhase("test") { + @Override + public void run() throws IOException { + responseRef.set(response.results); + } + }, mockSearchPhaseContext); + assertEquals("dfs_query", phase.getName()); + phase.run(); + assertThat(mockSearchPhaseContext.failures, hasSize(1)); + assertThat(mockSearchPhaseContext.failures.get(0).getCause(), instanceOf(UncheckedIOException.class)); + assertThat(mockSearchPhaseContext.releasedSearchContexts, hasSize(1)); // phase execution will clean up on the contexts + } finally { + consumer.decRef(); + } } public void testRewriteShardSearchRequestWithRank() { @@ -301,7 +335,7 @@ public void testRewriteShardSearchRequestWithRank() { ); MockSearchPhaseContext mspc = new MockSearchPhaseContext(2); mspc.searchTransport = new SearchTransportService(null, null, null); - DfsQueryPhase dqp = new DfsQueryPhase(null, null, dkrs, null, null, mspc); + DfsQueryPhase dqp = new DfsQueryPhase(null, null, dkrs, mock(QueryPhaseResultConsumer.class), null, mspc); 
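
The hunks above make two repeated changes: mock transports now report failures through listener.onFailure(...) instead of throwing, and every ref-counted object (query results, consumers) is released in a finally block. A minimal, self-contained sketch of that ownership discipline follows; every name in it is a hypothetical stand-in, not the real Elasticsearch API.

// Editorial sketch, not part of the patch: the "create, use in try, decRef in finally"
// rule the tests above now follow for ref-counted search results.
final class RefCountingSketch {

    interface RefCounted {
        void incRef();

        boolean decRef(); // returns true once the final reference has been released
    }

    static final class PooledResult implements RefCounted {
        private int refs = 1; // the creator starts out owning exactly one reference

        @Override
        public synchronized void incRef() {
            refs++;
        }

        @Override
        public synchronized boolean decRef() {
            if (--refs == 0) {
                // last reference dropped: free any pooled buffers the result holds
                return true;
            }
            return false;
        }
    }

    // Producer side, mirroring the try/finally blocks added above: create the result,
    // hand it to the listener, and always drop the creator's reference afterwards.
    static void produce(java.util.function.Consumer<PooledResult> listener) {
        PooledResult result = new PooledResult();
        try {
            listener.accept(result); // a consumer that keeps the result must incRef() it
        } finally {
            result.decRef(); // runs on both the success and the failure path
        }
    }
}
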
QueryBuilder bm25 = new TermQueryBuilder("field", "term"); SearchSourceBuilder ssb = new SearchSourceBuilder().query(bm25) diff --git a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java index 0f2c55b586b7c..126d09663a169 100644 --- a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java @@ -102,9 +102,15 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL mSearchResponses.add(new MultiSearchResponse.Item(mockSearchPhaseContext.searchResponse.get(), null)); } - listener.onResponse( - new MultiSearchResponse(mSearchResponses.toArray(new MultiSearchResponse.Item[0]), randomIntBetween(1, 10000)) + var response = new MultiSearchResponse( + mSearchResponses.toArray(new MultiSearchResponse.Item[0]), + randomIntBetween(1, 10000) ); + try { + listener.onResponse(response); + } finally { + response.decRef(); + } } }; @@ -164,14 +170,17 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY ); - listener.onResponse( - new MultiSearchResponse( - new MultiSearchResponse.Item[] { - new MultiSearchResponse.Item(null, new RuntimeException("boom")), - new MultiSearchResponse.Item(searchResponse, null) }, - randomIntBetween(1, 10000) - ) + var response = new MultiSearchResponse( + new MultiSearchResponse.Item[] { + new MultiSearchResponse.Item(null, new RuntimeException("boom")), + new MultiSearchResponse.Item(searchResponse, null) }, + randomIntBetween(1, 10000) ); + try { + listener.onResponse(response); + } finally { + response.decRef(); + } } }; diff --git a/server/src/test/java/org/elasticsearch/action/search/FetchLookupFieldsPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/FetchLookupFieldsPhaseTests.java index fa0020975fb00..215293517a467 100644 --- a/server/src/test/java/org/elasticsearch/action/search/FetchLookupFieldsPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/FetchLookupFieldsPhaseTests.java @@ -119,7 +119,12 @@ void sendExecuteMultiSearch( null ); } - listener.onResponse(new MultiSearchResponse(responses, randomNonNegativeLong())); + var response = new MultiSearchResponse(responses, randomNonNegativeLong()); + try { + listener.onResponse(response); + } finally { + response.decRef(); + } } }; diff --git a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java index 82e579ce7eb36..3d66c4bc2793f 100644 --- a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java @@ -38,7 +38,9 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; +import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.nullValue; public class FetchSearchPhaseTests extends ESTestCase { @@ -56,55 +58,71 @@ public void testShortcutQueryAndFetchOptimization() { 1, exc -> {} ); - boolean hasHits = randomBoolean(); - boolean profiled = hasHits && randomBoolean(); - final int numHits; - if (hasHits) { - QuerySearchResult queryResult = new QuerySearchResult(); - 
queryResult.setSearchShardTarget(new SearchShardTarget("node0", new ShardId("index", "index", 0), null)); - queryResult.topDocs( - new TopDocsAndMaxScore( - new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(42, 1.0F) }), - 1.0F - ), - new DocValueFormat[0] - ); - addProfiling(profiled, queryResult); - queryResult.size(1); - FetchSearchResult fetchResult = new FetchSearchResult(); - fetchResult.setSearchShardTarget(queryResult.getSearchShardTarget()); - SearchHits hits = new SearchHits(new SearchHit[] { new SearchHit(42) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F); - fetchResult.shardResult(hits, fetchProfile(profiled)); - QueryFetchSearchResult fetchSearchResult = new QueryFetchSearchResult(queryResult, fetchResult); - fetchSearchResult.setShardIndex(0); - results.consumeResult(fetchSearchResult, () -> {}); - numHits = 1; - } else { - numHits = 0; - } + try { + boolean hasHits = randomBoolean(); + boolean profiled = hasHits && randomBoolean(); + final int numHits; + if (hasHits) { + QuerySearchResult queryResult = new QuerySearchResult(); + queryResult.setSearchShardTarget(new SearchShardTarget("node0", new ShardId("index", "index", 0), null)); + queryResult.topDocs( + new TopDocsAndMaxScore( + new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(42, 1.0F) }), + 1.0F + ), + new DocValueFormat[0] + ); + addProfiling(profiled, queryResult); + queryResult.size(1); + FetchSearchResult fetchResult = new FetchSearchResult(); + try { + fetchResult.setSearchShardTarget(queryResult.getSearchShardTarget()); + SearchHits hits = new SearchHits( + new SearchHit[] { new SearchHit(42) }, + new TotalHits(1, TotalHits.Relation.EQUAL_TO), + 1.0F + ); + fetchResult.shardResult(hits, fetchProfile(profiled)); + QueryFetchSearchResult fetchSearchResult = QueryFetchSearchResult.of(queryResult, fetchResult); + try { + fetchSearchResult.setShardIndex(0); + results.consumeResult(fetchSearchResult, () -> {}); + } finally { + fetchSearchResult.decRef(); + } + numHits = 1; + } finally { + fetchResult.decRef(); + } + } else { + numHits = 0; + } - FetchSearchPhase phase = new FetchSearchPhase( - results, - null, - mockSearchPhaseContext, - (searchResponse, scrollId) -> new SearchPhase("test") { - @Override - public void run() { - mockSearchPhaseContext.sendSearchResponse(searchResponse, null); + FetchSearchPhase phase = new FetchSearchPhase( + results, + null, + mockSearchPhaseContext, + (searchResponse, scrollId) -> new SearchPhase("test") { + @Override + public void run() { + mockSearchPhaseContext.sendSearchResponse(searchResponse, null); + } } + ); + assertEquals("fetch", phase.getName()); + phase.run(); + mockSearchPhaseContext.assertNoFailure(); + SearchResponse searchResponse = mockSearchPhaseContext.searchResponse.get(); + assertNotNull(searchResponse); + assertEquals(numHits, searchResponse.getHits().getTotalHits().value); + if (numHits != 0) { + assertEquals(42, searchResponse.getHits().getAt(0).docId()); } - ); - assertEquals("fetch", phase.getName()); - phase.run(); - mockSearchPhaseContext.assertNoFailure(); - SearchResponse searchResponse = mockSearchPhaseContext.searchResponse.get(); - assertNotNull(searchResponse); - assertEquals(numHits, searchResponse.getHits().getTotalHits().value); - if (numHits != 0) { - assertEquals(42, searchResponse.getHits().getAt(0).docId()); + assertProfiles(profiled, 1, searchResponse); + assertTrue(mockSearchPhaseContext.releasedSearchContexts.isEmpty()); + } finally { + results.decRef(); 
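
The counterpart to that producer-side pattern is visible in the mock transports here: they can call listener.onResponse(fetchResult) and still decRef() in the finally block only because a listener that keeps a result beyond the callback retains its own reference first. A consumer-side sketch, reusing the hypothetical PooledResult from the earlier note:

// Editorial sketch: a listener that stores a ref-counted result must incRef() it
// before the producer's finally block releases the creator's reference.
final class RetainingListener {

    private RefCountingSketch.PooledResult kept;

    void onResponse(RefCountingSketch.PooledResult result) {
        result.incRef(); // take our own reference while the producer still holds one
        this.kept = result;
    }

    void close() {
        if (kept != null) {
            kept.decRef(); // release our reference once the stored result is no longer needed
            kept = null;
        }
    }
}
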
} - assertProfiles(profiled, 1, searchResponse); - assertTrue(mockSearchPhaseContext.releasedSearchContexts.isEmpty()); } private void assertProfiles(boolean profiled, int totalShards, SearchResponse searchResponse) { @@ -130,84 +148,109 @@ public void testFetchTwoDocument() { 2, exc -> {} ); - int resultSetSize = randomIntBetween(2, 10); - boolean profiled = randomBoolean(); + try { + int resultSetSize = randomIntBetween(2, 10); + boolean profiled = randomBoolean(); - ShardSearchContextId ctx1 = new ShardSearchContextId(UUIDs.base64UUID(), 123); - SearchShardTarget shard1Target = new SearchShardTarget("node1", new ShardId("test", "na", 0), null); - QuerySearchResult queryResult = new QuerySearchResult(ctx1, shard1Target, null); - queryResult.topDocs( - new TopDocsAndMaxScore( - new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(42, 1.0F) }), - 2.0F - ), - new DocValueFormat[0] - ); - queryResult.size(resultSetSize); // the size of the result set - queryResult.setShardIndex(0); - addProfiling(profiled, queryResult); - results.consumeResult(queryResult, () -> {}); + ShardSearchContextId ctx1 = new ShardSearchContextId(UUIDs.base64UUID(), 123); + SearchShardTarget shard1Target = new SearchShardTarget("node1", new ShardId("test", "na", 0), null); + SearchShardTarget shard2Target = new SearchShardTarget("node2", new ShardId("test", "na", 1), null); + QuerySearchResult queryResult = new QuerySearchResult(ctx1, shard1Target, null); + try { + queryResult.topDocs( + new TopDocsAndMaxScore( + new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(42, 1.0F) }), + 2.0F + ), + new DocValueFormat[0] + ); + queryResult.size(resultSetSize); // the size of the result set + queryResult.setShardIndex(0); + addProfiling(profiled, queryResult); + results.consumeResult(queryResult, () -> {}); - final ShardSearchContextId ctx2 = new ShardSearchContextId(UUIDs.base64UUID(), 321); - SearchShardTarget shard2Target = new SearchShardTarget("node2", new ShardId("test", "na", 1), null); - queryResult = new QuerySearchResult(ctx2, shard2Target, null); - queryResult.topDocs( - new TopDocsAndMaxScore( - new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(84, 2.0F) }), - 2.0F - ), - new DocValueFormat[0] - ); - queryResult.size(resultSetSize); - queryResult.setShardIndex(1); - addProfiling(profiled, queryResult); - results.consumeResult(queryResult, () -> {}); + } finally { + queryResult.decRef(); + } - mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { - @Override - public void sendExecuteFetch( - Transport.Connection connection, - ShardFetchSearchRequest request, - SearchTask task, - SearchActionListener listener - ) { - FetchSearchResult fetchResult = new FetchSearchResult(); - SearchHits hits; - if (request.contextId().equals(ctx2)) { - fetchResult.setSearchShardTarget(shard2Target); - hits = new SearchHits(new SearchHit[] { new SearchHit(84) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 2.0F); - } else { - assertEquals(ctx1, request.contextId()); - fetchResult.setSearchShardTarget(shard1Target); - hits = new SearchHits(new SearchHit[] { new SearchHit(42) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F); - } - fetchResult.shardResult(hits, fetchProfile(profiled)); - listener.onResponse(fetchResult); + final ShardSearchContextId ctx2 = new ShardSearchContextId(UUIDs.base64UUID(), 321); + try { + queryResult = new QuerySearchResult(ctx2, shard2Target, null); + 
queryResult.topDocs( + new TopDocsAndMaxScore( + new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(84, 2.0F) }), + 2.0F + ), + new DocValueFormat[0] + ); + queryResult.size(resultSetSize); + queryResult.setShardIndex(1); + addProfiling(profiled, queryResult); + results.consumeResult(queryResult, () -> {}); + } finally { + queryResult.decRef(); } - }; - FetchSearchPhase phase = new FetchSearchPhase( - results, - null, - mockSearchPhaseContext, - (searchResponse, scrollId) -> new SearchPhase("test") { + + mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { @Override - public void run() { - mockSearchPhaseContext.sendSearchResponse(searchResponse, null); + public void sendExecuteFetch( + Transport.Connection connection, + ShardFetchSearchRequest request, + SearchTask task, + SearchActionListener listener + ) { + FetchSearchResult fetchResult = new FetchSearchResult(); + try { + SearchHits hits; + if (request.contextId().equals(ctx2)) { + fetchResult.setSearchShardTarget(shard2Target); + hits = new SearchHits( + new SearchHit[] { new SearchHit(84) }, + new TotalHits(1, TotalHits.Relation.EQUAL_TO), + 2.0F + ); + } else { + assertEquals(ctx1, request.contextId()); + fetchResult.setSearchShardTarget(shard1Target); + hits = new SearchHits( + new SearchHit[] { new SearchHit(42) }, + new TotalHits(1, TotalHits.Relation.EQUAL_TO), + 1.0F + ); + } + fetchResult.shardResult(hits, fetchProfile(profiled)); + listener.onResponse(fetchResult); + } finally { + fetchResult.decRef(); + } } - } - ); - assertEquals("fetch", phase.getName()); - phase.run(); - mockSearchPhaseContext.assertNoFailure(); - SearchResponse searchResponse = mockSearchPhaseContext.searchResponse.get(); - assertNotNull(searchResponse); - assertEquals(2, searchResponse.getHits().getTotalHits().value); - assertEquals(84, searchResponse.getHits().getAt(0).docId()); - assertEquals(42, searchResponse.getHits().getAt(1).docId()); - assertEquals(0, searchResponse.getFailedShards()); - assertEquals(2, searchResponse.getSuccessfulShards()); - assertProfiles(profiled, 2, searchResponse); - assertTrue(mockSearchPhaseContext.releasedSearchContexts.isEmpty()); + }; + FetchSearchPhase phase = new FetchSearchPhase( + results, + null, + mockSearchPhaseContext, + (searchResponse, scrollId) -> new SearchPhase("test") { + @Override + public void run() { + mockSearchPhaseContext.sendSearchResponse(searchResponse, null); + } + } + ); + assertEquals("fetch", phase.getName()); + phase.run(); + mockSearchPhaseContext.assertNoFailure(); + SearchResponse searchResponse = mockSearchPhaseContext.searchResponse.get(); + assertNotNull(searchResponse); + assertEquals(2, searchResponse.getHits().getTotalHits().value); + assertEquals(84, searchResponse.getHits().getAt(0).docId()); + assertEquals(42, searchResponse.getHits().getAt(1).docId()); + assertEquals(0, searchResponse.getFailedShards()); + assertEquals(2, searchResponse.getSuccessfulShards()); + assertProfiles(profiled, 2, searchResponse); + assertTrue(mockSearchPhaseContext.releasedSearchContexts.isEmpty()); + } finally { + results.decRef(); + } } public void testFailFetchOneDoc() { @@ -222,100 +265,116 @@ public void testFailFetchOneDoc() { 2, exc -> {} ); - int resultSetSize = randomIntBetween(2, 10); - boolean profiled = randomBoolean(); + try { + int resultSetSize = randomIntBetween(2, 10); + boolean profiled = randomBoolean(); - final ShardSearchContextId ctx = new ShardSearchContextId(UUIDs.base64UUID(), 123); - SearchShardTarget 
shard1Target = new SearchShardTarget("node1", new ShardId("test", "na", 0), null); - QuerySearchResult queryResult = new QuerySearchResult(ctx, shard1Target, null); - queryResult.topDocs( - new TopDocsAndMaxScore( - new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(42, 1.0F) }), - 2.0F - ), - new DocValueFormat[0] - ); - queryResult.size(resultSetSize); // the size of the result set - queryResult.setShardIndex(0); - addProfiling(profiled, queryResult); - results.consumeResult(queryResult, () -> {}); - - SearchShardTarget shard2Target = new SearchShardTarget("node2", new ShardId("test", "na", 1), null); - queryResult = new QuerySearchResult(new ShardSearchContextId("", 321), shard2Target, null); - queryResult.topDocs( - new TopDocsAndMaxScore( - new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(84, 2.0F) }), - 2.0F - ), - new DocValueFormat[0] - ); - queryResult.size(resultSetSize); - queryResult.setShardIndex(1); - addProfiling(profiled, queryResult); - results.consumeResult(queryResult, () -> {}); + final ShardSearchContextId ctx = new ShardSearchContextId(UUIDs.base64UUID(), 123); + SearchShardTarget shard1Target = new SearchShardTarget("node1", new ShardId("test", "na", 0), null); + QuerySearchResult queryResult = new QuerySearchResult(ctx, shard1Target, null); + try { + queryResult.topDocs( + new TopDocsAndMaxScore( + new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(42, 1.0F) }), + 2.0F + ), + new DocValueFormat[0] + ); + queryResult.size(resultSetSize); // the size of the result set + queryResult.setShardIndex(0); + addProfiling(profiled, queryResult); + results.consumeResult(queryResult, () -> {}); + } finally { + queryResult.decRef(); + } - mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { - @Override - public void sendExecuteFetch( - Transport.Connection connection, - ShardFetchSearchRequest request, - SearchTask task, - SearchActionListener listener - ) { - if (request.contextId().getId() == 321) { - FetchSearchResult fetchResult = new FetchSearchResult(); - fetchResult.setSearchShardTarget(shard1Target); - SearchHits hits = new SearchHits( - new SearchHit[] { new SearchHit(84) }, - new TotalHits(1, TotalHits.Relation.EQUAL_TO), + SearchShardTarget shard2Target = new SearchShardTarget("node2", new ShardId("test", "na", 1), null); + queryResult = new QuerySearchResult(new ShardSearchContextId("", 321), shard2Target, null); + try { + queryResult.topDocs( + new TopDocsAndMaxScore( + new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(84, 2.0F) }), 2.0F - ); - fetchResult.shardResult(hits, fetchProfile(profiled)); - listener.onResponse(fetchResult); - } else { - listener.onFailure(new MockDirectoryWrapper.FakeIOException()); - } + ), + new DocValueFormat[0] + ); + queryResult.size(resultSetSize); + queryResult.setShardIndex(1); + addProfiling(profiled, queryResult); + results.consumeResult(queryResult, () -> {}); + } finally { + queryResult.decRef(); } - }; - FetchSearchPhase phase = new FetchSearchPhase( - results, - null, - mockSearchPhaseContext, - (searchResponse, scrollId) -> new SearchPhase("test") { + + mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { @Override - public void run() { - mockSearchPhaseContext.sendSearchResponse(searchResponse, null); + public void sendExecuteFetch( + Transport.Connection connection, + ShardFetchSearchRequest request, + 
SearchTask task, + SearchActionListener listener + ) { + if (request.contextId().getId() == 321) { + FetchSearchResult fetchResult = new FetchSearchResult(); + try { + fetchResult.setSearchShardTarget(shard1Target); + SearchHits hits = new SearchHits( + new SearchHit[] { new SearchHit(84) }, + new TotalHits(1, TotalHits.Relation.EQUAL_TO), + 2.0F + ); + fetchResult.shardResult(hits, fetchProfile(profiled)); + listener.onResponse(fetchResult); + } finally { + fetchResult.decRef(); + } + } else { + listener.onFailure(new MockDirectoryWrapper.FakeIOException()); + } + } + }; + FetchSearchPhase phase = new FetchSearchPhase( + results, + null, + mockSearchPhaseContext, + (searchResponse, scrollId) -> new SearchPhase("test") { + @Override + public void run() { + mockSearchPhaseContext.sendSearchResponse(searchResponse, null); + } } - } - ); - assertEquals("fetch", phase.getName()); - phase.run(); - mockSearchPhaseContext.assertNoFailure(); - SearchResponse searchResponse = mockSearchPhaseContext.searchResponse.get(); - assertNotNull(searchResponse); - assertEquals(2, searchResponse.getHits().getTotalHits().value); - assertEquals(84, searchResponse.getHits().getAt(0).docId()); - assertEquals(1, searchResponse.getFailedShards()); - assertEquals(1, searchResponse.getSuccessfulShards()); - assertEquals(1, searchResponse.getShardFailures().length); - assertTrue(searchResponse.getShardFailures()[0].getCause() instanceof MockDirectoryWrapper.FakeIOException); - assertEquals(1, mockSearchPhaseContext.releasedSearchContexts.size()); - if (profiled) { - /* - * Shard 2 failed to fetch but still searched so it will have - * profiling information for the search on both shards but only - * for the fetch on the successful shard. - */ - assertThat(searchResponse.getProfileResults().values().size(), equalTo(2)); - assertThat(searchResponse.getProfileResults().get(shard1Target.toString()).getFetchPhase(), nullValue()); - assertThat( - searchResponse.getProfileResults().get(shard2Target.toString()).getFetchPhase().getTime(), - equalTo(FETCH_PROFILE_TIME) ); - } else { - assertThat(searchResponse.getProfileResults(), equalTo(Map.of())); + assertEquals("fetch", phase.getName()); + phase.run(); + mockSearchPhaseContext.assertNoFailure(); + SearchResponse searchResponse = mockSearchPhaseContext.searchResponse.get(); + assertNotNull(searchResponse); + assertEquals(2, searchResponse.getHits().getTotalHits().value); + assertEquals(84, searchResponse.getHits().getAt(0).docId()); + assertEquals(1, searchResponse.getFailedShards()); + assertEquals(1, searchResponse.getSuccessfulShards()); + assertEquals(1, searchResponse.getShardFailures().length); + assertTrue(searchResponse.getShardFailures()[0].getCause() instanceof MockDirectoryWrapper.FakeIOException); + assertEquals(1, mockSearchPhaseContext.releasedSearchContexts.size()); + if (profiled) { + /* + * Shard 2 failed to fetch but still searched so it will have + * profiling information for the search on both shards but only + * for the fetch on the successful shard. 
+ */ + assertThat(searchResponse.getProfileResults().values().size(), equalTo(2)); + assertThat(searchResponse.getProfileResults().get(shard1Target.toString()).getFetchPhase(), nullValue()); + assertThat( + searchResponse.getProfileResults().get(shard2Target.toString()).getFetchPhase().getTime(), + equalTo(FETCH_PROFILE_TIME) + ); + } else { + assertThat(searchResponse.getProfileResults(), equalTo(Map.of())); + } + assertTrue(mockSearchPhaseContext.releasedSearchContexts.contains(ctx)); + } finally { + results.decRef(); } - assertTrue(mockSearchPhaseContext.releasedSearchContexts.contains(ctx)); } public void testFetchDocsConcurrently() throws InterruptedException { @@ -334,91 +393,103 @@ public void testFetchDocsConcurrently() throws InterruptedException { numHits, exc -> {} ); - SearchShardTarget[] shardTargets = new SearchShardTarget[numHits]; - for (int i = 0; i < numHits; i++) { - shardTargets[i] = new SearchShardTarget("node1", new ShardId("test", "na", i), null); - QuerySearchResult queryResult = new QuerySearchResult(new ShardSearchContextId("", i), shardTargets[i], null); - queryResult.topDocs( - new TopDocsAndMaxScore( - new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(i + 1, i) }), - i - ), - new DocValueFormat[0] - ); - queryResult.size(resultSetSize); // the size of the result set - queryResult.setShardIndex(i); - addProfiling(profiled, queryResult); - results.consumeResult(queryResult, () -> {}); - } - mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { - @Override - public void sendExecuteFetch( - Transport.Connection connection, - ShardFetchSearchRequest request, - SearchTask task, - SearchActionListener listener - ) { - new Thread(() -> { - FetchSearchResult fetchResult = new FetchSearchResult(); - fetchResult.setSearchShardTarget(shardTargets[(int) request.contextId().getId()]); - SearchHits hits = new SearchHits( - new SearchHit[] { new SearchHit((int) (request.contextId().getId() + 1)) }, - new TotalHits(1, TotalHits.Relation.EQUAL_TO), - 100F + try { + SearchShardTarget[] shardTargets = new SearchShardTarget[numHits]; + for (int i = 0; i < numHits; i++) { + shardTargets[i] = new SearchShardTarget("node1", new ShardId("test", "na", i), null); + QuerySearchResult queryResult = new QuerySearchResult(new ShardSearchContextId("", i), shardTargets[i], null); + try { + queryResult.topDocs( + new TopDocsAndMaxScore( + new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(i + 1, i) }), + i + ), + new DocValueFormat[0] ); - fetchResult.shardResult(hits, fetchProfile(profiled)); - listener.onResponse(fetchResult); - }).start(); + queryResult.size(resultSetSize); // the size of the result set + queryResult.setShardIndex(i); + addProfiling(profiled, queryResult); + results.consumeResult(queryResult, () -> {}); + } finally { + queryResult.decRef(); + } } - }; - CountDownLatch latch = new CountDownLatch(1); - FetchSearchPhase phase = new FetchSearchPhase( - results, - null, - mockSearchPhaseContext, - (searchResponse, scrollId) -> new SearchPhase("test") { + mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { @Override - public void run() { - mockSearchPhaseContext.sendSearchResponse(searchResponse, null); - latch.countDown(); + public void sendExecuteFetch( + Transport.Connection connection, + ShardFetchSearchRequest request, + SearchTask task, + SearchActionListener listener + ) { + new Thread(() -> { + FetchSearchResult fetchResult = 
new FetchSearchResult(); + try { + fetchResult.setSearchShardTarget(shardTargets[(int) request.contextId().getId()]); + SearchHits hits = new SearchHits( + new SearchHit[] { new SearchHit((int) (request.contextId().getId() + 1)) }, + new TotalHits(1, TotalHits.Relation.EQUAL_TO), + 100F + ); + fetchResult.shardResult(hits, fetchProfile(profiled)); + listener.onResponse(fetchResult); + } finally { + fetchResult.decRef(); + } + }).start(); + } + }; + CountDownLatch latch = new CountDownLatch(1); + FetchSearchPhase phase = new FetchSearchPhase( + results, + null, + mockSearchPhaseContext, + (searchResponse, scrollId) -> new SearchPhase("test") { + @Override + public void run() { + mockSearchPhaseContext.sendSearchResponse(searchResponse, null); + latch.countDown(); + } } + ); + assertEquals("fetch", phase.getName()); + phase.run(); + latch.await(); + mockSearchPhaseContext.assertNoFailure(); + SearchResponse searchResponse = mockSearchPhaseContext.searchResponse.get(); + assertNotNull(searchResponse); + assertEquals(numHits, searchResponse.getHits().getTotalHits().value); + assertEquals(Math.min(numHits, resultSetSize), searchResponse.getHits().getHits().length); + SearchHit[] hits = searchResponse.getHits().getHits(); + for (int i = 0; i < hits.length; i++) { + assertNotNull(hits[i]); + assertEquals("index: " + i, numHits - i, hits[i].docId()); + assertEquals("index: " + i, numHits - 1 - i, (int) hits[i].getScore()); } - ); - assertEquals("fetch", phase.getName()); - phase.run(); - latch.await(); - mockSearchPhaseContext.assertNoFailure(); - SearchResponse searchResponse = mockSearchPhaseContext.searchResponse.get(); - assertNotNull(searchResponse); - assertEquals(numHits, searchResponse.getHits().getTotalHits().value); - assertEquals(Math.min(numHits, resultSetSize), searchResponse.getHits().getHits().length); - SearchHit[] hits = searchResponse.getHits().getHits(); - for (int i = 0; i < hits.length; i++) { - assertNotNull(hits[i]); - assertEquals("index: " + i, numHits - i, hits[i].docId()); - assertEquals("index: " + i, numHits - 1 - i, (int) hits[i].getScore()); - } - assertEquals(0, searchResponse.getFailedShards()); - assertEquals(numHits, searchResponse.getSuccessfulShards()); - if (profiled) { - assertThat(searchResponse.getProfileResults().values().size(), equalTo(numHits)); - int count = 0; - for (SearchProfileShardResult profileShardResult : searchResponse.getProfileResults().values()) { - if (profileShardResult.getFetchPhase() != null) { - count++; - assertThat(profileShardResult.getFetchPhase().getTime(), equalTo(FETCH_PROFILE_TIME)); + assertEquals(0, searchResponse.getFailedShards()); + assertEquals(numHits, searchResponse.getSuccessfulShards()); + if (profiled) { + assertThat(searchResponse.getProfileResults().values().size(), equalTo(numHits)); + int count = 0; + for (SearchProfileShardResult profileShardResult : searchResponse.getProfileResults().values()) { + if (profileShardResult.getFetchPhase() != null) { + count++; + assertThat(profileShardResult.getFetchPhase().getTime(), equalTo(FETCH_PROFILE_TIME)); + } } + assertThat(count, equalTo(Math.min(numHits, resultSetSize))); + } else { + assertThat(searchResponse.getProfileResults(), equalTo(Map.of())); } - assertThat(count, equalTo(Math.min(numHits, resultSetSize))); - } else { - assertThat(searchResponse.getProfileResults(), equalTo(Map.of())); + int sizeReleasedContexts = Math.max(0, numHits - resultSetSize); // all non fetched results will be freed + assertEquals( + 
mockSearchPhaseContext.releasedSearchContexts.toString(), + sizeReleasedContexts, + mockSearchPhaseContext.releasedSearchContexts.size() + ); + } finally { + results.decRef(); } - int sizeReleasedContexts = Math.max(0, numHits - resultSetSize); // all non fetched results will be freed - assertEquals( - mockSearchPhaseContext.releasedSearchContexts.toString(), - sizeReleasedContexts, - mockSearchPhaseContext.releasedSearchContexts.size() - ); } public void testExceptionFailsPhase() { @@ -433,80 +504,103 @@ public void testExceptionFailsPhase() { 2, exc -> {} ); - int resultSetSize = randomIntBetween(2, 10); - boolean profiled = randomBoolean(); + try { + int resultSetSize = randomIntBetween(2, 10); + boolean profiled = randomBoolean(); - SearchShardTarget shard1Target = new SearchShardTarget("node1", new ShardId("test", "na", 0), null); - QuerySearchResult queryResult = new QuerySearchResult(new ShardSearchContextId("", 123), shard1Target, null); - queryResult.topDocs( - new TopDocsAndMaxScore( - new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(42, 1.0F) }), - 2.0F - ), - new DocValueFormat[0] - ); - queryResult.size(resultSetSize); // the size of the result set - queryResult.setShardIndex(0); - addProfiling(profiled, queryResult); - results.consumeResult(queryResult, () -> {}); - - SearchShardTarget shard2Target = new SearchShardTarget("node1", new ShardId("test", "na", 0), null); - queryResult = new QuerySearchResult(new ShardSearchContextId("", 321), shard2Target, null); - queryResult.topDocs( - new TopDocsAndMaxScore( - new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(84, 2.0F) }), - 2.0F - ), - new DocValueFormat[0] - ); - queryResult.size(resultSetSize); - queryResult.setShardIndex(1); - addProfiling(profiled, queryResult); - results.consumeResult(queryResult, () -> {}); - - AtomicInteger numFetches = new AtomicInteger(0); - mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { - @Override - public void sendExecuteFetch( - Transport.Connection connection, - ShardFetchSearchRequest request, - SearchTask task, - SearchActionListener listener - ) { - FetchSearchResult fetchResult = new FetchSearchResult(); - if (numFetches.incrementAndGet() == 1) { - throw new RuntimeException("BOOM"); - } - SearchHits hits; - if (request.contextId().getId() == 321) { - fetchResult.setSearchShardTarget(shard2Target); - hits = new SearchHits(new SearchHit[] { new SearchHit(84) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 2.0F); - } else { - fetchResult.setSearchShardTarget(shard1Target); - assertEquals(request, 123); - hits = new SearchHits(new SearchHit[] { new SearchHit(42) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F); - } - fetchResult.shardResult(hits, fetchProfile(profiled)); - listener.onResponse(fetchResult); + SearchShardTarget shard1Target = new SearchShardTarget("node1", new ShardId("test", "na", 0), null); + SearchShardTarget shard2Target = new SearchShardTarget("node1", new ShardId("test", "na", 0), null); + QuerySearchResult queryResult = new QuerySearchResult(new ShardSearchContextId("", 123), shard1Target, null); + try { + queryResult.topDocs( + new TopDocsAndMaxScore( + new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(42, 1.0F) }), + 2.0F + ), + new DocValueFormat[0] + ); + queryResult.size(resultSetSize); // the size of the result set + queryResult.setShardIndex(0); + addProfiling(profiled, queryResult); + 
results.consumeResult(queryResult, () -> {}); + } finally { + queryResult.decRef(); + } + queryResult = new QuerySearchResult(new ShardSearchContextId("", 321), shard2Target, null); + try { + queryResult.topDocs( + new TopDocsAndMaxScore( + new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(84, 2.0F) }), + 2.0F + ), + new DocValueFormat[0] + ); + queryResult.size(resultSetSize); + queryResult.setShardIndex(1); + addProfiling(profiled, queryResult); + results.consumeResult(queryResult, () -> {}); + } finally { + queryResult.decRef(); } - }; - FetchSearchPhase phase = new FetchSearchPhase( - results, - null, - mockSearchPhaseContext, - (searchResponse, scrollId) -> new SearchPhase("test") { + + AtomicInteger numFetches = new AtomicInteger(0); + mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { @Override - public void run() { - mockSearchPhaseContext.sendSearchResponse(searchResponse, null); + public void sendExecuteFetch( + Transport.Connection connection, + ShardFetchSearchRequest request, + SearchTask task, + SearchActionListener listener + ) { + FetchSearchResult fetchResult = new FetchSearchResult(); + try { + if (numFetches.incrementAndGet() == 1) { + listener.onFailure(new RuntimeException("BOOM")); + return; + } + SearchHits hits; + if (request.contextId().getId() == 321) { + fetchResult.setSearchShardTarget(shard2Target); + hits = new SearchHits( + new SearchHit[] { new SearchHit(84) }, + new TotalHits(1, TotalHits.Relation.EQUAL_TO), + 2.0F + ); + } else { + fetchResult.setSearchShardTarget(shard1Target); + assertEquals(request, 123); + hits = new SearchHits( + new SearchHit[] { new SearchHit(42) }, + new TotalHits(1, TotalHits.Relation.EQUAL_TO), + 1.0F + ); + } + fetchResult.shardResult(hits, fetchProfile(profiled)); + listener.onResponse(fetchResult); + } finally { + fetchResult.decRef(); + } } - } - ); - assertEquals("fetch", phase.getName()); - phase.run(); - assertNotNull(mockSearchPhaseContext.phaseFailure.get()); - assertEquals(mockSearchPhaseContext.phaseFailure.get().getMessage(), "BOOM"); - assertNull(mockSearchPhaseContext.searchResponse.get()); - assertTrue(mockSearchPhaseContext.releasedSearchContexts.isEmpty()); + }; + FetchSearchPhase phase = new FetchSearchPhase( + results, + null, + mockSearchPhaseContext, + (searchResponse, scrollId) -> new SearchPhase("test") { + @Override + public void run() { + mockSearchPhaseContext.sendSearchResponse(searchResponse, null); + } + } + ); + assertEquals("fetch", phase.getName()); + phase.run(); + assertNotNull(mockSearchPhaseContext.searchResponse.get()); + assertThat(mockSearchPhaseContext.searchResponse.get().getShardFailures(), arrayWithSize(1)); + assertThat(mockSearchPhaseContext.releasedSearchContexts, hasSize(1)); + } finally { + results.decRef(); + } } public void testCleanupIrrelevantContexts() { // contexts that are not fetched should be cleaned up @@ -521,93 +615,109 @@ public void testCleanupIrrelevantContexts() { // contexts that are not fetched s 2, exc -> {} ); - int resultSetSize = 1; - boolean profiled = randomBoolean(); - - final ShardSearchContextId ctx1 = new ShardSearchContextId(UUIDs.base64UUID(), 123); - SearchShardTarget shard1Target = new SearchShardTarget("node1", new ShardId("test", "na", 0), null); - QuerySearchResult queryResult = new QuerySearchResult(ctx1, shard1Target, null); - queryResult.topDocs( - new TopDocsAndMaxScore( - new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(42, 1.0F) 
}), - 2.0F - ), - new DocValueFormat[0] - ); - queryResult.size(resultSetSize); // the size of the result set - queryResult.setShardIndex(0); - addProfiling(profiled, queryResult); - results.consumeResult(queryResult, () -> {}); + try { + int resultSetSize = 1; + boolean profiled = randomBoolean(); - final ShardSearchContextId ctx2 = new ShardSearchContextId(UUIDs.base64UUID(), 321); - SearchShardTarget shard2Target = new SearchShardTarget("node2", new ShardId("test", "na", 1), null); - queryResult = new QuerySearchResult(ctx2, shard2Target, null); - queryResult.topDocs( - new TopDocsAndMaxScore( - new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(84, 2.0F) }), - 2.0F - ), - new DocValueFormat[0] - ); - queryResult.size(resultSetSize); - queryResult.setShardIndex(1); - addProfiling(profiled, queryResult); - results.consumeResult(queryResult, () -> {}); - - mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { - @Override - public void sendExecuteFetch( - Transport.Connection connection, - ShardFetchSearchRequest request, - SearchTask task, - SearchActionListener listener - ) { - FetchSearchResult fetchResult = new FetchSearchResult(); - if (request.contextId().getId() == 321) { - fetchResult.setSearchShardTarget(shard1Target); - SearchHits hits = new SearchHits( - new SearchHit[] { new SearchHit(84) }, - new TotalHits(1, TotalHits.Relation.EQUAL_TO), + final ShardSearchContextId ctx1 = new ShardSearchContextId(UUIDs.base64UUID(), 123); + SearchShardTarget shard1Target = new SearchShardTarget("node1", new ShardId("test", "na", 0), null); + QuerySearchResult queryResult = new QuerySearchResult(ctx1, shard1Target, null); + try { + queryResult.topDocs( + new TopDocsAndMaxScore( + new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(42, 1.0F) }), 2.0F - ); - fetchResult.shardResult(hits, fetchProfile(profiled)); - } else { - fail("requestID 123 should not be fetched but was"); - } - listener.onResponse(fetchResult); + ), + new DocValueFormat[0] + ); + queryResult.size(resultSetSize); // the size of the result set + queryResult.setShardIndex(0); + addProfiling(profiled, queryResult); + results.consumeResult(queryResult, () -> {}); + } finally { + queryResult.decRef(); + } + final ShardSearchContextId ctx2 = new ShardSearchContextId(UUIDs.base64UUID(), 321); + SearchShardTarget shard2Target = new SearchShardTarget("node2", new ShardId("test", "na", 1), null); + queryResult = new QuerySearchResult(ctx2, shard2Target, null); + try { + queryResult.topDocs( + new TopDocsAndMaxScore( + new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(84, 2.0F) }), + 2.0F + ), + new DocValueFormat[0] + ); + queryResult.size(resultSetSize); + queryResult.setShardIndex(1); + addProfiling(profiled, queryResult); + results.consumeResult(queryResult, () -> {}); + } finally { + queryResult.decRef(); } - }; - FetchSearchPhase phase = new FetchSearchPhase( - results, - null, - mockSearchPhaseContext, - (searchResponse, scrollId) -> new SearchPhase("test") { + + mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { @Override - public void run() { - mockSearchPhaseContext.sendSearchResponse(searchResponse, null); + public void sendExecuteFetch( + Transport.Connection connection, + ShardFetchSearchRequest request, + SearchTask task, + SearchActionListener listener + ) { + FetchSearchResult fetchResult = new FetchSearchResult(); + try { + if 
(request.contextId().getId() == 321) { + fetchResult.setSearchShardTarget(shard1Target); + SearchHits hits = new SearchHits( + new SearchHit[] { new SearchHit(84) }, + new TotalHits(1, TotalHits.Relation.EQUAL_TO), + 2.0F + ); + fetchResult.shardResult(hits, fetchProfile(profiled)); + } else { + fail("requestID 123 should not be fetched but was"); + } + listener.onResponse(fetchResult); + } finally { + fetchResult.decRef(); + } + } + }; + FetchSearchPhase phase = new FetchSearchPhase( + results, + null, + mockSearchPhaseContext, + (searchResponse, scrollId) -> new SearchPhase("test") { + @Override + public void run() { + mockSearchPhaseContext.sendSearchResponse(searchResponse, null); + } } - } - ); - assertEquals("fetch", phase.getName()); - phase.run(); - mockSearchPhaseContext.assertNoFailure(); - SearchResponse searchResponse = mockSearchPhaseContext.searchResponse.get(); - assertNotNull(searchResponse); - assertEquals(2, searchResponse.getHits().getTotalHits().value); - assertEquals(1, searchResponse.getHits().getHits().length); - assertEquals(84, searchResponse.getHits().getAt(0).docId()); - assertEquals(0, searchResponse.getFailedShards()); - assertEquals(2, searchResponse.getSuccessfulShards()); - if (profiled) { - assertThat(searchResponse.getProfileResults().size(), equalTo(2)); - assertThat(searchResponse.getProfileResults().get(shard1Target.toString()).getFetchPhase(), nullValue()); - assertThat( - searchResponse.getProfileResults().get(shard2Target.toString()).getFetchPhase().getTime(), - equalTo(FETCH_PROFILE_TIME) ); + assertEquals("fetch", phase.getName()); + phase.run(); + mockSearchPhaseContext.assertNoFailure(); + SearchResponse searchResponse = mockSearchPhaseContext.searchResponse.get(); + assertNotNull(searchResponse); + assertEquals(2, searchResponse.getHits().getTotalHits().value); + assertEquals(1, searchResponse.getHits().getHits().length); + assertEquals(84, searchResponse.getHits().getAt(0).docId()); + assertEquals(0, searchResponse.getFailedShards()); + assertEquals(2, searchResponse.getSuccessfulShards()); + if (profiled) { + assertThat(searchResponse.getProfileResults().size(), equalTo(2)); + assertThat(searchResponse.getProfileResults().get(shard1Target.toString()).getFetchPhase(), nullValue()); + assertThat( + searchResponse.getProfileResults().get(shard2Target.toString()).getFetchPhase().getTime(), + equalTo(FETCH_PROFILE_TIME) + ); + } + assertEquals(1, mockSearchPhaseContext.releasedSearchContexts.size()); + assertTrue(mockSearchPhaseContext.releasedSearchContexts.contains(ctx1)); + } finally { + results.decRef(); } - assertEquals(1, mockSearchPhaseContext.releasedSearchContexts.size()); - assertTrue(mockSearchPhaseContext.releasedSearchContexts.contains(ctx1)); + } private void addProfiling(boolean profiled, QuerySearchResult queryResult) { diff --git a/server/src/test/java/org/elasticsearch/action/search/KnnSearchSingleNodeTests.java b/server/src/test/java/org/elasticsearch/action/search/KnnSearchSingleNodeTests.java index ee42be5cc92d8..e6abe2f041a4c 100644 --- a/server/src/test/java/org/elasticsearch/action/search/KnnSearchSingleNodeTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/KnnSearchSingleNodeTests.java @@ -53,8 +53,8 @@ public void testKnnSearchRemovedVector() throws IOException { createIndex("index", indexSettings, builder); for (int doc = 0; doc < 10; doc++) { - client().prepareIndex("index").setId(Integer.toString(doc)).setSource("vector", randomVector(), "text", "hello world").get(); - 
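
The KnnSearchSingleNodeTests hunks here swap client().prepareIndex("index") for a bare prepareIndex("index"). The delegating helper itself is not part of this diff; presumably the shared test base class (for example ESSingleNodeTestCase) defines something along these lines, which is an assumption rather than a confirmed signature:

// Assumed shape of the convenience method the refactor leans on; the real helper
// would live in the test base class, not in the individual test.
protected IndexRequestBuilder prepareIndex(String index) {
    return client().prepareIndex(index);
}
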
client().prepareIndex("index").setSource("text", "goodnight world").get(); + prepareIndex("index").setId(Integer.toString(doc)).setSource("vector", randomVector(), "text", "hello world").get(); + prepareIndex("index").setSource("text", "goodnight world").get(); } indicesAdmin().prepareRefresh("index").get(); @@ -96,8 +96,8 @@ public void testKnnWithQuery() throws IOException { createIndex("index", indexSettings, builder); for (int doc = 0; doc < 10; doc++) { - client().prepareIndex("index").setSource("vector", randomVector(), "text", "hello world").get(); - client().prepareIndex("index").setSource("text", "goodnight world").get(); + prepareIndex("index").setSource("vector", randomVector(), "text", "hello world").get(); + prepareIndex("index").setSource("text", "goodnight world").get(); } indicesAdmin().prepareRefresh("index").get(); @@ -141,7 +141,7 @@ public void testKnnFilter() throws IOException { for (int doc = 0; doc < 10; doc++) { String value = doc % 2 == 0 ? "first" : "second"; - client().prepareIndex("index").setId(String.valueOf(doc)).setSource("vector", randomVector(), "field", value).get(); + prepareIndex("index").setId(String.valueOf(doc)).setSource("vector", randomVector(), "field", value).get(); } indicesAdmin().prepareRefresh("index").get(); @@ -183,9 +183,9 @@ public void testKnnFilterWithRewrite() throws IOException { createIndex("index", indexSettings, builder); for (int doc = 0; doc < 10; doc++) { - client().prepareIndex("index").setId(String.valueOf(doc)).setSource("vector", randomVector(), "field", "value").get(); + prepareIndex("index").setId(String.valueOf(doc)).setSource("vector", randomVector(), "field", "value").get(); } - client().prepareIndex("index").setId("lookup-doc").setSource("other-field", "value").get(); + prepareIndex("index").setId("lookup-doc").setSource("other-field", "value").get(); indicesAdmin().prepareRefresh("index").get(); @@ -230,9 +230,9 @@ public void testMultiKnnClauses() throws IOException { createIndex("index", indexSettings, builder); for (int doc = 0; doc < 10; doc++) { - client().prepareIndex("index").setSource("vector", randomVector(1.0f, 2.0f), "text", "hello world", "number", 1).get(); - client().prepareIndex("index").setSource("vector_2", randomVector(20f, 21f), "text", "hello world", "number", 2).get(); - client().prepareIndex("index").setSource("text", "goodnight world", "number", 3).get(); + prepareIndex("index").setSource("vector", randomVector(1.0f, 2.0f), "text", "hello world", "number", 1).get(); + prepareIndex("index").setSource("vector_2", randomVector(20f, 21f), "text", "hello world", "number", 2).get(); + prepareIndex("index").setSource("text", "goodnight world", "number", 3).get(); } indicesAdmin().prepareRefresh("index").get(); @@ -290,7 +290,7 @@ public void testMultiKnnClausesSameDoc() throws IOException { for (int doc = 0; doc < 10; doc++) { // Make them have the same vector. 
This will allow us to test the recall is the same but scores take into account both fields float[] vector = randomVector(); - client().prepareIndex("index").setSource("vector", vector, "vector_2", vector, "number", doc).get(); + prepareIndex("index").setSource("vector", vector, "vector_2", vector, "number", doc).get(); } indicesAdmin().prepareRefresh("index").get(); @@ -356,10 +356,10 @@ public void testKnnFilteredAlias() throws IOException { int expectedHits = 0; for (int doc = 0; doc < 10; doc++) { if (randomBoolean()) { - client().prepareIndex("index").setId(String.valueOf(doc)).setSource("vector", randomVector(), "field", "hit").get(); + prepareIndex("index").setId(String.valueOf(doc)).setSource("vector", randomVector(), "field", "hit").get(); ++expectedHits; } else { - client().prepareIndex("index").setId(String.valueOf(doc)).setSource("vector", randomVector(), "field", "not hit").get(); + prepareIndex("index").setId(String.valueOf(doc)).setSource("vector", randomVector(), "field", "not hit").get(); } } indicesAdmin().prepareRefresh("index").get(); @@ -389,8 +389,8 @@ public void testKnnSearchAction() throws IOException { createIndex("index2", indexSettings, builder); for (int doc = 0; doc < 10; doc++) { - client().prepareIndex("index1").setId(String.valueOf(doc)).setSource("vector", randomVector()).get(); - client().prepareIndex("index2").setId(String.valueOf(doc)).setSource("vector", randomVector()).get(); + prepareIndex("index1").setId(String.valueOf(doc)).setSource("vector", randomVector()).get(); + prepareIndex("index2").setId(String.valueOf(doc)).setSource("vector", randomVector()).get(); } indicesAdmin().prepareForceMerge("index1", "index2").setMaxNumSegments(1).get(); @@ -427,7 +427,7 @@ public void testKnnVectorsWith4096Dims() throws IOException { createIndex("index", indexSettings, builder); for (int doc = 0; doc < 10; doc++) { - client().prepareIndex("index").setSource("vector", randomVector(4096)).get(); + prepareIndex("index").setSource("vector", randomVector(4096)).get(); } indicesAdmin().prepareRefresh("index").get(); diff --git a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java index dcddecb88323d..71156517b0306 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java +++ b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.InternalSearchResponse; @@ -43,6 +44,8 @@ public final class MockSearchPhaseContext implements SearchPhaseContext { final SearchRequest searchRequest = new SearchRequest(); final AtomicReference searchResponse = new AtomicReference<>(); + private final List releasables = new ArrayList<>(); + public MockSearchPhaseContext(int numShards) { this.numShards = numShards; numSuccess = new AtomicInteger(numShards); @@ -137,12 +140,17 @@ public void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPhase) { @Override public void addReleasable(Releasable releasable) { - // Noop + releasables.add(releasable); } @Override public void execute(Runnable command) { - command.run(); + try { + command.run(); + } finally { + 
Releasables.close(releasables); + releasables.clear(); + } } @Override diff --git a/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java b/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java index 6dea3c1239cdc..f1867b223760d 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java @@ -93,7 +93,7 @@ private void runTestTook(boolean controlledClock) throws Exception { TransportMultiSearchAction action = createTransportMultiSearchAction(controlledClock, expected); - action.doExecute(mock(Task.class), multiSearchRequest, new ActionListener() { + action.doExecute(mock(Task.class), multiSearchRequest, new ActionListener<>() { @Override public void onResponse(MultiSearchResponse multiSearchResponse) { if (controlledClock) { @@ -147,18 +147,21 @@ public void search(final SearchRequest request, final ActionListener { counter.decrementAndGet(); - listener.onResponse( - new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, - null, - 0, - 0, - 0, - 0L, - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY - ) + var resp = new SearchResponse( + InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + null, + 0, + 0, + 0, + 0L, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY ); + try { + listener.onResponse(resp); + } finally { + resp.decRef(); + } }); } diff --git a/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java index d13bf82b5cd9a..7ba320d84f91e 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java @@ -194,29 +194,33 @@ public void testResponseErrorToXContent() throws IOException { new MultiSearchResponse.Item(null, new IllegalStateException("baaaaaazzzz")) }, tookInMillis ); + try { - assertEquals(XContentHelper.stripWhitespace(Strings.format(""" - { - "took": %s, - "responses": [ + assertEquals(XContentHelper.stripWhitespace(Strings.format(""" { - "error": { - "root_cause": [ { "type": "illegal_state_exception", "reason": "foobar" } ], - "type": "illegal_state_exception", - "reason": "foobar" - }, - "status": 500 - }, - { - "error": { - "root_cause": [ { "type": "illegal_state_exception", "reason": "baaaaaazzzz" } ], - "type": "illegal_state_exception", - "reason": "baaaaaazzzz" - }, - "status": 500 - } - ] - }""", tookInMillis)), Strings.toString(response)); + "took": %s, + "responses": [ + { + "error": { + "root_cause": [ { "type": "illegal_state_exception", "reason": "foobar" } ], + "type": "illegal_state_exception", + "reason": "foobar" + }, + "status": 500 + }, + { + "error": { + "root_cause": [ { "type": "illegal_state_exception", "reason": "baaaaaazzzz" } ], + "type": "illegal_state_exception", + "reason": "baaaaaazzzz" + }, + "status": 500 + } + ] + }""", tookInMillis)), Strings.toString(response)); + } finally { + response.decRef(); + } } public void testMaxConcurrentSearchRequests() { diff --git a/server/src/test/java/org/elasticsearch/action/search/MultiSearchResponseTests.java b/server/src/test/java/org/elasticsearch/action/search/MultiSearchResponseTests.java index 0dda42276317a..2ad5d17770687 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MultiSearchResponseTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/search/MultiSearchResponseTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ToXContent; @@ -119,6 +120,7 @@ public final void testFromXContent() throws IOException { .numberOfTestRuns(20) .supportsUnknownFields(supportsUnknownFields()) .assertEqualsConsumer(this::assertEqualInstances) + .dispose(RefCounted::decRef) .test(); } @@ -138,6 +140,7 @@ public void testFromXContentWithFailures() throws IOException { // exceptions are not of the same type whenever parsed back .assertToXContentEquivalence(false) .assertEqualsConsumer(this::assertEqualInstances) + .dispose(RefCounted::decRef) .test(); } diff --git a/server/src/test/java/org/elasticsearch/action/search/QueryPhaseResultConsumerTests.java b/server/src/test/java/org/elasticsearch/action/search/QueryPhaseResultConsumerTests.java index f18d69c442b4b..6035950ca4635 100644 --- a/server/src/test/java/org/elasticsearch/action/search/QueryPhaseResultConsumerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/QueryPhaseResultConsumerTests.java @@ -116,26 +116,30 @@ public void testProgressListenerExceptionsAreCaught() throws Exception { return curr; }) ); + try { + + CountDownLatch partialReduceLatch = new CountDownLatch(10); + + for (int i = 0; i < 10; i++) { + SearchShardTarget searchShardTarget = new SearchShardTarget("node", new ShardId("index", "uuid", i), null); + QuerySearchResult querySearchResult = new QuerySearchResult(); + TopDocs topDocs = new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]); + querySearchResult.topDocs(new TopDocsAndMaxScore(topDocs, Float.NaN), new DocValueFormat[0]); + querySearchResult.setSearchShardTarget(searchShardTarget); + querySearchResult.setShardIndex(i); + queryPhaseResultConsumer.consumeResult(querySearchResult, partialReduceLatch::countDown); + } - CountDownLatch partialReduceLatch = new CountDownLatch(10); + assertEquals(10, searchProgressListener.onQueryResult.get()); + assertTrue(partialReduceLatch.await(10, TimeUnit.SECONDS)); + assertNull(onPartialMergeFailure.get()); + assertEquals(8, searchProgressListener.onPartialReduce.get()); - for (int i = 0; i < 10; i++) { - SearchShardTarget searchShardTarget = new SearchShardTarget("node", new ShardId("index", "uuid", i), null); - QuerySearchResult querySearchResult = new QuerySearchResult(); - TopDocs topDocs = new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]); - querySearchResult.topDocs(new TopDocsAndMaxScore(topDocs, Float.NaN), new DocValueFormat[0]); - querySearchResult.setSearchShardTarget(searchShardTarget); - querySearchResult.setShardIndex(i); - queryPhaseResultConsumer.consumeResult(querySearchResult, partialReduceLatch::countDown); + queryPhaseResultConsumer.reduce(); + assertEquals(1, searchProgressListener.onFinalReduce.get()); + } finally { + queryPhaseResultConsumer.decRef(); } - - assertEquals(10, searchProgressListener.onQueryResult.get()); - assertTrue(partialReduceLatch.await(10, TimeUnit.SECONDS)); - assertNull(onPartialMergeFailure.get()); - assertEquals(8, searchProgressListener.onPartialReduce.get()); - - queryPhaseResultConsumer.reduce(); - assertEquals(1, searchProgressListener.onFinalReduce.get()); } private static class ThrowingSearchProgressListener extends 
SearchProgressListener { diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java index 0c2670348f9f6..430e66c116744 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java @@ -177,7 +177,6 @@ public void testLimitConcurrentShardRequests() throws InterruptedException { } CountDownLatch latch = new CountDownLatch(1); AtomicBoolean searchPhaseDidRun = new AtomicBoolean(false); - ActionListener responseListener = ActionTestUtils.assertNoFailureListener(response -> {}); DiscoveryNode primaryNode = DiscoveryNodeUtils.create("node_1"); // for the sake of this test we place the replica on the same node. ie. this is not a mistake since we limit per node now DiscoveryNode replicaNode = DiscoveryNodeUtils.create("node_1"); @@ -199,71 +198,80 @@ public void testLimitConcurrentShardRequests() throws InterruptedException { Map aliasFilters = Collections.singletonMap("_na_", AliasFilter.EMPTY); CountDownLatch awaitInitialRequests = new CountDownLatch(1); AtomicInteger numRequests = new AtomicInteger(0); - AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction( - "test", - logger, - transportService, - (cluster, node) -> { - assert cluster == null : "cluster was not null: " + cluster; - return lookup.get(node); - }, - aliasFilters, - Collections.emptyMap(), - null, - request, - responseListener, - shardsIter, - new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0), - ClusterState.EMPTY_STATE, - null, - new ArraySearchPhaseResults<>(shardsIter.size()), - request.getMaxConcurrentShardRequests(), - SearchResponse.Clusters.EMPTY - ) { - - @Override - protected void executePhaseOnShard( - SearchShardIterator shardIt, - SearchShardTarget shard, - SearchActionListener listener + var results = new ArraySearchPhaseResults(shardsIter.size()); + try { + AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction<>( + "test", + logger, + transportService, + (cluster, node) -> { + assert cluster == null : "cluster was not null: " + cluster; + return lookup.get(node); + }, + aliasFilters, + Collections.emptyMap(), + null, + request, + ActionTestUtils.assertNoFailureListener(response -> {}), + shardsIter, + new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0), + ClusterState.EMPTY_STATE, + null, + results, + request.getMaxConcurrentShardRequests(), + SearchResponse.Clusters.EMPTY ) { - seenShard.computeIfAbsent(shard.getShardId(), (i) -> { - numRequests.incrementAndGet(); // only count this once per shard copy - return Boolean.TRUE; - }); - new Thread(() -> { - safeAwait(awaitInitialRequests); - Transport.Connection connection = getConnection(null, shard.getNodeId()); - TestSearchPhaseResult testSearchPhaseResult = new TestSearchPhaseResult( - new ShardSearchContextId(UUIDs.randomBase64UUID(), contextIdGenerator.incrementAndGet()), - connection.getNode() - ); - if (shardFailures[shard.getShardId().id()]) { - listener.onFailure(new RuntimeException()); - } else { - listener.onResponse(testSearchPhaseResult); - } - }).start(); - } + @Override + protected void executePhaseOnShard( + SearchShardIterator shardIt, + SearchShardTarget shard, + SearchActionListener listener + ) { + seenShard.computeIfAbsent(shard.getShardId(), (i) -> { + numRequests.incrementAndGet(); // only count this once per shard copy + return Boolean.TRUE; + }); + + new 
Thread(() -> { + safeAwait(awaitInitialRequests); + Transport.Connection connection = getConnection(null, shard.getNodeId()); + TestSearchPhaseResult testSearchPhaseResult = new TestSearchPhaseResult( + new ShardSearchContextId(UUIDs.randomBase64UUID(), contextIdGenerator.incrementAndGet()), + connection.getNode() + ); + try { + if (shardFailures[shard.getShardId().id()]) { + listener.onFailure(new RuntimeException()); + } else { + listener.onResponse(testSearchPhaseResult); + } + } finally { + testSearchPhaseResult.decRef(); + } + }).start(); + } - @Override - protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { - return new SearchPhase("test") { - @Override - public void run() { - assertTrue(searchPhaseDidRun.compareAndSet(false, true)); - latch.countDown(); - } - }; - } - }; - asyncAction.start(); - assertEquals(numConcurrent, numRequests.get()); - awaitInitialRequests.countDown(); - latch.await(); - assertTrue(searchPhaseDidRun.get()); - assertEquals(numShards, numRequests.get()); + @Override + protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { + return new SearchPhase("test") { + @Override + public void run() { + assertTrue(searchPhaseDidRun.compareAndSet(false, true)); + latch.countDown(); + } + }; + } + }; + asyncAction.start(); + assertEquals(numConcurrent, numRequests.get()); + awaitInitialRequests.countDown(); + latch.await(); + assertTrue(searchPhaseDidRun.get()); + assertEquals(numShards, numRequests.get()); + } finally { + results.decRef(); + } } public void testFanOutAndCollect() throws InterruptedException { @@ -304,82 +312,87 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI ExecutorService executor = Executors.newFixedThreadPool(randomIntBetween(1, Runtime.getRuntime().availableProcessors())); final CountDownLatch latch = new CountDownLatch(1); final AtomicBoolean latchTriggered = new AtomicBoolean(); - AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction( - "test", - logger, - transportService, - (cluster, node) -> { - assert cluster == null : "cluster was not null: " + cluster; - return lookup.get(node); - }, - aliasFilters, - Collections.emptyMap(), - executor, - request, - responseListener, - shardsIter, - new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0), - ClusterState.EMPTY_STATE, - null, - new ArraySearchPhaseResults<>(shardsIter.size()), - request.getMaxConcurrentShardRequests(), - SearchResponse.Clusters.EMPTY - ) { - TestSearchResponse response = new TestSearchResponse(); - - @Override - protected void executePhaseOnShard( - SearchShardIterator shardIt, - SearchShardTarget shard, - SearchActionListener listener + var results = new ArraySearchPhaseResults(shardsIter.size()); + try { + AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction<>( + "test", + logger, + transportService, + (cluster, node) -> { + assert cluster == null : "cluster was not null: " + cluster; + return lookup.get(node); + }, + aliasFilters, + Collections.emptyMap(), + executor, + request, + responseListener, + shardsIter, + new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0), + ClusterState.EMPTY_STATE, + null, + results, + request.getMaxConcurrentShardRequests(), + SearchResponse.Clusters.EMPTY ) { - assertTrue("shard: " + shard.getShardId() + " has been queried twice", response.queried.add(shard.getShardId())); - Transport.Connection connection = getConnection(null, shard.getNodeId()); - TestSearchPhaseResult 
testSearchPhaseResult = new TestSearchPhaseResult( - new ShardSearchContextId(UUIDs.randomBase64UUID(), contextIdGenerator.incrementAndGet()), - connection.getNode() - ); - Set ids = nodeToContextMap.computeIfAbsent(connection.getNode(), (n) -> newConcurrentSet()); - ids.add(testSearchPhaseResult.getContextId()); - if (randomBoolean()) { - listener.onResponse(testSearchPhaseResult); - } else { - new Thread(() -> listener.onResponse(testSearchPhaseResult)).start(); + final TestSearchResponse response = new TestSearchResponse(); + + @Override + protected void executePhaseOnShard( + SearchShardIterator shardIt, + SearchShardTarget shard, + SearchActionListener listener + ) { + assertTrue("shard: " + shard.getShardId() + " has been queried twice", response.queried.add(shard.getShardId())); + Transport.Connection connection = getConnection(null, shard.getNodeId()); + TestSearchPhaseResult testSearchPhaseResult = new TestSearchPhaseResult( + new ShardSearchContextId(UUIDs.randomBase64UUID(), contextIdGenerator.incrementAndGet()), + connection.getNode() + ); + Set ids = nodeToContextMap.computeIfAbsent(connection.getNode(), (n) -> newConcurrentSet()); + ids.add(testSearchPhaseResult.getContextId()); + if (randomBoolean()) { + listener.onResponse(testSearchPhaseResult); + } else { + new Thread(() -> listener.onResponse(testSearchPhaseResult)).start(); + } } - } - @Override - protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { - return new SearchPhase("test") { - @Override - public void run() { - for (int i = 0; i < results.getNumShards(); i++) { - TestSearchPhaseResult result = results.getAtomicArray().get(i); - assertEquals(result.node.getId(), result.getSearchShardTarget().getNodeId()); - sendReleaseSearchContext(result.getContextId(), new MockConnection(result.node), OriginalIndices.NONE); + @Override + protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { + return new SearchPhase("test") { + @Override + public void run() { + for (int i = 0; i < results.getNumShards(); i++) { + TestSearchPhaseResult result = results.getAtomicArray().get(i); + assertEquals(result.node.getId(), result.getSearchShardTarget().getNodeId()); + sendReleaseSearchContext(result.getContextId(), new MockConnection(result.node), OriginalIndices.NONE); + } + responseListener.onResponse(response); + if (latchTriggered.compareAndSet(false, true) == false) { + throw new AssertionError("latch triggered twice"); + } + latch.countDown(); } - responseListener.onResponse(response); - if (latchTriggered.compareAndSet(false, true) == false) { - throw new AssertionError("latch triggered twice"); - } - latch.countDown(); - } - }; + }; + } + }; + asyncAction.start(); + latch.await(); + assertNotNull(response.get()); + assertFalse(nodeToContextMap.isEmpty()); + assertTrue(nodeToContextMap.toString(), nodeToContextMap.containsKey(primaryNode) || nodeToContextMap.containsKey(replicaNode)); + assertEquals(shardsIter.size(), numFreedContext.get()); + if (nodeToContextMap.containsKey(primaryNode)) { + assertTrue(nodeToContextMap.get(primaryNode).toString(), nodeToContextMap.get(primaryNode).isEmpty()); + } else { + assertTrue(nodeToContextMap.get(replicaNode).toString(), nodeToContextMap.get(replicaNode).isEmpty()); } - }; - asyncAction.start(); - latch.await(); - assertNotNull(response.get()); - assertFalse(nodeToContextMap.isEmpty()); - assertTrue(nodeToContextMap.toString(), nodeToContextMap.containsKey(primaryNode) || 
nodeToContextMap.containsKey(replicaNode)); - assertEquals(shardsIter.size(), numFreedContext.get()); - if (nodeToContextMap.containsKey(primaryNode)) { - assertTrue(nodeToContextMap.get(primaryNode).toString(), nodeToContextMap.get(primaryNode).isEmpty()); - } else { - assertTrue(nodeToContextMap.get(replicaNode).toString(), nodeToContextMap.get(replicaNode).isEmpty()); + final List runnables = executor.shutdownNow(); + assertThat(runnables, equalTo(Collections.emptyList())); + } finally { + results.decRef(); } - final List runnables = executor.shutdownNow(); - assertThat(runnables, equalTo(Collections.emptyList())); } public void testFanOutAndFail() throws InterruptedException { @@ -424,7 +437,7 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI lookup.put(replicaNode.getId(), new MockConnection(replicaNode)); Map aliasFilters = Collections.singletonMap("_na_", AliasFilter.EMPTY); ExecutorService executor = Executors.newFixedThreadPool(randomIntBetween(1, Runtime.getRuntime().availableProcessors())); - AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction( + AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction<>( "test", logger, transportService, @@ -445,7 +458,7 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY ) { - TestSearchResponse response = new TestSearchResponse(); + final TestSearchResponse response = new TestSearchResponse(); @Override protected void executePhaseOnShard( @@ -506,7 +519,6 @@ public void testAllowPartialResults() throws InterruptedException { request.setMaxConcurrentShardRequests(numConcurrent); int numShards = randomIntBetween(5, 10); AtomicBoolean searchPhaseDidRun = new AtomicBoolean(false); - ActionListener responseListener = ActionTestUtils.assertNoFailureListener(response -> {}); DiscoveryNode primaryNode = DiscoveryNodeUtils.create("node_1"); // for the sake of this test we place the replica on the same node. ie. 
this is not a mistake since we limit per node now DiscoveryNode replicaNode = DiscoveryNodeUtils.create("node_1"); @@ -530,69 +542,78 @@ public void testAllowPartialResults() throws InterruptedException { Map aliasFilters = Collections.singletonMap("_na_", AliasFilter.EMPTY); AtomicInteger numRequests = new AtomicInteger(0); AtomicInteger numFailReplicas = new AtomicInteger(0); - AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction<>( - "test", - logger, - transportService, - (cluster, node) -> { - assert cluster == null : "cluster was not null: " + cluster; - return lookup.get(node); - }, - aliasFilters, - Collections.emptyMap(), - null, - request, - responseListener, - shardsIter, - new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0), - ClusterState.EMPTY_STATE, - null, - new ArraySearchPhaseResults<>(shardsIter.size()), - request.getMaxConcurrentShardRequests(), - SearchResponse.Clusters.EMPTY - ) { - - @Override - protected void executePhaseOnShard( - SearchShardIterator shardIt, - SearchShardTarget shard, - SearchActionListener listener + var results = new ArraySearchPhaseResults(shardsIter.size()); + try { + AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction<>( + "test", + logger, + transportService, + (cluster, node) -> { + assert cluster == null : "cluster was not null: " + cluster; + return lookup.get(node); + }, + aliasFilters, + Collections.emptyMap(), + null, + request, + ActionTestUtils.assertNoFailureListener(response -> {}), + shardsIter, + new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0), + ClusterState.EMPTY_STATE, + null, + results, + request.getMaxConcurrentShardRequests(), + SearchResponse.Clusters.EMPTY ) { - seenShard.computeIfAbsent(shard.getShardId(), (i) -> { - numRequests.incrementAndGet(); // only count this once per shard copy - return Boolean.TRUE; - }); - new Thread(() -> { - Transport.Connection connection = getConnection(null, shard.getNodeId()); - TestSearchPhaseResult testSearchPhaseResult = new TestSearchPhaseResult( - new ShardSearchContextId(UUIDs.randomBase64UUID(), contextIdGenerator.incrementAndGet()), - connection.getNode() - ); - if (shardIt.remaining() > 0) { - numFailReplicas.incrementAndGet(); - listener.onFailure(new RuntimeException()); - } else { - listener.onResponse(testSearchPhaseResult); - } - }).start(); - } - @Override - protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { - return new SearchPhase("test") { - @Override - public void run() { - assertTrue(searchPhaseDidRun.compareAndSet(false, true)); - latch.countDown(); - } - }; - } - }; - asyncAction.start(); - latch.await(); - assertTrue(searchPhaseDidRun.get()); - assertEquals(numShards, numRequests.get()); - assertThat(numFailReplicas.get(), greaterThanOrEqualTo(1)); + @Override + protected void executePhaseOnShard( + SearchShardIterator shardIt, + SearchShardTarget shard, + SearchActionListener listener + ) { + seenShard.computeIfAbsent(shard.getShardId(), (i) -> { + numRequests.incrementAndGet(); // only count this once per shard copy + return Boolean.TRUE; + }); + new Thread(() -> { + Transport.Connection connection = getConnection(null, shard.getNodeId()); + TestSearchPhaseResult testSearchPhaseResult = new TestSearchPhaseResult( + new ShardSearchContextId(UUIDs.randomBase64UUID(), contextIdGenerator.incrementAndGet()), + connection.getNode() + ); + try { + if (shardIt.remaining() > 0) { + numFailReplicas.incrementAndGet(); + listener.onFailure(new RuntimeException()); + } else { + 
listener.onResponse(testSearchPhaseResult); + } + } finally { + testSearchPhaseResult.decRef(); + } + }).start(); + } + + @Override + protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { + return new SearchPhase("test") { + @Override + public void run() { + assertTrue(searchPhaseDidRun.compareAndSet(false, true)); + latch.countDown(); + } + }; + } + }; + asyncAction.start(); + latch.await(); + assertTrue(searchPhaseDidRun.get()); + assertEquals(numShards, numRequests.get()); + assertThat(numFailReplicas.get(), greaterThanOrEqualTo(1)); + } finally { + results.decRef(); + } } public void testSkipUnavailableSearchShards() throws InterruptedException { diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index 93436ed9b0768..0dcb6abe3a86e 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -72,6 +72,7 @@ import org.elasticsearch.test.InternalAggregationTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportMessage; import org.junit.After; import org.junit.Before; @@ -155,27 +156,31 @@ public void testSortDocs() { int nShards = randomIntBetween(1, 20); int queryResultSize = randomBoolean() ? 0 : randomIntBetween(1, nShards * 2); AtomicArray results = generateQueryResults(nShards, suggestions, queryResultSize, false, false, false); - Optional first = results.asList().stream().findFirst(); - int from = 0, size = 0; - if (first.isPresent()) { - from = first.get().queryResult().from(); - size = first.get().queryResult().size(); - } - int accumulatedLength = Math.min(queryResultSize, getTotalQueryHits(results)); - List reducedCompletionSuggestions = reducedSuggest(results); - for (Suggest.Suggestion suggestion : reducedCompletionSuggestions) { - int suggestionSize = suggestion.getEntries().get(0).getOptions().size(); - accumulatedLength += suggestionSize; - } - List topDocsList = new ArrayList<>(); - for (SearchPhaseResult result : results.asList()) { - QuerySearchResult queryResult = result.queryResult(); - TopDocs topDocs = queryResult.consumeTopDocs().topDocs; - SearchPhaseController.setShardIndex(topDocs, result.getShardIndex()); - topDocsList.add(topDocs); + try { + Optional first = results.asList().stream().findFirst(); + int from = 0, size = 0; + if (first.isPresent()) { + from = first.get().queryResult().from(); + size = first.get().queryResult().size(); + } + int accumulatedLength = Math.min(queryResultSize, getTotalQueryHits(results)); + List reducedCompletionSuggestions = reducedSuggest(results); + for (Suggest.Suggestion suggestion : reducedCompletionSuggestions) { + int suggestionSize = suggestion.getEntries().get(0).getOptions().size(); + accumulatedLength += suggestionSize; + } + List topDocsList = new ArrayList<>(); + for (SearchPhaseResult result : results.asList()) { + QuerySearchResult queryResult = result.queryResult(); + TopDocs topDocs = queryResult.consumeTopDocs().topDocs; + SearchPhaseController.setShardIndex(topDocs, result.getShardIndex()); + topDocsList.add(topDocs); + } + ScoreDoc[] sortedDocs = SearchPhaseController.sortDocs(true, topDocsList, from, size, reducedCompletionSuggestions).scoreDocs(); + assertThat(sortedDocs.length, equalTo(accumulatedLength)); + } 
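[Editor's note: the try/finally blocks added throughout SearchPhaseControllerTests follow one release discipline — every shard-level result generated for a test is ref-counted and must be dropped exactly once after the assertions run. A condensed sketch using the same names as the surrounding hunks; the generic parameter is restored here for readability, and the TransportMessage::decRef method reference compiles because SearchPhaseResult is a TransportMessage, per the import this diff adds:

    AtomicArray<SearchPhaseResult> results = generateQueryResults(nShards, suggestions, queryResultSize, false, false, false);
    try {
        // ... consume top docs, sort, and assert on the merged output ...
    } finally {
        results.asList().forEach(TransportMessage::decRef); // release each shard result exactly once
    }

]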
finally { + results.asList().forEach(TransportMessage::decRef); } - ScoreDoc[] sortedDocs = SearchPhaseController.sortDocs(true, topDocsList, from, size, reducedCompletionSuggestions).scoreDocs(); - assertThat(sortedDocs.length, equalTo(accumulatedLength)); } public void testSortDocsIsIdempotent() throws Exception { @@ -190,36 +195,45 @@ public void testSortDocsIsIdempotent() throws Exception { queryResultSize, useConstantScore ); + List topDocsList = new ArrayList<>(); boolean ignoreFrom = randomBoolean(); - Optional first = results.asList().stream().findFirst(); int from = 0, size = 0; - if (first.isPresent()) { - from = first.get().queryResult().from(); - size = first.get().queryResult().size(); - } - List topDocsList = new ArrayList<>(); - for (SearchPhaseResult result : results.asList()) { - QuerySearchResult queryResult = result.queryResult(); - TopDocs topDocs = queryResult.consumeTopDocs().topDocs; - topDocsList.add(topDocs); - SearchPhaseController.setShardIndex(topDocs, result.getShardIndex()); + ScoreDoc[] sortedDocs; + try { + Optional first = results.asList().stream().findFirst(); + if (first.isPresent()) { + from = first.get().queryResult().from(); + size = first.get().queryResult().size(); + } + for (SearchPhaseResult result : results.asList()) { + QuerySearchResult queryResult = result.queryResult(); + TopDocs topDocs = queryResult.consumeTopDocs().topDocs; + topDocsList.add(topDocs); + SearchPhaseController.setShardIndex(topDocs, result.getShardIndex()); + } + sortedDocs = SearchPhaseController.sortDocs(ignoreFrom, topDocsList, from, size, Collections.emptyList()).scoreDocs(); + } finally { + results.asList().forEach(TransportMessage::decRef); } - ScoreDoc[] sortedDocs = SearchPhaseController.sortDocs(ignoreFrom, topDocsList, from, size, Collections.emptyList()).scoreDocs(); - results = generateSeededQueryResults(randomSeed, nShards, Collections.emptyList(), queryResultSize, useConstantScore); - topDocsList = new ArrayList<>(); - for (SearchPhaseResult result : results.asList()) { - QuerySearchResult queryResult = result.queryResult(); - TopDocs topDocs = queryResult.consumeTopDocs().topDocs; - topDocsList.add(topDocs); - SearchPhaseController.setShardIndex(topDocs, result.getShardIndex()); - } - ScoreDoc[] sortedDocs2 = SearchPhaseController.sortDocs(ignoreFrom, topDocsList, from, size, Collections.emptyList()).scoreDocs(); - assertEquals(sortedDocs.length, sortedDocs2.length); - for (int i = 0; i < sortedDocs.length; i++) { - assertEquals(sortedDocs[i].doc, sortedDocs2[i].doc); - assertEquals(sortedDocs[i].shardIndex, sortedDocs2[i].shardIndex); - assertEquals(sortedDocs[i].score, sortedDocs2[i].score, 0.0f); + try { + topDocsList = new ArrayList<>(); + for (SearchPhaseResult result : results.asList()) { + QuerySearchResult queryResult = result.queryResult(); + TopDocs topDocs = queryResult.consumeTopDocs().topDocs; + topDocsList.add(topDocs); + SearchPhaseController.setShardIndex(topDocs, result.getShardIndex()); + } + ScoreDoc[] sortedDocs2 = SearchPhaseController.sortDocs(ignoreFrom, topDocsList, from, size, Collections.emptyList()) + .scoreDocs(); + assertEquals(sortedDocs.length, sortedDocs2.length); + for (int i = 0; i < sortedDocs.length; i++) { + assertEquals(sortedDocs[i].doc, sortedDocs2[i].doc); + assertEquals(sortedDocs[i].shardIndex, sortedDocs2[i].shardIndex); + assertEquals(sortedDocs[i].score, sortedDocs2[i].score, 0.0f); + } + } finally { + results.asList().forEach(TransportMessage::decRef); } } @@ -257,77 +271,86 @@ public void testMerge() { profile, 
false ); - SearchPhaseController.ReducedQueryPhase reducedQueryPhase = SearchPhaseController.reducedQueryPhase( - queryResults.asList(), - new ArrayList<>(), - new ArrayList<>(), - new TopDocsStats(trackTotalHits), - 0, - true, - InternalAggregationTestCase.emptyReduceContextBuilder(), - null, - true - ); - List shards = queryResults.asList().stream().map(SearchPhaseResult::getSearchShardTarget).collect(toList()); - AtomicArray fetchResults = generateFetchResults( - shards, - reducedQueryPhase.sortedTopDocs().scoreDocs(), - reducedQueryPhase.suggest(), - profile - ); - InternalSearchResponse mergedResponse = SearchPhaseController.merge( - false, - reducedQueryPhase, - fetchResults.asList(), - fetchResults::get - ); - if (trackTotalHits == SearchContext.TRACK_TOTAL_HITS_DISABLED) { - assertNull(mergedResponse.hits.getTotalHits()); - } else { - assertThat(mergedResponse.hits.getTotalHits().value, equalTo(0L)); - assertEquals(mergedResponse.hits.getTotalHits().relation, Relation.EQUAL_TO); - } - for (SearchHit hit : mergedResponse.hits().getHits()) { - SearchPhaseResult searchPhaseResult = fetchResults.get(hit.getShard().getShardId().id()); - assertSame(searchPhaseResult.getSearchShardTarget(), hit.getShard()); - } - int suggestSize = 0; - for (Suggest.Suggestion s : reducedQueryPhase.suggest()) { - suggestSize += s.getEntries().stream().mapToInt(e -> e.getOptions().size()).sum(); - } - assertThat(suggestSize, lessThanOrEqualTo(maxSuggestSize)); - assertThat(mergedResponse.hits().getHits().length, equalTo(reducedQueryPhase.sortedTopDocs().scoreDocs().length - suggestSize)); - Suggest suggestResult = mergedResponse.suggest(); - for (Suggest.Suggestion suggestion : reducedQueryPhase.suggest()) { - assertThat(suggestion, instanceOf(CompletionSuggestion.class)); - if (suggestion.getEntries().get(0).getOptions().size() > 0) { - CompletionSuggestion suggestionResult = suggestResult.getSuggestion(suggestion.getName()); - assertNotNull(suggestionResult); - List options = suggestionResult.getEntries().get(0).getOptions(); - assertThat(options.size(), equalTo(suggestion.getEntries().get(0).getOptions().size())); - for (CompletionSuggestion.Entry.Option option : options) { - assertNotNull(option.getHit()); - SearchPhaseResult searchPhaseResult = fetchResults.get(option.getHit().getShard().getShardId().id()); - assertSame(searchPhaseResult.getSearchShardTarget(), option.getHit().getShard()); - } - } - } - if (profile) { - assertThat(mergedResponse.profile().entrySet(), hasSize(nShards)); - assertThat( - // All shards should have a query profile - mergedResponse.profile().toString(), - mergedResponse.profile().values().stream().filter(r -> r.getQueryProfileResults() != null).count(), - equalTo((long) nShards) + try { + SearchPhaseController.ReducedQueryPhase reducedQueryPhase = SearchPhaseController.reducedQueryPhase( + queryResults.asList(), + new ArrayList<>(), + new ArrayList<>(), + new TopDocsStats(trackTotalHits), + 0, + true, + InternalAggregationTestCase.emptyReduceContextBuilder(), + null, + true ); - assertThat( - // Some or all shards should have a fetch profile - mergedResponse.profile().toString(), - mergedResponse.profile().values().stream().filter(r -> r.getFetchPhase() != null).count(), - both(greaterThan(0L)).and(lessThanOrEqualTo((long) nShards)) + List shards = queryResults.asList() + .stream() + .map(SearchPhaseResult::getSearchShardTarget) + .collect(toList()); + AtomicArray fetchResults = generateFetchResults( + shards, + reducedQueryPhase.sortedTopDocs().scoreDocs(), + 
reducedQueryPhase.suggest(), + profile ); - } else { - assertThat(mergedResponse.profile(), is(anEmptyMap())); + try { + InternalSearchResponse mergedResponse = SearchPhaseController.merge(false, reducedQueryPhase, fetchResults); + if (trackTotalHits == SearchContext.TRACK_TOTAL_HITS_DISABLED) { + assertNull(mergedResponse.hits.getTotalHits()); + } else { + assertThat(mergedResponse.hits.getTotalHits().value, equalTo(0L)); + assertEquals(mergedResponse.hits.getTotalHits().relation, Relation.EQUAL_TO); + } + for (SearchHit hit : mergedResponse.hits().getHits()) { + SearchPhaseResult searchPhaseResult = fetchResults.get(hit.getShard().getShardId().id()); + assertSame(searchPhaseResult.getSearchShardTarget(), hit.getShard()); + } + int suggestSize = 0; + for (Suggest.Suggestion s : reducedQueryPhase.suggest()) { + suggestSize += s.getEntries().stream().mapToInt(e -> e.getOptions().size()).sum(); + } + assertThat(suggestSize, lessThanOrEqualTo(maxSuggestSize)); + assertThat( + mergedResponse.hits().getHits().length, + equalTo(reducedQueryPhase.sortedTopDocs().scoreDocs().length - suggestSize) + ); + Suggest suggestResult = mergedResponse.suggest(); + for (Suggest.Suggestion suggestion : reducedQueryPhase.suggest()) { + assertThat(suggestion, instanceOf(CompletionSuggestion.class)); + if (suggestion.getEntries().get(0).getOptions().size() > 0) { + CompletionSuggestion suggestionResult = suggestResult.getSuggestion(suggestion.getName()); + assertNotNull(suggestionResult); + List options = suggestionResult.getEntries().get(0).getOptions(); + assertThat(options.size(), equalTo(suggestion.getEntries().get(0).getOptions().size())); + for (CompletionSuggestion.Entry.Option option : options) { + assertNotNull(option.getHit()); + SearchPhaseResult searchPhaseResult = fetchResults.get(option.getHit().getShard().getShardId().id()); + assertSame(searchPhaseResult.getSearchShardTarget(), option.getHit().getShard()); + } + } + } + if (profile) { + assertThat(mergedResponse.profile().entrySet(), hasSize(nShards)); + assertThat( + // All shards should have a query profile + mergedResponse.profile().toString(), + mergedResponse.profile().values().stream().filter(r -> r.getQueryProfileResults() != null).count(), + equalTo((long) nShards) + ); + assertThat( + // Some or all shards should have a fetch profile + mergedResponse.profile().toString(), + mergedResponse.profile().values().stream().filter(r -> r.getFetchPhase() != null).count(), + both(greaterThan(0L)).and(lessThanOrEqualTo((long) nShards)) + ); + } else { + assertThat(mergedResponse.profile(), is(anEmptyMap())); + } + } finally { + fetchResults.asList().forEach(TransportMessage::decRef); + } + } finally { + queryResults.asList().forEach(TransportMessage::decRef); } } } @@ -337,70 +360,80 @@ public void testMergeWithRank() { int queryResultSize = randomBoolean() ? 
0 : randomIntBetween(1, nShards * 2); for (int trackTotalHits : new int[] { SearchContext.TRACK_TOTAL_HITS_DISABLED, SearchContext.TRACK_TOTAL_HITS_ACCURATE }) { AtomicArray queryResults = generateQueryResults(nShards, List.of(), queryResultSize, false, false, true); - SearchPhaseController.ReducedQueryPhase reducedQueryPhase = SearchPhaseController.reducedQueryPhase( - queryResults.asList(), - new ArrayList<>(), - new ArrayList<>(), - new TopDocsStats(trackTotalHits), - 0, - true, - InternalAggregationTestCase.emptyReduceContextBuilder(), - new RankCoordinatorContext(randomIntBetween(1, 10), 0, randomIntBetween(11, 100)) { - @Override - public SearchPhaseController.SortedTopDocs rank(List querySearchResults, TopDocsStats topDocStats) { - PriorityQueue queue = new PriorityQueue(windowSize) { - @Override - protected boolean lessThan(RankDoc a, RankDoc b) { - return a.score < b.score; - } - }; - for (QuerySearchResult qsr : querySearchResults) { - RankShardResult rsr = qsr.getRankShardResult(); - if (rsr != null) { - for (RankDoc rd : ((TestRankShardResult) rsr).testRankDocs) { - queue.insertWithOverflow(rd); + try { + SearchPhaseController.ReducedQueryPhase reducedQueryPhase = SearchPhaseController.reducedQueryPhase( + queryResults.asList(), + new ArrayList<>(), + new ArrayList<>(), + new TopDocsStats(trackTotalHits), + 0, + true, + InternalAggregationTestCase.emptyReduceContextBuilder(), + new RankCoordinatorContext(randomIntBetween(1, 10), 0, randomIntBetween(11, 100)) { + @Override + public SearchPhaseController.SortedTopDocs rank( + List querySearchResults, + TopDocsStats topDocStats + ) { + PriorityQueue queue = new PriorityQueue(windowSize) { + @Override + protected boolean lessThan(RankDoc a, RankDoc b) { + return a.score < b.score; + } + }; + for (QuerySearchResult qsr : querySearchResults) { + RankShardResult rsr = qsr.getRankShardResult(); + if (rsr != null) { + for (RankDoc rd : ((TestRankShardResult) rsr).testRankDocs) { + queue.insertWithOverflow(rd); + } } } + int size = Math.min(this.size, queue.size()); + RankDoc[] topResults = new RankDoc[size]; + for (int rdi = 0; rdi < size; ++rdi) { + topResults[rdi] = queue.pop(); + topResults[rdi].rank = rdi + 1; + } + topDocStats.fetchHits = topResults.length; + return new SearchPhaseController.SortedTopDocs(topResults, false, null, null, null, 0); } - int size = Math.min(this.size, queue.size()); - RankDoc[] topResults = new RankDoc[size]; - for (int rdi = 0; rdi < size; ++rdi) { - topResults[rdi] = queue.pop(); - topResults[rdi].rank = rdi + 1; - } - topDocStats.fetchHits = topResults.length; - return new SearchPhaseController.SortedTopDocs(topResults, false, null, null, null, 0); + }, + true + ); + List shards = queryResults.asList() + .stream() + .map(SearchPhaseResult::getSearchShardTarget) + .collect(toList()); + AtomicArray fetchResults = generateFetchResults( + shards, + reducedQueryPhase.sortedTopDocs().scoreDocs(), + reducedQueryPhase.suggest(), + false + ); + try { + InternalSearchResponse mergedResponse = SearchPhaseController.merge(false, reducedQueryPhase, fetchResults); + if (trackTotalHits == SearchContext.TRACK_TOTAL_HITS_DISABLED) { + assertNull(mergedResponse.hits.getTotalHits()); + } else { + assertThat(mergedResponse.hits.getTotalHits().value, equalTo(0L)); + assertEquals(mergedResponse.hits.getTotalHits().relation, Relation.EQUAL_TO); } - }, - true - ); - List shards = queryResults.asList().stream().map(SearchPhaseResult::getSearchShardTarget).collect(toList()); - AtomicArray fetchResults = generateFetchResults( 
- shards, - reducedQueryPhase.sortedTopDocs().scoreDocs(), - reducedQueryPhase.suggest(), - false - ); - InternalSearchResponse mergedResponse = SearchPhaseController.merge( - false, - reducedQueryPhase, - fetchResults.asList(), - fetchResults::get - ); - if (trackTotalHits == SearchContext.TRACK_TOTAL_HITS_DISABLED) { - assertNull(mergedResponse.hits.getTotalHits()); - } else { - assertThat(mergedResponse.hits.getTotalHits().value, equalTo(0L)); - assertEquals(mergedResponse.hits.getTotalHits().relation, Relation.EQUAL_TO); - } - int rank = 1; - for (SearchHit hit : mergedResponse.hits().getHits()) { - SearchPhaseResult searchPhaseResult = fetchResults.get(hit.getShard().getShardId().id()); - assertSame(searchPhaseResult.getSearchShardTarget(), hit.getShard()); - assertEquals(rank++, hit.getRank()); + int rank = 1; + for (SearchHit hit : mergedResponse.hits().getHits()) { + SearchPhaseResult searchPhaseResult = fetchResults.get(hit.getShard().getShardId().id()); + assertSame(searchPhaseResult.getSearchShardTarget(), hit.getShard()); + assertEquals(rank++, hit.getRank()); + } + assertThat(mergedResponse.hits().getHits().length, equalTo(reducedQueryPhase.sortedTopDocs().scoreDocs().length)); + assertThat(mergedResponse.profile(), is(anEmptyMap())); + } finally { + fetchResults.asList().forEach(TransportMessage::decRef); + } + } finally { + + queryResults.asList().forEach(TransportMessage::decRef); } - assertThat(mergedResponse.hits().getHits().length, equalTo(reducedQueryPhase.sortedTopDocs().scoreDocs().length)); - assertThat(mergedResponse.profile(), is(anEmptyMap())); } } @@ -585,97 +618,113 @@ private void consumerTestCase(int numEmptyResponses) throws Exception { 3 + numEmptyResponses, exc -> {} ); - if (numEmptyResponses == 0) { - assertEquals(0, reductions.size()); - } - if (numEmptyResponses > 0) { - QuerySearchResult empty = QuerySearchResult.nullInstance(); - int shardId = 2 + numEmptyResponses; - empty.setShardIndex(2 + numEmptyResponses); - empty.setSearchShardTarget(new SearchShardTarget("node", new ShardId("a", "b", shardId), null)); - consumer.consumeResult(empty, latch::countDown); - numEmptyResponses--; - } - - QuerySearchResult result = new QuerySearchResult( - new ShardSearchContextId("", 0), - new SearchShardTarget("node", new ShardId("a", "b", 0), null), - null - ); - result.topDocs( - new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), Float.NaN), - new DocValueFormat[0] - ); - InternalAggregations aggs = InternalAggregations.from(singletonList(new Max("test", 1.0D, DocValueFormat.RAW, emptyMap()))); - result.aggregations(aggs); - result.setShardIndex(0); - consumer.consumeResult(result, latch::countDown); - - result = new QuerySearchResult( - new ShardSearchContextId("", 1), - new SearchShardTarget("node", new ShardId("a", "b", 0), null), - null - ); - result.topDocs( - new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), Float.NaN), - new DocValueFormat[0] - ); - aggs = InternalAggregations.from(singletonList(new Max("test", 3.0D, DocValueFormat.RAW, emptyMap()))); - result.aggregations(aggs); - result.setShardIndex(2); - consumer.consumeResult(result, latch::countDown); - - result = new QuerySearchResult( - new ShardSearchContextId("", 1), - new SearchShardTarget("node", new ShardId("a", "b", 0), null), - null - ); - result.topDocs( - new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), Float.NaN), - new DocValueFormat[0] - ); 
- aggs = InternalAggregations.from(singletonList(new Max("test", 2.0D, DocValueFormat.RAW, emptyMap()))); - result.aggregations(aggs); - result.setShardIndex(1); - consumer.consumeResult(result, latch::countDown); - - while (numEmptyResponses > 0) { - result = QuerySearchResult.nullInstance(); - int shardId = 2 + numEmptyResponses; - result.setShardIndex(shardId); - result.setSearchShardTarget(new SearchShardTarget("node", new ShardId("a", "b", shardId), null)); - consumer.consumeResult(result, latch::countDown); - numEmptyResponses--; + try { + if (numEmptyResponses == 0) { + assertEquals(0, reductions.size()); + } + if (numEmptyResponses > 0) { + QuerySearchResult empty = QuerySearchResult.nullInstance(); + int shardId = 2 + numEmptyResponses; + empty.setShardIndex(2 + numEmptyResponses); + empty.setSearchShardTarget(new SearchShardTarget("node", new ShardId("a", "b", shardId), null)); + consumer.consumeResult(empty, latch::countDown); + numEmptyResponses--; + } - } - latch.await(); - final int numTotalReducePhases; - if (numShards > bufferSize) { - if (bufferSize == 2) { - assertEquals(1, ((QueryPhaseResultConsumer) consumer).getNumReducePhases()); - assertEquals(1, reductions.size()); - assertEquals(false, reductions.get(0)); - numTotalReducePhases = 2; + QuerySearchResult result = new QuerySearchResult( + new ShardSearchContextId("", 0), + new SearchShardTarget("node", new ShardId("a", "b", 0), null), + null + ); + try { + result.topDocs( + new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), Float.NaN), + new DocValueFormat[0] + ); + InternalAggregations aggs = InternalAggregations.from(singletonList(new Max("test", 1.0D, DocValueFormat.RAW, emptyMap()))); + result.aggregations(aggs); + result.setShardIndex(0); + consumer.consumeResult(result, latch::countDown); + } finally { + result.decRef(); + } + result = new QuerySearchResult( + new ShardSearchContextId("", 1), + new SearchShardTarget("node", new ShardId("a", "b", 0), null), + null + ); + try { + result.topDocs( + new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), Float.NaN), + new DocValueFormat[0] + ); + InternalAggregations aggs = InternalAggregations.from(singletonList(new Max("test", 3.0D, DocValueFormat.RAW, emptyMap()))); + result.aggregations(aggs); + result.setShardIndex(2); + consumer.consumeResult(result, latch::countDown); + } finally { + result.decRef(); + } + result = new QuerySearchResult( + new ShardSearchContextId("", 1), + new SearchShardTarget("node", new ShardId("a", "b", 0), null), + null + ); + try { + result.topDocs( + new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), Float.NaN), + new DocValueFormat[0] + ); + InternalAggregations aggs = InternalAggregations.from(singletonList(new Max("test", 2.0D, DocValueFormat.RAW, emptyMap()))); + result.aggregations(aggs); + result.setShardIndex(1); + consumer.consumeResult(result, latch::countDown); + } finally { + result.decRef(); + } + while (numEmptyResponses > 0) { + result = QuerySearchResult.nullInstance(); + try { + int shardId = 2 + numEmptyResponses; + result.setShardIndex(shardId); + result.setSearchShardTarget(new SearchShardTarget("node", new ShardId("a", "b", shardId), null)); + consumer.consumeResult(result, latch::countDown); + } finally { + result.decRef(); + } + numEmptyResponses--; + } + latch.await(); + final int numTotalReducePhases; + if (numShards > bufferSize) { + if (bufferSize == 2) { + 
assertEquals(1, ((QueryPhaseResultConsumer) consumer).getNumReducePhases()); + assertEquals(1, reductions.size()); + assertEquals(false, reductions.get(0)); + numTotalReducePhases = 2; + } else { + assertEquals(0, ((QueryPhaseResultConsumer) consumer).getNumReducePhases()); + assertEquals(0, reductions.size()); + numTotalReducePhases = 1; + } } else { - assertEquals(0, ((QueryPhaseResultConsumer) consumer).getNumReducePhases()); assertEquals(0, reductions.size()); numTotalReducePhases = 1; } - } else { - assertEquals(0, reductions.size()); - numTotalReducePhases = 1; - } - SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce(); - assertEquals(numTotalReducePhases, reduce.numReducePhases()); - assertEquals(numTotalReducePhases, reductions.size()); - assertAggReduction(request); - Max max = (Max) reduce.aggregations().asList().get(0); - assertEquals(3.0D, max.value(), 0.0D); - assertFalse(reduce.sortedTopDocs().isSortedByField()); - assertNull(reduce.sortedTopDocs().sortFields()); - assertNull(reduce.sortedTopDocs().collapseField()); - assertNull(reduce.sortedTopDocs().collapseValues()); + SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce(); + assertEquals(numTotalReducePhases, reduce.numReducePhases()); + assertEquals(numTotalReducePhases, reductions.size()); + assertAggReduction(request); + Max max = (Max) reduce.aggregations().asList().get(0); + assertEquals(3.0D, max.value(), 0.0D); + assertFalse(reduce.sortedTopDocs().isSortedByField()); + assertNull(reduce.sortedTopDocs().sortFields()); + assertNull(reduce.sortedTopDocs().collapseField()); + assertNull(reduce.sortedTopDocs().collapseValues()); + } finally { + consumer.decRef(); + } } public void testConsumerConcurrently() throws Exception { @@ -694,54 +743,62 @@ public void testConsumerConcurrently() throws Exception { expectedNumResults, exc -> {} ); - AtomicInteger max = new AtomicInteger(); - Thread[] threads = new Thread[expectedNumResults]; - CountDownLatch latch = new CountDownLatch(expectedNumResults); - for (int i = 0; i < expectedNumResults; i++) { - int id = i; - threads[i] = new Thread(() -> { - int number = randomIntBetween(1, 1000); - max.updateAndGet(prev -> Math.max(prev, number)); - QuerySearchResult result = new QuerySearchResult( - new ShardSearchContextId("", id), - new SearchShardTarget("node", new ShardId("a", "b", id), null), - null - ); - result.topDocs( - new TopDocsAndMaxScore( - new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(0, number) }), - number - ), - new DocValueFormat[0] - ); - InternalAggregations aggs = InternalAggregations.from( - Collections.singletonList(new Max("test", (double) number, DocValueFormat.RAW, Collections.emptyMap())) - ); - result.aggregations(aggs); - result.setShardIndex(id); - result.size(1); - consumer.consumeResult(result, latch::countDown); + try { + AtomicInteger max = new AtomicInteger(); + Thread[] threads = new Thread[expectedNumResults]; + CountDownLatch latch = new CountDownLatch(expectedNumResults); + for (int i = 0; i < expectedNumResults; i++) { + int id = i; + threads[i] = new Thread(() -> { + int number = randomIntBetween(1, 1000); + max.updateAndGet(prev -> Math.max(prev, number)); + QuerySearchResult result = new QuerySearchResult( + new ShardSearchContextId("", id), + new SearchShardTarget("node", new ShardId("a", "b", id), null), + null + ); + try { + result.topDocs( + new TopDocsAndMaxScore( + new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(0, number) }), 
+ number + ), + new DocValueFormat[0] + ); + InternalAggregations aggs = InternalAggregations.from( + Collections.singletonList(new Max("test", (double) number, DocValueFormat.RAW, Collections.emptyMap())) + ); + result.aggregations(aggs); + result.setShardIndex(id); + result.size(1); + consumer.consumeResult(result, latch::countDown); + } finally { + result.decRef(); + } - }); - threads[i].start(); - } - for (int i = 0; i < expectedNumResults; i++) { - threads[i].join(); - } - latch.await(); + }); + threads[i].start(); + } + for (int i = 0; i < expectedNumResults; i++) { + threads[i].join(); + } + latch.await(); - SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce(); - assertAggReduction(request); - Max internalMax = (Max) reduce.aggregations().asList().get(0); - assertEquals(max.get(), internalMax.value(), 0.0D); - assertEquals(1, reduce.sortedTopDocs().scoreDocs().length); - assertEquals(max.get(), reduce.maxScore(), 0.0f); - assertEquals(expectedNumResults, reduce.totalHits().value); - assertEquals(max.get(), reduce.sortedTopDocs().scoreDocs()[0].score, 0.0f); - assertFalse(reduce.sortedTopDocs().isSortedByField()); - assertNull(reduce.sortedTopDocs().sortFields()); - assertNull(reduce.sortedTopDocs().collapseField()); - assertNull(reduce.sortedTopDocs().collapseValues()); + SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce(); + assertAggReduction(request); + Max internalMax = (Max) reduce.aggregations().asList().get(0); + assertEquals(max.get(), internalMax.value(), 0.0D); + assertEquals(1, reduce.sortedTopDocs().scoreDocs().length); + assertEquals(max.get(), reduce.maxScore(), 0.0f); + assertEquals(expectedNumResults, reduce.totalHits().value); + assertEquals(max.get(), reduce.sortedTopDocs().scoreDocs()[0].score, 0.0f); + assertFalse(reduce.sortedTopDocs().isSortedByField()); + assertNull(reduce.sortedTopDocs().sortFields()); + assertNull(reduce.sortedTopDocs().collapseField()); + assertNull(reduce.sortedTopDocs().collapseValues()); + } finally { + consumer.decRef(); + } } public void testConsumerOnlyAggs() throws Exception { @@ -759,41 +816,49 @@ public void testConsumerOnlyAggs() throws Exception { expectedNumResults, exc -> {} ); - AtomicInteger max = new AtomicInteger(); - CountDownLatch latch = new CountDownLatch(expectedNumResults); - for (int i = 0; i < expectedNumResults; i++) { - int number = randomIntBetween(1, 1000); - max.updateAndGet(prev -> Math.max(prev, number)); - QuerySearchResult result = new QuerySearchResult( - new ShardSearchContextId("", i), - new SearchShardTarget("node", new ShardId("a", "b", i), null), - null - ); - result.topDocs( - new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), number), - new DocValueFormat[0] - ); - InternalAggregations aggs = InternalAggregations.from( - Collections.singletonList(new Max("test", (double) number, DocValueFormat.RAW, Collections.emptyMap())) - ); - result.aggregations(aggs); - result.setShardIndex(i); - result.size(1); - consumer.consumeResult(result, latch::countDown); - } - latch.await(); + try { + AtomicInteger max = new AtomicInteger(); + CountDownLatch latch = new CountDownLatch(expectedNumResults); + for (int i = 0; i < expectedNumResults; i++) { + int number = randomIntBetween(1, 1000); + max.updateAndGet(prev -> Math.max(prev, number)); + QuerySearchResult result = new QuerySearchResult( + new ShardSearchContextId("", i), + new SearchShardTarget("node", new ShardId("a", "b", i), null), + null + ); + try { + result.topDocs( + new 
TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), number), + new DocValueFormat[0] + ); + InternalAggregations aggs = InternalAggregations.from( + Collections.singletonList(new Max("test", (double) number, DocValueFormat.RAW, Collections.emptyMap())) + ); + result.aggregations(aggs); + result.setShardIndex(i); + result.size(1); + consumer.consumeResult(result, latch::countDown); + } finally { + result.decRef(); + } + } + latch.await(); - SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce(); - assertAggReduction(request); - Max internalMax = (Max) reduce.aggregations().asList().get(0); - assertEquals(max.get(), internalMax.value(), 0.0D); - assertEquals(0, reduce.sortedTopDocs().scoreDocs().length); - assertEquals(max.get(), reduce.maxScore(), 0.0f); - assertEquals(expectedNumResults, reduce.totalHits().value); - assertFalse(reduce.sortedTopDocs().isSortedByField()); - assertNull(reduce.sortedTopDocs().sortFields()); - assertNull(reduce.sortedTopDocs().collapseField()); - assertNull(reduce.sortedTopDocs().collapseValues()); + SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce(); + assertAggReduction(request); + Max internalMax = (Max) reduce.aggregations().asList().get(0); + assertEquals(max.get(), internalMax.value(), 0.0D); + assertEquals(0, reduce.sortedTopDocs().scoreDocs().length); + assertEquals(max.get(), reduce.maxScore(), 0.0f); + assertEquals(expectedNumResults, reduce.totalHits().value); + assertFalse(reduce.sortedTopDocs().isSortedByField()); + assertNull(reduce.sortedTopDocs().sortFields()); + assertNull(reduce.sortedTopDocs().collapseField()); + assertNull(reduce.sortedTopDocs().collapseValues()); + } finally { + consumer.decRef(); + } } public void testConsumerOnlyHits() throws Exception { @@ -813,38 +878,46 @@ public void testConsumerOnlyHits() throws Exception { expectedNumResults, exc -> {} ); - AtomicInteger max = new AtomicInteger(); - CountDownLatch latch = new CountDownLatch(expectedNumResults); - for (int i = 0; i < expectedNumResults; i++) { - int number = randomIntBetween(1, 1000); - max.updateAndGet(prev -> Math.max(prev, number)); - QuerySearchResult result = new QuerySearchResult( - new ShardSearchContextId("", i), - new SearchShardTarget("node", new ShardId("a", "b", i), null), - null - ); - result.topDocs( - new TopDocsAndMaxScore( - new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(0, number) }), - number - ), - new DocValueFormat[0] - ); - result.setShardIndex(i); - result.size(1); - consumer.consumeResult(result, latch::countDown); + try { + AtomicInteger max = new AtomicInteger(); + CountDownLatch latch = new CountDownLatch(expectedNumResults); + for (int i = 0; i < expectedNumResults; i++) { + int number = randomIntBetween(1, 1000); + max.updateAndGet(prev -> Math.max(prev, number)); + QuerySearchResult result = new QuerySearchResult( + new ShardSearchContextId("", i), + new SearchShardTarget("node", new ShardId("a", "b", i), null), + null + ); + try { + result.topDocs( + new TopDocsAndMaxScore( + new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(0, number) }), + number + ), + new DocValueFormat[0] + ); + result.setShardIndex(i); + result.size(1); + consumer.consumeResult(result, latch::countDown); + } finally { + result.decRef(); + } + } + latch.await(); + SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce(); + assertAggReduction(request); + assertEquals(1, 
reduce.sortedTopDocs().scoreDocs().length); + assertEquals(max.get(), reduce.maxScore(), 0.0f); + assertEquals(expectedNumResults, reduce.totalHits().value); + assertEquals(max.get(), reduce.sortedTopDocs().scoreDocs()[0].score, 0.0f); + assertFalse(reduce.sortedTopDocs().isSortedByField()); + assertNull(reduce.sortedTopDocs().sortFields()); + assertNull(reduce.sortedTopDocs().collapseField()); + assertNull(reduce.sortedTopDocs().collapseValues()); + } finally { + consumer.decRef(); } - latch.await(); - SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce(); - assertAggReduction(request); - assertEquals(1, reduce.sortedTopDocs().scoreDocs().length); - assertEquals(max.get(), reduce.maxScore(), 0.0f); - assertEquals(expectedNumResults, reduce.totalHits().value); - assertEquals(max.get(), reduce.sortedTopDocs().scoreDocs()[0].score, 0.0f); - assertFalse(reduce.sortedTopDocs().isSortedByField()); - assertNull(reduce.sortedTopDocs().sortFields()); - assertNull(reduce.sortedTopDocs().collapseField()); - assertNull(reduce.sortedTopDocs().collapseValues()); } private void assertAggReduction(SearchRequest searchRequest) { @@ -872,39 +945,47 @@ public void testReduceTopNWithFromOffset() throws Exception { 4, exc -> {} ); - int score = 100; - CountDownLatch latch = new CountDownLatch(4); - for (int i = 0; i < 4; i++) { - QuerySearchResult result = new QuerySearchResult( - new ShardSearchContextId("", i), - new SearchShardTarget("node", new ShardId("a", "b", i), null), - null - ); - ScoreDoc[] docs = new ScoreDoc[3]; - for (int j = 0; j < docs.length; j++) { - docs[j] = new ScoreDoc(0, score--); + try { + int score = 100; + CountDownLatch latch = new CountDownLatch(4); + for (int i = 0; i < 4; i++) { + QuerySearchResult result = new QuerySearchResult( + new ShardSearchContextId("", i), + new SearchShardTarget("node", new ShardId("a", "b", i), null), + null + ); + try { + ScoreDoc[] docs = new ScoreDoc[3]; + for (int j = 0; j < docs.length; j++) { + docs[j] = new ScoreDoc(0, score--); + } + result.topDocs( + new TopDocsAndMaxScore(new TopDocs(new TotalHits(3, TotalHits.Relation.EQUAL_TO), docs), docs[0].score), + new DocValueFormat[0] + ); + result.setShardIndex(i); + result.size(5); + result.from(5); + consumer.consumeResult(result, latch::countDown); + } finally { + result.decRef(); + } } - result.topDocs( - new TopDocsAndMaxScore(new TopDocs(new TotalHits(3, TotalHits.Relation.EQUAL_TO), docs), docs[0].score), - new DocValueFormat[0] - ); - result.setShardIndex(i); - result.size(5); - result.from(5); - consumer.consumeResult(result, latch::countDown); + latch.await(); + // 4*3 results = 12 we get result 5 to 10 here with from=5 and size=5 + SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce(); + ScoreDoc[] scoreDocs = reduce.sortedTopDocs().scoreDocs(); + assertEquals(5, scoreDocs.length); + assertEquals(100.f, reduce.maxScore(), 0.0f); + assertEquals(12, reduce.totalHits().value); + assertEquals(95.0f, scoreDocs[0].score, 0.0f); + assertEquals(94.0f, scoreDocs[1].score, 0.0f); + assertEquals(93.0f, scoreDocs[2].score, 0.0f); + assertEquals(92.0f, scoreDocs[3].score, 0.0f); + assertEquals(91.0f, scoreDocs[4].score, 0.0f); + } finally { + consumer.decRef(); } - latch.await(); - // 4*3 results = 12 we get result 5 to 10 here with from=5 and size=5 - SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce(); - ScoreDoc[] scoreDocs = reduce.sortedTopDocs().scoreDocs(); - assertEquals(5, scoreDocs.length); - assertEquals(100.f, reduce.maxScore(), 0.0f); - 
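The arithmetic behind the `4*3 results = 12` comment in the from/offset hunk above is worth spelling out: four shards contribute three docs each, scored 100 down to 89, so with from=5 and size=5 the reduced window is positions 5 through 9 of the globally sorted list, i.e. scores 95..91, while maxScore stays 100. A self-contained plain-Java sketch of that window selection (illustrative names only, not part of the PR):

    // Checks the from/size window asserted in testReduceTopNWithFromOffset:
    // 4 shards x 3 docs, scores 100..89; from=5 and size=5 select scores 95..91.
    class TopNWindowSketch {
        public static void main(String[] args) {
            int shards = 4, docsPerShard = 3, from = 5, size = 5;
            int totalHits = shards * docsPerShard;          // 12
            float[] sortedScores = new float[totalHits];
            float score = 100f;
            for (int i = 0; i < totalHits; i++) {
                sortedScores[i] = score--;                  // 100, 99, ..., 89 after the global sort
            }
            for (int i = from; i < Math.min(from + size, totalHits); i++) {
                System.out.println(sortedScores[i]);        // prints 95.0, 94.0, 93.0, 92.0, 91.0
            }
        }
    }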
assertEquals(12, reduce.totalHits().value); - assertEquals(95.0f, scoreDocs[0].score, 0.0f); - assertEquals(94.0f, scoreDocs[1].score, 0.0f); - assertEquals(93.0f, scoreDocs[2].score, 0.0f); - assertEquals(92.0f, scoreDocs[3].score, 0.0f); - assertEquals(91.0f, scoreDocs[4].score, 0.0f); } public void testConsumerSortByField() throws Exception { @@ -922,37 +1003,45 @@ public void testConsumerSortByField() throws Exception { expectedNumResults, exc -> {} ); - AtomicInteger max = new AtomicInteger(); - SortField[] sortFields = { new SortField("field", SortField.Type.INT, true) }; - DocValueFormat[] docValueFormats = { DocValueFormat.RAW }; - CountDownLatch latch = new CountDownLatch(expectedNumResults); - for (int i = 0; i < expectedNumResults; i++) { - int number = randomIntBetween(1, 1000); - max.updateAndGet(prev -> Math.max(prev, number)); - FieldDoc[] fieldDocs = { new FieldDoc(0, Float.NaN, new Object[] { number }) }; - TopDocs topDocs = new TopFieldDocs(new TotalHits(1, Relation.EQUAL_TO), fieldDocs, sortFields); - QuerySearchResult result = new QuerySearchResult( - new ShardSearchContextId("", i), - new SearchShardTarget("node", new ShardId("a", "b", i), null), - null - ); - result.topDocs(new TopDocsAndMaxScore(topDocs, Float.NaN), docValueFormats); - result.setShardIndex(i); - result.size(size); - consumer.consumeResult(result, latch::countDown); + try { + AtomicInteger max = new AtomicInteger(); + SortField[] sortFields = { new SortField("field", SortField.Type.INT, true) }; + DocValueFormat[] docValueFormats = { DocValueFormat.RAW }; + CountDownLatch latch = new CountDownLatch(expectedNumResults); + for (int i = 0; i < expectedNumResults; i++) { + int number = randomIntBetween(1, 1000); + max.updateAndGet(prev -> Math.max(prev, number)); + FieldDoc[] fieldDocs = { new FieldDoc(0, Float.NaN, new Object[] { number }) }; + TopDocs topDocs = new TopFieldDocs(new TotalHits(1, Relation.EQUAL_TO), fieldDocs, sortFields); + QuerySearchResult result = new QuerySearchResult( + new ShardSearchContextId("", i), + new SearchShardTarget("node", new ShardId("a", "b", i), null), + null + ); + try { + result.topDocs(new TopDocsAndMaxScore(topDocs, Float.NaN), docValueFormats); + result.setShardIndex(i); + result.size(size); + consumer.consumeResult(result, latch::countDown); + } finally { + result.decRef(); + } + } + latch.await(); + SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce(); + assertAggReduction(request); + assertEquals(Math.min(expectedNumResults, size), reduce.sortedTopDocs().scoreDocs().length); + assertEquals(expectedNumResults, reduce.totalHits().value); + assertEquals(max.get(), ((FieldDoc) reduce.sortedTopDocs().scoreDocs()[0]).fields[0]); + assertTrue(reduce.sortedTopDocs().isSortedByField()); + assertEquals(1, reduce.sortedTopDocs().sortFields().length); + assertEquals("field", reduce.sortedTopDocs().sortFields()[0].getField()); + assertEquals(SortField.Type.INT, reduce.sortedTopDocs().sortFields()[0].getType()); + assertNull(reduce.sortedTopDocs().collapseField()); + assertNull(reduce.sortedTopDocs().collapseValues()); + } finally { + consumer.decRef(); } - latch.await(); - SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce(); - assertAggReduction(request); - assertEquals(Math.min(expectedNumResults, size), reduce.sortedTopDocs().scoreDocs().length); - assertEquals(expectedNumResults, reduce.totalHits().value); - assertEquals(max.get(), ((FieldDoc) reduce.sortedTopDocs().scoreDocs()[0]).fields[0]); - 
assertTrue(reduce.sortedTopDocs().isSortedByField()); - assertEquals(1, reduce.sortedTopDocs().sortFields().length); - assertEquals("field", reduce.sortedTopDocs().sortFields()[0].getField()); - assertEquals(SortField.Type.INT, reduce.sortedTopDocs().sortFields()[0].getType()); - assertNull(reduce.sortedTopDocs().collapseField()); - assertNull(reduce.sortedTopDocs().collapseValues()); } public void testConsumerFieldCollapsing() throws Exception { @@ -970,41 +1059,49 @@ public void testConsumerFieldCollapsing() throws Exception { expectedNumResults, exc -> {} ); - SortField[] sortFields = { new SortField("field", SortField.Type.STRING) }; - BytesRef a = new BytesRef("a"); - BytesRef b = new BytesRef("b"); - BytesRef c = new BytesRef("c"); - Object[] collapseValues = new Object[] { a, b, c }; - DocValueFormat[] docValueFormats = { DocValueFormat.RAW }; - CountDownLatch latch = new CountDownLatch(expectedNumResults); - for (int i = 0; i < expectedNumResults; i++) { - Object[] values = { randomFrom(collapseValues) }; - FieldDoc[] fieldDocs = { new FieldDoc(0, Float.NaN, values) }; - TopDocs topDocs = new TopFieldGroups("field", new TotalHits(1, Relation.EQUAL_TO), fieldDocs, sortFields, values); - QuerySearchResult result = new QuerySearchResult( - new ShardSearchContextId("", i), - new SearchShardTarget("node", new ShardId("a", "b", i), null), - null - ); - result.topDocs(new TopDocsAndMaxScore(topDocs, Float.NaN), docValueFormats); - result.setShardIndex(i); - result.size(size); - consumer.consumeResult(result, latch::countDown); + try { + SortField[] sortFields = { new SortField("field", SortField.Type.STRING) }; + BytesRef a = new BytesRef("a"); + BytesRef b = new BytesRef("b"); + BytesRef c = new BytesRef("c"); + Object[] collapseValues = new Object[] { a, b, c }; + DocValueFormat[] docValueFormats = { DocValueFormat.RAW }; + CountDownLatch latch = new CountDownLatch(expectedNumResults); + for (int i = 0; i < expectedNumResults; i++) { + Object[] values = { randomFrom(collapseValues) }; + FieldDoc[] fieldDocs = { new FieldDoc(0, Float.NaN, values) }; + TopDocs topDocs = new TopFieldGroups("field", new TotalHits(1, Relation.EQUAL_TO), fieldDocs, sortFields, values); + QuerySearchResult result = new QuerySearchResult( + new ShardSearchContextId("", i), + new SearchShardTarget("node", new ShardId("a", "b", i), null), + null + ); + try { + result.topDocs(new TopDocsAndMaxScore(topDocs, Float.NaN), docValueFormats); + result.setShardIndex(i); + result.size(size); + consumer.consumeResult(result, latch::countDown); + } finally { + result.decRef(); + } + } + latch.await(); + SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce(); + assertAggReduction(request); + assertEquals(3, reduce.sortedTopDocs().scoreDocs().length); + assertEquals(expectedNumResults, reduce.totalHits().value); + assertEquals(a, ((FieldDoc) reduce.sortedTopDocs().scoreDocs()[0]).fields[0]); + assertEquals(b, ((FieldDoc) reduce.sortedTopDocs().scoreDocs()[1]).fields[0]); + assertEquals(c, ((FieldDoc) reduce.sortedTopDocs().scoreDocs()[2]).fields[0]); + assertTrue(reduce.sortedTopDocs().isSortedByField()); + assertEquals(1, reduce.sortedTopDocs().sortFields().length); + assertEquals("field", reduce.sortedTopDocs().sortFields()[0].getField()); + assertEquals(SortField.Type.STRING, reduce.sortedTopDocs().sortFields()[0].getType()); + assertEquals("field", reduce.sortedTopDocs().collapseField()); + assertArrayEquals(collapseValues, reduce.sortedTopDocs().collapseValues()); + } finally { + consumer.decRef(); } - 
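Every hunk in this test class follows the same ownership rule: whichever code creates a ref-counted object (a QuerySearchResult, or the QueryPhaseResultConsumer itself) releases its own reference in a finally block, and reduce() runs before the consumer's reference is dropped. A minimal sketch of that discipline; PooledResult is a hypothetical class, and the sketch assumes the no-arg AbstractRefCounted constructor from org.elasticsearch.core (the same class these test changes import):

    import org.elasticsearch.core.AbstractRefCounted;

    // Hypothetical ref-counted resource: born with one reference that the
    // creating code must release, mirroring the try/finally blocks added above.
    final class PooledResult extends AbstractRefCounted {
        @Override
        protected void closeInternal() {
            // last reference dropped: return buffers, release circuit-breaker bytes, ...
        }
    }

    class OwnershipSketch {
        public static void main(String[] args) {
            PooledResult result = new PooledResult();   // ref count starts at 1
            try {
                // populate the result and hand it to a consumer; a consumer that
                // keeps the result beyond this call must take its own incRef()
            } finally {
                result.decRef();                        // always release the creator's reference
            }
        }
    }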
latch.await(); - SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce(); - assertAggReduction(request); - assertEquals(3, reduce.sortedTopDocs().scoreDocs().length); - assertEquals(expectedNumResults, reduce.totalHits().value); - assertEquals(a, ((FieldDoc) reduce.sortedTopDocs().scoreDocs()[0]).fields[0]); - assertEquals(b, ((FieldDoc) reduce.sortedTopDocs().scoreDocs()[1]).fields[0]); - assertEquals(c, ((FieldDoc) reduce.sortedTopDocs().scoreDocs()[2]).fields[0]); - assertTrue(reduce.sortedTopDocs().isSortedByField()); - assertEquals(1, reduce.sortedTopDocs().sortFields().length); - assertEquals("field", reduce.sortedTopDocs().sortFields()[0].getField()); - assertEquals(SortField.Type.STRING, reduce.sortedTopDocs().sortFields()[0].getType()); - assertEquals("field", reduce.sortedTopDocs().collapseField()); - assertArrayEquals(collapseValues, reduce.sortedTopDocs().collapseValues()); } public void testConsumerSuggestions() throws Exception { @@ -1021,98 +1118,106 @@ public void testConsumerSuggestions() throws Exception { expectedNumResults, exc -> {} ); - int maxScoreTerm = -1; - int maxScorePhrase = -1; - int maxScoreCompletion = -1; - CountDownLatch latch = new CountDownLatch(expectedNumResults); - for (int i = 0; i < expectedNumResults; i++) { - QuerySearchResult result = new QuerySearchResult( - new ShardSearchContextId("", i), - new SearchShardTarget("node", new ShardId("a", "b", i), null), - null - ); - List>> suggestions = - new ArrayList<>(); - { - TermSuggestion termSuggestion = new TermSuggestion("term", 1, SortBy.SCORE); - TermSuggestion.Entry entry = new TermSuggestion.Entry(new Text("entry"), 0, 10); - int numOptions = randomIntBetween(1, 10); - for (int j = 0; j < numOptions; j++) { - int score = numOptions - j; - maxScoreTerm = Math.max(maxScoreTerm, score); - entry.addOption(new TermSuggestion.Entry.Option(new Text("option"), randomInt(), score)); + try { + int maxScoreTerm = -1; + int maxScorePhrase = -1; + int maxScoreCompletion = -1; + CountDownLatch latch = new CountDownLatch(expectedNumResults); + for (int i = 0; i < expectedNumResults; i++) { + QuerySearchResult result = new QuerySearchResult( + new ShardSearchContextId("", i), + new SearchShardTarget("node", new ShardId("a", "b", i), null), + null + ); + try { + List>> suggestions = + new ArrayList<>(); + { + TermSuggestion termSuggestion = new TermSuggestion("term", 1, SortBy.SCORE); + TermSuggestion.Entry entry = new TermSuggestion.Entry(new Text("entry"), 0, 10); + int numOptions = randomIntBetween(1, 10); + for (int j = 0; j < numOptions; j++) { + int score = numOptions - j; + maxScoreTerm = Math.max(maxScoreTerm, score); + entry.addOption(new TermSuggestion.Entry.Option(new Text("option"), randomInt(), score)); + } + termSuggestion.addTerm(entry); + suggestions.add(termSuggestion); + } + { + PhraseSuggestion phraseSuggestion = new PhraseSuggestion("phrase", 1); + PhraseSuggestion.Entry entry = new PhraseSuggestion.Entry(new Text("entry"), 0, 10); + int numOptions = randomIntBetween(1, 10); + for (int j = 0; j < numOptions; j++) { + int score = numOptions - j; + maxScorePhrase = Math.max(maxScorePhrase, score); + entry.addOption(new PhraseSuggestion.Entry.Option(new Text("option"), new Text("option"), score)); + } + phraseSuggestion.addTerm(entry); + suggestions.add(phraseSuggestion); + } + { + CompletionSuggestion completionSuggestion = new CompletionSuggestion("completion", 1, false); + CompletionSuggestion.Entry entry = new CompletionSuggestion.Entry(new Text("entry"), 0, 10); + int numOptions = 
randomIntBetween(1, 10); + for (int j = 0; j < numOptions; j++) { + int score = numOptions - j; + maxScoreCompletion = Math.max(maxScoreCompletion, score); + CompletionSuggestion.Entry.Option option = new CompletionSuggestion.Entry.Option( + j, + new Text("option"), + score, + Collections.emptyMap() + ); + entry.addOption(option); + } + completionSuggestion.addTerm(entry); + suggestions.add(completionSuggestion); + } + result.suggest(new Suggest(suggestions)); + result.topDocs(new TopDocsAndMaxScore(Lucene.EMPTY_TOP_DOCS, Float.NaN), new DocValueFormat[0]); + result.setShardIndex(i); + result.size(0); + consumer.consumeResult(result, latch::countDown); + } finally { + result.decRef(); } - termSuggestion.addTerm(entry); - suggestions.add(termSuggestion); } + latch.await(); + SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce(); + assertEquals(3, reduce.suggest().size()); { - PhraseSuggestion phraseSuggestion = new PhraseSuggestion("phrase", 1); - PhraseSuggestion.Entry entry = new PhraseSuggestion.Entry(new Text("entry"), 0, 10); - int numOptions = randomIntBetween(1, 10); - for (int j = 0; j < numOptions; j++) { - int score = numOptions - j; - maxScorePhrase = Math.max(maxScorePhrase, score); - entry.addOption(new PhraseSuggestion.Entry.Option(new Text("option"), new Text("option"), score)); - } - phraseSuggestion.addTerm(entry); - suggestions.add(phraseSuggestion); + TermSuggestion term = reduce.suggest().getSuggestion("term"); + assertEquals(1, term.getEntries().size()); + assertEquals(1, term.getEntries().get(0).getOptions().size()); + assertEquals(maxScoreTerm, term.getEntries().get(0).getOptions().get(0).getScore(), 0f); } { - CompletionSuggestion completionSuggestion = new CompletionSuggestion("completion", 1, false); - CompletionSuggestion.Entry entry = new CompletionSuggestion.Entry(new Text("entry"), 0, 10); - int numOptions = randomIntBetween(1, 10); - for (int j = 0; j < numOptions; j++) { - int score = numOptions - j; - maxScoreCompletion = Math.max(maxScoreCompletion, score); - CompletionSuggestion.Entry.Option option = new CompletionSuggestion.Entry.Option( - j, - new Text("option"), - score, - Collections.emptyMap() - ); - entry.addOption(option); - } - completionSuggestion.addTerm(entry); - suggestions.add(completionSuggestion); + PhraseSuggestion phrase = reduce.suggest().getSuggestion("phrase"); + assertEquals(1, phrase.getEntries().size()); + assertEquals(1, phrase.getEntries().get(0).getOptions().size()); + assertEquals(maxScorePhrase, phrase.getEntries().get(0).getOptions().get(0).getScore(), 0f); } - result.suggest(new Suggest(suggestions)); - result.topDocs(new TopDocsAndMaxScore(Lucene.EMPTY_TOP_DOCS, Float.NaN), new DocValueFormat[0]); - result.setShardIndex(i); - result.size(0); - consumer.consumeResult(result, latch::countDown); - } - latch.await(); - SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce(); - assertEquals(3, reduce.suggest().size()); - { - TermSuggestion term = reduce.suggest().getSuggestion("term"); - assertEquals(1, term.getEntries().size()); - assertEquals(1, term.getEntries().get(0).getOptions().size()); - assertEquals(maxScoreTerm, term.getEntries().get(0).getOptions().get(0).getScore(), 0f); - } - { - PhraseSuggestion phrase = reduce.suggest().getSuggestion("phrase"); - assertEquals(1, phrase.getEntries().size()); - assertEquals(1, phrase.getEntries().get(0).getOptions().size()); - assertEquals(maxScorePhrase, phrase.getEntries().get(0).getOptions().get(0).getScore(), 0f); - } - { - CompletionSuggestion completion = 
reduce.suggest().getSuggestion("completion"); - assertEquals(1, completion.getSize()); - assertEquals(1, completion.getOptions().size()); - CompletionSuggestion.Entry.Option option = completion.getOptions().get(0); - assertEquals(maxScoreCompletion, option.getScore(), 0f); + { + CompletionSuggestion completion = reduce.suggest().getSuggestion("completion"); + assertEquals(1, completion.getSize()); + assertEquals(1, completion.getOptions().size()); + CompletionSuggestion.Entry.Option option = completion.getOptions().get(0); + assertEquals(maxScoreCompletion, option.getScore(), 0f); + } + assertAggReduction(request); + assertEquals(1, reduce.sortedTopDocs().scoreDocs().length); + assertEquals(maxScoreCompletion, reduce.sortedTopDocs().scoreDocs()[0].score, 0f); + assertEquals(0, reduce.sortedTopDocs().scoreDocs()[0].doc); + assertNotEquals(-1, reduce.sortedTopDocs().scoreDocs()[0].shardIndex); + assertEquals(0, reduce.totalHits().value); + assertFalse(reduce.sortedTopDocs().isSortedByField()); + assertNull(reduce.sortedTopDocs().sortFields()); + assertNull(reduce.sortedTopDocs().collapseField()); + assertNull(reduce.sortedTopDocs().collapseValues()); + } finally { + consumer.decRef(); } - assertAggReduction(request); - assertEquals(1, reduce.sortedTopDocs().scoreDocs().length); - assertEquals(maxScoreCompletion, reduce.sortedTopDocs().scoreDocs()[0].score, 0f); - assertEquals(0, reduce.sortedTopDocs().scoreDocs()[0].doc); - assertNotEquals(-1, reduce.sortedTopDocs().scoreDocs()[0].shardIndex); - assertEquals(0, reduce.totalHits().value); - assertFalse(reduce.sortedTopDocs().isSortedByField()); - assertNull(reduce.sortedTopDocs().sortFields()); - assertNull(reduce.sortedTopDocs().collapseField()); - assertNull(reduce.sortedTopDocs().collapseValues()); } public void testProgressListener() throws Exception { @@ -1160,59 +1265,67 @@ public void onFinalReduce(List shards, TotalHits totalHits, Interna expectedNumResults, exc -> {} ); - AtomicInteger max = new AtomicInteger(); - Thread[] threads = new Thread[expectedNumResults]; - CountDownLatch latch = new CountDownLatch(expectedNumResults); - for (int i = 0; i < expectedNumResults; i++) { - int id = i; - threads[i] = new Thread(() -> { - int number = randomIntBetween(1, 1000); - max.updateAndGet(prev -> Math.max(prev, number)); - QuerySearchResult result = new QuerySearchResult( - new ShardSearchContextId("", id), - new SearchShardTarget("node", new ShardId("a", "b", id), null), - null - ); - result.topDocs( - new TopDocsAndMaxScore( - new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(0, number) }), - number - ), - new DocValueFormat[0] - ); - InternalAggregations aggs = InternalAggregations.from( - Collections.singletonList(new Max("test", (double) number, DocValueFormat.RAW, Collections.emptyMap())) - ); - result.aggregations(aggs); - result.setShardIndex(id); - result.size(1); - consumer.consumeResult(result, latch::countDown); - }); - threads[i].start(); - } - for (int i = 0; i < expectedNumResults; i++) { - threads[i].join(); - } - latch.await(); - SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce(); - assertAggReduction(request); - Max internalMax = (Max) reduce.aggregations().asList().get(0); - assertEquals(max.get(), internalMax.value(), 0.0D); - assertEquals(1, reduce.sortedTopDocs().scoreDocs().length); - assertEquals(max.get(), reduce.maxScore(), 0.0f); - assertEquals(expectedNumResults, reduce.totalHits().value); - assertEquals(max.get(), 
reduce.sortedTopDocs().scoreDocs()[0].score, 0.0f); - assertFalse(reduce.sortedTopDocs().isSortedByField()); - assertNull(reduce.sortedTopDocs().sortFields()); - assertNull(reduce.sortedTopDocs().collapseField()); - assertNull(reduce.sortedTopDocs().collapseValues()); + try { + AtomicInteger max = new AtomicInteger(); + Thread[] threads = new Thread[expectedNumResults]; + CountDownLatch latch = new CountDownLatch(expectedNumResults); + for (int i = 0; i < expectedNumResults; i++) { + int id = i; + threads[i] = new Thread(() -> { + int number = randomIntBetween(1, 1000); + max.updateAndGet(prev -> Math.max(prev, number)); + QuerySearchResult result = new QuerySearchResult( + new ShardSearchContextId("", id), + new SearchShardTarget("node", new ShardId("a", "b", id), null), + null + ); + try { + result.topDocs( + new TopDocsAndMaxScore( + new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(0, number) }), + number + ), + new DocValueFormat[0] + ); + InternalAggregations aggs = InternalAggregations.from( + Collections.singletonList(new Max("test", (double) number, DocValueFormat.RAW, Collections.emptyMap())) + ); + result.aggregations(aggs); + result.setShardIndex(id); + result.size(1); + consumer.consumeResult(result, latch::countDown); + } finally { + result.decRef(); + } + }); + threads[i].start(); + } + for (int i = 0; i < expectedNumResults; i++) { + threads[i].join(); + } + latch.await(); + SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce(); + assertAggReduction(request); + Max internalMax = (Max) reduce.aggregations().asList().get(0); + assertEquals(max.get(), internalMax.value(), 0.0D); + assertEquals(1, reduce.sortedTopDocs().scoreDocs().length); + assertEquals(max.get(), reduce.maxScore(), 0.0f); + assertEquals(expectedNumResults, reduce.totalHits().value); + assertEquals(max.get(), reduce.sortedTopDocs().scoreDocs()[0].score, 0.0f); + assertFalse(reduce.sortedTopDocs().isSortedByField()); + assertNull(reduce.sortedTopDocs().sortFields()); + assertNull(reduce.sortedTopDocs().collapseField()); + assertNull(reduce.sortedTopDocs().collapseValues()); - assertEquals(reduce.aggregations(), finalAggsListener.get()); - assertEquals(reduce.totalHits(), totalHitsListener.get()); + assertEquals(reduce.aggregations(), finalAggsListener.get()); + assertEquals(reduce.totalHits(), totalHitsListener.get()); - assertEquals(expectedNumResults, numQueryResultListener.get()); - assertEquals(0, numQueryFailureListener.get()); - assertEquals(numReduceListener.get(), reduce.numReducePhases()); + assertEquals(expectedNumResults, numQueryResultListener.get()); + assertEquals(0, numQueryFailureListener.get()); + assertEquals(numReduceListener.get(), reduce.numReducePhases()); + } finally { + consumer.decRef(); + } } } @@ -1243,48 +1356,58 @@ private void testReduceCase(int numShards, int bufferSize, boolean shouldFail) t numShards, exc -> hasConsumedFailure.set(true) ); - CountDownLatch latch = new CountDownLatch(numShards); - Thread[] threads = new Thread[numShards]; - for (int i = 0; i < numShards; i++) { - final int index = i; - threads[index] = new Thread(() -> { - QuerySearchResult result = new QuerySearchResult( - new ShardSearchContextId(UUIDs.randomBase64UUID(), index), - new SearchShardTarget("node", new ShardId("a", "b", index), null), - null - ); - result.topDocs( - new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), Lucene.EMPTY_SCORE_DOCS), Float.NaN), - new DocValueFormat[0] - ); - InternalAggregations aggs = 
InternalAggregations.from( - Collections.singletonList(new Max("test", 0d, DocValueFormat.RAW, Collections.emptyMap())) - ); - result.aggregations(aggs); - result.setShardIndex(index); - result.size(1); - consumer.consumeResult(result, latch::countDown); - }); - threads[index].start(); - } - for (int i = 0; i < numShards; i++) { - threads[i].join(); - } - latch.await(); - if (shouldFail) { - if (shouldFailPartial == false) { - circuitBreaker.shouldBreak.set(true); - } else { + try { + CountDownLatch latch = new CountDownLatch(numShards); + Thread[] threads = new Thread[numShards]; + for (int i = 0; i < numShards; i++) { + final int index = i; + threads[index] = new Thread(() -> { + QuerySearchResult result = new QuerySearchResult( + new ShardSearchContextId(UUIDs.randomBase64UUID(), index), + new SearchShardTarget("node", new ShardId("a", "b", index), null), + null + ); + try { + result.topDocs( + new TopDocsAndMaxScore( + new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), Lucene.EMPTY_SCORE_DOCS), + Float.NaN + ), + new DocValueFormat[0] + ); + InternalAggregations aggs = InternalAggregations.from( + Collections.singletonList(new Max("test", 0d, DocValueFormat.RAW, Collections.emptyMap())) + ); + result.aggregations(aggs); + result.setShardIndex(index); + result.size(1); + consumer.consumeResult(result, latch::countDown); + } finally { + result.decRef(); + } + }); + threads[index].start(); + } + for (int i = 0; i < numShards; i++) { + threads[i].join(); + } + latch.await(); + if (shouldFail) { + if (shouldFailPartial == false) { + circuitBreaker.shouldBreak.set(true); + } else { + circuitBreaker.shouldBreak.set(false); + } + CircuitBreakingException exc = expectThrows(CircuitBreakingException.class, () -> consumer.reduce()); + assertEquals(shouldFailPartial, hasConsumedFailure.get()); + assertThat(exc.getMessage(), containsString("")); circuitBreaker.shouldBreak.set(false); + } else { + consumer.reduce(); } - CircuitBreakingException exc = expectThrows(CircuitBreakingException.class, () -> consumer.reduce()); - assertEquals(shouldFailPartial, hasConsumedFailure.get()); - assertThat(exc.getMessage(), containsString("")); - circuitBreaker.shouldBreak.set(false); - } else { - consumer.reduce(); + } finally { + consumer.decRef(); } - consumer.close(); assertThat(circuitBreaker.allocated, equalTo(0L)); } @@ -1296,17 +1419,16 @@ public void testFailConsumeAggs() throws Exception { request.source(new SearchSourceBuilder().aggregation(AggregationBuilders.avg("foo")).size(0)); request.setBatchedReduceSize(bufferSize); AtomicBoolean hasConsumedFailure = new AtomicBoolean(); - try ( - QueryPhaseResultConsumer consumer = searchPhaseController.newSearchPhaseResults( - fixedExecutor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - request, - expectedNumResults, - exc -> hasConsumedFailure.set(true) - ) - ) { + QueryPhaseResultConsumer consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + request, + expectedNumResults, + exc -> hasConsumedFailure.set(true) + ); + try { for (int i = 0; i < expectedNumResults; i++) { final int index = i; QuerySearchResult result = new QuerySearchResult( @@ -1314,16 +1436,25 @@ public void testFailConsumeAggs() throws Exception { new SearchShardTarget("node", new ShardId("a", "b", index), null), null ); - result.topDocs( - new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, 
TotalHits.Relation.EQUAL_TO), Lucene.EMPTY_SCORE_DOCS), Float.NaN), - new DocValueFormat[0] - ); - result.aggregations(null); - result.setShardIndex(index); - result.size(1); - expectThrows(Exception.class, () -> consumer.consumeResult(result, () -> {})); + try { + result.topDocs( + new TopDocsAndMaxScore( + new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), Lucene.EMPTY_SCORE_DOCS), + Float.NaN + ), + new DocValueFormat[0] + ); + result.aggregations(null); + result.setShardIndex(index); + result.size(1); + expectThrows(Exception.class, () -> consumer.consumeResult(result, () -> {})); + } finally { + result.decRef(); + } } assertNull(consumer.reduce().aggregations()); + } finally { + consumer.decRef(); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java index 6d5380273c8c8..3097376de7a41 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java @@ -118,38 +118,55 @@ public void sendExecuteQuery( new SearchShardTarget("node1", new ShardId("idx", "na", shardId), null), null ); - SortField sortField = new SortField("timestamp", SortField.Type.LONG); - if (withCollapse) { - queryResult.topDocs( - new TopDocsAndMaxScore( - new TopFieldGroups( - "collapse_field", - new TotalHits(1, withScroll ? TotalHits.Relation.EQUAL_TO : TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), - new FieldDoc[] { new FieldDoc(randomInt(1000), Float.NaN, new Object[] { request.shardId().id() }) }, - new SortField[] { sortField }, - new Object[] { 0L } + try { + SortField sortField = new SortField("timestamp", SortField.Type.LONG); + if (withCollapse) { + queryResult.topDocs( + new TopDocsAndMaxScore( + new TopFieldGroups( + "collapse_field", + new TotalHits( + 1, + withScroll ? TotalHits.Relation.EQUAL_TO : TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO + ), + new FieldDoc[] { new FieldDoc(randomInt(1000), Float.NaN, new Object[] { request.shardId().id() }) }, + new SortField[] { sortField }, + new Object[] { 0L } + ), + Float.NaN ), - Float.NaN - ), - new DocValueFormat[] { DocValueFormat.RAW } - ); - } else { - queryResult.topDocs( - new TopDocsAndMaxScore( - new TopFieldDocs( - new TotalHits(1, withScroll ? TotalHits.Relation.EQUAL_TO : TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), - new FieldDoc[] { new FieldDoc(randomInt(1000), Float.NaN, new Object[] { request.shardId().id() }) }, - new SortField[] { sortField } + new DocValueFormat[] { DocValueFormat.RAW } + ); + } else { + queryResult.topDocs( + new TopDocsAndMaxScore( + new TopFieldDocs( + new TotalHits( + 1, + withScroll ? 
TotalHits.Relation.EQUAL_TO : TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO + ), + new FieldDoc[] { new FieldDoc(randomInt(1000), Float.NaN, new Object[] { request.shardId().id() }) }, + new SortField[] { sortField } + ), + Float.NaN ), - Float.NaN - ), - new DocValueFormat[] { DocValueFormat.RAW } - ); + new DocValueFormat[] { DocValueFormat.RAW } + ); + } + queryResult.from(0); + queryResult.size(1); + successfulOps.incrementAndGet(); + queryResult.incRef(); + new Thread(() -> { + try { + listener.onResponse(queryResult); + } finally { + queryResult.decRef(); + } + }).start(); + } finally { + queryResult.decRef(); } - queryResult.from(0); - queryResult.size(1); - successfulOps.incrementAndGet(); - new Thread(() -> listener.onResponse(queryResult)).start(); } }; CountDownLatch latch = new CountDownLatch(1); @@ -186,59 +203,63 @@ public void sendExecuteQuery( shardsIter.size(), exc -> {} ); - SearchQueryThenFetchAsyncAction action = new SearchQueryThenFetchAsyncAction( - logger, - searchTransportService, - (clusterAlias, node) -> lookup.get(node), - Collections.singletonMap("_na_", AliasFilter.EMPTY), - Collections.emptyMap(), - EsExecutors.DIRECT_EXECUTOR_SERVICE, - resultConsumer, - searchRequest, - null, - shardsIter, - timeProvider, - new ClusterState.Builder(new ClusterName("test")).build(), - task, - SearchResponse.Clusters.EMPTY - ) { - @Override - protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { - return new SearchPhase("test") { - @Override - public void run() { - latch.countDown(); - } - }; - } - }; - action.start(); - latch.await(); - assertThat(successfulOps.get(), equalTo(numShards)); - if (withScroll) { - assertFalse(canReturnNullResponse.get()); - assertThat(numWithTopDocs.get(), equalTo(0)); - } else { - assertTrue(canReturnNullResponse.get()); - if (withCollapse) { + try { + SearchQueryThenFetchAsyncAction action = new SearchQueryThenFetchAsyncAction( + logger, + searchTransportService, + (clusterAlias, node) -> lookup.get(node), + Collections.singletonMap("_na_", AliasFilter.EMPTY), + Collections.emptyMap(), + EsExecutors.DIRECT_EXECUTOR_SERVICE, + resultConsumer, + searchRequest, + null, + shardsIter, + timeProvider, + new ClusterState.Builder(new ClusterName("test")).build(), + task, + SearchResponse.Clusters.EMPTY + ) { + @Override + protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { + return new SearchPhase("test") { + @Override + public void run() { + latch.countDown(); + } + }; + } + }; + action.start(); + latch.await(); + assertThat(successfulOps.get(), equalTo(numShards)); + if (withScroll) { + assertFalse(canReturnNullResponse.get()); assertThat(numWithTopDocs.get(), equalTo(0)); } else { - assertThat(numWithTopDocs.get(), greaterThanOrEqualTo(1)); + assertTrue(canReturnNullResponse.get()); + if (withCollapse) { + assertThat(numWithTopDocs.get(), equalTo(0)); + } else { + assertThat(numWithTopDocs.get(), greaterThanOrEqualTo(1)); + } + } + SearchPhaseController.ReducedQueryPhase phase = action.results.reduce(); + assertThat(phase.numReducePhases(), greaterThanOrEqualTo(1)); + if (withScroll) { + assertThat(phase.totalHits().value, equalTo((long) numShards)); + assertThat(phase.totalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); + } else { + assertThat(phase.totalHits().value, equalTo(2L)); + assertThat(phase.totalHits().relation, equalTo(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO)); } + assertThat(phase.sortedTopDocs().scoreDocs().length, equalTo(1)); + 
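The queryResult.incRef() added above is the other half of the same contract: before publishing the result to the listener on a separate thread, the producer takes an extra reference, and each side then releases only the reference it owns, so the result cannot be freed while the listener thread can still see it. A plain-Java sketch of that handoff (the counter-based class is illustrative, not the ES implementation):

    import java.util.concurrent.atomic.AtomicInteger;

    // Illustrative stand-in for a ref-counted result shared across threads.
    final class SharedResult {
        private final AtomicInteger refs = new AtomicInteger(1);  // creator's reference

        void incRef() {
            refs.incrementAndGet();
        }

        void decRef() {
            if (refs.decrementAndGet() == 0) {
                System.out.println("released");  // safe: no other holder remains
            }
        }
    }

    class HandoffSketch {
        public static void main(String[] args) {
            SharedResult result = new SharedResult();
            try {
                result.incRef();                 // reference owned by the listener thread
                new Thread(() -> {
                    try {
                        // listener.onResponse(result) runs here in the real test
                    } finally {
                        result.decRef();         // listener thread releases its reference
                    }
                }).start();
            } finally {
                result.decRef();                 // producer releases its reference
            }
        }
    }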
assertThat(phase.sortedTopDocs().scoreDocs()[0], instanceOf(FieldDoc.class)); + assertThat(((FieldDoc) phase.sortedTopDocs().scoreDocs()[0]).fields.length, equalTo(1)); + assertThat(((FieldDoc) phase.sortedTopDocs().scoreDocs()[0]).fields[0], equalTo(0)); + } finally { + resultConsumer.decRef(); } - SearchPhaseController.ReducedQueryPhase phase = action.results.reduce(); - assertThat(phase.numReducePhases(), greaterThanOrEqualTo(1)); - if (withScroll) { - assertThat(phase.totalHits().value, equalTo((long) numShards)); - assertThat(phase.totalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); - } else { - assertThat(phase.totalHits().value, equalTo(2L)); - assertThat(phase.totalHits().relation, equalTo(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO)); - } - assertThat(phase.sortedTopDocs().scoreDocs().length, equalTo(1)); - assertThat(phase.sortedTopDocs().scoreDocs()[0], instanceOf(FieldDoc.class)); - assertThat(((FieldDoc) phase.sortedTopDocs().scoreDocs()[0]).fields.length, equalTo(1)); - assertThat(((FieldDoc) phase.sortedTopDocs().scoreDocs()[0]).fields[0], equalTo(0)); } @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/101932") diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java index 1747886c48da4..bb2b6f7903a6e 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java @@ -20,7 +20,7 @@ public class SearchRequestBuilderTests extends ESTestCase { private SearchRequestBuilder makeBuilder() { ElasticsearchClient client = Mockito.mock(ElasticsearchClient.class); - return new SearchRequestBuilder(client, SearchAction.INSTANCE); + return new SearchRequestBuilder(client, TransportSearchAction.TYPE); } public void testEmptySourceToString() { diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index 8005f2f412699..0c8496081ff19 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -528,7 +528,7 @@ public void testDescriptionIncludePreferenceAndRouting() { } private String toDescription(SearchRequest request) { - return request.createTask(0, "test", SearchAction.NAME, TaskId.EMPTY_TASK_ID, emptyMap()).getDescription(); + return request.createTask(0, "test", TransportSearchAction.TYPE.name(), TaskId.EMPTY_TASK_ID, emptyMap()).getDescription(); } public void testForceSyntheticUnsupported() { diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java index bba7027f193f3..70bd2d9f00a05 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java @@ -35,11 +35,11 @@ import java.util.IdentityHashMap; import java.util.List; import java.util.Set; +import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import static org.elasticsearch.action.support.PlainActionFuture.newFuture; import 
static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; @@ -101,7 +101,7 @@ public String getLocalNodeId() { client ); - PlainActionFuture future = newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); action.execute(task, multiSearchRequest, future); future.get(); assertEquals(numSearchRequests, counter.get()); @@ -110,7 +110,7 @@ public String getLocalNodeId() { } } - public void testBatchExecute() { + public void testBatchExecute() throws ExecutionException, InterruptedException { // Initialize dependencies of TransportMultiSearchAction Settings settings = Settings.builder().put("node.name", TransportMultiSearchActionTests.class.getSimpleName()).build(); ActionFilters actionFilters = mock(ActionFilters.class); @@ -205,10 +205,14 @@ public String getLocalNodeId() { multiSearchRequest.add(new SearchRequest()); } - MultiSearchResponse response = ActionTestUtils.executeBlocking(action, multiSearchRequest); - assertThat(response.getResponses().length, equalTo(numSearchRequests)); - assertThat(requests.size(), equalTo(numSearchRequests)); - assertThat(errorHolder.get(), nullValue()); + final PlainActionFuture future = new PlainActionFuture<>(); + ActionTestUtils.execute(action, multiSearchRequest, future.delegateFailure((l, response) -> { + assertThat(response.getResponses().length, equalTo(numSearchRequests)); + assertThat(requests.size(), equalTo(numSearchRequests)); + assertThat(errorHolder.get(), nullValue()); + l.onResponse(null); + })); + future.get(); } finally { assertTrue(ESTestCase.terminate(threadPool)); } diff --git a/server/src/test/java/org/elasticsearch/action/support/RefCountingListenerTests.java b/server/src/test/java/org/elasticsearch/action/support/RefCountingListenerTests.java index b1f49cb6efd6d..6e2e984c060fb 100644 --- a/server/src/test/java/org/elasticsearch/action/support/RefCountingListenerTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/RefCountingListenerTests.java @@ -10,14 +10,17 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.util.concurrent.RunOnce; +import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ReachabilityChecker; +import java.io.IOException; import java.util.ArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Consumer; import static org.elasticsearch.common.util.concurrent.EsExecutors.DIRECT_EXECUTOR_SERVICE; import static org.hamcrest.Matchers.containsString; @@ -88,7 +91,7 @@ public String toString() { var reachChecker = new ReachabilityChecker(); var consumed = new AtomicBoolean(); - var consumingListener = refs.acquire(reachChecker.register(new Consumer() { + var consumingListener = refs.acquire(reachChecker.register(new CheckedConsumer() { @Override public void accept(String s) { assertEquals("test response", s); @@ -172,10 +175,10 @@ public void testValidation() { final String expectedMessage; if (randomBoolean()) { throwingRunnable = refs::acquire; - expectedMessage = RefCountingRunnable.ALREADY_CLOSED_MESSAGE; + expectedMessage = AbstractRefCounted.ALREADY_CLOSED_MESSAGE; } else { throwingRunnable = refs::close; - expectedMessage = "invalid decRef call: already closed"; + expectedMessage = 
AbstractRefCounted.INVALID_DECREF_MESSAGE; } assertEquals(expectedMessage, expectThrows(AssertionError.class, throwingRunnable).getMessage()); @@ -185,6 +188,7 @@ public void testValidation() { public void testConsumerFailure() { final var executed = new AtomicBoolean(); + final Runnable completeAcquiredRunOnce; try (var refs = new RefCountingListener(new ActionListener() { @Override public void onResponse(Void unused) { @@ -197,8 +201,19 @@ public void onFailure(Exception e) { executed.set(true); } })) { - refs.acquire(ignored -> { throw new ElasticsearchException("simulated"); }).onResponse(null); + final var listener = refs.acquire(ignored -> { + if (randomBoolean()) { + throw new ElasticsearchException("simulated"); + } else { + throw new IOException("simulated"); + } + }); + completeAcquiredRunOnce = new RunOnce(() -> listener.onResponse(null)); + if (randomBoolean()) { + completeAcquiredRunOnce.run(); + } } + completeAcquiredRunOnce.run(); assertTrue(executed.get()); } diff --git a/server/src/test/java/org/elasticsearch/action/support/RefCountingRunnableTests.java b/server/src/test/java/org/elasticsearch/action/support/RefCountingRunnableTests.java index b5ccc4f50969b..1018b073adb9d 100644 --- a/server/src/test/java/org/elasticsearch/action/support/RefCountingRunnableTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/RefCountingRunnableTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.Releasable; import org.elasticsearch.test.ESTestCase; @@ -166,10 +167,10 @@ public void testValidation() { final String expectedMessage; if (randomBoolean()) { throwingRunnable = randomBoolean() ? 
refs::acquire : refs::acquireListener; - expectedMessage = RefCountingRunnable.ALREADY_CLOSED_MESSAGE; + expectedMessage = AbstractRefCounted.ALREADY_CLOSED_MESSAGE; } else { throwingRunnable = refs::close; - expectedMessage = "invalid decRef call: already closed"; + expectedMessage = AbstractRefCounted.INVALID_DECREF_MESSAGE; } assertEquals(expectedMessage, expectThrows(AssertionError.class, throwingRunnable).getMessage()); diff --git a/server/src/test/java/org/elasticsearch/action/support/RetryableActionTests.java b/server/src/test/java/org/elasticsearch/action/support/RetryableActionTests.java index fd1439f23240e..95c4879812375 100644 --- a/server/src/test/java/org/elasticsearch/action/support/RetryableActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/RetryableActionTests.java @@ -33,7 +33,7 @@ public void setUp() throws Exception { public void testRetryableActionNoRetries() { final AtomicInteger executedCount = new AtomicInteger(); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); final RetryableAction retryableAction = new RetryableAction<>( logger, taskQueue.getThreadPool(), @@ -65,7 +65,7 @@ public void testRetryableActionWillRetry() { int expectedRetryCount = randomIntBetween(1, 8); final AtomicInteger remainingFailedCount = new AtomicInteger(expectedRetryCount); final AtomicInteger retryCount = new AtomicInteger(); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); final RetryableAction retryableAction = new RetryableAction<>( logger, taskQueue.getThreadPool(), @@ -113,7 +113,7 @@ public boolean shouldRetry(Exception e) { public void testRetryableActionTimeout() { final AtomicInteger retryCount = new AtomicInteger(); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); final RetryableAction retryableAction = new RetryableAction<>( logger, taskQueue.getThreadPool(), @@ -161,7 +161,7 @@ public boolean shouldRetry(Exception e) { public void testTimeoutOfZeroMeansNoRetry() { final AtomicInteger executedCount = new AtomicInteger(); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); final RetryableAction retryableAction = new RetryableAction<>( logger, taskQueue.getThreadPool(), @@ -191,7 +191,7 @@ public boolean shouldRetry(Exception e) { public void testFailedBecauseNotRetryable() { final AtomicInteger executedCount = new AtomicInteger(); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); final RetryableAction retryableAction = new RetryableAction<>( logger, taskQueue.getThreadPool(), @@ -221,7 +221,7 @@ public boolean shouldRetry(Exception e) { public void testRetryableActionCancelled() { final AtomicInteger executedCount = new AtomicInteger(); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); final RetryableAction retryableAction = new RetryableAction<>( logger, taskQueue.getThreadPool(), @@ -259,7 +259,7 @@ public boolean shouldRetry(Exception e) { } public void testMaxDelayBound() { - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); final RetryableAction retryableAction = new RetryableAction<>( logger, 
taskQueue.getThreadPool(), diff --git a/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java b/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java index 132366a57c3c5..59567c1ee9783 100644 --- a/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java @@ -99,7 +99,7 @@ protected void doExecute(Task task, TestRequest request, ActionListener future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); ActionTestUtils.execute(transportAction, null, new TestRequest(), future); try { diff --git a/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java index 7daa2bc82542c..b175123942cf7 100644 --- a/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -429,7 +429,7 @@ public void testNoShardOperationsExecutedIfTaskCancelled() throws Exception { final TransportBroadcastByNodeAction.BroadcastByNodeTransportRequestHandler handler = action.new BroadcastByNodeTransportRequestHandler(); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); TestTransportChannel channel = new TestTransportChannel(future); final CancellableTask cancellableTask = new CancellableTask(randomLong(), "transport", "action", "", null, emptyMap()); @@ -490,7 +490,7 @@ public void testOperationExecution() throws Exception { final TransportBroadcastByNodeAction.BroadcastByNodeTransportRequestHandler handler = action.new BroadcastByNodeTransportRequestHandler(); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); TestTransportChannel channel = new TestTransportChannel(future); handler.messageReceived(action.new NodeRequest(new Request(), new ArrayList<>(shards), nodeId), channel, null); diff --git a/server/src/test/java/org/elasticsearch/action/support/broadcast/unpromotable/TransportBroadcastUnpromotableActionTests.java b/server/src/test/java/org/elasticsearch/action/support/broadcast/unpromotable/TransportBroadcastUnpromotableActionTests.java index 7a63be3773cbe..b5bcb8c54668a 100644 --- a/server/src/test/java/org/elasticsearch/action/support/broadcast/unpromotable/TransportBroadcastUnpromotableActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/broadcast/unpromotable/TransportBroadcastUnpromotableActionTests.java @@ -216,7 +216,7 @@ private static List> getReplicaRoles } private int countRequestsForIndex(ClusterState state, String index) { - PlainActionFuture response = PlainActionFuture.newFuture(); + PlainActionFuture response = new PlainActionFuture<>(); state.routingTable().activePrimaryShardsGrouped(new String[] { index }, true).iterator().forEachRemaining(shardId -> { logger.debug("--> executing for primary shard id: {}", shardId.shardId()); ActionTestUtils.execute( @@ -321,7 +321,7 @@ public void testInvalidNodes() throws Exception { } IndexShardRoutingTable wrongRoutingTable = wrongRoutingTableBuilder.build(); - PlainActionFuture response = PlainActionFuture.newFuture(); + 
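The recurring PlainActionFuture.newFuture() to new PlainActionFuture<>() substitution across these files looks purely mechanical: the static factory appears to be on its way out in favor of direct construction with the diamond operator. A minimal usage sketch, assuming only that PlainActionFuture implements ActionListener and exposes actionGet(), as the surrounding tests rely on:

    import org.elasticsearch.action.support.PlainActionFuture;

    class FutureSketch {
        public static void main(String[] args) {
            // before: PlainActionFuture<String> future = PlainActionFuture.newFuture();
            PlainActionFuture<String> future = new PlainActionFuture<>();
            future.onResponse("done");               // complete it via the ActionListener side
            System.out.println(future.actionGet());  // blocks until completed, then prints "done"
        }
    }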
PlainActionFuture response = new PlainActionFuture<>(); logger.debug("--> executing for wrong shard routing table: {}", wrongRoutingTable); // The request fails if we don't mark shards as stale @@ -377,7 +377,6 @@ private ActionResponse brodcastUnpromotableRequest(IndexShardRoutingTable wrongR } public void testNullIndexShardRoutingTable() { - PlainActionFuture response = PlainActionFuture.newFuture(); IndexShardRoutingTable shardRoutingTable = null; assertThat( expectThrows( diff --git a/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java b/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java index 9b2063d742f2e..9458d0fe962e8 100644 --- a/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java @@ -438,14 +438,4 @@ protected TestNodeResponse(StreamInput in) throws IOException { } } - private static class OtherNodeResponse extends BaseNodeResponse { - OtherNodeResponse() { - super(mock(DiscoveryNode.class)); - } - - protected OtherNodeResponse(StreamInput in) throws IOException { - super(in); - } - } - } diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java index f0be7453d7b59..a787a50798e05 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java @@ -141,7 +141,7 @@ public void testNotStartedPrimary() throws InterruptedException, ExecutionExcept ) ); logger.debug("--> using initial state:\n{}", clusterService.state()); - PlainActionFuture response = PlainActionFuture.newFuture(); + PlainActionFuture response = new PlainActionFuture<>(); ActionTestUtils.execute(broadcastReplicationAction, null, new DummyBroadcastRequest(index), response); for (Tuple> shardRequests : broadcastReplicationAction.capturedShardRequests) { if (randomBoolean()) { @@ -160,7 +160,7 @@ public void testStartedPrimary() throws InterruptedException, ExecutionException final String index = "test"; setState(clusterService, state(index, randomBoolean(), ShardRoutingState.STARTED)); logger.debug("--> using initial state:\n{}", clusterService.state()); - PlainActionFuture response = PlainActionFuture.newFuture(); + PlainActionFuture response = new PlainActionFuture<>(); ActionTestUtils.execute(broadcastReplicationAction, null, new DummyBroadcastRequest(index), response); for (Tuple> shardRequests : broadcastReplicationAction.capturedShardRequests) { ReplicationResponse replicationResponse = new ReplicationResponse(); @@ -176,7 +176,7 @@ public void testResultCombine() throws InterruptedException, ExecutionException, int numShards = 1 + randomInt(3); setState(clusterService, stateWithAssignedPrimariesAndOneReplica(index, numShards)); logger.debug("--> using initial state:\n{}", clusterService.state()); - PlainActionFuture response = PlainActionFuture.newFuture(); + PlainActionFuture response = new PlainActionFuture<>(); ActionTestUtils.execute(broadcastReplicationAction, null, new DummyBroadcastRequest().indices(index), response); int succeeded = 0; int failed = 0; @@ -303,7 +303,7 @@ public BaseBroadcastResponse executeAndAssertImmediateResponse( TransportBroadcastReplicationAction broadcastAction, 
DummyBroadcastRequest request ) { - PlainActionFuture response = PlainActionFuture.newFuture(); + PlainActionFuture response = new PlainActionFuture<>(); ActionTestUtils.execute(broadcastAction, null, request, response); return response.actionGet("5s"); } diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/PendingReplicationActionsTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/PendingReplicationActionsTests.java index 73c1c1248d624..aaa2d0709bfbe 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/PendingReplicationActionsTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/PendingReplicationActionsTests.java @@ -43,7 +43,7 @@ public void tearDown() throws Exception { public void testAllocationIdActionCanBeRun() { String allocationId = UUIDs.randomBase64UUID(); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); pendingReplication.acceptNewTrackedAllocationIds(Collections.singleton(allocationId)); TestAction action = new TestAction(future); pendingReplication.addPendingAction(allocationId, action); @@ -54,7 +54,7 @@ public void testAllocationIdActionCanBeRun() { public void testMissingAllocationIdActionWillBeCancelled() { String allocationId = UUIDs.randomBase64UUID(); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); TestAction action = new TestAction(future); pendingReplication.addPendingAction(allocationId, action); expectThrows(IndexShardClosedException.class, future::actionGet); @@ -62,7 +62,7 @@ public void testMissingAllocationIdActionWillBeCancelled() { public void testAllocationIdActionWillBeCancelledIfTrackedAllocationChanges() { String allocationId = UUIDs.randomBase64UUID(); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); pendingReplication.acceptNewTrackedAllocationIds(Collections.singleton(allocationId)); TestAction action = new TestAction(future, false); pendingReplication.addPendingAction(allocationId, action); @@ -73,7 +73,7 @@ public void testAllocationIdActionWillBeCancelledIfTrackedAllocationChanges() { public void testAllocationIdActionWillBeCancelledOnClose() { String allocationId = UUIDs.randomBase64UUID(); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); pendingReplication.acceptNewTrackedAllocationIds(Collections.singleton(allocationId)); TestAction action = new TestAction(future, false); pendingReplication.addPendingAction(allocationId, action); diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/PostWriteRefreshTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/PostWriteRefreshTests.java index d14429647c7d3..735eb07a546c5 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/PostWriteRefreshTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/PostWriteRefreshTests.java @@ -89,7 +89,7 @@ public void testWaitUntilRefreshPrimaryShard() throws IOException { try { String id = "0"; Engine.IndexResult result = indexDoc(primary, "_doc", id); - PlainActionFuture f = PlainActionFuture.newFuture(); + PlainActionFuture f = new PlainActionFuture<>(); PostWriteRefresh postWriteRefresh = new PostWriteRefresh(transportService); postWriteRefresh.refreshShard( WriteRequest.RefreshPolicy.WAIT_UNTIL, 
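Back in the RefCountingListenerTests hunk above, the acquired listener is now completed through a RunOnce that may fire either inside or after the try-with-resources block; the point of the change is that completing an acquired listener after the RefCountingListener has closed is legal, and RunOnce guarantees it happens exactly once on whichever path runs first. A hedged sketch of that shape, using only the constructors and methods the hunk itself exercises:

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.support.RefCountingListener;
    import org.elasticsearch.common.util.concurrent.RunOnce;

    class RunOnceSketch {
        public static void main(String[] args) {
            final Runnable completeOnce;
            try (var refs = new RefCountingListener(new ActionListener<Void>() {
                @Override
                public void onResponse(Void unused) {
                    System.out.println("all acquired listeners completed");
                }

                @Override
                public void onFailure(Exception e) {
                    System.out.println("a consumer threw: " + e);
                }
            })) {
                ActionListener<Void> acquired = refs.acquire(ignored -> {});
                completeOnce = new RunOnce(() -> acquired.onResponse(null));
            }
            completeOnce.run();   // completion after close is allowed; a second run() is a no-op
        }
    }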
@@ -114,7 +114,7 @@ public void testImmediateRefreshPrimaryShard() throws IOException { try { String id = "0"; Engine.IndexResult result = indexDoc(primary, "_doc", id); - PlainActionFuture f = PlainActionFuture.newFuture(); + PlainActionFuture f = new PlainActionFuture<>(); PostWriteRefresh postWriteRefresh = new PostWriteRefresh(transportService); postWriteRefresh.refreshShard( WriteRequest.RefreshPolicy.IMMEDIATE, @@ -138,7 +138,7 @@ public void testPrimaryWithUnpromotables() throws IOException { try { String id = "0"; Engine.IndexResult result = indexDoc(primary, "_doc", id); - PlainActionFuture f = PlainActionFuture.newFuture(); + PlainActionFuture f = new PlainActionFuture<>(); PostWriteRefresh postWriteRefresh = new PostWriteRefresh(transportService); ReplicationGroup replicationGroup = mock(ReplicationGroup.class); @@ -188,7 +188,7 @@ public void testWaitUntilRefreshReplicaShard() throws IOException { try { String id = "0"; Engine.IndexResult result = indexDoc(replica, "_doc", id); - PlainActionFuture f = PlainActionFuture.newFuture(); + PlainActionFuture f = new PlainActionFuture<>(); PostWriteRefresh.refreshReplicaShard(WriteRequest.RefreshPolicy.WAIT_UNTIL, replica, result.getTranslogLocation(), f); Releasable releasable = simulateScheduledRefresh(replica, false); f.actionGet(); @@ -207,7 +207,7 @@ public void testImmediateRefreshReplicaShard() throws IOException { try { String id = "0"; Engine.IndexResult result = indexDoc(replica, "_doc", id); - PlainActionFuture f = PlainActionFuture.newFuture(); + PlainActionFuture f = new PlainActionFuture<>(); PostWriteRefresh.refreshReplicaShard(WriteRequest.RefreshPolicy.IMMEDIATE, replica, result.getTranslogLocation(), f); f.actionGet(); assertEngineContainsIdNoRefresh(replica, id); @@ -221,7 +221,7 @@ public void testWaitForWithNullLocationCompletedImmediately() throws IOException recoverShardFromStore(primary); ReplicationGroup realReplicationGroup = primary.getReplicationGroup(); try { - PlainActionFuture f = PlainActionFuture.newFuture(); + PlainActionFuture f = new PlainActionFuture<>(); PostWriteRefresh postWriteRefresh = new PostWriteRefresh(transportService); ReplicationGroup replicationGroup = mock(ReplicationGroup.class); diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index 25d9f9fec884b..f4c8e2baa94cf 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -149,7 +149,7 @@ public void testReplicaNoRefreshCall() throws Exception { TestRequest request = new TestRequest(); request.setRefreshPolicy(RefreshPolicy.NONE); // The default, but we'll set it anyway just to be explicit TestAction testAction = new TestAction(); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); testAction.dispatchedShardOperationOnReplica(request, indexShard, future); final TransportReplicationAction.ReplicaResult result = future.actionGet(); CapturingActionListener listener = new CapturingActionListener<>(); @@ -188,7 +188,7 @@ public void testReplicaImmediateRefresh() throws Exception { TestRequest request = new TestRequest(); request.setRefreshPolicy(RefreshPolicy.IMMEDIATE); TestAction testAction = new TestAction(); - final PlainActionFuture future = 
PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); testAction.dispatchedShardOperationOnReplica(request, indexShard, future); final TransportReplicationAction.ReplicaResult result = future.actionGet(); CapturingActionListener listener = new CapturingActionListener<>(); @@ -235,7 +235,7 @@ public void testReplicaWaitForRefresh() throws Exception { TestRequest request = new TestRequest(); request.setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); TestAction testAction = new TestAction(); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); testAction.dispatchedShardOperationOnReplica(request, indexShard, future); final TransportReplicationAction.ReplicaResult result = future.actionGet(); CapturingActionListener listener = new CapturingActionListener<>(); @@ -273,7 +273,7 @@ public void testDocumentFailureInShardOperationOnPrimary() { public void testDocumentFailureInShardOperationOnReplica() throws Exception { TestRequest request = new TestRequest(); TestAction testAction = new TestAction(randomBoolean(), true); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); testAction.dispatchedShardOperationOnReplica(request, indexShard, future); final TransportReplicationAction.ReplicaResult result = future.actionGet(); CapturingActionListener listener = new CapturingActionListener<>(); diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsTests.java b/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsTests.java index 2c0b03194bb72..f1bf8d1dcb0dd 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsTests.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsTests.java @@ -165,18 +165,16 @@ public void testRandomPayloadWithDelimitedPayloadTokenFilter() throws IOExceptio .build(); createIndex("test", setting, mapping); - client().prepareIndex("test") - .setId(Integer.toString(1)) + prepareIndex("test").setId(Integer.toString(1)) .setSource(jsonBuilder().startObject().field("field", queryString).endObject()) - .execute() - .actionGet(); + .get(); client().admin().indices().prepareRefresh().get(); TermVectorsRequestBuilder resp = client().prepareTermVectors("test", Integer.toString(1)) .setPayloads(true) .setOffsets(true) .setPositions(true) .setSelectedFields(); - TermVectorsResponse response = resp.execute().actionGet(); + TermVectorsResponse response = resp.get(); assertThat("doc id 1 doesn't exists but should", response.isExists(), equalTo(true)); Fields fields = response.getFields(); assertThat(fields.size(), equalTo(1)); diff --git a/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java b/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java index 32e9b214ab530..a776f74febc1b 100644 --- a/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java +++ b/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java @@ -22,7 +22,7 @@ import org.elasticsearch.action.delete.DeleteAction; import org.elasticsearch.action.get.GetAction; import org.elasticsearch.action.index.IndexAction; -import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.common.settings.Settings; import 
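The `GetTermVectorsTests` hunk above also folds `execute().actionGet()` into the equivalent `get()` shortcut and routes indexing through the shared `prepareIndex` test helper instead of `client().prepareIndex`. A sketch of the resulting call shape (the field name and value are placeholders):

```java
// Before:
// client().prepareIndex("test").setId("1").setSource(...).execute().actionGet();

// After: test-base-class helper plus get(), which is shorthand for execute().actionGet().
prepareIndex("test").setId("1")
    .setSource(jsonBuilder().startObject().field("field", "some text").endObject())
    .get();
```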
org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; @@ -48,7 +48,7 @@ public abstract class AbstractClientHeadersTestCase extends ESTestCase { private static final ActionType[] ACTIONS = new ActionType[] { // client actions GetAction.INSTANCE, - SearchAction.INSTANCE, + TransportSearchAction.TYPE, DeleteAction.INSTANCE, DeleteStoredScriptAction.INSTANCE, IndexAction.INSTANCE, @@ -97,7 +97,7 @@ public void testActions() { // choosing arbitrary top level actions to test client.prepareGet("idx", "id").execute(new AssertingActionListener<>(GetAction.NAME, client.threadPool())); - client.prepareSearch().execute(new AssertingActionListener<>(SearchAction.NAME, client.threadPool())); + client.prepareSearch().execute(new AssertingActionListener<>(TransportSearchAction.TYPE.name(), client.threadPool())); client.prepareDelete("idx", "id").execute(new AssertingActionListener<>(DeleteAction.NAME, client.threadPool())); client.admin() .cluster() diff --git a/server/src/test/java/org/elasticsearch/client/internal/OriginSettingClientTests.java b/server/src/test/java/org/elasticsearch/client/internal/OriginSettingClientTests.java index 3a93f559284ca..5de0c628c83cd 100644 --- a/server/src/test/java/org/elasticsearch/client/internal/OriginSettingClientTests.java +++ b/server/src/test/java/org/elasticsearch/client/internal/OriginSettingClientTests.java @@ -37,7 +37,6 @@ protected void ActionListener listener ) { assertEquals(origin, threadPool().getThreadContext().getTransient(ThreadContext.ACTION_ORIGIN_TRANSIENT_NAME)); - super.doExecute(action, request, listener); } }; diff --git a/server/src/test/java/org/elasticsearch/client/internal/ParentTaskAssigningClientTests.java b/server/src/test/java/org/elasticsearch/client/internal/ParentTaskAssigningClientTests.java index 0f12076dd53b6..2c2e131b8c5ad 100644 --- a/server/src/test/java/org/elasticsearch/client/internal/ParentTaskAssigningClientTests.java +++ b/server/src/test/java/org/elasticsearch/client/internal/ParentTaskAssigningClientTests.java @@ -33,7 +33,6 @@ protected void ActionListener listener ) { assertEquals(parentTaskId[0], request.getParentTask()); - super.doExecute(action, request, listener); } }; diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java index 048eb24a39a66..4b96e7d447475 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.ClusterChangedEventUtils; import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; @@ -549,7 +550,7 @@ private static ClusterState executeIndicesChangesTest( } final ClusterState newState = nextState(previousState, changeClusterUUID, addedIndices, delIndices, 0); ClusterChangedEvent event = new ClusterChangedEvent("_na_", newState, previousState); - final List addsFromEvent = event.indicesCreated(); + final List addsFromEvent = ClusterChangedEventUtils.indicesCreated(event); List delsFromEvent = event.indicesDeleted(); assertThat(new HashSet<>(addsFromEvent), equalTo(addedIndices.stream().map(Index::getName).collect(Collectors.toSet()))); assertThat(new 
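In `AbstractClientHeadersTestCase` the search `ActionType` is now referenced through its transport action class: `SearchAction.INSTANCE` and `SearchAction.NAME` become `TransportSearchAction.TYPE` and `TransportSearchAction.TYPE.name()`, deriving the string name from the type instead of keeping a parallel constant:

```java
import org.elasticsearch.action.search.TransportSearchAction;

// The ActionType constant and its name now live on the transport action class:
client.prepareSearch()
    .execute(new AssertingActionListener<>(TransportSearchAction.TYPE.name(), client.threadPool()));
```

Note also that `OriginSettingClientTests` and `ParentTaskAssigningClientTests` drop the `super.doExecute(...)` forwarding call, so those stubbed clients now only assert on the request and deliberately never execute it.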
HashSet<>(delsFromEvent), equalTo(new HashSet<>(delIndices))); diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java index 87401562edd81..ff0f166eb8339 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java @@ -45,6 +45,7 @@ import org.elasticsearch.indices.EmptySystemIndices; import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.test.gateway.TestGatewayAllocator; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -155,7 +156,7 @@ public void testRegisterAllocationDeciderDuplicate() { public Collection createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { return Collections.singletonList(new EnableAllocationDecider(clusterSettings)); } - }), clusterInfoService, null, threadPool, EmptySystemIndices.INSTANCE, WriteLoadForecaster.DEFAULT) + }), clusterInfoService, null, threadPool, EmptySystemIndices.INSTANCE, WriteLoadForecaster.DEFAULT, TelemetryProvider.NOOP) ); assertEquals(e.getMessage(), "Cannot specify allocation decider [" + EnableAllocationDecider.class.getName() + "] twice"); } @@ -166,7 +167,7 @@ public void testRegisterAllocationDecider() { public Collection createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { return Collections.singletonList(new FakeAllocationDecider()); } - }), clusterInfoService, null, threadPool, EmptySystemIndices.INSTANCE, WriteLoadForecaster.DEFAULT); + }), clusterInfoService, null, threadPool, EmptySystemIndices.INSTANCE, WriteLoadForecaster.DEFAULT, TelemetryProvider.NOOP); assertTrue(module.deciderList.stream().anyMatch(d -> d.getClass().equals(FakeAllocationDecider.class))); } @@ -176,7 +177,7 @@ private ClusterModule newClusterModuleWithShardsAllocator(Settings settings, Str public Map> getShardsAllocators(Settings settings, ClusterSettings clusterSettings) { return Collections.singletonMap(name, supplier); } - }), clusterInfoService, null, threadPool, EmptySystemIndices.INSTANCE, WriteLoadForecaster.DEFAULT); + }), clusterInfoService, null, threadPool, EmptySystemIndices.INSTANCE, WriteLoadForecaster.DEFAULT, TelemetryProvider.NOOP); } public void testRegisterShardsAllocator() { @@ -208,7 +209,8 @@ public void testUnknownShardsAllocator() { null, threadPool, EmptySystemIndices.INSTANCE, - WriteLoadForecaster.DEFAULT + WriteLoadForecaster.DEFAULT, + TelemetryProvider.NOOP ) ); assertEquals("Unknown ShardsAllocator [dne]", e.getMessage()); @@ -272,7 +274,8 @@ public void testRejectsReservedExistingShardsAllocatorName() { null, threadPool, EmptySystemIndices.INSTANCE, - WriteLoadForecaster.DEFAULT + WriteLoadForecaster.DEFAULT, + TelemetryProvider.NOOP ); expectThrows(IllegalArgumentException.class, () -> clusterModule.setExistingShardsAllocators(new TestGatewayAllocator())); } @@ -286,7 +289,8 @@ public void testRejectsDuplicateExistingShardsAllocatorName() { null, threadPool, EmptySystemIndices.INSTANCE, - WriteLoadForecaster.DEFAULT + WriteLoadForecaster.DEFAULT, + TelemetryProvider.NOOP ); expectThrows(IllegalArgumentException.class, () -> clusterModule.setExistingShardsAllocators(new TestGatewayAllocator())); } diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateObserverTests.java 
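`ClusterModuleTests` threads a new trailing constructor argument through every `ClusterModule` instantiation. The leading arguments are elided in the diff context and stay elided here; the shape of the change, assuming the argument order shown in the hunks above:

```java
import org.elasticsearch.telemetry.TelemetryProvider;

ClusterModule module = new ClusterModule(
    /* ... existing leading arguments unchanged ... */
    clusterInfoService,
    null,
    threadPool,
    EmptySystemIndices.INSTANCE,
    WriteLoadForecaster.DEFAULT,
    TelemetryProvider.NOOP   // new final parameter; tests pass the no-op provider
);
```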
b/server/src/test/java/org/elasticsearch/cluster/ClusterStateObserverTests.java index 3c103990bbbca..459ba9b9b5055 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateObserverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateObserverTests.java @@ -72,7 +72,7 @@ public void testWaitForState() throws Exception { final ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(DiscoveryNodes.builder()).build(); final ClusterService clusterService = mock(ClusterService.class); when(clusterService.state()).thenReturn(clusterState); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); ClusterStateObserver.waitForState(clusterService, new ThreadContext(Settings.EMPTY), new ClusterStateObserver.Listener() { @Override public void onNewClusterState(ClusterState state) { diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java index 56c82ae12dc45..e0538603573f7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java @@ -1248,7 +1248,7 @@ public void testHasMixedSystemIndexVersions() throws IOException { // equal mappings versions { var builder = ClusterState.builder(buildClusterState()); - builder.compatibilityVersions( + builder.nodeIdsToCompatibilityVersions( Map.of( "node1", new CompatibilityVersions( @@ -1268,7 +1268,7 @@ public void testHasMixedSystemIndexVersions() throws IOException { // unequal mappings versions { var builder = ClusterState.builder(buildClusterState()); - builder.compatibilityVersions( + builder.nodeIdsToCompatibilityVersions( Map.of( "node1", new CompatibilityVersions( @@ -1288,7 +1288,7 @@ public void testHasMixedSystemIndexVersions() throws IOException { // one node has a mappings version that the other is missing { var builder = ClusterState.builder(buildClusterState()); - builder.compatibilityVersions( + builder.nodeIdsToCompatibilityVersions( Map.of( "node1", new CompatibilityVersions( diff --git a/server/src/test/java/org/elasticsearch/cluster/action/index/MappingUpdatedActionTests.java b/server/src/test/java/org/elasticsearch/cluster/action/index/MappingUpdatedActionTests.java index 65a5093919e02..05969b14f2f9b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/action/index/MappingUpdatedActionTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/action/index/MappingUpdatedActionTests.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.cluster.action.index; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.mapping.put.AutoPutMappingAction; import org.elasticsearch.action.support.PlainActionFuture; @@ -128,7 +127,7 @@ protected void sendUpdateMapping(Index index, Mapping mappingUpdate, ActionListe } public void testSendUpdateMappingUsingAutoPutMappingAction() { - DiscoveryNodes nodes = DiscoveryNodes.builder().add(DiscoveryNodeUtils.builder("first").version(Version.V_7_9_0).build()).build(); + DiscoveryNodes nodes = DiscoveryNodes.builder().add(DiscoveryNodeUtils.builder("first").build()).build(); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).nodes(nodes).build(); ClusterService clusterService = mock(ClusterService.class); when(clusterService.state()).thenReturn(clusterState); diff --git 
a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationStateTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationStateTests.java index ed82391022629..3851d13dc2c15 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationStateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationStateTests.java @@ -106,11 +106,11 @@ public void testStartJoinBeforeBootstrap() { assertThat(cs1.getCurrentTerm(), equalTo(0L)); StartJoinRequest startJoinRequest1 = new StartJoinRequest(randomFrom(node1, node2), randomLongBetween(1, 5)); Join v1 = cs1.handleStartJoin(startJoinRequest1); - assertThat(v1.getTargetNode(), equalTo(startJoinRequest1.getSourceNode())); - assertThat(v1.getSourceNode(), equalTo(node1)); - assertThat(v1.getTerm(), equalTo(startJoinRequest1.getTerm())); - assertThat(v1.getLastAcceptedTerm(), equalTo(initialStateNode1.term())); - assertThat(v1.getLastAcceptedVersion(), equalTo(initialStateNode1.version())); + assertThat(v1.masterCandidateNode(), equalTo(startJoinRequest1.getMasterCandidateNode())); + assertThat(v1.votingNode(), equalTo(node1)); + assertThat(v1.term(), equalTo(startJoinRequest1.getTerm())); + assertThat(v1.lastAcceptedTerm(), equalTo(initialStateNode1.term())); + assertThat(v1.lastAcceptedVersion(), equalTo(initialStateNode1.version())); assertThat(cs1.getCurrentTerm(), equalTo(startJoinRequest1.getTerm())); StartJoinRequest startJoinRequest2 = new StartJoinRequest( @@ -129,11 +129,11 @@ public void testStartJoinAfterBootstrap() { StartJoinRequest startJoinRequest1 = new StartJoinRequest(randomFrom(node1, node2), randomLongBetween(1, 5)); Join v1 = cs1.handleStartJoin(startJoinRequest1); - assertThat(v1.getTargetNode(), equalTo(startJoinRequest1.getSourceNode())); - assertThat(v1.getSourceNode(), equalTo(node1)); - assertThat(v1.getTerm(), equalTo(startJoinRequest1.getTerm())); - assertThat(v1.getLastAcceptedTerm(), equalTo(state1.term())); - assertThat(v1.getLastAcceptedVersion(), equalTo(state1.version())); + assertThat(v1.masterCandidateNode(), equalTo(startJoinRequest1.getMasterCandidateNode())); + assertThat(v1.votingNode(), equalTo(node1)); + assertThat(v1.term(), equalTo(startJoinRequest1.getTerm())); + assertThat(v1.lastAcceptedTerm(), equalTo(state1.term())); + assertThat(v1.lastAcceptedVersion(), equalTo(state1.version())); assertThat(cs1.getCurrentTerm(), equalTo(startJoinRequest1.getTerm())); StartJoinRequest startJoinRequest2 = new StartJoinRequest( @@ -212,7 +212,7 @@ public void testJoinWithHigherAcceptedTerm() { Join badJoin = new Join( randomFrom(node1, node2), node1, - v1.getTerm(), + v1.term(), randomLongBetween(state2.term() + 1, 30), randomNonNegativeLong() ); @@ -234,7 +234,7 @@ public void testJoinWithSameAcceptedTermButHigherVersion() { StartJoinRequest startJoinRequest2 = new StartJoinRequest(node2, randomLongBetween(startJoinRequest1.getTerm() + 1, 10)); Join v1 = cs1.handleStartJoin(startJoinRequest2); - Join badJoin = new Join(randomFrom(node1, node2), node1, v1.getTerm(), state2.term(), randomLongBetween(state2.version() + 1, 30)); + Join badJoin = new Join(randomFrom(node1, node2), node1, v1.term(), state2.term(), randomLongBetween(state2.version() + 1, 30)); assertThat( expectThrows(CoordinationStateRejectedException.class, () -> cs1.handleJoin(badJoin)).getMessage(), containsString("higher than current last accepted version") @@ -253,7 +253,7 @@ public void testJoinWithLowerLastAcceptedTermWinsElection() { StartJoinRequest 
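`CoordinationStateTests` reflects a rename of the `Join` accessors to record-style, role-descriptive names: the voting node and the master candidate replace the ambiguous "source" and "target". The mapping, as used by the updated assertions:

```java
// join.getSourceNode()             -> join.votingNode()           (the node casting the vote)
// join.getTargetNode()             -> join.masterCandidateNode()  (the node being voted for)
// join.getTerm()                   -> join.term()
// join.getLastAcceptedTerm()       -> join.lastAcceptedTerm()
// join.getLastAcceptedVersion()    -> join.lastAcceptedVersion()
// startJoinRequest.getSourceNode() -> startJoinRequest.getMasterCandidateNode()
assertThat(v1.masterCandidateNode(), equalTo(startJoinRequest1.getMasterCandidateNode()));
assertThat(v1.votingNode(), equalTo(node1));
assertThat(v1.term(), equalTo(startJoinRequest1.getTerm()));
```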
startJoinRequest2 = new StartJoinRequest(node2, randomLongBetween(startJoinRequest1.getTerm() + 1, 10)); Join v1 = cs1.handleStartJoin(startJoinRequest2); - Join join = new Join(node1, node1, v1.getTerm(), randomLongBetween(0, state2.term() - 1), randomLongBetween(0, 20)); + Join join = new Join(node1, node1, v1.term(), randomLongBetween(0, state2.term() - 1), randomLongBetween(0, 20)); assertTrue(cs1.handleJoin(join)); assertTrue(cs1.electionWon()); assertTrue(cs1.containsJoinVoteFor(node1)); @@ -275,7 +275,7 @@ public void testJoinWithSameLastAcceptedTermButLowerOrSameVersionWinsElection() StartJoinRequest startJoinRequest2 = new StartJoinRequest(node2, randomLongBetween(startJoinRequest1.getTerm() + 1, 10)); Join v1 = cs1.handleStartJoin(startJoinRequest2); - Join join = new Join(node1, node1, v1.getTerm(), state2.term(), randomLongBetween(0, state2.version())); + Join join = new Join(node1, node1, v1.term(), state2.term(), randomLongBetween(0, state2.version())); assertTrue(cs1.handleJoin(join)); assertTrue(cs1.electionWon()); assertTrue(cs1.containsJoinVoteFor(node1)); @@ -296,7 +296,7 @@ public void testJoinDoesNotWinElection() { StartJoinRequest startJoinRequest2 = new StartJoinRequest(node2, randomLongBetween(startJoinRequest1.getTerm() + 1, 10)); Join v1 = cs1.handleStartJoin(startJoinRequest2); - Join join = new Join(node2, node1, v1.getTerm(), randomLongBetween(0, state2.term()), randomLongBetween(0, state2.version())); + Join join = new Join(node2, node1, v1.term(), randomLongBetween(0, state2.term()), randomLongBetween(0, state2.version())); assertTrue(cs1.handleJoin(join)); assertFalse(cs1.electionWon()); assertEquals(cs1.getLastPublishedVersion(), 0L); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java index 50bbbad05a778..82a172d1dccb8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.monitor.StatusInfo; import org.elasticsearch.tasks.TaskManager; @@ -92,7 +93,7 @@ public void testJoinDeduplication() { Function.identity(), (listener, term) -> listener.onResponse(null), CompatibilityVersionsUtils.staticCurrent(), - Set.of() + new FeatureService(List.of()) ); transportService.start(); @@ -113,7 +114,7 @@ public void testJoinDeduplication() { assertEquals(node1, capturedRequest1.node()); assertTrue(joinHelper.isJoinPending()); - final var join1Term = optionalJoin1.stream().mapToLong(Join::getTerm).findFirst().orElse(0L); + final var join1Term = optionalJoin1.stream().mapToLong(Join::term).findFirst().orElse(0L); final var join1Status = new JoinStatus(node1, join1Term, PENDING_JOIN_WAITING_RESPONSE, TimeValue.ZERO); assertThat(joinHelper.getInFlightJoinStatuses(), equalTo(List.of(join1Status))); @@ -127,7 +128,7 @@ public void testJoinDeduplication() { CapturedRequest capturedRequest2 = capturedRequests2[0]; assertEquals(node2, capturedRequest2.node()); - final var join2Term = optionalJoin2.stream().mapToLong(Join::getTerm).findFirst().orElse(0L); + final var join2Term = 
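`JoinHelperTests` (and `NodeJoinExecutorTests` further down) replace a bare set of feature names with a `FeatureService` assembled from `FeatureSpecification`s. Constructing one looks like this, mirroring the specification used by the new join tests below:

```java
import org.elasticsearch.features.FeatureService;
import org.elasticsearch.features.FeatureSpecification;
import org.elasticsearch.features.NodeFeature;

import java.util.List;
import java.util.Set;

// Empty service, as passed where Set.of() used to go:
FeatureService empty = new FeatureService(List.of());

// Service that registers concrete node features:
FeatureService featureService = new FeatureService(List.of(new FeatureSpecification() {
    @Override
    public Set<NodeFeature> getFeatures() {
        return Set.of(new NodeFeature("f1"), new NodeFeature("f2"));
    }
}));
```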
optionalJoin2.stream().mapToLong(Join::term).findFirst().orElse(0L); final var join2Status = new JoinStatus(node2, join2Term, PENDING_JOIN_WAITING_RESPONSE, TimeValue.ZERO); assertThat( new HashSet<>(joinHelper.getInFlightJoinStatuses()), @@ -260,7 +261,7 @@ public void testJoinFailureOnUnhealthyNodes() { Function.identity(), (listener, term) -> listener.onResponse(null), CompatibilityVersionsUtils.staticCurrent(), - Set.of() + new FeatureService(List.of()) ); transportService.start(); @@ -337,7 +338,7 @@ public void testLatestStoredStateFailure() { Function.identity(), (listener, term) -> listener.onFailure(new ElasticsearchException("simulated")), CompatibilityVersionsUtils.staticCurrent(), - Set.of() + new FeatureService(List.of()) ); final var joinAccumulator = joinHelper.new CandidateJoinAccumulator(); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/LinearizabilityCheckerTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/LinearizabilityCheckerTests.java index 34fe7eae32fcb..83df7e7d18f5c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/LinearizabilityCheckerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/LinearizabilityCheckerTests.java @@ -143,6 +143,27 @@ public void testRegisterWithLinearizableHistory() throws LinearizabilityCheckAbo assertTrue(LinearizabilityChecker.isLinearizable(registerSpec, history)); } + public void testRegisterHistoryVisualisation() { + final History history = new History(); + int write0 = history.invoke(42); // invoke write(42) + history.respond(history.invoke(null), 42); // read, returns 42 + history.respond(write0, null); // write(42) succeeds + + int write1 = history.invoke(24); // invoke write 24 + history.respond(history.invoke(null), 42); // read returns 42 + history.respond(history.invoke(null), 24); // subsequent read returns 24 + history.respond(write1, null); // write(24) succeeds + + assertEquals(""" + Partition 0 + 42 XXX null (0) + null X 42 (1) + 24 XXXXX null (2) + null X 42 (3) + null X 24 (4) + """, LinearizabilityChecker.visualize(registerSpec, history, o -> { throw new AssertionError("history was complete"); })); + } + public void testRegisterWithNonLinearizableHistory() throws LinearizabilityCheckAborted { final History history = new History(); int call0 = history.invoke(42); // 0: invoke write 42 diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/MessagesTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/MessagesTests.java index d8e038496a8d0..91d893121c2ab 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/MessagesTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/MessagesTests.java @@ -43,46 +43,46 @@ public void testJoinEqualsHashCodeSerialization() { // change sourceNode new Join( createNode(randomAlphaOfLength(20)), - join.getTargetNode(), - join.getTerm(), - join.getLastAcceptedTerm(), - join.getLastAcceptedVersion() + join.masterCandidateNode(), + join.term(), + join.lastAcceptedTerm(), + join.lastAcceptedVersion() ); case 1 -> // change targetNode new Join( - join.getSourceNode(), + join.votingNode(), createNode(randomAlphaOfLength(20)), - join.getTerm(), - join.getLastAcceptedTerm(), - join.getLastAcceptedVersion() + join.term(), + join.lastAcceptedTerm(), + join.lastAcceptedVersion() ); case 2 -> // change term new Join( - join.getSourceNode(), - join.getTargetNode(), - randomValueOtherThan(join.getTerm(), 
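The new `testRegisterHistoryVisualisation` above pins down the ASCII rendering of a register history: one row per operation, the input on the left, a run of `X`s spanning the invoke-to-response window, and the output with the operation id on the right. The `History` DSL it drives:

```java
// invoke(...) returns an operation id; respond(id, value) completes that operation.
final History history = new History();
int write = history.invoke(42);            // start write(42)
history.respond(history.invoke(null), 42); // a read completing while the write is in flight; it returns 42
history.respond(write, null);              // write(42) completes

String rendering = LinearizabilityChecker.visualize(
    registerSpec,
    history,
    o -> { throw new AssertionError("history was complete"); } // no pending operations to resolve
);
```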
ESTestCase::randomNonNegativeLong), - join.getLastAcceptedTerm(), - join.getLastAcceptedVersion() + join.votingNode(), + join.masterCandidateNode(), + randomValueOtherThan(join.term(), ESTestCase::randomNonNegativeLong), + join.lastAcceptedTerm(), + join.lastAcceptedVersion() ); case 3 -> // change last accepted term new Join( - join.getSourceNode(), - join.getTargetNode(), - join.getTerm(), - randomValueOtherThan(join.getLastAcceptedTerm(), ESTestCase::randomNonNegativeLong), - join.getLastAcceptedVersion() + join.votingNode(), + join.masterCandidateNode(), + join.term(), + randomValueOtherThan(join.lastAcceptedTerm(), ESTestCase::randomNonNegativeLong), + join.lastAcceptedVersion() ); case 4 -> // change version new Join( - join.getSourceNode(), - join.getTargetNode(), - join.getTerm(), - join.getLastAcceptedTerm(), - randomValueOtherThan(join.getLastAcceptedVersion(), ESTestCase::randomNonNegativeLong) + join.votingNode(), + join.masterCandidateNode(), + join.term(), + join.lastAcceptedTerm(), + randomValueOtherThan(join.lastAcceptedVersion(), ESTestCase::randomNonNegativeLong) ); default -> throw new AssertionError(); } @@ -175,7 +175,7 @@ public void testStartJoinRequestEqualsHashCodeSerialization() { case 1 -> // change term new StartJoinRequest( - startJoinRequest.getSourceNode(), + startJoinRequest.getMasterCandidateNode(), randomValueOtherThan(startJoinRequest.getTerm(), ESTestCase::randomNonNegativeLong) ); default -> throw new AssertionError(); @@ -224,7 +224,7 @@ public void testJoinRequestEqualsHashCodeSerialization() { randomNonNegativeLong() ); JoinRequest initialJoinRequest = new JoinRequest( - initialJoin.getSourceNode(), + initialJoin.votingNode(), CompatibilityVersionsUtils.fakeSystemIndicesRandom(), Set.of(generateRandomStringArray(10, 10, false)), randomNonNegativeLong(), diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java index 46f03aef76b90..3d8f7caaa55bc 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java @@ -33,6 +33,8 @@ import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -72,7 +74,7 @@ public class NodeJoinExecutorTests extends ESTestCase { - private static final ActionListener NOT_COMPLETED_LISTENER = ActionTestUtils.assertNoFailureListener(t -> {}); + private static final ActionListener NO_FAILURE_LISTENER = ActionTestUtils.assertNoFailureListener(t -> {}); public void testPreventJoinClusterWithNewerIndices() { Settings.builder().build(); @@ -157,6 +159,74 @@ public void testPreventJoinClusterWithUnsupportedNodeVersions() { } } + public void testPreventJoinClusterWithMissingFeatures() throws Exception { + AllocationService allocationService = createAllocationService(); + RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + FeatureService featureService = new FeatureService(List.of(new FeatureSpecification() { + @Override + public Set getFeatures() { + return Set.of(new NodeFeature("f1"), new NodeFeature("f2")); + } + })); 
+ + NodeJoinExecutor executor = new NodeJoinExecutor(allocationService, rerouteService, featureService); + + DiscoveryNode masterNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + DiscoveryNode otherNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(masterNode).localNodeId(masterNode.getId()).masterNodeId(masterNode.getId()).add(otherNode)) + .nodeFeatures(Map.of(masterNode.getId(), Set.of("f1", "f2"), otherNode.getId(), Set.of("f1", "f2"))) + .build(); + + DiscoveryNode newNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + ClusterStateTaskExecutorUtils.executeAndAssertSuccessful( + clusterState, + executor, + List.of( + JoinTask.singleNode( + newNode, + CompatibilityVersionsUtils.staticCurrent(), + Set.of("f1"), + TEST_REASON, + ActionListener.wrap( + o -> fail("Should have failed"), + t -> assertThat(t.getMessage(), containsString("is missing required features [f2]")) + ), + 0L + ) + ) + ); + } + + public void testCanJoinClusterWithMissingIncompleteFeatures() throws Exception { + AllocationService allocationService = createAllocationService(); + RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + FeatureService featureService = new FeatureService(List.of(new FeatureSpecification() { + @Override + public Set getFeatures() { + return Set.of(new NodeFeature("f1"), new NodeFeature("f2")); + } + })); + + NodeJoinExecutor executor = new NodeJoinExecutor(allocationService, rerouteService, featureService); + + DiscoveryNode masterNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + DiscoveryNode otherNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(masterNode).localNodeId(masterNode.getId()).masterNodeId(masterNode.getId()).add(otherNode)) + .nodeFeatures(Map.of(masterNode.getId(), Set.of("f1", "f2"), otherNode.getId(), Set.of("f1"))) + .build(); + + DiscoveryNode newNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + ClusterStateTaskExecutorUtils.executeAndAssertSuccessful( + clusterState, + executor, + List.of( + JoinTask.singleNode(newNode, CompatibilityVersionsUtils.staticCurrent(), Set.of("f1"), TEST_REASON, NO_FAILURE_LISTENER, 0L) + ) + ); + } + public void testSuccess() { Settings.builder().build(); Metadata.Builder metaBuilder = Metadata.builder(); @@ -205,7 +275,7 @@ public void testUpdatesNodeWithNewRoles() throws Exception { when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); - final NodeJoinExecutor executor = new NodeJoinExecutor(allocationService, rerouteService); + final NodeJoinExecutor executor = new NodeJoinExecutor(allocationService, rerouteService, createFeatureService()); final DiscoveryNode masterNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); @@ -226,14 +296,7 @@ public void testUpdatesNodeWithNewRoles() throws Exception { clusterState, executor, List.of( - JoinTask.singleNode( - actualNode, - CompatibilityVersionsUtils.staticCurrent(), - Set.of(), - TEST_REASON, - NOT_COMPLETED_LISTENER, - 0L - ) + JoinTask.singleNode(actualNode, CompatibilityVersionsUtils.staticCurrent(), Set.of(), TEST_REASON, NO_FAILURE_LISTENER, 0L) ) ); @@ -245,7 +308,7 @@ public void testRejectsStatesWithStaleTerm() { final RerouteService rerouteService 
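The two new `NodeJoinExecutorTests` cases above fix the semantics of feature gating on join: a candidate is rejected only if it lacks a feature that every current cluster member advertises, while a feature that is itself incomplete across the cluster (`f2` known to the master but not to `otherNode`) does not block the join. A purely illustrative predicate capturing that rule, not the `NodeJoinExecutor` implementation:

```java
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Illustration only: the features required of a joining node are the
// intersection of the features advertised by all current members.
static Set<String> missingRequiredFeatures(Map<String, Set<String>> nodeFeatures, Set<String> joining) {
    Set<String> required = new HashSet<>();
    boolean first = true;
    for (Set<String> features : nodeFeatures.values()) {
        if (first) {
            required.addAll(features);
            first = false;
        } else {
            required.retainAll(features); // drop features not shared by every member
        }
    }
    required.removeAll(joining);
    return required; // non-empty -> join fails with "is missing required features [...]"
}
```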
= (reason, priority, listener) -> listener.onResponse(null); final long executorTerm = randomLongBetween(0L, Long.MAX_VALUE - 1); - final var executor = new NodeJoinExecutor(allocationService, rerouteService); + final var executor = new NodeJoinExecutor(allocationService, rerouteService, createFeatureService()); final var masterNode = DiscoveryNodeUtils.create(UUIDs.randomBase64UUID(random())); final var clusterState = ClusterState.builder(ClusterName.DEFAULT) @@ -270,7 +333,7 @@ public void testRejectsStatesWithStaleTerm() { CompatibilityVersionsUtils.staticCurrent(), Set.of(), TEST_REASON, - NOT_COMPLETED_LISTENER, + NO_FAILURE_LISTENER, executorTerm ) ) @@ -282,7 +345,7 @@ public void testRejectsStatesWithStaleTerm() { CompatibilityVersionsUtils.staticCurrent(), Set.of(), TEST_REASON, - NOT_COMPLETED_LISTENER + NO_FAILURE_LISTENER ) ), executorTerm @@ -301,7 +364,7 @@ public void testRejectsStatesWithOtherMaster() { final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); final long executorTerm = randomNonNegativeLong(); - final var executor = new NodeJoinExecutor(allocationService, rerouteService); + final var executor = new NodeJoinExecutor(allocationService, rerouteService, createFeatureService()); final var masterNode = DiscoveryNodeUtils.create(UUIDs.randomBase64UUID(random())); final var localNode = DiscoveryNodeUtils.create(UUIDs.randomBase64UUID(random())); @@ -334,7 +397,7 @@ public void testRejectsStatesWithOtherMaster() { CompatibilityVersionsUtils.staticCurrent(), Set.of(), TEST_REASON, - NOT_COMPLETED_LISTENER, + NO_FAILURE_LISTENER, executorTerm ) ) @@ -346,7 +409,7 @@ public void testRejectsStatesWithOtherMaster() { CompatibilityVersionsUtils.staticCurrent(), Set.of(), TEST_REASON, - NOT_COMPLETED_LISTENER + NO_FAILURE_LISTENER ) ), executorTerm @@ -365,7 +428,7 @@ public void testRejectsStatesWithNoMasterIfNotBecomingMaster() { final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); final long executorTerm = randomNonNegativeLong(); - final var executor = new NodeJoinExecutor(allocationService, rerouteService); + final var executor = new NodeJoinExecutor(allocationService, rerouteService, createFeatureService()); final var masterNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); final var clusterState = ClusterState.builder(ClusterName.DEFAULT) @@ -389,7 +452,7 @@ public void testRejectsStatesWithNoMasterIfNotBecomingMaster() { CompatibilityVersionsUtils.staticCurrent(), Set.of(), TEST_REASON, - NOT_COMPLETED_LISTENER, + NO_FAILURE_LISTENER, executorTerm ) ), @@ -406,7 +469,7 @@ public void testRemovesOlderNodeInstancesWhenBecomingMaster() throws Exception { final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); final long executorTerm = randomLongBetween(1, Long.MAX_VALUE); - final var executor = new NodeJoinExecutor(allocationService, rerouteService); + final var executor = new NodeJoinExecutor(allocationService, rerouteService, createFeatureService()); final var masterNode = DiscoveryNodeUtils.create(UUIDs.randomBase64UUID(random())); final var otherNodeOld = DiscoveryNodeUtils.create(UUIDs.randomBase64UUID(random())); @@ -438,14 +501,14 @@ public void testRemovesOlderNodeInstancesWhenBecomingMaster() throws Exception { CompatibilityVersionsUtils.staticCurrent(), Set.of(), TEST_REASON, - NOT_COMPLETED_LISTENER + NO_FAILURE_LISTENER ), new JoinTask.NodeJoinTask( otherNodeNew, CompatibilityVersionsUtils.staticCurrent(), Set.of(), TEST_REASON, - 
NOT_COMPLETED_LISTENER + NO_FAILURE_LISTENER ) ), executorTerm @@ -472,7 +535,7 @@ public void testRemovesOlderNodeInstancesWhenBecomingMaster() throws Exception { CompatibilityVersionsUtils.staticCurrent(), Set.of(), TEST_REASON, - NOT_COMPLETED_LISTENER, + NO_FAILURE_LISTENER, executorTerm ), JoinTask.singleNode( @@ -497,7 +560,7 @@ public void testUpdatesVotingConfigExclusionsIfNeeded() throws Exception { final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); final long executorTerm = randomLongBetween(1, Long.MAX_VALUE); - final var executor = new NodeJoinExecutor(allocationService, rerouteService); + final var executor = new NodeJoinExecutor(allocationService, rerouteService, createFeatureService()); final var masterNode = DiscoveryNodeUtils.create(UUIDs.randomBase64UUID(random())); final var otherNode = DiscoveryNodeUtils.builder(UUIDs.randomBase64UUID(random())) @@ -540,14 +603,14 @@ public void testUpdatesVotingConfigExclusionsIfNeeded() throws Exception { CompatibilityVersionsUtils.staticCurrent(), Set.of(), TEST_REASON, - NOT_COMPLETED_LISTENER + NO_FAILURE_LISTENER ), new JoinTask.NodeJoinTask( otherNode, CompatibilityVersionsUtils.staticCurrent(), Set.of(), TEST_REASON, - NOT_COMPLETED_LISTENER + NO_FAILURE_LISTENER ) ), executorTerm @@ -566,7 +629,7 @@ public void testUpdatesVotingConfigExclusionsIfNeeded() throws Exception { CompatibilityVersionsUtils.staticCurrent(), Set.of(), TEST_REASON, - NOT_COMPLETED_LISTENER + NO_FAILURE_LISTENER ) ), executorTerm @@ -582,7 +645,7 @@ public void testUpdatesVotingConfigExclusionsIfNeeded() throws Exception { CompatibilityVersionsUtils.staticCurrent(), Set.of(), TEST_REASON, - NOT_COMPLETED_LISTENER, + NO_FAILURE_LISTENER, executorTerm ) ) @@ -602,7 +665,7 @@ public void testIgnoresOlderTerms() throws Exception { final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); final long currentTerm = randomLongBetween(100, 1000); - final var executor = new NodeJoinExecutor(allocationService, rerouteService); + final var executor = new NodeJoinExecutor(allocationService, rerouteService, createFeatureService()); final var masterNode = DiscoveryNodeUtils.create(UUIDs.randomBase64UUID(random())); final var clusterState = ClusterState.builder(ClusterName.DEFAULT) @@ -630,7 +693,7 @@ public void testIgnoresOlderTerms() throws Exception { public void testDesiredNodesMembershipIsUpgradedWhenNewNodesJoin() throws Exception { final var allocationService = createAllocationService(); final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); - final var executor = new NodeJoinExecutor(allocationService, rerouteService); + final var executor = new NodeJoinExecutor(allocationService, rerouteService, createFeatureService()); final var actualizedDesiredNodes = randomList(0, 5, this::createActualizedDesiredNode); final var pendingDesiredNodes = randomList(0, 5, this::createPendingDesiredNode); @@ -656,7 +719,7 @@ public void testDesiredNodesMembershipIsUpgradedWhenNewNodesJoin() throws Except CompatibilityVersionsUtils.staticCurrent(), Set.of(), TEST_REASON, - NOT_COMPLETED_LISTENER, + NO_FAILURE_LISTENER, 0L ) ) @@ -678,7 +741,7 @@ public void testDesiredNodesMembershipIsUpgradedWhenNewNodesJoin() throws Except public void testDesiredNodesMembershipIsUpgradedWhenANewMasterIsElected() throws Exception { final var allocationService = createAllocationService(); final RerouteService rerouteService = (reason, priority, listener) -> 
listener.onResponse(null); - final var executor = new NodeJoinExecutor(allocationService, rerouteService); + final var executor = new NodeJoinExecutor(allocationService, rerouteService, createFeatureService()); final var actualizedDesiredNodes = randomList(1, 5, this::createPendingDesiredNode); final var pendingDesiredNodes = randomList(0, 5, this::createPendingDesiredNode); @@ -701,7 +764,7 @@ public void testDesiredNodesMembershipIsUpgradedWhenANewMasterIsElected() throws CompatibilityVersionsUtils.staticCurrent(), Set.of(), TEST_REASON, - NOT_COMPLETED_LISTENER + NO_FAILURE_LISTENER ) ), 1L @@ -729,7 +792,7 @@ public void testPerNodeLogging() { when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); - final NodeJoinExecutor executor = new NodeJoinExecutor(allocationService, rerouteService); + final NodeJoinExecutor executor = new NodeJoinExecutor(allocationService, rerouteService, createFeatureService()); final DiscoveryNode masterNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) @@ -811,7 +874,7 @@ public void testResetsNodeLeftGenerationOnNewTerm() throws Exception { when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); - final NodeJoinExecutor executor = new NodeJoinExecutor(allocationService, rerouteService); + final NodeJoinExecutor executor = new NodeJoinExecutor(allocationService, rerouteService, createFeatureService()); final long term = randomLongBetween(0, Long.MAX_VALUE - 1); final DiscoveryNode masterNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); @@ -835,7 +898,7 @@ public void testResetsNodeLeftGenerationOnNewTerm() throws Exception { CompatibilityVersionsUtils.staticCurrent(), Set.of(), TEST_REASON, - NOT_COMPLETED_LISTENER + NO_FAILURE_LISTENER ) ), randomLongBetween(term + 1, Long.MAX_VALUE) @@ -851,7 +914,7 @@ public void testSetsNodeFeaturesWhenRejoining() throws Exception { final AllocationService allocationService = createAllocationService(); final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); - final NodeJoinExecutor executor = new NodeJoinExecutor(allocationService, rerouteService); + final NodeJoinExecutor executor = new NodeJoinExecutor(allocationService, rerouteService, createFeatureService()); final DiscoveryNode masterNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); @@ -875,7 +938,7 @@ public void testSetsNodeFeaturesWhenRejoining() throws Exception { CompatibilityVersionsUtils.staticCurrent(), Set.of("f1", "f2"), TEST_REASON, - NOT_COMPLETED_LISTENER, + NO_FAILURE_LISTENER, 0L ) ) @@ -895,16 +958,10 @@ private DesiredNodeWithStatus createPendingDesiredNode() { private static JoinTask createRandomTask(DiscoveryNode node, long term) { return randomBoolean() - ? JoinTask.singleNode(node, CompatibilityVersionsUtils.staticCurrent(), Set.of(), TEST_REASON, NOT_COMPLETED_LISTENER, term) + ? 
JoinTask.singleNode(node, CompatibilityVersionsUtils.staticCurrent(), Set.of(), TEST_REASON, NO_FAILURE_LISTENER, term) : JoinTask.completingElection( Stream.of( - new JoinTask.NodeJoinTask( - node, - CompatibilityVersionsUtils.staticCurrent(), - Set.of(), - TEST_REASON, - NOT_COMPLETED_LISTENER - ) + new JoinTask.NodeJoinTask(node, CompatibilityVersionsUtils.staticCurrent(), Set.of(), TEST_REASON, NO_FAILURE_LISTENER) ), term ); @@ -919,6 +976,10 @@ private static AllocationService createAllocationService() { return allocationService; } + private static FeatureService createFeatureService() { + return new FeatureService(List.of()); + } + // Hard-coding the class name here because it is also mentioned in the troubleshooting docs, so should not be renamed without care. private static final String LOGGER_NAME = "org.elasticsearch.cluster.coordination.NodeJoinExecutor"; diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java index e9e5b1c5338df..1a9d068da12ad 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.monitor.NodeHealthService; import org.elasticsearch.monitor.StatusInfo; @@ -231,7 +232,7 @@ protected void onSendRequest(long requestId, String action, TransportRequest req LeaderHeartbeatService.NO_OP, StatefulPreVoteCollector::new, CompatibilityVersionsUtils.staticCurrent(), - Set.of() + new FeatureService(List.of()) ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java index a479776aa6ab0..8372418c01644 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java @@ -140,7 +140,7 @@ protected void onCompletion(boolean committed) { @Override protected void onJoin(Join join) { - assertNull(joins.put(join.getSourceNode(), join)); + assertNull(joins.put(join.votingNode(), join)); } @Override diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatServiceTests.java index 27b8dec366261..1df613a500f83 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatServiceTests.java @@ -62,7 +62,7 @@ public void testHeartBeatStoreScheduling() { listener -> listener.onResponse(OptionalLong.of(currentTermProvider.get())) ); - PlainActionFuture completionListener = PlainActionFuture.newFuture(); + PlainActionFuture completionListener = new PlainActionFuture<>(); final var currentLeader = DiscoveryNodeUtils.create("master"); heartbeatService.start(currentLeader, currentTermProvider.get(), completionListener); @@ -125,7 +125,7 
@@ public void writeHeartbeat(Heartbeat newHeartbeat, ActionListener listener listener -> listener.onResponse(OptionalLong.of(currentTermProvider.get())) ); - PlainActionFuture completionListener = PlainActionFuture.newFuture(); + PlainActionFuture completionListener = new PlainActionFuture<>(); final var currentLeader = DiscoveryNodeUtils.create("master"); final boolean failFirstHeartBeat = randomBoolean(); @@ -167,7 +167,7 @@ public void testServiceStopsAfterTermBump() throws Exception { listener -> listener.onResponse(OptionalLong.of(currentTermProvider.get())) ); - PlainActionFuture completionListener = PlainActionFuture.newFuture(); + PlainActionFuture completionListener = new PlainActionFuture<>(); final var currentLeader = DiscoveryNodeUtils.create("master"); final long currentTerm = currentTermProvider.get(); @@ -300,7 +300,7 @@ protected long absoluteTimeInMillis() { } }; - PlainActionFuture completionListener = PlainActionFuture.newFuture(); + PlainActionFuture completionListener = new PlainActionFuture<>(); heartbeatService.start(currentLeader, 1, completionListener); var retryTask = threadPool.scheduledTasks.poll(); diff --git a/server/src/test/java/org/elasticsearch/cluster/desirednodes/DesiredNodesSettingsValidatorTests.java b/server/src/test/java/org/elasticsearch/cluster/desirednodes/DesiredNodesSettingsValidatorTests.java index 005252994d77e..819ec4b5266ac 100644 --- a/server/src/test/java/org/elasticsearch/cluster/desirednodes/DesiredNodesSettingsValidatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/desirednodes/DesiredNodesSettingsValidatorTests.java @@ -17,8 +17,8 @@ import static org.elasticsearch.cluster.metadata.DesiredNodesTestCase.randomDesiredNode; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.emptyArray; +import static org.hamcrest.Matchers.not; public class DesiredNodesSettingsValidatorTests extends ESTestCase { public void testNodeVersionValidation() { @@ -26,10 +26,10 @@ public void testNodeVersionValidation() { final DesiredNodesSettingsValidator validator = new DesiredNodesSettingsValidator(); - final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> validator.validate(desiredNodes)); + final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> validator.accept(desiredNodes)); assertThat(exception.getMessage(), containsString("Nodes with ids")); assertThat(exception.getMessage(), containsString("contain invalid settings")); - assertThat(exception.getSuppressed().length > 0, is(equalTo(true))); + assertThat(exception.getSuppressed(), not(emptyArray())); assertThat(exception.getSuppressed()[0].getMessage(), containsString("Illegal node version")); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java index 7589617de7e76..1ca7333c90a2a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java @@ -8,11 +8,9 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.TransportVersion; -import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; import 
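`DesiredNodesSettingsValidatorTests` above tracks an API change (the validator is now invoked as a `Consumer`, so `validate(...)` becomes `accept(...)`) and tightens an assertion. Matching the array directly, rather than boxing a boolean, makes a failing test print the suppressed exceptions instead of `expected true but was false`:

```java
import static org.hamcrest.Matchers.emptyArray;
import static org.hamcrest.Matchers.not;

// Before: assertThat(exception.getSuppressed().length > 0, is(equalTo(true)));
assertThat(exception.getSuppressed(), not(emptyArray()));
```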
org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; import org.elasticsearch.cluster.ClusterState; @@ -26,7 +24,6 @@ import org.elasticsearch.core.Strings; import org.elasticsearch.indices.cluster.ClusterStateChanges; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -216,79 +213,6 @@ public void testAutoExpandWhenNodeLeavesAndPossiblyRejoins() throws InterruptedE } } - public void testOnlyAutoExpandAllocationFilteringAfterAllNodesUpgraded() { - final ThreadPool threadPool = new TestThreadPool(getClass().getName()); - final ClusterStateChanges cluster = new ClusterStateChanges(xContentRegistry(), threadPool); - - try { - List allNodes = new ArrayList<>(); - DiscoveryNode localNode = createNode( - VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_5_1), - DiscoveryNodeRole.MASTER_ROLE, - DiscoveryNodeRole.DATA_ROLE - ); // local node is the master - DiscoveryNode oldNode = createNode( - VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_5_1), - DiscoveryNodeRole.DATA_ROLE - ); // local node is the master - allNodes.add(localNode); - allNodes.add(oldNode); - ClusterState state = ClusterStateCreationUtils.state( - localNode, - localNode, - allNodes.toArray(new DiscoveryNode[0]), - TransportVersions.V_7_0_0 - ); - - CreateIndexRequest request = new CreateIndexRequest( - "index", - Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_AUTO_EXPAND_REPLICAS, "0-all").build() - ).waitForActiveShards(ActiveShardCount.NONE); - state = cluster.createIndex(state, request); - assertTrue(state.metadata().hasIndex("index")); - while (state.routingTable().index("index").shard(0).allShardsStarted() == false) { - logger.info(state); - state = cluster.applyStartedShards( - state, - state.routingTable().index("index").shard(0).shardsWithState(ShardRoutingState.INITIALIZING) - ); - state = cluster.reroute(state, new ClusterRerouteRequest()); - } - - DiscoveryNode newNode = createNode(Version.V_7_6_0, DiscoveryNodeRole.MASTER_ROLE, DiscoveryNodeRole.DATA_ROLE); // local node - // is the - // master - - state = cluster.addNode(state, newNode, TransportVersions.V_7_6_0); - - // use allocation filtering - state = cluster.updateSettings( - state, - new UpdateSettingsRequest("index").settings( - Settings.builder().put(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + "._name", oldNode.getName()).build() - ) - ); - - while (state.routingTable().index("index").shard(0).allShardsStarted() == false) { - logger.info(state); - state = cluster.applyStartedShards( - state, - state.routingTable().index("index").shard(0).shardsWithState(ShardRoutingState.INITIALIZING) - ); - state = cluster.reroute(state, new ClusterRerouteRequest()); - } - - // check that presence of old node means that auto-expansion does not take allocation filtering into account - assertThat(state.routingTable().index("index").shard(0).size(), equalTo(3)); - - // remove old node and check that auto-expansion takes allocation filtering into account - state = cluster.removeNodes(state, Collections.singletonList(oldNode)); - assertThat(state.routingTable().index("index").shard(0).size(), equalTo(2)); - } finally { - 
terminate(threadPool); - } - } - public void testCalculateDesiredNumberOfReplicas() { int lowerBound = between(0, 9); int upperBound = between(lowerBound + 1, 10); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java index ac969eb7c9a10..fe678ec23afad 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java @@ -228,15 +228,14 @@ public void testXContentSerializationWithRollover() throws IOException { } DataStreamLifecycle lifecycle = randomLifecycle(); Template template = new Template(settings, mappings, aliases, lifecycle); - new ComposableIndexTemplate( - List.of(randomAlphaOfLength(4)), - template, - List.of(), - randomNonNegativeLong(), - randomNonNegativeLong(), - null, - dataStreamTemplate - ); + ComposableIndexTemplate.builder() + .indexPatterns(List.of(randomAlphaOfLength(4))) + .template(template) + .componentTemplates(List.of()) + .priority(randomNonNegativeLong()) + .version(randomNonNegativeLong()) + .dataStreamTemplate(dataStreamTemplate) + .build(); try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { builder.humanReadable(true); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTemplateTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTemplateTests.java index 6033da5d1a68e..f189a07f73039 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTemplateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTemplateTests.java @@ -9,7 +9,6 @@ import org.elasticsearch.cluster.metadata.ComposableIndexTemplate.DataStreamTemplate; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.index.IndexMode; import org.elasticsearch.test.AbstractXContentSerializingTestCase; import org.elasticsearch.xcontent.XContentParser; @@ -38,8 +37,7 @@ protected DataStreamTemplate mutateInstance(DataStreamTemplate instance) { } public static DataStreamTemplate randomInstance() { - IndexMode indexMode = randomBoolean() ? 
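`ComposableIndexTemplateTests` above swaps the positional `ComposableIndexTemplate` constructor for its builder, which names every argument at the call site and lets the unused `metadata` slot (previously an explicit `null`) simply go unset. The call shape, with a placeholder index pattern where the test uses a random string:

```java
ComposableIndexTemplate indexTemplate = ComposableIndexTemplate.builder()
    .indexPatterns(List.of("logs-*"))
    .template(template)
    .componentTemplates(List.of())
    .priority(randomNonNegativeLong())
    .version(randomNonNegativeLong())
    .dataStreamTemplate(dataStreamTemplate)
    .build();
```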
randomFrom(IndexMode.values()) : null; - return new ComposableIndexTemplate.DataStreamTemplate(randomBoolean(), randomBoolean()); + return new ComposableIndexTemplate.DataStreamTemplate(randomBoolean(), randomBoolean(), randomBoolean()); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java index 1b1e512113712..1bda67030eca1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java @@ -91,7 +91,9 @@ protected DataStream mutateInstance(DataStream instance) { var allowsCustomRouting = instance.isAllowCustomRouting(); var indexMode = instance.getIndexMode(); var lifecycle = instance.getLifecycle(); - switch (between(0, 9)) { + var failureStore = instance.isFailureStore(); + var failureIndices = instance.getFailureIndices(); + switch (between(0, 10)) { case 0 -> name = randomAlphaOfLength(10); case 1 -> indices = randomValueOtherThan(List.of(), DataStreamTestHelper::randomIndexInstances); case 2 -> generation = instance.getGeneration() + randomIntBetween(1, 10); @@ -120,6 +122,14 @@ protected DataStream mutateInstance(DataStream instance) { case 9 -> lifecycle = randomBoolean() && lifecycle != null ? null : DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build(); + case 10 -> { + failureIndices = randomValueOtherThan(List.of(), DataStreamTestHelper::randomIndexInstances); + if (failureIndices.isEmpty()) { + failureStore = false; + } else { + failureStore = true; + } + } } return new DataStream( @@ -132,7 +142,9 @@ protected DataStream mutateInstance(DataStream instance) { isSystem, allowsCustomRouting, indexMode, - lifecycle + lifecycle, + failureStore, + failureIndices ); } @@ -187,7 +199,9 @@ public void testRolloverUpgradeToTsdbDataStream() { ds.isSystem(), ds.isAllowCustomRouting(), indexMode, - ds.getLifecycle() + ds.getLifecycle(), + ds.isFailureStore(), + ds.getFailureIndices() ); var newCoordinates = ds.nextWriteIndexAndGeneration(Metadata.EMPTY_METADATA); @@ -212,7 +226,9 @@ public void testRolloverDowngradeToRegularDataStream() { ds.isSystem(), ds.isAllowCustomRouting(), IndexMode.TIME_SERIES, - ds.getLifecycle() + ds.getLifecycle(), + ds.isFailureStore(), + ds.getFailureIndices() ); var newCoordinates = ds.nextWriteIndexAndGeneration(Metadata.EMPTY_METADATA); @@ -572,7 +588,9 @@ public void testSnapshot() { preSnapshotDataStream.isSystem(), preSnapshotDataStream.isAllowCustomRouting(), preSnapshotDataStream.getIndexMode(), - preSnapshotDataStream.getLifecycle() + preSnapshotDataStream.getLifecycle(), + preSnapshotDataStream.isFailureStore(), + preSnapshotDataStream.getFailureIndices() ); var reconciledDataStream = postSnapshotDataStream.snapshot( @@ -614,7 +632,9 @@ public void testSnapshotWithAllBackingIndicesRemoved() { preSnapshotDataStream.isSystem(), preSnapshotDataStream.isAllowCustomRouting(), preSnapshotDataStream.getIndexMode(), - preSnapshotDataStream.getLifecycle() + preSnapshotDataStream.getLifecycle(), + preSnapshotDataStream.isFailureStore(), + preSnapshotDataStream.getFailureIndices() ); assertNull(postSnapshotDataStream.snapshot(preSnapshotDataStream.getIndices().stream().map(Index::getName).toList())); @@ -1613,6 +1633,11 @@ public void testXContentSerializationWithRollover() throws IOException { if (randomBoolean()) { metadata = Map.of("key", "value"); } + boolean failureStore = randomBoolean(); + List 
<Index> failureIndices = List.of();
+        if (failureStore) {
+            failureIndices = randomIndexInstances();
+        }
         DataStreamLifecycle lifecycle = DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build();
         DataStream dataStream = new DataStream(
@@ -1626,7 +1651,9 @@ public void testXContentSerializationWithRollover() throws IOException {
             System::currentTimeMillis,
             randomBoolean(),
             randomBoolean() ? IndexMode.STANDARD : null, // IndexMode.TIME_SERIES triggers validation that many unit tests doesn't pass
-            lifecycle
+            lifecycle,
+            failureStore,
+            failureIndices
         );
         try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) {
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java
index d94361d95057d..99dc1c84ba15b 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java
@@ -1557,14 +1557,6 @@ public void testDedupConcreteIndices() {
         }
     }
-    private static Metadata metadataBuilder(String... indices) {
-        Metadata.Builder mdBuilder = Metadata.builder();
-        for (String concreteIndex : indices) {
-            mdBuilder.put(indexBuilder(concreteIndex));
-        }
-        return mdBuilder.build();
-    }
-
     public void testFilterClosedIndicesOnAliases() {
         Metadata.Builder mdBuilder = Metadata.builder()
             .put(indexBuilder("test-0").state(State.OPEN).putAlias(AliasMetadata.builder("alias-0")))
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java
index 2b40e28416129..e11f8c0cbe108 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java
@@ -60,6 +60,7 @@ public void testCreateDataStream() throws Exception {
         ClusterState newState = MetadataCreateDataStreamService.createDataStream(
             metadataCreateIndexService,
             cs,
+            true,
             req,
             ActionListener.noop()
         );
@@ -68,6 +69,7 @@ public void testCreateDataStream() throws Exception {
         assertThat(newState.metadata().dataStreams().get(dataStreamName).isSystem(), is(false));
         assertThat(newState.metadata().dataStreams().get(dataStreamName).isHidden(), is(false));
         assertThat(newState.metadata().dataStreams().get(dataStreamName).isReplicated(), is(false));
+        assertThat(newState.metadata().dataStreams().get(dataStreamName).getLifecycle(), equalTo(DataStreamLifecycle.DEFAULT));
         assertThat(newState.metadata().index(DataStream.getDefaultBackingIndexName(dataStreamName, 1)), notNullValue());
         assertThat(
             newState.metadata().index(DataStream.getDefaultBackingIndexName(dataStreamName, 1)).getSettings().get("index.hidden"),
@@ -97,6 +99,7 @@ public void testCreateDataStreamWithAliasFromTemplate() throws Exception {
         ClusterState newState = MetadataCreateDataStreamService.createDataStream(
             metadataCreateIndexService,
             cs,
+            randomBoolean(),
             req,
             ActionListener.noop()
         );
@@ -172,6 +175,7 @@ public void testCreateDataStreamWithAliasFromComponentTemplate() throws Exceptio
         ClusterState newState = MetadataCreateDataStreamService.createDataStream(
             metadataCreateIndexService,
             cs,
+            randomBoolean(),
             req,
             ActionListener.noop()
         );
@@ -210,6 +214,38 @@ private static AliasMetadata
randomAlias(String prefix) { return builder.build(); } + public void testCreateDataStreamWithFailureStore() throws Exception { + final MetadataCreateIndexService metadataCreateIndexService = getMetadataCreateIndexService(); + final String dataStreamName = "my-data-stream"; + ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStreamName + "*")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, true)) + .build(); + ClusterState cs = ClusterState.builder(new ClusterName("_name")) + .metadata(Metadata.builder().put("template", template).build()) + .build(); + CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); + ClusterState newState = MetadataCreateDataStreamService.createDataStream( + metadataCreateIndexService, + cs, + randomBoolean(), + req, + ActionListener.noop() + ); + var backingIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, 1, req.getStartTime()); + var failureStoreIndexName = DataStream.getDefaultFailureStoreName(dataStreamName, 1, req.getStartTime()); + assertThat(newState.metadata().dataStreams().size(), equalTo(1)); + assertThat(newState.metadata().dataStreams().get(dataStreamName).getName(), equalTo(dataStreamName)); + assertThat(newState.metadata().dataStreams().get(dataStreamName).isSystem(), is(false)); + assertThat(newState.metadata().dataStreams().get(dataStreamName).isHidden(), is(false)); + assertThat(newState.metadata().dataStreams().get(dataStreamName).isReplicated(), is(false)); + assertThat(newState.metadata().index(backingIndexName), notNullValue()); + assertThat(newState.metadata().index(backingIndexName).getSettings().get("index.hidden"), equalTo("true")); + assertThat(newState.metadata().index(backingIndexName).isSystem(), is(false)); + assertThat(newState.metadata().index(failureStoreIndexName), notNullValue()); + assertThat(newState.metadata().index(failureStoreIndexName).getSettings().get("index.hidden"), equalTo("true")); + assertThat(newState.metadata().index(failureStoreIndexName).isSystem(), is(false)); + } + public void testCreateSystemDataStream() throws Exception { final MetadataCreateIndexService metadataCreateIndexService = getMetadataCreateIndexService(); final String dataStreamName = ".system-data-stream"; @@ -224,6 +260,7 @@ public void testCreateSystemDataStream() throws Exception { ClusterState newState = MetadataCreateDataStreamService.createDataStream( metadataCreateIndexService, cs, + randomBoolean(), req, ActionListener.noop() ); @@ -252,7 +289,13 @@ public void testCreateDuplicateDataStream() throws Exception { ResourceAlreadyExistsException e = expectThrows( ResourceAlreadyExistsException.class, - () -> MetadataCreateDataStreamService.createDataStream(metadataCreateIndexService, cs, req, ActionListener.noop()) + () -> MetadataCreateDataStreamService.createDataStream( + metadataCreateIndexService, + cs, + randomBoolean(), + req, + ActionListener.noop() + ) ); assertThat(e.getMessage(), containsString("data_stream [" + dataStreamName + "] already exists")); } @@ -264,7 +307,13 @@ public void testCreateDataStreamWithInvalidName() throws Exception { CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> MetadataCreateDataStreamService.createDataStream(metadataCreateIndexService, cs, req, ActionListener.noop()) + () -> 
MetadataCreateDataStreamService.createDataStream( + metadataCreateIndexService, + cs, + randomBoolean(), + req, + ActionListener.noop() + ) ); assertThat(e.getMessage(), containsString("must not contain the following characters")); } @@ -276,7 +325,13 @@ public void testCreateDataStreamWithUppercaseCharacters() throws Exception { CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> MetadataCreateDataStreamService.createDataStream(metadataCreateIndexService, cs, req, ActionListener.noop()) + () -> MetadataCreateDataStreamService.createDataStream( + metadataCreateIndexService, + cs, + randomBoolean(), + req, + ActionListener.noop() + ) ); assertThat(e.getMessage(), containsString("data_stream [" + dataStreamName + "] must be lowercase")); } @@ -288,7 +343,13 @@ public void testCreateDataStreamStartingWithPeriod() throws Exception { CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> MetadataCreateDataStreamService.createDataStream(metadataCreateIndexService, cs, req, ActionListener.noop()) + () -> MetadataCreateDataStreamService.createDataStream( + metadataCreateIndexService, + cs, + randomBoolean(), + req, + ActionListener.noop() + ) ); assertThat(e.getMessage(), containsString("data_stream [" + dataStreamName + "] must not start with '.ds-'")); } @@ -300,7 +361,13 @@ public void testCreateDataStreamNoTemplate() throws Exception { CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); Exception e = expectThrows( IllegalArgumentException.class, - () -> MetadataCreateDataStreamService.createDataStream(metadataCreateIndexService, cs, req, ActionListener.noop()) + () -> MetadataCreateDataStreamService.createDataStream( + metadataCreateIndexService, + cs, + randomBoolean(), + req, + ActionListener.noop() + ) ); assertThat(e.getMessage(), equalTo("no matching index template found for data stream [my-data-stream]")); } @@ -315,7 +382,13 @@ public void testCreateDataStreamNoValidTemplate() throws Exception { CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); Exception e = expectThrows( IllegalArgumentException.class, - () -> MetadataCreateDataStreamService.createDataStream(metadataCreateIndexService, cs, req, ActionListener.noop()) + () -> MetadataCreateDataStreamService.createDataStream( + metadataCreateIndexService, + cs, + randomBoolean(), + req, + ActionListener.noop() + ) ); assertThat( e.getMessage(), @@ -333,7 +406,13 @@ public static ClusterState createDataStream(final String dataStreamName) throws .metadata(Metadata.builder().put("template", template).build()) .build(); CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(dataStreamName); - return MetadataCreateDataStreamService.createDataStream(metadataCreateIndexService, cs, req, ActionListener.noop()); + return MetadataCreateDataStreamService.createDataStream( + metadataCreateIndexService, + cs, + randomBoolean(), + req, + ActionListener.noop() + ); } private static MetadataCreateIndexService getMetadataCreateIndexService() throws Exception { @@ -379,7 +458,10 @@ private static SystemDataStreamDescriptor systemDataStreamDescriptor() { ".system-data-stream", "test system datastream", Type.EXTERNAL, - new 
ComposableIndexTemplate(List.of(".system-data-stream"), null, null, null, null, null, new DataStreamTemplate()), + ComposableIndexTemplate.builder() + .indexPatterns(List.of(".system-data-stream")) + .dataStreamTemplate(new DataStreamTemplate()) + .build(), Map.of(), List.of("stack"), ExecutorNames.DEFAULT_SYSTEM_DATA_STREAM_THREAD_POOLS diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java index 172b3a6902f88..ba3b1a7387110 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java @@ -354,7 +354,9 @@ public void testRemoveBrokenBackingIndexReference() { original.isSystem(), original.isAllowCustomRouting(), original.getIndexMode(), - original.getLifecycle() + original.getLifecycle(), + original.isFailureStore(), + original.getFailureIndices() ); var brokenState = ClusterState.builder(state).metadata(Metadata.builder(state.getMetadata()).put(broken).build()).build(); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index 5d1c3fd0650d7..14cb19ba89810 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -463,40 +463,28 @@ public void testUpdateComponentTemplateWithIndexHiddenSetting() throws Exception state = metadataIndexTemplateService.addComponentTemplate(state, true, "foo", componentTemplate); assertNotNull(state.metadata().componentTemplates().get("foo")); - ComposableIndexTemplate firstGlobalIndexTemplate = new ComposableIndexTemplate( - List.of("*"), - template, - List.of("foo"), - 1L, - null, - null, - null, - null - ); + ComposableIndexTemplate firstGlobalIndexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(List.of("*")) + .template(template) + .componentTemplates(List.of("foo")) + .priority(1L) + .build(); state = metadataIndexTemplateService.addIndexTemplateV2(state, true, "globalindextemplate1", firstGlobalIndexTemplate); - ComposableIndexTemplate secondGlobalIndexTemplate = new ComposableIndexTemplate( - List.of("*"), - template, - List.of("foo"), - 2L, - null, - null, - null, - null - ); + ComposableIndexTemplate secondGlobalIndexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(List.of("*")) + .template(template) + .componentTemplates(List.of("foo")) + .priority(2L) + .build(); state = metadataIndexTemplateService.addIndexTemplateV2(state, true, "globalindextemplate2", secondGlobalIndexTemplate); - ComposableIndexTemplate fooPatternIndexTemplate = new ComposableIndexTemplate( - List.of("foo-*"), - template, - List.of("foo"), - 3L, - null, - null, - null, - null - ); + ComposableIndexTemplate fooPatternIndexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(List.of("foo-*")) + .template(template) + .componentTemplates(List.of("foo")) + .priority(3L) + .build(); state = metadataIndexTemplateService.addIndexTemplateV2(state, true, "foopatternindextemplate", fooPatternIndexTemplate); // update the component template to set the index.hidden setting @@ -555,16 +543,14 @@ public void testUpdateIndexTemplateV2() throws Exception { List patterns = new 
ArrayList<>(template.indexPatterns()); patterns.add("new-pattern"); - template = new ComposableIndexTemplate( - patterns, - template.template(), - template.composedOf(), - template.priority(), - template.version(), - template.metadata(), - null, - null - ); + template = ComposableIndexTemplate.builder() + .indexPatterns(patterns) + .template(template.template()) + .componentTemplates(template.composedOf()) + .priority(template.priority()) + .version(template.version()) + .metadata(template.metadata()) + .build(); state = metadataIndexTemplateService.addIndexTemplateV2(state, false, "foo", template); assertNotNull(state.metadata().templatesV2().get("foo")); @@ -669,16 +655,9 @@ public void testPuttingV2TemplateGeneratesWarning() throws Exception { .metadata(Metadata.builder(Metadata.EMPTY_METADATA).put(v1Template).build()) .build(); - ComposableIndexTemplate v2Template = new ComposableIndexTemplate( - Arrays.asList("foo-bar-*", "eggplant"), - null, - null, - null, - null, - null, - null, - null - ); + ComposableIndexTemplate v2Template = ComposableIndexTemplate.builder() + .indexPatterns(Arrays.asList("foo-bar-*", "eggplant")) + .build(); state = metadataIndexTemplateService.addIndexTemplateV2(state, false, "v2-template", v2Template); assertCriticalWarnings( @@ -725,16 +704,10 @@ public void onFailure(Exception e) { waitToCreateComponentTemplate.await(10, TimeUnit.SECONDS); - ComposableIndexTemplate globalIndexTemplate = new ComposableIndexTemplate( - List.of("*"), - null, - List.of("ct-with-index-hidden-setting"), - null, - null, - null, - null, - null - ); + ComposableIndexTemplate globalIndexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(List.of("*")) + .componentTemplates(List.of("ct-with-index-hidden-setting")) + .build(); expectThrows( InvalidIndexTemplateException.class, @@ -770,16 +743,9 @@ public void onFailure(Exception e) { */ public void testPuttingV1StarTemplateGeneratesWarning() throws Exception { final MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService(); - ComposableIndexTemplate v2Template = new ComposableIndexTemplate( - Arrays.asList("foo-bar-*", "eggplant"), - null, - null, - null, - null, - null, - null, - null - ); + ComposableIndexTemplate v2Template = ComposableIndexTemplate.builder() + .indexPatterns(Arrays.asList("foo-bar-*", "eggplant")) + .build(); ClusterState state = metadataIndexTemplateService.addIndexTemplateV2(ClusterState.EMPTY_STATE, false, "v2-template", v2Template); MetadataIndexTemplateService.PutRequest req = new MetadataIndexTemplateService.PutRequest("cause", "v1-template"); @@ -801,16 +767,9 @@ public void testPuttingV1StarTemplateGeneratesWarning() throws Exception { */ public void testPuttingV1NonStarTemplateGeneratesError() throws Exception { final MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService(); - ComposableIndexTemplate v2Template = new ComposableIndexTemplate( - Arrays.asList("foo-bar-*", "eggplant"), - null, - null, - null, - null, - null, - null, - null - ); + ComposableIndexTemplate v2Template = ComposableIndexTemplate.builder() + .indexPatterns(Arrays.asList("foo-bar-*", "eggplant")) + .build(); ClusterState state = metadataIndexTemplateService.addIndexTemplateV2(ClusterState.EMPTY_STATE, false, "v2-template", v2Template); MetadataIndexTemplateService.PutRequest req = new MetadataIndexTemplateService.PutRequest("cause", "v1-template"); @@ -845,16 +804,9 @@ public void testUpdatingV1NonStarTemplateWithUnchangedPatternsGeneratesWarning() 
.metadata(Metadata.builder(Metadata.EMPTY_METADATA).put(v1Template).build()) .build(); - ComposableIndexTemplate v2Template = new ComposableIndexTemplate( - Arrays.asList("foo-bar-*", "eggplant"), - null, - null, - null, - null, - null, - null, - null - ); + ComposableIndexTemplate v2Template = ComposableIndexTemplate.builder() + .indexPatterns(Arrays.asList("foo-bar-*", "eggplant")) + .build(); state = metadataIndexTemplateService.addIndexTemplateV2(state, false, "v2-template", v2Template); assertCriticalWarnings( @@ -894,16 +846,9 @@ public void testUpdatingV1NonStarWithChangedPatternsTemplateGeneratesError() thr .metadata(Metadata.builder(Metadata.EMPTY_METADATA).put(v1Template).build()) .build(); - ComposableIndexTemplate v2Template = new ComposableIndexTemplate( - Arrays.asList("foo-bar-*", "eggplant"), - null, - null, - null, - null, - null, - null, - null - ); + ComposableIndexTemplate v2Template = ComposableIndexTemplate.builder() + .indexPatterns(Arrays.asList("foo-bar-*", "eggplant")) + .build(); state = metadataIndexTemplateService.addIndexTemplateV2(state, false, "v2-template", v2Template); assertCriticalWarnings( @@ -937,28 +882,16 @@ public void testUpdatingV1NonStarWithChangedPatternsTemplateGeneratesError() thr public void testPuttingOverlappingV2Template() throws Exception { { - ComposableIndexTemplate template = new ComposableIndexTemplate( - Arrays.asList("egg*", "baz"), - null, - null, - 1L, - null, - null, - null, - null - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(Arrays.asList("egg*", "baz")) + .priority(1L) + .build(); MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService(); ClusterState state = metadataIndexTemplateService.addIndexTemplateV2(ClusterState.EMPTY_STATE, false, "foo", template); - ComposableIndexTemplate newTemplate = new ComposableIndexTemplate( - Arrays.asList("abc", "baz*"), - null, - null, - 1L, - null, - null, - null, - null - ); + ComposableIndexTemplate newTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Arrays.asList("abc", "baz*")) + .priority(1L) + .build(); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> metadataIndexTemplateService.addIndexTemplateV2(state, false, "foo2", newTemplate) @@ -974,28 +907,13 @@ public void testPuttingOverlappingV2Template() throws Exception { } { - ComposableIndexTemplate template = new ComposableIndexTemplate( - Arrays.asList("egg*", "baz"), - null, - null, - null, - null, - null, - null, - null - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder().indexPatterns(Arrays.asList("egg*", "baz")).build(); MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService(); ClusterState state = metadataIndexTemplateService.addIndexTemplateV2(ClusterState.EMPTY_STATE, false, "foo", template); - ComposableIndexTemplate newTemplate = new ComposableIndexTemplate( - Arrays.asList("abc", "baz*"), - null, - null, - 0L, - null, - null, - null, - null - ); + ComposableIndexTemplate newTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Arrays.asList("abc", "baz*")) + .priority(0L) + .build(); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> metadataIndexTemplateService.addIndexTemplateV2(state, false, "foo2", newTemplate) @@ -1018,9 +936,18 @@ public void testFindV2Templates() throws Exception { ComponentTemplate ct = ComponentTemplateTests.randomNonDeprecatedInstance(); state = 
service.addComponentTemplate(state, true, "ct", ct); - ComposableIndexTemplate it = new ComposableIndexTemplate(List.of("i*"), null, List.of("ct"), null, 1L, null, null, null); + ComposableIndexTemplate it = ComposableIndexTemplate.builder() + .indexPatterns(List.of("i*")) + .componentTemplates(List.of("ct")) + .version(1L) + .build(); state = service.addIndexTemplateV2(state, true, "my-template", it); - ComposableIndexTemplate it2 = new ComposableIndexTemplate(List.of("in*"), null, List.of("ct"), 10L, 2L, null, null, null); + ComposableIndexTemplate it2 = ComposableIndexTemplate.builder() + .indexPatterns(List.of("in*")) + .componentTemplates(List.of("ct")) + .priority(10L) + .version(2L) + .build(); state = service.addIndexTemplateV2(state, true, "my-template2", it2); String result = MetadataIndexTemplateService.findV2Template(state.metadata(), "index", randomBoolean()); @@ -1035,9 +962,19 @@ public void testFindV2TemplatesForHiddenIndex() throws Exception { ComponentTemplate ct = ComponentTemplateTests.randomNonDeprecatedInstance(); state = service.addComponentTemplate(state, true, "ct", ct); - ComposableIndexTemplate it = new ComposableIndexTemplate(List.of("i*"), null, List.of("ct"), 0L, 1L, null, null, null); + ComposableIndexTemplate it = ComposableIndexTemplate.builder() + .indexPatterns(List.of("i*")) + .componentTemplates(List.of("ct")) + .priority(0L) + .version(1L) + .build(); state = service.addIndexTemplateV2(state, true, "my-template", it); - ComposableIndexTemplate it2 = new ComposableIndexTemplate(List.of("*"), null, List.of("ct"), 10L, 2L, null, null, null); + ComposableIndexTemplate it2 = ComposableIndexTemplate.builder() + .indexPatterns(List.of("*")) + .componentTemplates(List.of("ct")) + .priority(10L) + .version(2L) + .build(); state = service.addIndexTemplateV2(state, true, "my-template2", it2); String result = MetadataIndexTemplateService.findV2Template(state.metadata(), "index", true); @@ -1053,9 +990,19 @@ public void testFindV2TemplatesForDateMathIndex() throws Exception { ComponentTemplate ct = ComponentTemplateTests.randomNonDeprecatedInstance(); state = service.addComponentTemplate(state, true, "ct", ct); - ComposableIndexTemplate it = new ComposableIndexTemplate(List.of("index-*"), null, List.of("ct"), 0L, 1L, null, null, null); + ComposableIndexTemplate it = ComposableIndexTemplate.builder() + .indexPatterns(List.of("index-*")) + .componentTemplates(List.of("ct")) + .priority(0L) + .version(1L) + .build(); state = service.addIndexTemplateV2(state, true, "my-template", it); - ComposableIndexTemplate it2 = new ComposableIndexTemplate(List.of("*"), null, List.of("ct"), 10L, 2L, null, null, null); + ComposableIndexTemplate it2 = ComposableIndexTemplate.builder() + .indexPatterns(List.of("*")) + .componentTemplates(List.of("ct")) + .priority(10L) + .version(2L) + .build(); state = service.addIndexTemplateV2(state, true, "my-template2", it2); String result = MetadataIndexTemplateService.findV2Template(state.metadata(), indexName, true); @@ -1067,16 +1014,13 @@ public void testFindV2InvalidGlobalTemplate() { Template templateWithHiddenSetting = new Template(builder().put(IndexMetadata.SETTING_INDEX_HIDDEN, true).build(), null, null); try { // add an invalid global template that specifies the `index.hidden` setting - ComposableIndexTemplate invalidGlobalTemplate = new ComposableIndexTemplate( - List.of("*"), - templateWithHiddenSetting, - List.of("ct"), - 5L, - 1L, - null, - null, - null - ); + ComposableIndexTemplate invalidGlobalTemplate = 
ComposableIndexTemplate.builder() + .indexPatterns(List.of("*")) + .template(templateWithHiddenSetting) + .componentTemplates(List.of("ct")) + .priority(5L) + .version(1L) + .build(); Metadata invalidGlobalTemplateMetadata = Metadata.builder() .putCustom( ComposableIndexTemplateMetadata.TYPE, @@ -1119,14 +1063,20 @@ public void testResolveConflictingMappings() throws Exception { }"""), null), null, null); state = service.addComponentTemplate(state, true, "ct_high", ct1); state = service.addComponentTemplate(state, true, "ct_low", ct2); - ComposableIndexTemplate it = new ComposableIndexTemplate(List.of("i*"), new Template(null, new CompressedXContent(""" - { - "properties": { - "field": { - "type": "keyword" - } - } - }"""), null), List.of("ct_low", "ct_high"), 0L, 1L, null, null, null); + ComposableIndexTemplate it = ComposableIndexTemplate.builder() + .indexPatterns(List.of("i*")) + .template(new Template(null, new CompressedXContent(""" + { + "properties": { + "field": { + "type": "keyword" + } + } + }"""), null)) + .componentTemplates(List.of("ct_low", "ct_high")) + .priority(0L) + .version(1L) + .build(); state = service.addIndexTemplateV2(state, true, "my-template", it); List mappings = MetadataIndexTemplateService.collectMappings(state, "my-template", "my-index"); @@ -1175,14 +1125,20 @@ public void testResolveMappings() throws Exception { }"""), null), null, null); state = service.addComponentTemplate(state, true, "ct_high", ct1); state = service.addComponentTemplate(state, true, "ct_low", ct2); - ComposableIndexTemplate it = new ComposableIndexTemplate(List.of("i*"), new Template(null, new CompressedXContent(""" - { - "properties": { - "field3": { - "type": "integer" - } - } - }"""), null), List.of("ct_low", "ct_high"), 0L, 1L, null, null, null); + ComposableIndexTemplate it = ComposableIndexTemplate.builder() + .indexPatterns(List.of("i*")) + .template(new Template(null, new CompressedXContent(""" + { + "properties": { + "field3": { + "type": "integer" + } + } + }"""), null)) + .componentTemplates(List.of("ct_low", "ct_high")) + .priority(0L) + .version(1L) + .build(); state = service.addIndexTemplateV2(state, true, "my-template", it); List mappings = MetadataIndexTemplateService.collectMappings(state, "my-template", "my-index"); @@ -1219,14 +1175,21 @@ public void testDefinedTimestampMappingIsAddedForDataStreamTemplates() throws Ex state = service.addComponentTemplate(state, true, "ct1", ct1); { - ComposableIndexTemplate it = new ComposableIndexTemplate(List.of("logs*"), new Template(null, new CompressedXContent(""" - { - "properties": { - "field2": { - "type": "integer" - } - } - }"""), null), List.of("ct1"), 0L, 1L, null, new ComposableIndexTemplate.DataStreamTemplate(), null); + ComposableIndexTemplate it = ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs*")) + .template(new Template(null, new CompressedXContent(""" + { + "properties": { + "field2": { + "type": "integer" + } + } + }"""), null)) + .componentTemplates(List.of("ct1")) + .priority(0L) + .version(1L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(); state = service.addIndexTemplateV2(state, true, "logs-data-stream-template", it); List mappings = MetadataIndexTemplateService.collectMappings( @@ -1267,14 +1230,20 @@ public void testDefinedTimestampMappingIsAddedForDataStreamTemplates() throws Ex { // indices matched by templates without the data stream field defined don't get the default @timestamp mapping - ComposableIndexTemplate it = new 
ComposableIndexTemplate(List.of("timeseries*"), new Template(null, new CompressedXContent(""" - { - "properties": { - "field2": { - "type": "integer" - } - } - }"""), null), List.of("ct1"), 0L, 1L, null, null, null); + ComposableIndexTemplate it = ComposableIndexTemplate.builder() + .indexPatterns(List.of("timeseries*")) + .template(new Template(null, new CompressedXContent(""" + { + "properties": { + "field2": { + "type": "integer" + } + } + }"""), null)) + .componentTemplates(List.of("ct1")) + .priority(0L) + .version(1L) + .build(); state = service.addIndexTemplateV2(state, true, "timeseries-template", it); List mappings = MetadataIndexTemplateService.collectMappings(state, "timeseries-template", "timeseries"); @@ -1335,16 +1304,13 @@ public void testUserDefinedMappingTakesPrecedenceOverDefault() throws Exception }"""), null), null, null); state = service.addComponentTemplate(state, true, "ct1", ct1); - ComposableIndexTemplate it = new ComposableIndexTemplate( - List.of("logs*"), - null, - List.of("ct1"), - 0L, - 1L, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ); + ComposableIndexTemplate it = ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs*")) + .componentTemplates(List.of("ct1")) + .priority(0L) + .version(1L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(); state = service.addIndexTemplateV2(state, true, "logs-template", it); List mappings = MetadataIndexTemplateService.collectMappings( @@ -1394,16 +1360,13 @@ public void testUserDefinedMappingTakesPrecedenceOverDefault() throws Exception } } }"""), null); - ComposableIndexTemplate it = new ComposableIndexTemplate( - List.of("timeseries*"), - template, - null, - 0L, - 1L, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ); + ComposableIndexTemplate it = ComposableIndexTemplate.builder() + .indexPatterns(List.of("timeseries*")) + .template(template) + .priority(0L) + .version(1L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(); state = service.addIndexTemplateV2(state, true, "timeseries-template", it); List mappings = MetadataIndexTemplateService.collectMappings( @@ -1460,16 +1423,15 @@ public void testResolveSettings() throws Exception { ); state = service.addComponentTemplate(state, true, "ct_high", ct1); state = service.addComponentTemplate(state, true, "ct_low", ct2); - ComposableIndexTemplate it = new ComposableIndexTemplate( - List.of("i*"), - new Template(Settings.builder().put("index.blocks.write", false).put("index.number_of_shards", 3).build(), null, null), - List.of("ct_low", "ct_high"), - 0L, - 1L, - null, - null, - null - ); + ComposableIndexTemplate it = ComposableIndexTemplate.builder() + .indexPatterns(List.of("i*")) + .template( + new Template(Settings.builder().put("index.blocks.write", false).put("index.number_of_shards", 3).build(), null, null) + ) + .componentTemplates(List.of("ct_low", "ct_high")) + .priority(0L) + .version(1L) + .build(); state = service.addIndexTemplateV2(state, true, "my-template", it); Settings settings = MetadataIndexTemplateService.resolveSettings(state.metadata(), "my-template"); @@ -1495,16 +1457,13 @@ public void testResolveAliases() throws Exception { ComponentTemplate ct2 = new ComponentTemplate(new Template(null, null, a2), null, null); state = service.addComponentTemplate(state, true, "ct_high", ct1); state = service.addComponentTemplate(state, true, "ct_low", ct2); - ComposableIndexTemplate it = new ComposableIndexTemplate( - List.of("i*"), - new 
Template(null, null, a3),
-            List.of("ct_low", "ct_high"),
-            0L,
-            1L,
-            null,
-            null,
-            null
-        );
+        ComposableIndexTemplate it = ComposableIndexTemplate.builder()
+            .indexPatterns(List.of("i*"))
+            .template(new Template(null, null, a3))
+            .componentTemplates(List.of("ct_low", "ct_high"))
+            .priority(0L)
+            .version(1L)
+            .build();
         state = service.addIndexTemplateV2(state, true, "my-template", it);
         List<Map<String, AliasMetadata>> resolvedAliases = MetadataIndexTemplateService.resolveAliases(state.metadata(), "my-template");
@@ -1662,16 +1621,14 @@ private void assertLifecycleResolution(
         DataStreamLifecycle lifecycleZ,
         DataStreamLifecycle expected
     ) throws Exception {
-        ComposableIndexTemplate it = new ComposableIndexTemplate(
-            List.of(randomAlphaOfLength(10) + "*"),
-            new Template(null, null, null, lifecycleZ),
-            composeOf,
-            0L,
-            1L,
-            null,
-            new ComposableIndexTemplate.DataStreamTemplate(),
-            null
-        );
+        ComposableIndexTemplate it = ComposableIndexTemplate.builder()
+            .indexPatterns(List.of(randomAlphaOfLength(10) + "*"))
+            .template(new Template(null, null, null, lifecycleZ))
+            .componentTemplates(composeOf)
+            .priority(0L)
+            .version(1L)
+            .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate())
+            .build();
         state = service.addIndexTemplateV2(state, true, "my-template", it);
         DataStreamLifecycle resolvedLifecycle = MetadataIndexTemplateService.resolveLifecycle(state.metadata(), "my-template");
@@ -1679,14 +1636,10 @@ private void assertLifecycleResolution(
     }

     public void testAddInvalidTemplate() throws Exception {
-        ComposableIndexTemplate template = new ComposableIndexTemplate(
-            Collections.singletonList("a"),
-            null,
-            Arrays.asList("good", "bad"),
-            null,
-            null,
-            null
-        );
+        ComposableIndexTemplate template = ComposableIndexTemplate.builder()
+            .indexPatterns(Collections.singletonList("a"))
+            .componentTemplates(Arrays.asList("good", "bad"))
+            .build();
         ComponentTemplate ct = new ComponentTemplate(new Template(Settings.EMPTY, null, null), null, null);
         final MetadataIndexTemplateService service = getMetadataIndexTemplateService();
@@ -1766,14 +1719,10 @@ public void testRemoveComponentTemplate() throws Exception {
     }

     public void testRemoveComponentTemplateInUse() throws Exception {
-        ComposableIndexTemplate template = new ComposableIndexTemplate(
-            Collections.singletonList("a"),
-            null,
-            Collections.singletonList("ct"),
-            null,
-            null,
-            null
-        );
+        ComposableIndexTemplate template = ComposableIndexTemplate.builder()
+            .indexPatterns(Collections.singletonList("a"))
+            .componentTemplates(Collections.singletonList("ct"))
+            .build();
         ComponentTemplate ct = new ComponentTemplate(new Template(null, new CompressedXContent("{}"), null), null, null);
         final MetadataIndexTemplateService service = getMetadataIndexTemplateService();
@@ -1848,19 +1797,25 @@ public void testIndexTemplateFailsToOverrideComponentTemplateMappingField() thro
             }"""), null), null, null);
         state = service.addComponentTemplate(state, true, "c1", ct1);
         state = service.addComponentTemplate(state, true, "c2", ct2);
-        ComposableIndexTemplate it = new ComposableIndexTemplate(List.of("i*"), new Template(null, new CompressedXContent("""
-            {
-              "properties": {
-                "field2": {
-                  "type": "object",
-                  "properties": {
-                    "bar": {
-                      "type": "object"
-                    }
-                  }
-                }
-              }
-            }"""), null), randomBoolean() ?
Arrays.asList("c1", "c2") : Arrays.asList("c2", "c1"), 0L, 1L, null, null, null); + ComposableIndexTemplate it = ComposableIndexTemplate.builder() + .indexPatterns(List.of("i*")) + .template(new Template(null, new CompressedXContent(""" + { + "properties": { + "field2": { + "type": "object", + "properties": { + "bar": { + "type": "object" + } + } + } + } + }"""), null)) + .componentTemplates(randomBoolean() ? Arrays.asList("c1", "c2") : Arrays.asList("c2", "c1")) + .priority(0L) + .version(1L) + .build(); final ClusterState finalState = state; IllegalArgumentException e = expectThrows( @@ -1897,7 +1852,10 @@ public void testIndexTemplateFailsToAdd() throws Exception { null ); state = service.addComponentTemplate(state, true, "ct", ct); - ComposableIndexTemplate it = new ComposableIndexTemplate(List.of("i*"), null, List.of("ct"), null, null, null); + ComposableIndexTemplate it = ComposableIndexTemplate.builder() + .indexPatterns(List.of("i*")) + .componentTemplates(List.of("ct")) + .build(); final ClusterState finalState = state; IllegalArgumentException e = expectThrows( @@ -1947,16 +1905,13 @@ public void testUpdateComponentTemplateFailsIfResolvedIndexTemplatesWouldBeInval """), null), null, null); state = service.addComponentTemplate(state, true, "c1", ct1); state = service.addComponentTemplate(state, true, "c2", ct2); - ComposableIndexTemplate it = new ComposableIndexTemplate( - List.of("i*"), - new Template(null, null, null), - randomBoolean() ? Arrays.asList("c1", "c2") : Arrays.asList("c2", "c1"), - 0L, - 1L, - null, - null, - null - ); + ComposableIndexTemplate it = ComposableIndexTemplate.builder() + .indexPatterns(List.of("i*")) + .template(new Template(null, null, null)) + .componentTemplates(randomBoolean() ? Arrays.asList("c1", "c2") : Arrays.asList("c2", "c1")) + .priority(0L) + .version(1L) + .build(); // Great, the templates aren't invalid state = service.addIndexTemplateV2(state, randomBoolean(), "my-template", it); @@ -2041,16 +1996,11 @@ public void testUnreferencedDataStreamsWhenAddingTemplate() throws Exception { ) .build(); - ComposableIndexTemplate template = new ComposableIndexTemplate( - Collections.singletonList("logs-*-*"), - null, - null, - 100L, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-*-*")) + .priority(100L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(); state = service.addIndexTemplateV2(state, false, "logs", template); @@ -2072,16 +2022,10 @@ public void testUnreferencedDataStreamsWhenAddingTemplate() throws Exception { // Test replacing it with a version without the data stream config IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { - ComposableIndexTemplate nonDSTemplate = new ComposableIndexTemplate( - Collections.singletonList("logs-*-*"), - null, - null, - 100L, - null, - null, - null, - null - ); + ComposableIndexTemplate nonDSTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-*-*")) + .priority(100L) + .build(); service.addIndexTemplateV2(stateWithDS, false, "logs", nonDSTemplate); }); @@ -2095,16 +2039,10 @@ public void testUnreferencedDataStreamsWhenAddingTemplate() throws Exception { // Test adding a higher priority version that would cause problems e = expectThrows(IllegalArgumentException.class, () -> { - ComposableIndexTemplate nonDSTemplate = new ComposableIndexTemplate( - 
Collections.singletonList("logs-my*-*"), - null, - null, - 105L, - null, - null, - null, - null - ); + ComposableIndexTemplate nonDSTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-my*-*")) + .priority(105L) + .build(); service.addIndexTemplateV2(stateWithDS, false, "logs2", nonDSTemplate); }); @@ -2118,16 +2056,11 @@ public void testUnreferencedDataStreamsWhenAddingTemplate() throws Exception { // Change the pattern to one that doesn't match the data stream e = expectThrows(IllegalArgumentException.class, () -> { - ComposableIndexTemplate newTemplate = new ComposableIndexTemplate( - Collections.singletonList("logs-postgres-*"), - null, - null, - 100L, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ); + ComposableIndexTemplate newTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-postgres-*")) + .priority(100L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(); service.addIndexTemplateV2(stateWithDS, false, "logs", newTemplate); }); @@ -2140,29 +2073,18 @@ public void testUnreferencedDataStreamsWhenAddingTemplate() throws Exception { ); // Add an additional template that matches our data stream at a lower priority - ComposableIndexTemplate mysqlTemplate = new ComposableIndexTemplate( - Collections.singletonList("logs-mysql-*"), - null, - null, - 50L, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ); + ComposableIndexTemplate mysqlTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-mysql-*")) + .priority(50L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(); ClusterState stateWithDSAndTemplate = service.addIndexTemplateV2(stateWithDS, false, "logs-mysql", mysqlTemplate); // We should be able to replace the "logs" template, because we have the "logs-mysql" template that can handle the data stream - ComposableIndexTemplate nonDSTemplate = new ComposableIndexTemplate( - Collections.singletonList("logs-postgres-*"), - null, - null, - 100L, - null, - null, - null, - null - ); + ComposableIndexTemplate nonDSTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-postgres-*")) + .priority(100L) + .build(); service.addIndexTemplateV2(stateWithDSAndTemplate, false, "logs", nonDSTemplate); } @@ -2185,16 +2107,11 @@ public void testDataStreamsUsingTemplates() throws Exception { ) .build(); - ComposableIndexTemplate template = new ComposableIndexTemplate( - Collections.singletonList("logs-*-*"), - null, - null, - 100L, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-*-*")) + .priority(100L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(); state = service.addIndexTemplateV2(state, false, "logs", template); @@ -2214,16 +2131,11 @@ public void testDataStreamsUsingTemplates() throws Exception { ) .build(); - ComposableIndexTemplate fineGrainedLogsTemplate = new ComposableIndexTemplate( - Collections.singletonList("logs-mysql-*"), - null, - null, - 200L, // Higher priority - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ); + ComposableIndexTemplate fineGrainedLogsTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-mysql-*")) + .priority(200L) + 
.dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(); state = service.addIndexTemplateV2(stateWithDS, false, "logs2", fineGrainedLogsTemplate); @@ -2250,16 +2162,11 @@ public void testRemovingHigherOrderTemplateOfDataStreamWithMultipleTemplates() t ClusterState state = ClusterState.EMPTY_STATE; final MetadataIndexTemplateService service = getMetadataIndexTemplateService(); - ComposableIndexTemplate template = new ComposableIndexTemplate( - Collections.singletonList("logs-*"), - null, - null, - 100L, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-*")) + .priority(100L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(); state = service.addIndexTemplateV2(state, false, "logs", template); @@ -2279,16 +2186,11 @@ public void testRemovingHigherOrderTemplateOfDataStreamWithMultipleTemplates() t ) .build(); - ComposableIndexTemplate fineGrainedLogsTemplate = new ComposableIndexTemplate( - Collections.singletonList("logs-mysql-*"), - null, - null, - 200L, // Higher priority - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ); + ComposableIndexTemplate fineGrainedLogsTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-mysql-*")) + .priority(200L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(); state = service.addIndexTemplateV2(stateWithDS, false, "logs-test", fineGrainedLogsTemplate); @@ -2304,28 +2206,16 @@ public void testRemovingHigherOrderTemplateOfDataStreamWithMultipleTemplates() t public void testV2TemplateOverlaps() throws Exception { { - ComposableIndexTemplate template = new ComposableIndexTemplate( - Arrays.asList("egg*", "baz"), - null, - null, - 1L, - null, - null, - null, - null - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(Arrays.asList("egg*", "baz")) + .priority(1L) + .build(); MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService(); ClusterState state = metadataIndexTemplateService.addIndexTemplateV2(ClusterState.EMPTY_STATE, false, "foo", template); - ComposableIndexTemplate newTemplate = new ComposableIndexTemplate( - Arrays.asList("abc", "baz*"), - null, - null, - 1L, - null, - null, - null, - null - ); + ComposableIndexTemplate newTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Arrays.asList("abc", "baz*")) + .priority(1L) + .build(); // when validating is false, we return the conflicts instead of throwing an exception var overlaps = MetadataIndexTemplateService.v2TemplateOverlaps(state, "foo2", newTemplate, false); @@ -2346,44 +2236,23 @@ public void testV2TemplateOverlaps() throws Exception { ) ); - ComposableIndexTemplate nonConflict = new ComposableIndexTemplate( - Arrays.asList("abc", "bar*"), - null, - null, - 1L, - null, - null, - null, - null - ); + ComposableIndexTemplate nonConflict = ComposableIndexTemplate.builder() + .indexPatterns(Arrays.asList("abc", "bar*")) + .priority(1L) + .build(); overlaps = MetadataIndexTemplateService.v2TemplateOverlaps(state, "no-conflict", nonConflict, true); assertTrue(overlaps.isEmpty()); } { - ComposableIndexTemplate template = new ComposableIndexTemplate( - Arrays.asList("egg*", "baz"), - null, - null, - null, - null, - null, - null, - null - ); + ComposableIndexTemplate template = 
ComposableIndexTemplate.builder().indexPatterns(Arrays.asList("egg*", "baz")).build(); MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService(); ClusterState state = metadataIndexTemplateService.addIndexTemplateV2(ClusterState.EMPTY_STATE, false, "foo", template); - ComposableIndexTemplate newTemplate = new ComposableIndexTemplate( - Arrays.asList("abc", "baz*"), - null, - null, - 0L, - null, - null, - null, - null - ); + ComposableIndexTemplate newTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Arrays.asList("abc", "baz*")) + .priority(0L) + .build(); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> MetadataIndexTemplateService.v2TemplateOverlaps(state, "foo2", newTemplate, true) @@ -2543,16 +2412,13 @@ public void testComposableTemplateWithSubobjectsFalse() throws Exception { state = service.addComponentTemplate(state, true, "subobjects", subobjects); state = service.addComponentTemplate(state, true, "field_mapping", fieldMapping); - ComposableIndexTemplate it = new ComposableIndexTemplate( - List.of("test-*"), - new Template(null, null, null), - List.of("subobjects", "field_mapping"), - 0L, - 1L, - null, - null, - null - ); + ComposableIndexTemplate it = ComposableIndexTemplate.builder() + .indexPatterns(List.of("test-*")) + .template(new Template(null, null, null)) + .componentTemplates(List.of("subobjects", "field_mapping")) + .priority(0L) + .version(1L) + .build(); state = service.addIndexTemplateV2(state, true, "composable-template", it); List mappings = MetadataIndexTemplateService.collectMappings(state, "composable-template", "test-index"); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java index 5b3079338e830..128601ff21250 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java @@ -279,21 +279,17 @@ public void testCreateDataStreamWithSuppliedWriteIndex() throws Exception { .put(foo2, false) .put( "template", - new ComposableIndexTemplate( - List.of(dataStreamName + "*"), - null, - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate() - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStreamName + "*")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ) ) .build(); ClusterState newState = MetadataMigrateToDataStreamService.migrateToDataStream( cs, + randomBoolean(), this::getMapperService, new MetadataMigrateToDataStreamService.MigrateToDataStreamClusterStateUpdateRequest( dataStreamName, @@ -341,21 +337,17 @@ public void testCreateDataStreamHidesBackingIndicesAndRemovesAlias() throws Exce .put(foo2, false) .put( "template", - new ComposableIndexTemplate( - List.of(dataStreamName + "*"), - null, - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate() - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStreamName + "*")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ) ) .build(); ClusterState newState = MetadataMigrateToDataStreamService.migrateToDataStream( cs, + randomBoolean(), this::getMapperService, new MetadataMigrateToDataStreamService.MigrateToDataStreamClusterStateUpdateRequest( dataStreamName, @@ -403,15 
+395,10 @@ public void testCreateDataStreamWithoutSuppliedWriteIndex() { .put(foo2, false) .put( "template", - new ComposableIndexTemplate( - List.of(dataStreamName + "*"), - null, - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate() - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStreamName + "*")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ) ) .build(); @@ -420,6 +407,7 @@ public void testCreateDataStreamWithoutSuppliedWriteIndex() { IllegalArgumentException.class, () -> MetadataMigrateToDataStreamService.migrateToDataStream( cs, + randomBoolean(), this::getMapperService, new MetadataMigrateToDataStreamService.MigrateToDataStreamClusterStateUpdateRequest( dataStreamName, diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java index fe0dd5ea1a5e7..264d8c5ca1a95 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java @@ -2279,30 +2279,23 @@ public void testIsTimeSeriesTemplate() throws IOException { // Settings in component template: { var componentTemplate = new ComponentTemplate(template, null, null); - var indexTemplate = new ComposableIndexTemplate( - List.of("test-*"), - null, - List.of("component_template_1"), - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate() - ); + var indexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(List.of("test-*")) + .componentTemplates(List.of("component_template_1")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(); Metadata m = Metadata.builder().put("component_template_1", componentTemplate).put("index_template_1", indexTemplate).build(); assertThat(m.isTimeSeriesTemplate(indexTemplate), is(true)); } // Settings in composable index template: { var componentTemplate = new ComponentTemplate(new Template(null, null, null), null, null); - var indexTemplate = new ComposableIndexTemplate( - List.of("test-*"), - template, - List.of("component_template_1"), - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate() - ); + var indexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(List.of("test-*")) + .template(template) + .componentTemplates(List.of("component_template_1")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(); Metadata m = Metadata.builder().put("component_template_1", componentTemplate).put("index_template_1", indexTemplate).build(); assertThat(m.isTimeSeriesTemplate(indexTemplate), is(true)); } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java index 8bcd9201092d8..cb681b57b58dd 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java @@ -89,20 +89,21 @@ public void testSimpleJsonFromAndTo() throws IOException { ) .put( "index_templatev2", - new ComposableIndexTemplate( - Arrays.asList("foo", "bar*"), - new Template( - Settings.builder().put("setting", "value").build(), - new CompressedXContent("{\"baz\":\"eggplant\"}"), - Collections.singletonMap("alias", AliasMetadata.builder("alias").build()) - ), - 
Collections.singletonList("component_template"), - 5L, - 4L, - Collections.singletonMap("my_meta", Collections.singletonMap("potato", "chicken")), - randomBoolean() ? null : new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(Arrays.asList("foo", "bar*")) + .template( + new Template( + Settings.builder().put("setting", "value").build(), + new CompressedXContent("{\"baz\":\"eggplant\"}"), + Collections.singletonMap("alias", AliasMetadata.builder("alias").build()) + ) + ) + .componentTemplates(Collections.singletonList("component_template")) + .priority(5L) + .version(4L) + .metadata(Collections.singletonMap("my_meta", Collections.singletonMap("potato", "chicken"))) + .dataStreamTemplate(randomBoolean() ? null : new ComposableIndexTemplate.DataStreamTemplate()) + .build() ) .put( IndexMetadata.builder("test12") diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityActionGuideTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityActionGuideTests.java index 4777b0eb357da..b731fd79c82fe 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityActionGuideTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityActionGuideTests.java @@ -8,17 +8,17 @@ package org.elasticsearch.cluster.routing.allocation; +import org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.test.ESTestCase; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_CHECK_ALLOCATION_EXPLAIN_API; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_CLUSTER_ROUTING_ALLOCATION; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_INDEX_ROUTING_ALLOCATION; -import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_TIERS_LOOKUP; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_NODE_CAPACITY; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING; -import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING; -import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_INCLUDE_DATA; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_REQUIRE_DATA; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_RESTORE_FROM_SNAPSHOT; @@ -32,9 +32,16 @@ import static 
org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.RESTORE_FROM_SNAPSHOT_ACTION_GUIDE; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.TIER_CAPACITY_ACTION_GUIDE; import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; public class ShardsAvailabilityActionGuideTests extends ESTestCase { + private final ShardsAvailabilityHealthIndicatorService service = new ShardsAvailabilityHealthIndicatorService( + mock(ClusterService.class), + mock(AllocationService.class), + mock(SystemIndices.class) + ); + public void testRestoreFromSnapshotAction() { assertThat(ACTION_RESTORE_FROM_SNAPSHOT.helpURL(), is(RESTORE_FROM_SNAPSHOT_ACTION_GUIDE)); } @@ -60,20 +67,17 @@ public void testEnableClusterRoutingAllocation() { } public void testEnableDataTiers() { - assertThat(ACTION_ENABLE_TIERS_LOOKUP.get(DataTier.DATA_HOT).helpURL(), is(ENABLE_TIER_ACTION_GUIDE)); + assertThat(service.getAddNodesWithRoleAction(DataTier.DATA_HOT).helpURL(), is(ENABLE_TIER_ACTION_GUIDE)); } public void testIncreaseShardLimitIndexSettingInTier() { - assertThat( - ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP.get(DataTier.DATA_HOT).helpURL(), - is(INCREASE_SHARD_LIMIT_ACTION_GUIDE) - ); + assertThat(service.getIncreaseShardLimitIndexSettingAction(DataTier.DATA_HOT).helpURL(), is(INCREASE_SHARD_LIMIT_ACTION_GUIDE)); assertThat(ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING.helpURL(), is(INCREASE_SHARD_LIMIT_ACTION_GUIDE)); } public void testIncreaseShardLimitClusterSettingInTier() { assertThat( - ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP.get(DataTier.DATA_HOT).helpURL(), + service.getIncreaseShardLimitClusterSettingAction(DataTier.DATA_HOT).helpURL(), is(INCREASE_CLUSTER_SHARD_LIMIT_ACTION_GUIDE) ); assertThat(ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING.helpURL(), is(INCREASE_CLUSTER_SHARD_LIMIT_ACTION_GUIDE)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterAllocationSimulationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterAllocationSimulationTests.java index a057b638c04e2..13ea91a2bc99b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterAllocationSimulationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterAllocationSimulationTests.java @@ -47,6 +47,7 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.gateway.TestGatewayAllocator; import org.elasticsearch.threadpool.ThreadPool; @@ -487,7 +488,8 @@ private Map.Entry createNewAllocationSer threadPool, clusterService, (clusterState, routingAllocationAction) -> strategyRef.get() - .executeWithRoutingAllocation(clusterState, "reconcile-desired-balance", routingAllocationAction) + .executeWithRoutingAllocation(clusterState, "reconcile-desired-balance", routingAllocationAction), + TelemetryProvider.NOOP ) { @Override public void allocate(RoutingAllocation allocation, ActionListener listener) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputationTests.java index 
f85f3fbd356d9..cebc4860012ad 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputationTests.java @@ -8,7 +8,9 @@ package org.elasticsearch.cluster.routing.allocation.allocator; +import org.apache.logging.log4j.Level; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.junit.AfterClass; @@ -19,11 +21,11 @@ import java.util.concurrent.CyclicBarrier; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.sameInstance; -import static org.junit.Assert.assertEquals; public class ContinuousComputationTests extends ESTestCase { @@ -46,7 +48,7 @@ public static void terminateThreadPool() { public void testConcurrency() throws Exception { final var result = new AtomicReference(); - final var computation = new ContinuousComputation(threadPool) { + final var computation = new ContinuousComputation(threadPool.generic()) { public final Semaphore executePermit = new Semaphore(1); @@ -94,7 +96,7 @@ public void testSkipsObsoleteValues() throws Exception { final var finalInput = new Object(); final var result = new AtomicReference(); - final var computation = new ContinuousComputation(threadPool) { + final var computation = new ContinuousComputation(threadPool.generic()) { @Override protected void processInput(Object input) { assertNotEquals(input, skippedInput); @@ -134,4 +136,59 @@ protected void processInput(Object input) { await.run(); assertBusy(() -> assertFalse(computation.isActive())); } + + public void testFailureHandling() { + final var input1 = new Object(); + final var input2 = new Object(); + + final var successCount = new AtomicInteger(); + final var failureCount = new AtomicInteger(); + + final var computation = new ContinuousComputation<>(r -> { + try { + r.run(); + successCount.incrementAndGet(); + } catch (AssertionError e) { + assertEquals("simulated", asInstanceOf(RuntimeException.class, e.getCause()).getMessage()); + failureCount.incrementAndGet(); + } + }) { + @Override + protected void processInput(Object input) { + if (input == input1) { + onNewInput(input2); + throw new RuntimeException("simulated"); + } + } + + @Override + public String toString() { + return "test computation"; + } + }; + + MockLogAppender.assertThatLogger( + () -> computation.onNewInput(input1), + ContinuousComputation.class, + new MockLogAppender.SeenEventExpectation( + "error log", + ContinuousComputation.class.getCanonicalName(), + Level.ERROR, + "unexpected error processing [test computation]" + ) + ); + + // check that both inputs were processed + assertEquals(1, failureCount.get()); + assertEquals(1, successCount.get()); + + // check that the computation still accepts and processes new inputs + computation.onNewInput(input2); + assertEquals(1, failureCount.get()); + assertEquals(2, successCount.get()); + + computation.onNewInput(input1); + assertEquals(2, failureCount.get()); + assertEquals(3, successCount.get()); + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java 
b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java index c1c83b8a2d90e..b4eba769543b8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java @@ -15,12 +15,11 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.DiskUsage; +import org.elasticsearch.cluster.ESAllocationTestCase; import org.elasticsearch.cluster.TestShardRoutingRoleStrategies; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; -import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.IndexRoutingTable; @@ -45,7 +44,6 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.threadpool.ThreadPool; @@ -60,6 +58,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Function; import static java.util.stream.Collectors.toMap; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; @@ -78,7 +77,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class DesiredBalanceComputerTests extends ESTestCase { +public class DesiredBalanceComputerTests extends ESAllocationTestCase { static final String TEST_INDEX = "test-index"; @@ -388,33 +387,29 @@ public void testRespectsAssignmentByGatewayAllocators() { public void testSimulatesAchievingDesiredBalanceBeforeDelegating() { var allocateCalled = new AtomicBoolean(); - var desiredBalanceComputer = new DesiredBalanceComputer( - createBuiltInClusterSettings(), - mock(ThreadPool.class), - new ShardsAllocator() { - @Override - public void allocate(RoutingAllocation allocation) { - assertTrue(allocateCalled.compareAndSet(false, true)); - // whatever the allocation in the current cluster state, the desired balance service should start by moving all the - // known shards to their desired locations before delegating to the inner allocator - for (var routingNode : allocation.routingNodes()) { - assertThat( - allocation.routingNodes().toString(), - routingNode.numberOfOwningShards(), - equalTo(routingNode.nodeId().equals("node-2") ? 
0 : 2) - ); - for (var shardRouting : routingNode) { - assertTrue(shardRouting.toString(), shardRouting.started()); - } + var desiredBalanceComputer = createDesiredBalanceComputer(new ShardsAllocator() { + @Override + public void allocate(RoutingAllocation allocation) { + assertTrue(allocateCalled.compareAndSet(false, true)); + // whatever the allocation in the current cluster state, the desired balance service should start by moving all the + // known shards to their desired locations before delegating to the inner allocator + for (var routingNode : allocation.routingNodes()) { + assertThat( + allocation.routingNodes().toString(), + routingNode.numberOfOwningShards(), + equalTo(routingNode.nodeId().equals("node-2") ? 0 : 2) + ); + for (var shardRouting : routingNode) { + assertTrue(shardRouting.toString(), shardRouting.started()); } } + } - @Override - public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) { - throw new AssertionError("only used for allocation explain"); - } + @Override + public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) { + throw new AssertionError("only used for allocation explain"); } - ); + }); var clusterState = createInitialClusterState(3); var index = clusterState.metadata().index(TEST_INDEX).getIndex(); @@ -584,7 +579,7 @@ public void testDesiredBalanceShouldConvergeInABigCluster() { for (int node = 0; node < nodes; node++) { var nodeId = "node-" + node; nodeIds.add(nodeId); - discoveryNodesBuilder.add(createDiscoveryNode(nodeId, DiscoveryNodeRole.roles())); + discoveryNodesBuilder.add(newNode(nodeId)); usedDiskSpace.put(nodeId, 0L); } @@ -692,11 +687,12 @@ public void testDesiredBalanceShouldConvergeInABigCluster() { var settings = Settings.EMPTY; var input = new DesiredBalanceInput(randomInt(), routingAllocationWithDecidersOf(clusterState, clusterInfo, settings), List.of()); - var desiredBalance = new DesiredBalanceComputer( - createBuiltInClusterSettings(), - mock(ThreadPool.class), - new BalancedShardsAllocator(settings) - ).compute(DesiredBalance.INITIAL, input, queue(), ignored -> iteration.incrementAndGet() < 1000); + var desiredBalance = createDesiredBalanceComputer(new BalancedShardsAllocator(settings)).compute( + DesiredBalance.INITIAL, + input, + queue(), + ignored -> iteration.incrementAndGet() < 1000 + ); var desiredDiskUsage = Maps.newMapWithExpectedSize(nodes); for (var assignment : desiredBalance.assignments().entrySet()) { @@ -736,10 +732,7 @@ private String pickAndRemoveRandomValueFrom(List values) { public void testComputeConsideringShardSizes() { - var discoveryNodesBuilder = DiscoveryNodes.builder() - .add(createDiscoveryNode("node-0", DiscoveryNodeRole.roles())) - .add(createDiscoveryNode("node-1", DiscoveryNodeRole.roles())) - .add(createDiscoveryNode("node-2", DiscoveryNodeRole.roles())); + var discoveryNodesBuilder = DiscoveryNodes.builder().add(newNode("node-0")).add(newNode("node-1")).add(newNode("node-2")); var metadataBuilder = Metadata.builder(); var routingTableBuilder = RoutingTable.builder(); @@ -751,7 +744,7 @@ public void testComputeConsideringShardSizes() { metadataBuilder.put( IndexMetadata.builder(indexName) - .settings(indexSettings(IndexVersion.current(), 1, 1).put("index.routing.allocation.exclude._name", "node-2")) + .settings(indexSettings(IndexVersion.current(), 1, 1).put("index.routing.allocation.exclude._id", "node-2")) ); var indexId = metadataBuilder.get(indexName).getIndex(); @@ -784,7 +777,7 @@ public void 
testComputeConsideringShardSizes() { metadataBuilder.put( IndexMetadata.builder(indexName) - .settings(indexSettings(IndexVersion.current(), 1, 0).put("index.routing.allocation.exclude._name", "node-2")) + .settings(indexSettings(IndexVersion.current(), 1, 0).put("index.routing.allocation.exclude._id", "node-2")) ); var indexId = metadataBuilder.get(indexName).getIndex(); @@ -807,9 +800,8 @@ public void testComputeConsideringShardSizes() { var node1Usage = new DiskUsage("node-1", "node-1", "/data", 1000, 100); var node2Usage = new DiskUsage("node-2", "node-2", "/data", 1000, 1000); - var clusterInfo = new ClusterInfo( - Map.of(node0Usage.nodeId(), node0Usage, node1Usage.nodeId(), node1Usage, node2Usage.getNodeId(), node2Usage), - Map.of(node0Usage.nodeId(), node0Usage, node1Usage.nodeId(), node1Usage, node2Usage.getNodeId(), node2Usage), + var clusterInfo = createClusterInfo( + List.of(node0Usage, node1Usage, node2Usage), Map.ofEntries( // node-0 & node-1 indexSize(clusterState, "index-0", 500, true), @@ -825,10 +817,7 @@ public void testComputeConsideringShardSizes() { indexSize(clusterState, "index-7", 50, true), indexSize(clusterState, "index-8", 50, true), indexSize(clusterState, "index-9", 50, true) - ), - Map.of(), - Map.of(), - Map.of() + ) ); var settings = Settings.builder() @@ -850,11 +839,7 @@ public void testComputeConsideringShardSizes() { ) ); - var desiredBalance = new DesiredBalanceComputer( - createBuiltInClusterSettings(), - mock(ThreadPool.class), - new BalancedShardsAllocator(settings) - ).compute( + var desiredBalance = createDesiredBalanceComputer(new BalancedShardsAllocator(settings)).compute( initial, new DesiredBalanceInput(randomInt(), routingAllocationWithDecidersOf(clusterState, clusterInfo, settings), List.of()), queue(), @@ -872,6 +857,11 @@ public void testComputeConsideringShardSizes() { assertThat(resultDiskUsage, allOf(aMapWithSize(2), hasEntry("node-0", 950L), hasEntry("node-1", 850L))); } + private static ClusterInfo createClusterInfo(List diskUsages, Map shardSizes) { + var diskUsage = diskUsages.stream().collect(toMap(DiskUsage::getNodeId, Function.identity())); + return new ClusterInfo(diskUsage, diskUsage, shardSizes, Map.of(), Map.of(), Map.of()); + } + public void testShouldLogComputationIteration() { checkIterationLogging( 999, @@ -961,9 +951,9 @@ private static ShardId findShardId(ClusterState clusterState, String name) { } static ClusterState createInitialClusterState(int dataNodesCount) { - var discoveryNodes = DiscoveryNodes.builder().add(createDiscoveryNode("master", Set.of(DiscoveryNodeRole.MASTER_ROLE))); + var discoveryNodes = DiscoveryNodes.builder().add(newNode("master", Set.of(DiscoveryNodeRole.MASTER_ROLE))); for (int i = 0; i < dataNodesCount; i++) { - discoveryNodes.add(createDiscoveryNode("node-" + i, Set.of(DiscoveryNodeRole.DATA_ROLE))); + discoveryNodes.add(newNode("node-" + i, Set.of(DiscoveryNodeRole.DATA_ROLE))); } var indexMetadata = IndexMetadata.builder(TEST_INDEX).settings(indexSettings(IndexVersion.current(), 2, 1)).build(); @@ -1019,15 +1009,11 @@ private static ShardRouting mutateAllocationStatus(ShardRouting shardRouting) { } } - private static DiscoveryNode createDiscoveryNode(String id, Set roles) { - return DiscoveryNodeUtils.builder(id).name(id).externalId(UUIDs.randomBase64UUID(random())).roles(roles).build(); - } - /** * @return a {@link DesiredBalanceComputer} which allocates unassigned primaries to node-0 and unassigned replicas to node-1 */ private static DesiredBalanceComputer createDesiredBalanceComputer() { 
- return new DesiredBalanceComputer(createBuiltInClusterSettings(), mock(ThreadPool.class), new ShardsAllocator() { + return createDesiredBalanceComputer(new ShardsAllocator() { @Override public void allocate(RoutingAllocation allocation) { final var unassignedIterator = allocation.routingNodes().unassigned().iterator(); @@ -1054,6 +1040,10 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing }); } + private static DesiredBalanceComputer createDesiredBalanceComputer(ShardsAllocator shardsAllocator) { + return new DesiredBalanceComputer(createBuiltInClusterSettings(), mock(ThreadPool.class), shardsAllocator); + } + private static void assertDesiredAssignments(DesiredBalance desiredBalance, Map expected) { assertThat(desiredBalance.assignments(), equalTo(expected)); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java index b67b4ef7e5a7f..1b3fa260db1fa 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java @@ -63,6 +63,7 @@ import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; import org.elasticsearch.snapshots.SnapshotsInfoService; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.threadpool.ThreadPool; @@ -1206,7 +1207,7 @@ public void testRebalanceDoesNotCauseHotSpots() { new ConcurrentRebalanceAllocationDecider(clusterSettings), new ThrottlingAllocationDecider(clusterSettings) }; - var reconciler = new DesiredBalanceReconciler(clusterSettings, mock(ThreadPool.class)); + var reconciler = new DesiredBalanceReconciler(clusterSettings, mock(ThreadPool.class), mock(MeterRegistry.class)); var totalOutgoingMoves = new HashMap(); for (int i = 0; i < numberOfNodes; i++) { @@ -1275,7 +1276,7 @@ public void testShouldLogOnTooManyUndesiredAllocations() { var threadPool = mock(ThreadPool.class); when(threadPool.relativeTimeInMillis()).thenReturn(1L).thenReturn(2L).thenReturn(3L); - var reconciler = new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool); + var reconciler = new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool, mock(MeterRegistry.class)); var expectedWarningMessage = "[100%] of assigned shards (" + shardCount @@ -1315,7 +1316,10 @@ public void testShouldLogOnTooManyUndesiredAllocations() { } private static void reconcile(RoutingAllocation routingAllocation, DesiredBalance desiredBalance) { - new DesiredBalanceReconciler(createBuiltInClusterSettings(), mock(ThreadPool.class)).reconcile(desiredBalance, routingAllocation); + new DesiredBalanceReconciler(createBuiltInClusterSettings(), mock(ThreadPool.class), mock(MeterRegistry.class)).reconcile( + desiredBalance, + routingAllocation + ); } private static boolean isReconciled(RoutingNode node, DesiredBalance balance) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java index add94e3b9344b..a4e5ccb7e6fa4 100644 --- 
a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java @@ -47,6 +47,7 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.threadpool.TestThreadPool; @@ -157,7 +158,8 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo createShardsAllocator(), threadPool, clusterService, - reconcileAction + reconcileAction, + TelemetryProvider.NOOP ); assertValidStats(desiredBalanceShardsAllocator.getStats()); var allocationService = createAllocationService(desiredBalanceShardsAllocator, createGatewayAllocator(allocateUnassigned)); @@ -277,7 +279,8 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo createShardsAllocator(), threadPool, clusterService, - reconcileAction + reconcileAction, + TelemetryProvider.NOOP ); var allocationService = new AllocationService( new AllocationDeciders(List.of()), @@ -369,7 +372,8 @@ public DesiredBalance compute( return super.compute(previousDesiredBalance, desiredBalanceInput, pendingDesiredBalanceMoves, isFresh); } }, - reconcileAction + reconcileAction, + TelemetryProvider.NOOP ); var allocationService = createAllocationService(desiredBalanceShardsAllocator, gatewayAllocator); allocationServiceRef.set(allocationService); @@ -471,7 +475,8 @@ public DesiredBalance compute( return super.compute(previousDesiredBalance, desiredBalanceInput, pendingDesiredBalanceMoves, isFresh); } }, - reconcileAction + reconcileAction, + TelemetryProvider.NOOP ); var allocationService = createAllocationService(desiredBalanceShardsAllocator, gatewayAllocator); @@ -561,7 +566,8 @@ public DesiredBalance compute( threadPool, clusterService, desiredBalanceComputer, - (reconcilerClusterState, rerouteStrategy) -> reconcilerClusterState + (reconcilerClusterState, rerouteStrategy) -> reconcilerClusterState, + TelemetryProvider.NOOP ); var service = createAllocationService(desiredBalanceShardsAllocator, createGatewayAllocator()); @@ -613,7 +619,8 @@ public void testResetDesiredBalanceOnNoLongerMaster() { threadPool, clusterService, desiredBalanceComputer, - (reconcilerClusterState, rerouteStrategy) -> reconcilerClusterState + (reconcilerClusterState, rerouteStrategy) -> reconcilerClusterState, + TelemetryProvider.NOOP ); var service = createAllocationService(desiredBalanceShardsAllocator, createGatewayAllocator()); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStatsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStatsTests.java index ccd0ec6c0fb7b..bc71093bdfe98 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStatsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStatsTests.java @@ -12,8 +12,6 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractWireSerializingTestCase; -import java.util.Locale; - import static org.hamcrest.Matchers.equalTo; public class DesiredBalanceStatsTests extends AbstractWireSerializingTestCase { @@ -25,6 +23,10 @@ protected Writeable.Reader 
instanceReader() { @Override protected DesiredBalanceStats createTestInstance() { + return randomDesiredBalanceStats(); + } + + public static DesiredBalanceStats randomDesiredBalanceStats() { return new DesiredBalanceStats( randomNonNegativeLong(), randomBoolean(), @@ -34,6 +36,9 @@ protected DesiredBalanceStats createTestInstance() { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), randomNonNegativeLong() ); } @@ -48,8 +53,7 @@ public void testToXContent() { assertThat( Strings.toString(instance, true, false), equalTo( - String.format( - Locale.ROOT, + Strings.format( """ { "computation_converged_index" : %d, @@ -60,9 +64,12 @@ public void testToXContent() { "computation_iterations" : %d, "computed_shard_movements" : %d, "computation_time_in_millis" : %d, - "reconciliation_time_in_millis" : %d + "reconciliation_time_in_millis" : %d, + "unassigned_shards" : %d, + "total_allocations" : %d, + "undesired_allocations" : %d, + "undesired_allocations_ratio" : %s }""", - instance.lastConvergedIndex(), instance.computationActive(), instance.computationSubmitted(), @@ -71,7 +78,11 @@ public void testToXContent() { instance.computationIterations(), instance.computedShardMovements(), instance.cumulativeComputationTime(), - instance.cumulativeReconciliationTime() + instance.cumulativeReconciliationTime(), + instance.unassignedShards(), + instance.totalAllocations(), + instance.undesiredAllocations(), + Double.toString(instance.undesiredAllocationsRatio()) ) ) ); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDeciderTests.java new file mode 100644 index 0000000000000..cc8a65d0d577f --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDeciderTests.java @@ -0,0 +1,246 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.routing.allocation.decider; + +import org.elasticsearch.cluster.ClusterInfo; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.ShardGeneration; +import org.elasticsearch.repositories.ShardSnapshotResult; +import org.elasticsearch.snapshots.Snapshot; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.snapshots.SnapshotShardSizeInfo; +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +public class SnapshotInProgressAllocationDeciderTests extends ESTestCase { + + private final SnapshotInProgressAllocationDecider decider = new SnapshotInProgressAllocationDecider(); + private final Index index = new Index(randomIdentifier(), randomUUID()); + private final ShardId shardId = new ShardId(index, 0); + private final String repositoryName = randomIdentifier(); + private final Snapshot snapshot = new Snapshot(repositoryName, new SnapshotId(randomIdentifier(), randomUUID())); + private final String nodeId = randomIdentifier(); + + public void testYesWhenSimulating() { + final var routingAllocation = new RoutingAllocation( + new AllocationDeciders(List.of(decider)), + ClusterState.EMPTY_STATE, + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + randomNonNegativeLong() + ).mutableCloneForSimulation(); + routingAllocation.setDebugMode(RoutingAllocation.DebugMode.ON); + + final var decision = decider.canAllocate( + TestShardRouting.newShardRouting(shardId, nodeId, true, ShardRoutingState.STARTED), + null, + routingAllocation + ); + + assertEquals(Decision.Type.YES, decision.type()); + assertEquals("allocation is always enabled when simulating", decision.getExplanation()); + } + + public void testYesWhenNotPrimary() { + final var routingAllocation = new RoutingAllocation( + new AllocationDeciders(List.of(decider)), + ClusterState.EMPTY_STATE, + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + randomNonNegativeLong() + ); + routingAllocation.setDebugMode(RoutingAllocation.DebugMode.ON); + + final var decision = decider.canAllocate( + TestShardRouting.newShardRouting(shardId, nodeId, false, ShardRoutingState.STARTED), + null, + routingAllocation + ); + + assertEquals(Decision.Type.YES, decision.type()); + assertEquals("the shard is not being snapshotted", decision.getExplanation()); + } + + public void testYesWhenNoSnapshots() { + final var routingAllocation = new RoutingAllocation( + new AllocationDeciders(List.of(decider)), + ClusterState.EMPTY_STATE, + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + randomNonNegativeLong() + ); + routingAllocation.setDebugMode(RoutingAllocation.DebugMode.ON); + + final var decision = decider.canAllocate( + TestShardRouting.newShardRouting(shardId, nodeId, true, ShardRoutingState.STARTED), + null, + routingAllocation + ); + + assertEquals(Decision.Type.YES, decision.type()); + assertEquals("no snapshots are currently running", decision.getExplanation()); + } + + public void 
testYesWhenNoShardSnapshot() { + final var routingAllocation = new RoutingAllocation( + new AllocationDeciders(List.of(decider)), + // snapshot in progress but not targeting this shard + makeClusterState(new ShardId(randomIdentifier(), randomUUID(), 0), randomFrom(SnapshotsInProgress.ShardState.values())), + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + randomNonNegativeLong() + ); + routingAllocation.setDebugMode(RoutingAllocation.DebugMode.ON); + + final var decision = decider.canAllocate( + TestShardRouting.newShardRouting(shardId, nodeId, true, ShardRoutingState.STARTED), + null, + routingAllocation + ); + + assertEquals(Decision.Type.YES, decision.type()); + assertEquals("the shard is not being snapshotted", decision.getExplanation()); + } + + public void testYesWhenShardSnapshotComplete() { + final var routingAllocation = new RoutingAllocation( + new AllocationDeciders(List.of(decider)), + // snapshot in progress but complete + makeClusterState( + shardId, + randomFrom( + Arrays.stream(SnapshotsInProgress.ShardState.values()).filter(SnapshotsInProgress.ShardState::completed).toList() + ) + ), + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + randomNonNegativeLong() + ); + routingAllocation.setDebugMode(RoutingAllocation.DebugMode.ON); + + final var decision = decider.canAllocate( + TestShardRouting.newShardRouting(shardId, nodeId, true, ShardRoutingState.STARTED), + null, + routingAllocation + ); + + assertEquals(Decision.Type.YES, decision.type()); + assertEquals("the shard is not being snapshotted", decision.getExplanation()); + } + + public void testYesWhenShardSnapshotOnDifferentNode() { + final var routingAllocation = new RoutingAllocation( + new AllocationDeciders(List.of(decider)), + makeClusterState(shardId, randomFrom(SnapshotsInProgress.ShardState.values())), + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + randomNonNegativeLong() + ); + routingAllocation.setDebugMode(RoutingAllocation.DebugMode.ON); + + final var decision = decider.canAllocate( + // shard on a different node from the snapshot in progress one + TestShardRouting.newShardRouting(shardId, randomIdentifier(), true, ShardRoutingState.STARTED), + null, + routingAllocation + ); + + assertEquals(Decision.Type.YES, decision.type()); + assertEquals("the shard is not being snapshotted", decision.getExplanation()); + } + + public void testThrottleWhenSnapshotInProgress() { + final var routingAllocation = new RoutingAllocation( + new AllocationDeciders(List.of(decider)), + makeClusterState(shardId, SnapshotsInProgress.ShardState.INIT), + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + randomNonNegativeLong() + ); + routingAllocation.setDebugMode(RoutingAllocation.DebugMode.ON); + + final var decision = decider.canAllocate( + TestShardRouting.newShardRouting(shardId, nodeId, true, ShardRoutingState.STARTED), + null, + routingAllocation + ); + + assertEquals(decision.getExplanation(), Decision.Type.THROTTLE, decision.type()); + assertEquals( + "waiting for snapshot [" + + SnapshotsInProgress.get(routingAllocation.getClusterState()).asStream().findFirst().orElseThrow().snapshot().toString() + + "] of shard [" + + shardId + + "] to complete on node [" + + nodeId + + "]", + decision.getExplanation() + ); + } + + private ClusterState makeClusterState(ShardId shardId, SnapshotsInProgress.ShardState shardState) { + return ClusterState.builder(ClusterName.DEFAULT) + .putCustom(SnapshotsInProgress.TYPE, makeSnapshotsInProgress(shardId, shardState)) + .build(); + } + + private SnapshotsInProgress
makeSnapshotsInProgress(ShardId snapshotShardId, SnapshotsInProgress.ShardState shardState) { + final SnapshotsInProgress.ShardSnapshotStatus shardSnapshotStatus; + if (shardState == SnapshotsInProgress.ShardState.SUCCESS) { + shardSnapshotStatus = SnapshotsInProgress.ShardSnapshotStatus.success( + nodeId, + new ShardSnapshotResult(ShardGeneration.newGeneration(random()), ByteSizeValue.ZERO, 1) + ); + } else if (shardState == SnapshotsInProgress.ShardState.QUEUED) { + shardSnapshotStatus = new SnapshotsInProgress.ShardSnapshotStatus(null, shardState, null); + } else if (shardState.failed()) { + shardSnapshotStatus = new SnapshotsInProgress.ShardSnapshotStatus( + nodeId, + shardState, + randomAlphaOfLength(10), + ShardGeneration.newGeneration(random()) + ); + } else { + shardSnapshotStatus = new SnapshotsInProgress.ShardSnapshotStatus(nodeId, shardState, ShardGeneration.newGeneration(random())); + } + return SnapshotsInProgress.EMPTY.withUpdatedEntriesForRepo( + repositoryName, + List.of( + SnapshotsInProgress.Entry.snapshot( + snapshot, + randomBoolean(), + randomBoolean(), + shardState.completed() ? SnapshotsInProgress.State.SUCCESS : SnapshotsInProgress.State.STARTED, + Map.of(snapshotShardId.getIndexName(), new IndexId(snapshotShardId.getIndexName(), randomUUID())), + List.of(), + List.of(), + randomNonNegativeLong(), + randomNonNegativeLong(), + Map.of(snapshotShardId, shardSnapshotStatus), + null, + Map.of(), + IndexVersion.current() + ) + ) + ); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceTests.java index 9ab44ec3fb047..bb7523661a0fa 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceTests.java @@ -85,13 +85,9 @@ import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_CHECK_ALLOCATION_EXPLAIN_API; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_CLUSTER_ROUTING_ALLOCATION; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_INDEX_ROUTING_ALLOCATION; -import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_TIERS_LOOKUP; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_NODE_CAPACITY; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING; -import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING; -import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP; -import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_TIER_CAPACITY_LOOKUP; import 
static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_INCLUDE_DATA; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_INCLUDE_DATA_LOOKUP; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_REQUIRE_DATA; @@ -1115,7 +1111,7 @@ public void testDiagnoseEnableDataTiers() { var service = createShardsAvailabilityIndicatorService(); // Get the list of user actions that are generated for this unassigned index shard - List actions = service.checkDataTierRelatedIssues( + List actions = service.checkNodeRoleRelatedIssues( indexMetadata, List.of( // Shard is not allowed due to data tier filter @@ -1125,11 +1121,12 @@ public void testDiagnoseEnableDataTiers() { 1 ) ), - ClusterState.EMPTY_STATE + ClusterState.EMPTY_STATE, + null ); assertThat(actions, hasSize(1)); - assertThat(actions, contains(ACTION_ENABLE_TIERS_LOOKUP.get(DataTier.DATA_HOT))); + assertThat(actions, contains(service.getAddNodesWithRoleAction(DataTier.DATA_HOT))); } public void testDiagnoseIncreaseShardLimitIndexSettingInTier() { @@ -1173,7 +1170,7 @@ public void testDiagnoseIncreaseShardLimitIndexSettingInTier() { var service = createShardsAvailabilityIndicatorService(); // Get the list of user actions that are generated for this unassigned index shard - List actions = service.checkDataTierRelatedIssues( + List actions = service.checkNodeRoleRelatedIssues( indexMetadata, List.of( new NodeAllocationResult( @@ -1184,11 +1181,12 @@ public void testDiagnoseIncreaseShardLimitIndexSettingInTier() { 1 ) ), - clusterState + clusterState, + null ); assertThat(actions, hasSize(1)); - assertThat(actions, contains(ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP.get(DataTier.DATA_HOT))); + assertThat(actions, contains(service.getIncreaseShardLimitIndexSettingAction(DataTier.DATA_HOT))); } public void testDiagnoseIncreaseShardLimitClusterSettingInTier() { @@ -1237,7 +1235,7 @@ public void testDiagnoseIncreaseShardLimitClusterSettingInTier() { ); // Get the list of user actions that are generated for this unassigned index shard - List actions = service.checkDataTierRelatedIssues( + List actions = service.checkNodeRoleRelatedIssues( indexMetadata, List.of( new NodeAllocationResult( @@ -1248,11 +1246,12 @@ public void testDiagnoseIncreaseShardLimitClusterSettingInTier() { 1 ) ), - clusterState + clusterState, + null ); assertThat(actions, hasSize(1)); - assertThat(actions, contains(ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP.get(DataTier.DATA_HOT))); + assertThat(actions, contains(service.getIncreaseShardLimitClusterSettingAction(DataTier.DATA_HOT))); } public void testDiagnoseIncreaseShardLimitIndexSettingInGeneral() { @@ -1296,7 +1295,7 @@ public void testDiagnoseIncreaseShardLimitIndexSettingInGeneral() { var service = createShardsAvailabilityIndicatorService(); // Get the list of user actions that are generated for this unassigned index shard - List actions = service.checkDataTierRelatedIssues( + List actions = service.checkNodeRoleRelatedIssues( indexMetadata, List.of( new NodeAllocationResult( @@ -1307,7 +1306,8 @@ public void testDiagnoseIncreaseShardLimitIndexSettingInGeneral() { 1 ) ), - clusterState + clusterState, + null ); assertThat(actions, hasSize(1)); @@ -1360,7 +1360,7 @@ public void testDiagnoseIncreaseShardLimitClusterSettingInGeneral() { ); // Get the list of user actions 
that are generated for this unassigned index shard - List actions = service.checkDataTierRelatedIssues( + List actions = service.checkNodeRoleRelatedIssues( indexMetadata, List.of( new NodeAllocationResult( @@ -1371,7 +1371,8 @@ public void testDiagnoseIncreaseShardLimitClusterSettingInGeneral() { 1 ) ), - clusterState + clusterState, + null ); assertThat(actions, hasSize(1)); @@ -1395,7 +1396,7 @@ public void testDiagnoseMigrateDataRequiredToDataTiers() { var service = createShardsAvailabilityIndicatorService(); // Get the list of user actions that are generated for this unassigned index shard - List actions = service.checkDataTierRelatedIssues( + List actions = service.checkNodeRoleRelatedIssues( indexMetadata, List.of( // Shard is allowed on data tier, but disallowed because of allocation filters @@ -1407,7 +1408,8 @@ public void testDiagnoseMigrateDataRequiredToDataTiers() { 1 ) ), - ClusterState.EMPTY_STATE + ClusterState.EMPTY_STATE, + null ); assertThat(actions, hasSize(1)); @@ -1431,7 +1433,7 @@ public void testDiagnoseMigrateDataIncludedToDataTiers() { var service = createShardsAvailabilityIndicatorService(); // Get the list of user actions that are generated for this unassigned index shard - List actions = service.checkDataTierRelatedIssues( + List actions = service.checkNodeRoleRelatedIssues( indexMetadata, List.of( // Shard is allowed on data tier, but disallowed because of allocation filters @@ -1443,7 +1445,8 @@ public void testDiagnoseMigrateDataIncludedToDataTiers() { 1 ) ), - ClusterState.EMPTY_STATE + ClusterState.EMPTY_STATE, + null ); assertThat(actions, hasSize(1)); @@ -1466,7 +1469,7 @@ public void testDiagnoseOtherFilteringIssue() { var service = createShardsAvailabilityIndicatorService(); // Get the list of user actions that are generated for this unassigned index shard - List actions = service.checkDataTierRelatedIssues( + List actions = service.checkNodeRoleRelatedIssues( indexMetadata, List.of( // Shard is allowed on data tier, but disallowed because of allocation filters @@ -1478,7 +1481,8 @@ public void testDiagnoseOtherFilteringIssue() { 1 ) ), - ClusterState.EMPTY_STATE + ClusterState.EMPTY_STATE, + null ); // checkDataTierRelatedIssues will leave list empty. Diagnosis methods upstream will add "Check allocation explain" action. @@ -1501,7 +1505,7 @@ public void testDiagnoseIncreaseTierCapacity() { var service = createShardsAvailabilityIndicatorService(); // Get the list of user actions that are generated for this unassigned index shard - List actions = service.checkDataTierRelatedIssues( + List actions = service.checkNodeRoleRelatedIssues( indexMetadata, List.of( // Shard is allowed on data tier, but disallowed because node is already hosting a copy of it. 
@@ -1517,11 +1521,12 @@ public void testDiagnoseIncreaseTierCapacity() { 1 ) ), - ClusterState.EMPTY_STATE + ClusterState.EMPTY_STATE, + null ); assertThat(actions, hasSize(1)); - assertThat(actions, contains(ACTION_INCREASE_TIER_CAPACITY_LOOKUP.get(DataTier.DATA_HOT))); + assertThat(actions, contains(service.getIncreaseNodeWithRoleCapacityAction(DataTier.DATA_HOT))); } public void testDiagnoseIncreaseNodeCapacity() { @@ -1540,7 +1545,7 @@ public void testDiagnoseIncreaseNodeCapacity() { var service = createShardsAvailabilityIndicatorService(); // Get the list of user actions that are generated for this unassigned index shard - List actions = service.checkDataTierRelatedIssues( + List actions = service.checkNodeRoleRelatedIssues( indexMetadata, List.of( // Shard is allowed on data tier, but disallowed because node is already hosting a copy of it. @@ -1556,7 +1561,8 @@ public void testDiagnoseIncreaseNodeCapacity() { 1 ) ), - ClusterState.EMPTY_STATE + ClusterState.EMPTY_STATE, + null ); assertThat(actions, hasSize(1)); @@ -1808,15 +1814,10 @@ private SystemIndices getSystemIndices( featureDataStreamName, "description", SystemDataStreamDescriptor.Type.EXTERNAL, - new ComposableIndexTemplate( - List.of(systemDataStreamPattern), - null, - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate() - ), + ComposableIndexTemplate.builder() + .indexPatterns(List.of(systemDataStreamPattern)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(), Map.of(), List.of("test"), new ExecutorNames( @@ -1879,17 +1880,22 @@ public void testMappedFieldsForTelemetry() { DIAGNOSIS_WAIT_FOR_INITIALIZATION.getUniqueId(), equalTo("elasticsearch:health:shards_availability:diagnosis:initializing_shards") ); + var service = new ShardsAvailabilityHealthIndicatorService( + mock(ClusterService.class), + mock(AllocationService.class), + mock(SystemIndices.class) + ); for (String tier : List.of("data_content", "data_hot", "data_warm", "data_cold", "data_frozen")) { assertThat( - ACTION_ENABLE_TIERS_LOOKUP.get(tier).getUniqueId(), + service.getAddNodesWithRoleAction(tier).getUniqueId(), equalTo("elasticsearch:health:shards_availability:diagnosis:enable_data_tiers:tier:" + tier) ); assertThat( - ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP.get(tier).getUniqueId(), + service.getIncreaseShardLimitIndexSettingAction(tier).getUniqueId(), equalTo("elasticsearch:health:shards_availability:diagnosis:increase_shard_limit_index_setting:tier:" + tier) ); assertThat( - ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP.get(tier).getUniqueId(), + service.getIncreaseShardLimitClusterSettingAction(tier).getUniqueId(), equalTo("elasticsearch:health:shards_availability:diagnosis:increase_shard_limit_cluster_setting:tier:" + tier) ); assertThat( @@ -1901,7 +1907,7 @@ public void testMappedFieldsForTelemetry() { equalTo("elasticsearch:health:shards_availability:diagnosis:migrate_data_tiers_include_data:tier:" + tier) ); assertThat( - ACTION_INCREASE_TIER_CAPACITY_LOOKUP.get(tier).getUniqueId(), + service.getIncreaseNodeWithRoleCapacityAction(tier).getUniqueId(), equalTo("elasticsearch:health:shards_availability:diagnosis:increase_tier_capacity_for_allocations:tier:" + tier) ); } diff --git a/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java b/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java index b7792c5f85207..d89183d93205f 100644 --- 
a/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.Scheduler; import org.mockito.ArgumentCaptor; @@ -49,8 +50,7 @@ public class TransportVersionsFixupListenerTests extends ESTestCase { - // TODO: replace with real constants when 8.8.0 is released - private static final Version NEXT_VERSION = Version.fromString("8.8.1"); + private static final Version NEXT_VERSION = Version.V_8_8_1; private static final TransportVersion NEXT_TRANSPORT_VERSION = TransportVersion.fromId(NEXT_VERSION.id); @SuppressWarnings("unchecked") @@ -83,7 +83,7 @@ private static NodesInfoResponse getResponse(Map respo .stream() .map( e -> new NodeInfo( - null, + "", e.getValue(), null, null, @@ -117,7 +117,13 @@ public void testNothingFixedWhenNothingToInfer() { .nodeIdsToCompatibilityVersions(versions(new CompatibilityVersions(TransportVersions.V_8_8_0, Map.of()))) .build(); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null, null); + TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( + taskQueue, + client, + new FeatureService(List.of(new TransportFeatures())), + null, + null + ); listeners.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); verify(taskQueue, never()).submitTask(anyString(), any(), any()); @@ -132,7 +138,13 @@ public void testNothingFixedWhenOnNextVersion() { .nodeIdsToCompatibilityVersions(versions(new CompatibilityVersions(NEXT_TRANSPORT_VERSION, Map.of()))) .build(); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null, null); + TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( + taskQueue, + client, + new FeatureService(List.of(new TransportFeatures())), + null, + null + ); listeners.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); verify(taskQueue, never()).submitTask(anyString(), any(), any()); @@ -152,7 +164,13 @@ public void testNothingFixedWhenOnPreviousVersion() { ) .build(); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null, null); + TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( + taskQueue, + client, + new FeatureService(List.of(new TransportFeatures())), + null, + null + ); listeners.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); verify(taskQueue, never()).submitTask(anyString(), any(), any()); @@ -176,7 +194,13 @@ public void testVersionsAreFixed() { ArgumentCaptor> action = ArgumentCaptor.forClass(ActionListener.class); ArgumentCaptor task = ArgumentCaptor.forClass(NodeTransportVersionTask.class); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null, null); + TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( + taskQueue, + client, + new FeatureService(List.of(new TransportFeatures())), + null, + null + ); listeners.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); 
verify(client).nodesInfo( argThat(transformedMatch(NodesInfoRequest::nodesIds, arrayContainingInAnyOrder("node1", "node2"))), @@ -202,7 +226,13 @@ public void testConcurrentChangesDoNotOverlap() { ) .build(); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null, null); + TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( + taskQueue, + client, + new FeatureService(List.of(new TransportFeatures())), + null, + null + ); listeners.clusterChanged(new ClusterChangedEvent("test", testState1, ClusterState.EMPTY_STATE)); verify(client).nodesInfo(argThat(transformedMatch(NodesInfoRequest::nodesIds, arrayContainingInAnyOrder("node1", "node2"))), any()); // don't send back the response yet @@ -241,7 +271,13 @@ public void testFailedRequestsAreRetried() { ArgumentCaptor> action = ArgumentCaptor.forClass(ActionListener.class); ArgumentCaptor retry = ArgumentCaptor.forClass(Runnable.class); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, scheduler, executor); + TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( + taskQueue, + client, + new FeatureService(List.of(new TransportFeatures())), + scheduler, + executor + ); listeners.clusterChanged(new ClusterChangedEvent("test", testState1, ClusterState.EMPTY_STATE)); verify(client, times(1)).nodesInfo(any(), action.capture()); // do response immediately diff --git a/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobContainerTests.java b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobContainerTests.java index bb4aefc0388e6..1f54046630cf8 100644 --- a/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobContainerTests.java +++ b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobContainerTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.blobstore.BlobPath; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.blobstore.OptionalBytesReference; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -47,6 +46,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -87,7 +87,7 @@ public void testReadBlobRangeCorrectlySkipBytes() throws IOException { final long start = randomLongBetween(0L, Math.max(0L, blobData.length - 1)); final long length = randomLongBetween(1L, blobData.length - start); - try (InputStream stream = container.readBlob(OperationPurpose.SNAPSHOT, blobName, start, length)) { + try (InputStream stream = container.readBlob(randomPurpose(), blobName, start, length)) { assertThat(totalBytesRead.get(), equalTo(0L)); assertThat(Streams.consumeFully(stream), equalTo(length)); assertThat(totalBytesRead.get(), equalTo(length)); @@ -119,11 +119,11 @@ public void testDeleteIgnoringIfNotExistsDoesNotThrowFileNotFound() throws IOExc path ); - container.deleteBlobsIgnoringIfNotExists(OperationPurpose.SNAPSHOT, List.of(blobName).listIterator()); + container.deleteBlobsIgnoringIfNotExists(randomPurpose(), List.of(blobName).listIterator()); // Should not throw exception - 
container.deleteBlobsIgnoringIfNotExists(OperationPurpose.SNAPSHOT, List.of(blobName).listIterator()); + container.deleteBlobsIgnoringIfNotExists(randomPurpose(), List.of(blobName).listIterator()); - assertFalse(container.blobExists(OperationPurpose.SNAPSHOT, blobName)); + assertFalse(container.blobExists(randomPurpose(), blobName)); } private static BytesReference getBytesAsync(Consumer> consumer) { @@ -150,11 +150,11 @@ public void testCompareAndExchange() throws Exception { for (int i = 0; i < 5; i++) { switch (between(1, 4)) { - case 1 -> assertEquals(expectedValue.get(), getBytesAsync(l -> container.getRegister(OperationPurpose.SNAPSHOT, key, l))); + case 1 -> assertEquals(expectedValue.get(), getBytesAsync(l -> container.getRegister(randomPurpose(), key, l))); case 2 -> assertFalse( getAsync( l -> container.compareAndSetRegister( - OperationPurpose.SNAPSHOT, + randomPurpose(), key, randomValueOtherThan(expectedValue.get(), () -> new BytesArray(randomByteArrayOfLength(8))), new BytesArray(randomByteArrayOfLength(8)), @@ -166,7 +166,7 @@ public void testCompareAndExchange() throws Exception { expectedValue.get(), getBytesAsync( l -> container.compareAndExchangeRegister( - OperationPurpose.SNAPSHOT, + randomPurpose(), key, randomValueOtherThan(expectedValue.get(), () -> new BytesArray(randomByteArrayOfLength(8))), new BytesArray(randomByteArrayOfLength(8)), @@ -181,26 +181,20 @@ public void testCompareAndExchange() throws Exception { final var newValue = new BytesArray(randomByteArrayOfLength(8)); if (randomBoolean()) { - assertTrue( - getAsync(l -> container.compareAndSetRegister(OperationPurpose.SNAPSHOT, key, expectedValue.get(), newValue, l)) - ); + assertTrue(getAsync(l -> container.compareAndSetRegister(randomPurpose(), key, expectedValue.get(), newValue, l))); } else { assertEquals( expectedValue.get(), - getBytesAsync( - l -> container.compareAndExchangeRegister(OperationPurpose.SNAPSHOT, key, expectedValue.get(), newValue, l) - ) + getBytesAsync(l -> container.compareAndExchangeRegister(randomPurpose(), key, expectedValue.get(), newValue, l)) ); } expectedValue.set(newValue); } - container.writeBlob(OperationPurpose.SNAPSHOT, key, new BytesArray(new byte[17]), false); + container.writeBlob(randomPurpose(), key, new BytesArray(new byte[17]), false); expectThrows( IllegalStateException.class, - () -> getBytesAsync( - l -> container.compareAndExchangeRegister(OperationPurpose.SNAPSHOT, key, expectedValue.get(), BytesArray.EMPTY, l) - ) + () -> getBytesAsync(l -> container.compareAndExchangeRegister(randomPurpose(), key, expectedValue.get(), BytesArray.EMPTY, l)) ); } @@ -234,25 +228,20 @@ private static void checkAtomicWrite() throws IOException { BlobPath.EMPTY, path ); - container.writeBlobAtomic( - OperationPurpose.SNAPSHOT, - blobName, - new BytesArray(randomByteArrayOfLength(randomIntBetween(1, 512))), - true - ); + container.writeBlobAtomic(randomPurpose(), blobName, new BytesArray(randomByteArrayOfLength(randomIntBetween(1, 512))), true); final var blobData = new BytesArray(randomByteArrayOfLength(randomIntBetween(1, 512))); - container.writeBlobAtomic(OperationPurpose.SNAPSHOT, blobName, blobData, false); - assertEquals(blobData, Streams.readFully(container.readBlob(OperationPurpose.SNAPSHOT, blobName))); + container.writeBlobAtomic(randomPurpose(), blobName, blobData, false); + assertEquals(blobData, Streams.readFully(container.readBlob(randomPurpose(), blobName))); expectThrows( FileAlreadyExistsException.class, () -> container.writeBlobAtomic( - 
OperationPurpose.SNAPSHOT, + randomPurpose(), blobName, new BytesArray(randomByteArrayOfLength(randomIntBetween(1, 512))), true ) ); - for (String blob : container.listBlobs(OperationPurpose.SNAPSHOT).keySet()) { + for (String blob : container.listBlobs(randomPurpose()).keySet()) { assertFalse("unexpected temp blob [" + blob + "]", FsBlobContainer.isTempBlobName(blob)); } } diff --git a/server/src/test/java/org/elasticsearch/common/breaker/PreallocatedCircuitBreakerServiceTests.java b/server/src/test/java/org/elasticsearch/common/breaker/PreallocatedCircuitBreakerServiceTests.java index fce02ee5a7d5f..943ac8786644e 100644 --- a/server/src/test/java/org/elasticsearch/common/breaker/PreallocatedCircuitBreakerServiceTests.java +++ b/server/src/test/java/org/elasticsearch/common/breaker/PreallocatedCircuitBreakerServiceTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.indices.breaker.CircuitBreakerMetrics; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.test.ESTestCase; @@ -121,6 +122,7 @@ public void testRandom() { private HierarchyCircuitBreakerService real() { return new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, Settings.builder() // Pin the limit to something that'll totally fit in the heap we use for the tests .put(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "100mb") diff --git a/server/src/test/java/org/elasticsearch/common/unit/ProcessorsTests.java b/server/src/test/java/org/elasticsearch/common/unit/ProcessorsTests.java index 1abc187e1640a..1b47ed1134914 100644 --- a/server/src/test/java/org/elasticsearch/common/unit/ProcessorsTests.java +++ b/server/src/test/java/org/elasticsearch/common/unit/ProcessorsTests.java @@ -75,6 +75,7 @@ public void testNeverRoundsDownToZero() { public void testValidation() { expectThrows(IllegalArgumentException.class, () -> Processors.of(-1.0)); + expectThrows(IllegalArgumentException.class, () -> Processors.of(0.0)); expectThrows(IllegalArgumentException.class, () -> Processors.of(Double.POSITIVE_INFINITY)); expectThrows(IllegalArgumentException.class, () -> Processors.of(Double.NEGATIVE_INFINITY)); expectThrows(IllegalArgumentException.class, () -> Processors.of(Double.NaN)); diff --git a/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java b/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java index 0ada1f67d1045..3372aa9bc685b 100644 --- a/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.core.Releasables; +import org.elasticsearch.indices.breaker.CircuitBreakerMetrics; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; @@ -359,6 +360,7 @@ public void testMaxSizeExceededOnResize() throws Exception { for (String type : Arrays.asList("Byte", "Int", "Long", "Float", "Double", "Object")) { final int maxSize = randomIntBetween(1 << 8, 1 << 14); HierarchyCircuitBreakerService hcbs = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, Settings.builder() 
.put(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), maxSize, ByteSizeUnit.BYTES) .put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), false) @@ -411,7 +413,14 @@ public void testOverSizeUsesMinPageCount() { */ public void testPreallocate() { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - try (HierarchyCircuitBreakerService realBreakers = new HierarchyCircuitBreakerService(Settings.EMPTY, List.of(), clusterSettings)) { + try ( + HierarchyCircuitBreakerService realBreakers = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, + Settings.EMPTY, + List.of(), + clusterSettings + ) + ) { BigArrays unPreAllocated = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), realBreakers); long toPreallocate = randomLongBetween(4000, 10000); CircuitBreaker realBreaker = realBreakers.getBreaker(CircuitBreaker.REQUEST); @@ -491,6 +500,7 @@ private List bigArrayCreators(final long maxSize, final boolean private BigArrays newBigArraysInstance(final long maxSize, final boolean withBreaking) { HierarchyCircuitBreakerService hcbs = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, Settings.builder() .put(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), maxSize, ByteSizeUnit.BYTES) .put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), false) diff --git a/server/src/test/java/org/elasticsearch/common/util/BytesRefArrayTests.java b/server/src/test/java/org/elasticsearch/common/util/BytesRefArrayTests.java index 0ca6bf86ceec7..e7868f442efd0 100644 --- a/server/src/test/java/org/elasticsearch/common/util/BytesRefArrayTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/BytesRefArrayTests.java @@ -17,6 +17,9 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.stream.IntStream; import static org.hamcrest.Matchers.equalTo; @@ -115,6 +118,64 @@ public void testLookup() throws IOException { } } + public void testReadWritten() { + testReadWritten(false); + } + + public void testReadWrittenHalfEmpty() { + testReadWritten(true); + } + + private void testReadWritten(boolean halfEmpty) { + List values = new ArrayList<>(); + int bytes = PageCacheRecycler.PAGE_SIZE_IN_BYTES * between(2, 20); + int used = 0; + while (used < bytes) { + String str = halfEmpty && randomBoolean() ? "" : randomAlphaOfLengthBetween(0, 200); + BytesRef v = new BytesRef(str); + used += v.length; + values.add(v); + } + testReadWritten(values, randomBoolean() ? bytes : between(0, bytes)); + } + + public void testReadWrittenRepeated() { + testReadWrittenRepeated(false, between(2, 3000)); + } + + public void testReadWrittenRepeatedPowerOfTwo() { + testReadWrittenRepeated(false, 1024); + } + + public void testReadWrittenRepeatedHalfEmpty() { + testReadWrittenRepeated(true, between(1, 3000)); + } + + public void testReadWrittenRepeatedHalfEmptyPowerOfTwo() { + testReadWrittenRepeated(true, 1024); + } + + public void testReadWrittenRepeated(boolean halfEmpty, int listSize) { + List values = randomList(2, 10, () -> { + String str = halfEmpty && randomBoolean() ? 
"" : randomAlphaOfLengthBetween(0, 10); + return new BytesRef(str); + }); + testReadWritten(IntStream.range(0, listSize).mapToObj(i -> values).flatMap(List::stream).toList(), 10); + } + + private void testReadWritten(List values, int initialCapacity) { + try (BytesRefArray array = new BytesRefArray(initialCapacity, mockBigArrays())) { + for (BytesRef v : values) { + array.append(v); + } + BytesRef scratch = new BytesRef(); + for (int i = 0; i < values.size(); i++) { + array.get(i, scratch); + assertThat(scratch, equalTo(values.get(i))); + } + } + } + private static BigArrays mockBigArrays() { return new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); } diff --git a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java index da1f2aa89642b..6061ed31d898e 100644 --- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.gateway.GatewayMetaState; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.plugins.ClusterCoordinationPlugin; @@ -43,7 +44,6 @@ import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; import java.util.function.Supplier; @@ -117,7 +117,7 @@ private DiscoveryModule newModule( null, new NoneCircuitBreakerService(), CompatibilityVersionsUtils.staticCurrent(), - Set.of() + new FeatureService(List.of()) ); } diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index 112f96562b7cd..9fd048cd4d2a7 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -42,9 +42,11 @@ import org.elasticsearch.test.NodeRoles; import org.elasticsearch.test.junit.annotations.TestLogging; import org.hamcrest.Matchers; +import org.junit.AssumptionViolatedException; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.nio.file.FileSystemException; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; @@ -640,7 +642,16 @@ public void testSymlinkDataDirectory() throws Exception { Path dataPath = tempDir.resolve("data"); Files.createDirectories(dataPath); Path symLinkPath = tempDir.resolve("data_symlink"); - Files.createSymbolicLink(symLinkPath, dataPath); + try { + Files.createSymbolicLink(symLinkPath, dataPath); + } catch (FileSystemException e) { + if (IOUtils.WINDOWS && e.getMessage().equals("A required privilege is not held by the client")) { + throw new AssumptionViolatedException("Symlinks on windows needs admin privileges", e); + } else { + throw e; + } + } + NodeEnvironment env = newNodeEnvironment(new String[] { symLinkPath.toString() }, "/tmp", Settings.EMPTY); assertTrue(Files.exists(symLinkPath)); diff --git a/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java b/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java index 0a799934ae64e..26d880d0a5d8e 100644 
--- a/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java +++ b/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java @@ -75,7 +75,7 @@ public void testFailsDuplicateFeatures() { public void testFailsNonHistoricalVersion() { FeatureSpecification fs = new TestFeatureSpecification( Set.of(), - Map.of(new NodeFeature("f1"), FeatureService.CLUSTER_FEATURES_ADDED_VERSION) + Map.of(new NodeFeature("f1"), Version.fromId(FeatureService.CLUSTER_FEATURES_ADDED_VERSION.id + 1)) ); assertThat( @@ -93,7 +93,7 @@ public void testGetNodeFeaturesCombinesAllSpecs() { ); FeatureService service = new FeatureService(specs); - assertThat(service.getNodeFeatures(), containsInAnyOrder("f1", "f2", "f3", "f4", "f5")); + assertThat(service.getNodeFeatures().keySet(), containsInAnyOrder("f1", "f2", "f3", "f4", "f5")); } public void testStateHasFeatures() { diff --git a/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java b/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java index ed18c1b0dc3d5..4203a984a8f07 100644 --- a/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java +++ b/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java @@ -106,7 +106,7 @@ public void testConvertToLoggedFields() { // test indicator status assertThat(loggerResults.get(makeHealthStatusString("network_latency")), equalTo("green")); assertThat(loggerResults.get(makeHealthStatusString("slow_task_assignment")), equalTo("yellow")); - assertThat(loggerResults.get(makeHealthStatusString("shards_availability")), equalTo("green")); + assertThat(loggerResults.get(makeHealthStatusString("shards_availability")), equalTo("yellow")); // test calculated overall status assertThat(loggerResults.get(makeHealthStatusString("overall")), equalTo(overallStatus.xContentValue())); @@ -114,7 +114,7 @@ public void testConvertToLoggedFields() { // test calculated message assertThat( loggerResults.get(HealthPeriodicLogger.MESSAGE_FIELD), - equalTo(String.format(Locale.ROOT, "health=%s", overallStatus.xContentValue())) + equalTo(String.format(Locale.ROOT, "health=%s [shards_availability,slow_task_assignment]", overallStatus.xContentValue())) ); // test empty results @@ -124,6 +124,19 @@ public void testConvertToLoggedFields() { assertThat(emptyResults.size(), equalTo(0)); } + + // test all-green results + { + results = getTestIndicatorResultsAllGreen(); + loggerResults = HealthPeriodicLogger.convertToLoggedFields(results); + overallStatus = HealthStatus.merge(results.stream().map(HealthIndicatorResult::status)); + + // test calculated message + assertThat( + loggerResults.get(HealthPeriodicLogger.MESSAGE_FIELD), + equalTo(String.format(Locale.ROOT, "health=%s", overallStatus.xContentValue())) + ); + } } public void testHealthNodeIsSelected() { @@ -432,6 +445,14 @@ public void testLoggingHappens() { private List getTestIndicatorResults() { var networkLatency = new HealthIndicatorResult("network_latency", GREEN, null, null, null, null); var slowTasks = new HealthIndicatorResult("slow_task_assignment", YELLOW, null, null, null, null); + var shardsAvailable = new HealthIndicatorResult("shards_availability", YELLOW, null, null, null, null); + + return List.of(networkLatency, slowTasks, shardsAvailable); + } + + private List getTestIndicatorResultsAllGreen() { + var networkLatency = new HealthIndicatorResult("network_latency", GREEN, null, null, null, null); + var slowTasks = new HealthIndicatorResult("slow_task_assignment", GREEN, null, 
null, null, null); var shardsAvailable = new HealthIndicatorResult("shards_availability", GREEN, null, null, null, null); return List.of(networkLatency, slowTasks, shardsAvailable); @@ -446,8 +467,7 @@ private HealthPeriodicLogger createAndInitHealthPeriodicLogger( HealthService testHealthService, boolean enabled ) { - testHealthPeriodicLogger = new HealthPeriodicLogger(Settings.EMPTY, clusterService, this.client, testHealthService); - testHealthPeriodicLogger.init(); + testHealthPeriodicLogger = HealthPeriodicLogger.create(Settings.EMPTY, clusterService, this.client, testHealthService); if (enabled) { clusterSettings.applySettings(Settings.builder().put(HealthPeriodicLogger.ENABLED_SETTING.getKey(), true).build()); } diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 719fa5748fff6..f5ff1980c26fd 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -701,7 +701,7 @@ public void onIndexCommitDelete(ShardId shardId, IndexCommit deletedCommit) { closeables.add(() -> indexShard.close("close shard at end of test", true)); indexShard.markAsRecovering("test", new RecoveryState(shardRouting, DiscoveryNodeUtils.create("_node_id", "_node_id"), null)); - final PlainActionFuture recoveryFuture = PlainActionFuture.newFuture(); + final PlainActionFuture recoveryFuture = new PlainActionFuture<>(); indexShard.recoverFromStore(recoveryFuture); recoveryFuture.get(); diff --git a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java index eb034778be63d..d2304908a933b 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java @@ -275,7 +275,7 @@ public void testRefreshActuallyWorks() throws Exception { assertEquals(1000, refreshTask.getInterval().millis()); assertTrue(indexService.getRefreshTask().mustReschedule()); IndexShard shard = indexService.getShard(0); - client().prepareIndex("test").setId("0").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); + prepareIndex("test").setId("0").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); // now disable the refresh indicesAdmin().prepareUpdateSettings("test") .setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)) @@ -294,7 +294,7 @@ public void testRefreshActuallyWorks() throws Exception { }); assertFalse(refreshTask.isClosed()); // refresh every millisecond - client().prepareIndex("test").setId("1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); + prepareIndex("test").setId("1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); indicesAdmin().prepareUpdateSettings("test") .setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1ms")) .get(); @@ -306,7 +306,7 @@ public void testRefreshActuallyWorks() throws Exception { assertEquals(2, search.totalHits.value); } }); - client().prepareIndex("test").setId("2").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); + prepareIndex("test").setId("2").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); assertBusy(() -> { // this one becomes visible due to the scheduled refresh try (Engine.Searcher searcher = shard.acquireSearcher("test")) { @@ -324,7 +324,7 @@ public void testAsyncFsyncActuallyWorks() throws Exception { 
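Note: the IndexServiceTests hunks in this file (and several later files in the patch) shorten client().prepareIndex("test") to a bare prepareIndex("test"), presumably a convenience now provided by the shared test base class. A hedged sketch of what such a wrapper looks like; the body is assumed from the call sites, not taken from the patch:

    import org.elasticsearch.action.index.IndexRequestBuilder;
    import org.elasticsearch.client.internal.Client;

    abstract class TestBaseSketch {
        protected abstract Client client();

        // Forwards to the node client so call sites can drop the client() prefix.
        protected IndexRequestBuilder prepareIndex(String index) {
            return client().prepareIndex(index);
        }
    }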
IndexService indexService = createIndex("test", settings); ensureGreen("test"); assertTrue(indexService.getRefreshTask().mustReschedule()); - client().prepareIndex("test").setId("1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); + prepareIndex("test").setId("1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); IndexShard shard = indexService.getShard(0); assertBusy(() -> assertFalse(shard.isSyncNeeded())); } @@ -344,7 +344,7 @@ public void testRescheduleAsyncFsync() throws Exception { assertNotNull(indexService.getFsyncTask()); assertTrue(indexService.getFsyncTask().mustReschedule()); - client().prepareIndex("test").setId("1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); + prepareIndex("test").setId("1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); assertNotNull(indexService.getFsyncTask()); final IndexShard shard = indexService.getShard(0); assertBusy(() -> assertFalse(shard.isSyncNeeded())); @@ -367,7 +367,7 @@ public void testAsyncTranslogTrimActuallyWorks() throws Exception { IndexService indexService = createIndex("test", settings); ensureGreen("test"); assertTrue(indexService.getTrimTranslogTask().mustReschedule()); - client().prepareIndex("test").setId("1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); + prepareIndex("test").setId("1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); indicesAdmin().prepareFlush("test").get(); IndexShard shard = indexService.getShard(0); assertBusy(() -> assertThat(IndexShardTestCase.getTranslog(shard).totalOperations(), equalTo(0))); @@ -384,7 +384,7 @@ public void testAsyncTranslogTrimTaskOnClosedIndex() throws Exception { int translogOps = 0; final int numDocs = scaledRandomIntBetween(10, 100); for (int i = 0; i < numDocs; i++) { - client().prepareIndex().setIndex(indexName).setId(String.valueOf(i)).setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); + prepareIndex(indexName).setId(String.valueOf(i)).setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); translogOps++; if (randomBoolean()) { indicesAdmin().prepareFlush(indexName).get(); @@ -449,14 +449,14 @@ public void testUpdateSyncIntervalDynamically() { assertNotNull(indexService.getFsyncTask()); assertTrue(indexService.getFsyncTask().mustReschedule()); - IndexMetadata indexMetadata = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test"); + IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertEquals("5s", indexMetadata.getSettings().get(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey())); indicesAdmin().prepareClose("test").get(); indicesAdmin().prepareUpdateSettings("test") .setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), "20s")) .get(); - indexMetadata = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test"); + indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertEquals("20s", indexMetadata.getSettings().get(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey())); } } diff --git a/server/src/test/java/org/elasticsearch/index/MergePolicyConfigTests.java b/server/src/test/java/org/elasticsearch/index/MergePolicyConfigTests.java index dbad9dd1cbdb5..7748208fcce32 100644 --- a/server/src/test/java/org/elasticsearch/index/MergePolicyConfigTests.java +++ b/server/src/test/java/org/elasticsearch/index/MergePolicyConfigTests.java @@ -60,7 +60,11 @@ private void assertCompoundThreshold(Settings 
settings, double noCFSRatio, ByteS } private static IndexSettings indexSettings(Settings settings) { - return new IndexSettings(newIndexMeta("test", settings), Settings.EMPTY); + return indexSettings(settings, Settings.EMPTY); + } + + private static IndexSettings indexSettings(Settings indexSettings, Settings nodeSettings) { + return new IndexSettings(newIndexMeta("test", indexSettings), nodeSettings); } public void testNoMerges() { @@ -118,7 +122,7 @@ public void testUpdateSettings() throws IOException { assertThat(indexSettings.getMergePolicy(randomBoolean()), Matchers.instanceOf(LogByteSizeMergePolicy.class)); } - public void testTieredMergePolicySettingsUpdate() throws IOException { + public void testTieredMergePolicySettingsUpdate() { IndexSettings indexSettings = indexSettings(Settings.EMPTY); assertEquals( ((TieredMergePolicy) indexSettings.getMergePolicy(false)).getForceMergeDeletesPctAllowed(), @@ -353,10 +357,6 @@ public Settings build(boolean value) { return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } - private Settings build(ByteSizeValue value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); - } - public void testCompoundFileConfiguredByByteSize() throws IOException { for (boolean isTimeSeriesIndex : new boolean[] { false, true }) { try (Directory dir = newDirectory()) { @@ -394,4 +394,38 @@ public void testCompoundFileConfiguredByByteSize() throws IOException { } } } + + public void testDefaultMaxMergedSegment() { + var indexSettings = indexSettings(Settings.EMPTY); + { + TieredMergePolicy tieredPolicy = (TieredMergePolicy) new MergePolicyConfig(logger, indexSettings).getMergePolicy(false); + assertEquals(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), tieredPolicy.getMaxMergedSegmentMB(), 0.0d); + } + { + LogByteSizeMergePolicy timePolicy = (LogByteSizeMergePolicy) new MergePolicyConfig(logger, indexSettings).getMergePolicy(true); + assertEquals(MergePolicyConfig.DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT.getMbFrac(), timePolicy.getMaxMergeMB(), 0.0d); + } + } + + public void testDefaultMaxMergedSegmentWithNodeOverrides() { + var maxMergedSegmentSize = ByteSizeValue.ofBytes(randomLongBetween(1L, Long.MAX_VALUE)); + { + var indexSettings = indexSettings( + Settings.EMPTY, + Settings.builder().put(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT_SETTING.getKey(), maxMergedSegmentSize).build() + ); + TieredMergePolicy tieredPolicy = (TieredMergePolicy) new MergePolicyConfig(logger, indexSettings).getMergePolicy(false); + assertEquals(maxMergedSegmentSize.getMbFrac(), tieredPolicy.getMaxMergedSegmentMB(), 0.0d); + } + { + var indexSettings = indexSettings( + Settings.EMPTY, + Settings.builder() + .put(MergePolicyConfig.DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT_SETTING.getKey(), maxMergedSegmentSize) + .build() + ); + LogByteSizeMergePolicy timePolicy = (LogByteSizeMergePolicy) new MergePolicyConfig(logger, indexSettings).getMergePolicy(true); + assertEquals(maxMergedSegmentSize.getMbFrac(), timePolicy.getMaxMergeMB(), 0.0d); + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java index ecc55c36f61c2..2fa3216ad5556 100644 --- a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java +++ b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java @@ -89,58 +89,59 @@ public SearchShardTask getTask() { } public void testLevelPrecedence() { - 
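Note: the SearchSlowLogTests rewrite that follows wraps every SearchContext in try-with-resources, so the context is released even when an assertion fails mid-test instead of leaking. The shape, shown as a fragment that reuses the test's own helpers:

    // SearchContext is releasable; try-with-resources frees it on both the
    // success and the assertion-failure path.
    try (SearchContext ctx = searchContextWithSourceAndTask(createIndex("index"))) {
        SearchSlowLog log = new SearchSlowLog(settings);
        log.onQueryPhase(ctx, 40L);
        assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.INFO));
    } // ctx is closed here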
SearchContext ctx = searchContextWithSourceAndTask(createIndex("index")); - String uuid = UUIDs.randomBase64UUID(); - IndexSettings settings = new IndexSettings(createIndexMetadata("index", settings(uuid)), Settings.EMPTY); - SearchSlowLog log = new SearchSlowLog(settings); - - // For this test, when level is not breached, the level below should be used. - { - log.onQueryPhase(ctx, 40L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.INFO)); - log.onQueryPhase(ctx, 41L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.WARN)); - - log.onFetchPhase(ctx, 40L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.INFO)); - log.onFetchPhase(ctx, 41L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.WARN)); - } + try (SearchContext ctx = searchContextWithSourceAndTask(createIndex("index"))) { + String uuid = UUIDs.randomBase64UUID(); + IndexSettings settings = new IndexSettings(createIndexMetadata("index", settings(uuid)), Settings.EMPTY); + SearchSlowLog log = new SearchSlowLog(settings); + + // For this test, when level is not breached, the level below should be used. + { + log.onQueryPhase(ctx, 40L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.INFO)); + log.onQueryPhase(ctx, 41L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.WARN)); + + log.onFetchPhase(ctx, 40L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.INFO)); + log.onFetchPhase(ctx, 41L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.WARN)); + } - { - log.onQueryPhase(ctx, 30L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.DEBUG)); - log.onQueryPhase(ctx, 31L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.INFO)); + { + log.onQueryPhase(ctx, 30L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.DEBUG)); + log.onQueryPhase(ctx, 31L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.INFO)); - log.onFetchPhase(ctx, 30L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.DEBUG)); - log.onFetchPhase(ctx, 31L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.INFO)); - } + log.onFetchPhase(ctx, 30L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.DEBUG)); + log.onFetchPhase(ctx, 31L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.INFO)); + } - { - log.onQueryPhase(ctx, 20L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.TRACE)); - log.onQueryPhase(ctx, 21L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.DEBUG)); + { + log.onQueryPhase(ctx, 20L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.TRACE)); + log.onQueryPhase(ctx, 21L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.DEBUG)); - log.onFetchPhase(ctx, 20L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.TRACE)); - log.onFetchPhase(ctx, 21L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.DEBUG)); - } + log.onFetchPhase(ctx, 20L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.TRACE)); + log.onFetchPhase(ctx, 21L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.DEBUG)); + } - { - log.onQueryPhase(ctx, 10L); - assertNull(appender.getLastEventAndReset()); - log.onQueryPhase(ctx, 11L); - 
assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.TRACE)); + { + log.onQueryPhase(ctx, 10L); + assertNull(appender.getLastEventAndReset()); + log.onQueryPhase(ctx, 11L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.TRACE)); - log.onFetchPhase(ctx, 10L); - assertNull(appender.getLastEventAndReset()); - log.onFetchPhase(ctx, 11L); - assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.TRACE)); + log.onFetchPhase(ctx, 10L); + assertNull(appender.getLastEventAndReset()); + log.onFetchPhase(ctx, 11L); + assertThat(appender.getLastEventAndReset().getLevel(), equalTo(Level.TRACE)); + } } } @@ -160,63 +161,71 @@ private Settings.Builder settings(String uuid) { } public void testTwoLoggersDifferentLevel() { - SearchContext ctx1 = searchContextWithSourceAndTask(createIndex("index-1")); - SearchContext ctx2 = searchContextWithSourceAndTask(createIndex("index-2")); - IndexSettings settings1 = new IndexSettings( - createIndexMetadata( - "index-1", - Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) - .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING.getKey(), "40nanos") - .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING.getKey(), "40nanos") - ), - Settings.EMPTY - ); - SearchSlowLog log1 = new SearchSlowLog(settings1); + try ( + SearchContext ctx1 = searchContextWithSourceAndTask(createIndex("index-1")); + SearchContext ctx2 = searchContextWithSourceAndTask(createIndex("index-2")) + ) { + IndexSettings settings1 = new IndexSettings( + createIndexMetadata( + "index-1", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) + .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING.getKey(), "40nanos") + .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING.getKey(), "40nanos") + ), + Settings.EMPTY + ); + SearchSlowLog log1 = new SearchSlowLog(settings1); - IndexSettings settings2 = new IndexSettings( - createIndexMetadata( - "index-2", - Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) - .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING.getKey(), "10nanos") - .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING.getKey(), "10nanos") - ), - Settings.EMPTY - ); - SearchSlowLog log2 = new SearchSlowLog(settings2); - - { - // threshold set on WARN only, should not log - log1.onQueryPhase(ctx1, 11L); - assertNull(appender.getLastEventAndReset()); - log1.onFetchPhase(ctx1, 11L); - assertNull(appender.getLastEventAndReset()); - - // threshold set on TRACE, should log - log2.onQueryPhase(ctx2, 11L); - assertNotNull(appender.getLastEventAndReset()); - log2.onFetchPhase(ctx2, 11L); - assertNotNull(appender.getLastEventAndReset()); + IndexSettings settings2 = new IndexSettings( + createIndexMetadata( + "index-2", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) + .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING.getKey(), "10nanos") + .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING.getKey(), "10nanos") + ), + Settings.EMPTY + ); + SearchSlowLog log2 = new SearchSlowLog(settings2); + + { + // 
threshold set on WARN only, should not log + log1.onQueryPhase(ctx1, 11L); + assertNull(appender.getLastEventAndReset()); + log1.onFetchPhase(ctx1, 11L); + assertNull(appender.getLastEventAndReset()); + + // threshold set on TRACE, should log + log2.onQueryPhase(ctx2, 11L); + assertNotNull(appender.getLastEventAndReset()); + log2.onFetchPhase(ctx2, 11L); + assertNotNull(appender.getLastEventAndReset()); + } } } public void testMultipleSlowLoggersUseSingleLog4jLogger() { LoggerContext context = (LoggerContext) LogManager.getContext(false); - SearchContext ctx1 = searchContextWithSourceAndTask(createIndex("index-1")); - IndexSettings settings1 = new IndexSettings(createIndexMetadata("index-1", settings(UUIDs.randomBase64UUID())), Settings.EMPTY); - SearchSlowLog log1 = new SearchSlowLog(settings1); - int numberOfLoggersBefore = context.getLoggers().size(); + try (SearchContext ctx1 = searchContextWithSourceAndTask(createIndex("index-1"))) { + IndexSettings settings1 = new IndexSettings(createIndexMetadata("index-1", settings(UUIDs.randomBase64UUID())), Settings.EMPTY); + SearchSlowLog log1 = new SearchSlowLog(settings1); + int numberOfLoggersBefore = context.getLoggers().size(); - SearchContext ctx2 = searchContextWithSourceAndTask(createIndex("index-2")); - IndexSettings settings2 = new IndexSettings(createIndexMetadata("index-2", settings(UUIDs.randomBase64UUID())), Settings.EMPTY); - SearchSlowLog log2 = new SearchSlowLog(settings2); + try (SearchContext ctx2 = searchContextWithSourceAndTask(createIndex("index-2"))) { + IndexSettings settings2 = new IndexSettings( + createIndexMetadata("index-2", settings(UUIDs.randomBase64UUID())), + Settings.EMPTY + ); + SearchSlowLog log2 = new SearchSlowLog(settings2); - int numberOfLoggersAfter = context.getLoggers().size(); - assertThat(numberOfLoggersAfter, equalTo(numberOfLoggersBefore)); + int numberOfLoggersAfter = context.getLoggers().size(); + assertThat(numberOfLoggersAfter, equalTo(numberOfLoggersBefore)); + } + } } private IndexMetadata createIndexMetadata(String index, Settings.Builder put) { @@ -225,49 +234,53 @@ private IndexMetadata createIndexMetadata(String index, Settings.Builder put) { public void testSlowLogHasJsonFields() throws IOException { IndexService index = createIndex("foo"); - SearchContext searchContext = searchContextWithSourceAndTask(index); - ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 10); - - assertThat(p.get("elasticsearch.slowlog.message"), equalTo("[foo][0]")); - assertThat(p.get("elasticsearch.slowlog.took"), equalTo("10nanos")); - assertThat(p.get("elasticsearch.slowlog.took_millis"), equalTo("0")); - assertThat(p.get("elasticsearch.slowlog.total_hits"), equalTo("-1")); - assertThat(p.get("elasticsearch.slowlog.stats"), equalTo("[]")); - assertThat(p.get("elasticsearch.slowlog.search_type"), Matchers.nullValue()); - assertThat(p.get("elasticsearch.slowlog.total_shards"), equalTo("1")); - assertThat(p.get("elasticsearch.slowlog.source"), equalTo("{\\\"query\\\":{\\\"match_all\\\":{\\\"boost\\\":1.0}}}")); + try (SearchContext searchContext = searchContextWithSourceAndTask(index)) { + ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 10); + + assertThat(p.get("elasticsearch.slowlog.message"), equalTo("[foo][0]")); + assertThat(p.get("elasticsearch.slowlog.took"), equalTo("10nanos")); + assertThat(p.get("elasticsearch.slowlog.took_millis"), equalTo("0")); + assertThat(p.get("elasticsearch.slowlog.total_hits"), equalTo("-1")); + 
assertThat(p.get("elasticsearch.slowlog.stats"), equalTo("[]")); + assertThat(p.get("elasticsearch.slowlog.search_type"), Matchers.nullValue()); + assertThat(p.get("elasticsearch.slowlog.total_shards"), equalTo("1")); + assertThat(p.get("elasticsearch.slowlog.source"), equalTo("{\\\"query\\\":{\\\"match_all\\\":{\\\"boost\\\":1.0}}}")); + } } public void testSlowLogsWithStats() throws IOException { IndexService index = createIndex("foo"); - SearchContext searchContext = createSearchContext(index, "group1"); - SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); - searchContext.request().source(source); - searchContext.setTask( - new SearchShardTask(0, "n/a", "n/a", "test", null, Collections.singletonMap(Task.X_OPAQUE_ID_HTTP_HEADER, "my_id")) - ); + try (SearchContext searchContext = createSearchContext(index, "group1")) { + SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); + searchContext.request().source(source); + searchContext.setTask( + new SearchShardTask(0, "n/a", "n/a", "test", null, Collections.singletonMap(Task.X_OPAQUE_ID_HTTP_HEADER, "my_id")) + ); - ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 10); - assertThat(p.get("elasticsearch.slowlog.stats"), equalTo("[\\\"group1\\\"]")); + ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 10); + assertThat(p.get("elasticsearch.slowlog.stats"), equalTo("[\\\"group1\\\"]")); + } - searchContext = createSearchContext(index, "group1", "group2"); - source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); - searchContext.request().source(source); - searchContext.setTask( - new SearchShardTask(0, "n/a", "n/a", "test", null, Collections.singletonMap(Task.X_OPAQUE_ID_HTTP_HEADER, "my_id")) - ); - p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 10); - assertThat(p.get("elasticsearch.slowlog.stats"), equalTo("[\\\"group1\\\", \\\"group2\\\"]")); + try (SearchContext searchContext = createSearchContext(index, "group1", "group2");) { + SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); + searchContext.request().source(source); + searchContext.setTask( + new SearchShardTask(0, "n/a", "n/a", "test", null, Collections.singletonMap(Task.X_OPAQUE_ID_HTTP_HEADER, "my_id")) + ); + ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 10); + assertThat(p.get("elasticsearch.slowlog.stats"), equalTo("[\\\"group1\\\", \\\"group2\\\"]")); + } } public void testSlowLogSearchContextPrinterToLog() throws IOException { IndexService index = createIndex("foo"); - SearchContext searchContext = searchContextWithSourceAndTask(index); - ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 10); - assertThat(p.get("elasticsearch.slowlog.message"), equalTo("[foo][0]")); - // Makes sure that output doesn't contain any new lines - assertThat(p.get("elasticsearch.slowlog.source"), not(containsString("\n"))); - assertThat(p.get("elasticsearch.slowlog.id"), equalTo("my_id")); + try (SearchContext searchContext = searchContextWithSourceAndTask(index)) { + ESLogMessage p = SearchSlowLog.SearchSlowLogMessage.of(searchContext, 10); + assertThat(p.get("elasticsearch.slowlog.message"), equalTo("[foo][0]")); + // Makes sure that output doesn't contain any new lines + assertThat(p.get("elasticsearch.slowlog.source"), not(containsString("\n"))); + assertThat(p.get("elasticsearch.slowlog.id"), equalTo("my_id")); + } } public void 
testSetQueryLevels() { diff --git a/server/src/test/java/org/elasticsearch/index/engine/FlushListenersTests.java b/server/src/test/java/org/elasticsearch/index/engine/FlushListenersTests.java index 2db512dbe0952..9c345eb923ab4 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/FlushListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/FlushListenersTests.java @@ -33,7 +33,7 @@ public void testFlushListenerCompletedImmediatelyIfFlushAlreadyOccurred() { lastWriteLocation.generation - randomLongBetween(10, 90), 2 ); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); flushListeners.addOrNotify(waitLocation, future); assertThat(future.actionGet(), equalTo(generation)); } @@ -52,7 +52,7 @@ public void testFlushListenerCompletedAfterLocationFlushed() { lastWriteLocation.generation - randomLongBetween(10, 90), 2 ); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); flushListeners.addOrNotify(waitLocation, future); assertFalse(future.isDone()); @@ -71,7 +71,7 @@ public void testFlushListenerCompletedAfterLocationFlushed() { 2 ); - PlainActionFuture future2 = PlainActionFuture.newFuture(); + PlainActionFuture future2 = new PlainActionFuture<>(); flushListeners.addOrNotify(waitLocation2, future2); assertFalse(future2.isDone()); @@ -81,7 +81,7 @@ public void testFlushListenerCompletedAfterLocationFlushed() { } public void testFlushListenerClose() { - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); try (FlushListeners flushListeners = new FlushListeners(logger, new ThreadContext(Settings.EMPTY))) { Translog.Location waitLocation = new Translog.Location(randomLongBetween(0, 2), randomLongBetween(10, 90), 2); flushListeners.addOrNotify(waitLocation, future); @@ -91,7 +91,7 @@ public void testFlushListenerClose() { expectThrows(AlreadyClosedException.class, future::actionGet); - expectThrows(IllegalStateException.class, () -> flushListeners.addOrNotify(waitLocation, PlainActionFuture.newFuture())); + expectThrows(IllegalStateException.class, () -> flushListeners.addOrNotify(waitLocation, new PlainActionFuture<>())); } } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 072851789f2e0..ca3ee07de9192 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -2573,28 +2573,40 @@ public void testIndexWriterInfoStream() throws IllegalAccessException, IOExcepti } } - private static class MockMTAppender extends AbstractAppender { + private static class MockMergeThreadAppender extends AbstractAppender { + private final List messages = Collections.synchronizedList(new ArrayList<>()); + private final AtomicBoolean luceneMergeSchedulerEnded = new AtomicBoolean(); List messages() { return List.copyOf(messages); } - MockMTAppender(final String name) throws IllegalAccessException { + public boolean mergeCompleted() { + return luceneMergeSchedulerEnded.get(); + } + + MockMergeThreadAppender(final String name) throws IllegalAccessException { super(name, RegexFilter.createFilter(".*(\n.*)*", new String[0], false, null, null), null, false, Property.EMPTY_ARRAY); } @Override public void append(LogEvent event) { final String formattedMessage = 
event.getMessage().getFormattedMessage(); - if (event.getLevel() == Level.TRACE && formattedMessage.startsWith("merge thread")) { - messages.add(formattedMessage); + if (event.getLevel() == Level.TRACE && event.getMarker().getName().contains("[index][0]")) { + if (formattedMessage.startsWith("merge thread")) { + messages.add(formattedMessage); + } else if (event.getLoggerName().endsWith(".MS") + && formattedMessage.contains("MS: merge thread") + && formattedMessage.endsWith("end")) { + luceneMergeSchedulerEnded.set(true); + } } } } public void testMergeThreadLogging() throws Exception { - final MockMTAppender mockAppender = new MockMTAppender("testMergeThreadLogging"); + final MockMergeThreadAppender mockAppender = new MockMergeThreadAppender("testMergeThreadLogging"); mockAppender.start(); Logger rootLogger = LogManager.getRootLogger(); @@ -2613,26 +2625,29 @@ public void testMergeThreadLogging() throws Exception { engine.index(indexForDoc(testParsedDocument("3", null, testDocument(), B_1, null))); engine.index(indexForDoc(testParsedDocument("4", null, testDocument(), B_1, null))); engine.forceMerge(true, 1, false, UUIDs.randomBase64UUID()); - engine.flushAndClose(); assertBusy(() -> { assertThat(engine.getMergeStats().getTotal(), greaterThan(0L)); assertThat(engine.getMergeStats().getCurrent(), equalTo(0L)); }); - } - assertBusy(() -> { - List threadMsgs = mockAppender.messages().stream().filter(line -> line.startsWith("merge thread")).toList(); - assertThat("messages:" + threadMsgs, threadMsgs.size(), greaterThanOrEqualTo(3)); - assertThat( - threadMsgs, - containsInRelativeOrder( - matchesRegex("^merge thread .* start$"), - matchesRegex("^merge thread .* merge segment.*$"), - matchesRegex("^merge thread .* end$") - ) - ); - }); + assertBusy(() -> { + List threadMsgs = mockAppender.messages().stream().filter(line -> line.startsWith("merge thread")).toList(); + assertThat("messages:" + threadMsgs, threadMsgs.size(), greaterThanOrEqualTo(3)); + assertThat( + threadMsgs, + containsInRelativeOrder( + matchesRegex("^merge thread .* start$"), + matchesRegex("^merge thread .* merge segment.*$"), + matchesRegex("^merge thread .* end$") + ) + ); + assertThat(mockAppender.mergeCompleted(), is(true)); + }); + + Loggers.setLevel(rootLogger, savedLevel); + engine.close(); + } } finally { Loggers.setLevel(rootLogger, savedLevel); Loggers.removeAppender(rootLogger, mockAppender); @@ -3435,17 +3450,6 @@ public void testSkipTranslogReplay() throws IOException { } } - private Path[] filterExtraFSFiles(Path[] files) { - List paths = new ArrayList<>(); - for (Path p : files) { - if (p.getFileName().toString().startsWith("extra")) { - continue; - } - paths.add(p); - } - return paths.toArray(new Path[0]); - } - public void testTranslogReplay() throws IOException { final LongSupplier inSyncGlobalCheckpointSupplier = () -> this.engine.getProcessedLocalCheckpoint(); final int numDocs = randomIntBetween(1, 10); @@ -7687,7 +7691,7 @@ public void testFlushListener() throws Exception { InternalEngine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE) ) { Engine.IndexResult result1 = engine.index(indexForDoc(createParsedDoc("a", null))); - PlainActionFuture future1 = PlainActionFuture.newFuture(); + PlainActionFuture future1 = new PlainActionFuture<>(); engine.addFlushListener(result1.getTranslogLocation(), future1); assertFalse(future1.isDone()); engine.flush(); @@ -7695,7 +7699,7 @@ public void testFlushListener() throws Exception { Engine.IndexResult result2 = 
engine.index(indexForDoc(createParsedDoc("a", null))); engine.flush(); - PlainActionFuture future2 = PlainActionFuture.newFuture(); + PlainActionFuture future2 = new PlainActionFuture<>(); engine.addFlushListener(result2.getTranslogLocation(), future2); assertTrue(future2.isDone()); assertThat(future2.actionGet(), equalTo(engine.getLastCommittedSegmentInfos().getGeneration())); diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/FieldDataCacheTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/FieldDataCacheTests.java index c6ff7776b3526..c1d9e1dc0fd17 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/FieldDataCacheTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/FieldDataCacheTests.java @@ -20,6 +20,9 @@ import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.index.fielddata.plain.PagedBytesIndexFieldData; import org.elasticsearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; @@ -78,6 +81,52 @@ public void testLoadGlobal_neverCacheIfFieldIsMissing() throws Exception { dir.close(); } + public void testGlobalOrdinalsCircuitBreaker() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + iwc.setMergePolicy(NoMergePolicy.INSTANCE); + IndexWriter iw = new IndexWriter(dir, iwc); + long numDocs = randomIntBetween(66000, 70000); + + for (int i = 1; i <= numDocs; i++) { + Document doc = new Document(); + doc.add(new SortedSetDocValuesField("field1", new BytesRef(String.valueOf(i)))); + iw.addDocument(doc); + if (i % 10000 == 0) { + iw.commit(); + } + } + iw.close(); + DirectoryReader ir = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(dir), new ShardId("_index", "_na_", 0)); + + int[] timesCalled = new int[1]; + SortedSetOrdinalsIndexFieldData sortedSetOrdinalsIndexFieldData = new SortedSetOrdinalsIndexFieldData( + new DummyAccountingFieldDataCache(), + "field1", + CoreValuesSourceType.KEYWORD, + new NoneCircuitBreakerService() { + @Override + public CircuitBreaker getBreaker(String name) { + assertThat(name, equalTo(CircuitBreaker.FIELDDATA)); + return new NoopCircuitBreaker("test") { + @Override + public void addEstimateBytesAndMaybeBreak(long bytes, String label) throws CircuitBreakingException { + assertThat(label, equalTo("Global Ordinals")); + assertThat(bytes, equalTo(0L)); + timesCalled[0]++; + } + }; + } + }, + MOCK_TO_SCRIPT_FIELD + ); + sortedSetOrdinalsIndexFieldData.loadGlobal(ir); + assertThat(timesCalled[0], equalTo(2)); + + ir.close(); + dir.close(); + } + private SortedSetOrdinalsIndexFieldData createSortedDV(String fieldName, IndexFieldDataCache indexFieldDataCache) { return new SortedSetOrdinalsIndexFieldData( indexFieldDataCache, diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java index 046facfe690c2..bf9176de1b124 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java @@ -43,8 +43,6 @@ import 
org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; import org.mockito.ArgumentMatchers; import java.util.Arrays; @@ -59,6 +57,7 @@ import static org.elasticsearch.index.mapper.NumberFieldMapper.NumberType.LONG; import static org.elasticsearch.index.mapper.NumberFieldMapper.NumberType.SHORT; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -294,25 +293,17 @@ public void testSetCacheListenerTwice() { } private void doTestRequireDocValues(MappedFieldType ft) { - ThreadPool threadPool = new TestThreadPool("random_threadpool_name"); - try { - IndicesFieldDataCache cache = new IndicesFieldDataCache(Settings.EMPTY, null); - IndexFieldDataService ifds = new IndexFieldDataService( - IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), - cache, - null + Settings settings = Settings.EMPTY; + IndicesFieldDataCache cache = new IndicesFieldDataCache(settings, null); + IndexFieldDataService ifds = new IndexFieldDataService(IndexSettingsModule.newIndexSettings("test", settings), cache, null); + if (ft.hasDocValues()) { + ifds.getForField(ft, FieldDataContext.noRuntimeFields("test")); // no exception + } else { + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ifds.getForField(ft, FieldDataContext.noRuntimeFields("test")) ); - if (ft.hasDocValues()) { - ifds.getForField(ft, FieldDataContext.noRuntimeFields("test")); // no exception - } else { - IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> ifds.getForField(ft, FieldDataContext.noRuntimeFields("test")) - ); - assertThat(e.getMessage(), containsString("doc values")); - } - } finally { - threadPool.shutdown(); + assertThat(e.getMessage(), containsString("doc values")); } } @@ -360,4 +351,19 @@ public void testRequireDocValuesOnBools() { doTestRequireDocValues(new BooleanFieldMapper.BooleanFieldType("field")); doTestRequireDocValues(new BooleanFieldMapper.BooleanFieldType("field", true, false, false, null, null, Collections.emptyMap())); } + + public void testFieldDataCacheExpire() { + { + Settings settings = Settings.EMPTY; + IndicesFieldDataCache cache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() { + }); + assertThat(cache.getCache().getExpireAfterAccessNanos(), equalTo(3_600_000_000_000L)); + } + { + Settings settings = Settings.builder().put(IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_EXPIRE.getKey(), "5s").build(); + IndicesFieldDataCache cache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() { + }); + assertThat(cache.getCache().getExpireAfterAccessNanos(), equalTo(5_000_000_000L)); + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/fieldstats/FieldStatsProviderRefreshTests.java b/server/src/test/java/org/elasticsearch/index/fieldstats/FieldStatsProviderRefreshTests.java index e79b088893acd..a0eff567274dc 100644 --- a/server/src/test/java/org/elasticsearch/index/fieldstats/FieldStatsProviderRefreshTests.java +++ b/server/src/test/java/org/elasticsearch/index/fieldstats/FieldStatsProviderRefreshTests.java @@ -94,7 +94,7 @@ private void refreshIndex() { } private void indexDocument(String id, String sValue) { - DocWriteResponse response = 
client().prepareIndex("index").setId(id).setSource("s", sValue).get(); + DocWriteResponse response = prepareIndex("index").setId(id).setSource("s", sValue).get(); assertThat(response.status(), anyOf(equalTo(RestStatus.OK), equalTo(RestStatus.CREATED))); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BlockSourceReaderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BlockSourceReaderTests.java new file mode 100644 index 0000000000000..3364e2e828bf2 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/mapper/BlockSourceReaderTests.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.document.StoredField; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.index.fieldvisitor.StoredFieldLoader; +import org.elasticsearch.search.fetch.StoredFieldsSpec; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.List; +import java.util.Set; +import java.util.function.Consumer; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.nullValue; + +public class BlockSourceReaderTests extends ESTestCase { + public void testSingle() throws IOException { + withIndex( + source -> source.field("field", "foo"), + ctx -> loadBlock(ctx, block -> assertThat(block.get(0), equalTo(new BytesRef("foo")))) + ); + } + + public void testMissing() throws IOException { + withIndex(source -> {}, ctx -> loadBlock(ctx, block -> assertThat(block.get(0), nullValue()))); + } + + public void testArray() throws IOException { + withIndex( + source -> source.startArray("field").value("foo").value("bar").endArray(), + ctx -> loadBlock(ctx, block -> assertThat(block.get(0), equalTo(List.of(new BytesRef("foo"), new BytesRef("bar"))))) + ); + } + + public void testEmptyArray() throws IOException { + withIndex(source -> source.startArray("field").endArray(), ctx -> loadBlock(ctx, block -> assertThat(block.get(0), nullValue()))); + } + + private void loadBlock(LeafReaderContext ctx, Consumer test) throws IOException { + BlockLoader loader = new BlockSourceReader.BytesRefsBlockLoader(SourceValueFetcher.toString(Set.of("field"))); + assertThat(loader.columnAtATimeReader(ctx), nullValue()); + BlockLoader.RowStrideReader reader = loader.rowStrideReader(ctx); + assertThat(loader.rowStrideStoredFieldSpec(), equalTo(StoredFieldsSpec.NEEDS_SOURCE)); + BlockLoaderStoredFieldsFromLeafLoader storedFields = new BlockLoaderStoredFieldsFromLeafLoader( + StoredFieldLoader.fromSpec(loader.rowStrideStoredFieldSpec()).getLoader(ctx, null), + loader.rowStrideStoredFieldSpec().requiresSource() ? 
SourceLoader.FROM_STORED_SOURCE.leaf(ctx.reader(), null) : null + ); + BlockLoader.Builder builder = loader.builder(TestBlock.factory(ctx.reader().numDocs()), 1); + storedFields.advanceTo(0); + reader.read(0, storedFields, builder); + TestBlock block = (TestBlock) builder.build(); + assertThat(block.size(), equalTo(1)); + test.accept(block); + } + + private void withIndex(CheckedConsumer buildSource, CheckedConsumer test) + throws IOException { + try ( + Directory directory = newDirectory(); + RandomIndexWriter writer = new RandomIndexWriter( + random(), + directory, + newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + XContentBuilder source = JsonXContent.contentBuilder(); + source.startObject(); + buildSource.accept(source); + source.endObject(); + writer.addDocument(List.of(new StoredField(SourceFieldMapper.NAME, BytesReference.bytes(source).toBytesRef()))); + try (IndexReader reader = writer.getReader()) { + assertThat(reader.leaves(), hasSize(1)); + test.accept(reader.leaves().get(0)); + } + } + } +} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java index 8d5a47f08c663..d8f063ece35c0 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java @@ -417,8 +417,8 @@ public void testBlockLoader() throws IOException { try (DirectoryReader reader = iw.getReader()) { BooleanScriptFieldType fieldType = build("xor_param", Map.of("param", false), OnScriptError.FAIL); List expected = List.of(false, true); - assertThat(blockLoaderReadValues(reader, fieldType), equalTo(expected)); - assertThat(blockLoaderReadValuesFromSingleDoc(reader, fieldType), equalTo(expected)); + assertThat(blockLoaderReadValuesFromColumnAtATimeReader(reader, fieldType), equalTo(expected)); + assertThat(blockLoaderReadValuesFromRowStrideReader(reader, fieldType), equalTo(expected)); } } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java index 540b1a86d8e50..d83c75455292f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java @@ -43,7 +43,6 @@ import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; import java.io.IOException; -import java.time.Instant; import java.time.ZoneId; import java.time.ZoneOffset; import java.util.Collections; @@ -333,10 +332,6 @@ public void testDateNanoDocValues() throws IOException { dir.close(); } - private Instant instant(String str) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(str)).toInstant(); - } - private static DateFieldType fieldType(Resolution resolution, String format, String nullValue) { DateFormatter formatter = DateFormatter.forPattern(format); return new DateFieldType("field", true, false, true, formatter, resolution, nullValue, null, Collections.emptyMap()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java index d1652b9f57716..eb3daf472ea2e 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java @@ -477,8 +477,11 @@ public void testBlockLoader() throws IOException { iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181355]}")))); try (DirectoryReader reader = iw.getReader()) { DateScriptFieldType fieldType = build("add_days", Map.of("days", 1), OnScriptError.FAIL); - assertThat(blockLoaderReadValues(reader, fieldType), equalTo(List.of(1595518581354L, 1595518581355L))); - assertThat(blockLoaderReadValuesFromSingleDoc(reader, fieldType), equalTo(List.of(1595518581354L, 1595518581355L))); + assertThat( + blockLoaderReadValuesFromColumnAtATimeReader(reader, fieldType), + equalTo(List.of(1595518581354L, 1595518581355L)) + ); + assertThat(blockLoaderReadValuesFromRowStrideReader(reader, fieldType), equalTo(List.of(1595518581354L, 1595518581355L))); } } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java index 0f05dad8098f4..d37e42e04edca 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java @@ -236,8 +236,8 @@ public void testBlockLoader() throws IOException { iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); try (DirectoryReader reader = iw.getReader()) { DoubleScriptFieldType fieldType = build("add_param", Map.of("param", 1), OnScriptError.FAIL); - assertThat(blockLoaderReadValues(reader, fieldType), equalTo(List.of(2d, 3d))); - assertThat(blockLoaderReadValuesFromSingleDoc(reader, fieldType), equalTo(List.of(2d, 3d))); + assertThat(blockLoaderReadValuesFromColumnAtATimeReader(reader, fieldType), equalTo(List.of(2d, 3d))); + assertThat(blockLoaderReadValuesFromRowStrideReader(reader, fieldType), equalTo(List.of(2d, 3d))); } } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java index 56ca5f3dae89f..cd19bb50b842c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java @@ -256,8 +256,8 @@ public void testBlockLoader() throws IOException { new BytesRef(InetAddressPoint.encode(InetAddresses.forString("192.168.0.1"))), new BytesRef(InetAddressPoint.encode(InetAddresses.forString("192.168.1.1"))) ); - assertThat(blockLoaderReadValues(reader, fieldType), equalTo(expected)); - assertThat(blockLoaderReadValuesFromSingleDoc(reader, fieldType), equalTo(expected)); + assertThat(blockLoaderReadValuesFromColumnAtATimeReader(reader, fieldType), equalTo(expected)); + assertThat(blockLoaderReadValuesFromRowStrideReader(reader, fieldType), equalTo(expected)); } } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java index eafb33cd44cd4..d6e93fceb713e 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java @@ -657,18 +657,25 @@ protected Function loadBlockExpected() { @Override protected SyntheticSourceSupport syntheticSourceSupport(boolean ignoreMalformed) { assertFalse("keyword doesn't support 
ignore_malformed", ignoreMalformed); - return new KeywordSyntheticSourceSupport(randomBoolean(), usually() ? null : randomAlphaOfLength(2), true); + return new KeywordSyntheticSourceSupport( + randomBoolean() ? null : between(10, 100), + randomBoolean(), + usually() ? null : randomAlphaOfLength(2), + true + ); } static class KeywordSyntheticSourceSupport implements SyntheticSourceSupport { - private final Integer ignoreAbove = randomBoolean() ? null : between(10, 100); - private final boolean allIgnored = ignoreAbove != null && rarely(); + private final Integer ignoreAbove; + private final boolean allIgnored; private final boolean store; private final boolean docValues; private final String nullValue; private final boolean exampleSortsUsingIgnoreAbove; - KeywordSyntheticSourceSupport(boolean store, String nullValue, boolean exampleSortsUsingIgnoreAbove) { + KeywordSyntheticSourceSupport(Integer ignoreAbove, boolean store, String nullValue, boolean exampleSortsUsingIgnoreAbove) { + this.ignoreAbove = ignoreAbove; + this.allIgnored = ignoreAbove != null && rarely(); this.store = store; this.nullValue = nullValue; this.exampleSortsUsingIgnoreAbove = exampleSortsUsingIgnoreAbove; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java index 65f4c2e3ea6eb..ce705f2e9ae8b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java @@ -382,9 +382,12 @@ public void testBlockLoader() throws IOException { iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); try (DirectoryReader reader = iw.getReader()) { KeywordScriptFieldType fieldType = build("append_param", Map.of("param", "-Suffix"), OnScriptError.FAIL); - assertThat(blockLoaderReadValues(reader, fieldType), equalTo(List.of(new BytesRef("1-Suffix"), new BytesRef("2-Suffix")))); assertThat( - blockLoaderReadValuesFromSingleDoc(reader, fieldType), + blockLoaderReadValuesFromColumnAtATimeReader(reader, fieldType), + equalTo(List.of(new BytesRef("1-Suffix"), new BytesRef("2-Suffix"))) + ); + assertThat( + blockLoaderReadValuesFromRowStrideReader(reader, fieldType), equalTo(List.of(new BytesRef("1-Suffix"), new BytesRef("2-Suffix"))) ); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java index 1688cab24af3e..fd20b6c71e984 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java @@ -269,8 +269,8 @@ public void testBlockLoader() throws IOException { iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); try (DirectoryReader reader = iw.getReader()) { LongScriptFieldType fieldType = build("add_param", Map.of("param", 1), OnScriptError.FAIL); - assertThat(blockLoaderReadValues(reader, fieldType), equalTo(List.of(2L, 3L))); - assertThat(blockLoaderReadValuesFromSingleDoc(reader, fieldType), equalTo(List.of(2L, 3L))); + assertThat(blockLoaderReadValuesFromColumnAtATimeReader(reader, fieldType), equalTo(List.of(2L, 3L))); + assertThat(blockLoaderReadValuesFromRowStrideReader(reader, fieldType), equalTo(List.of(2L, 3L))); } } } diff --git 
a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java index bbfeaaa8b9d69..0460108e565ce 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java @@ -46,6 +46,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Strings; import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.analysis.AnalyzerScope; @@ -1121,6 +1122,7 @@ protected SyntheticSourceSupport syntheticSourceSupport(boolean ignoreMalformed) boolean storedKeywordField = storeTextField || randomBoolean(); String nullValue = storeTextField || usually() ? null : randomAlphaOfLength(2); KeywordFieldMapperTests.KeywordSyntheticSourceSupport keywordSupport = new KeywordFieldMapperTests.KeywordSyntheticSourceSupport( + randomBoolean() ? null : between(10, 100), storedKeywordField, nullValue, false == storeTextField @@ -1324,4 +1326,52 @@ public void testEmpty() throws Exception { assertFalse(dv.advanceExact(3)); }); } + + @Override + protected boolean supportsColumnAtATimeReader(MapperService mapper, MappedFieldType ft) { + String parentName = mapper.mappingLookup().parentField(ft.name()); + if (parentName == null) { + TextFieldMapper.TextFieldType text = (TextFieldType) ft; + return text.syntheticSourceDelegate() != null && text.syntheticSourceDelegate().hasDocValues(); + } + MappedFieldType parent = mapper.fieldType(parentName); + if (false == parent.typeName().equals(KeywordFieldMapper.CONTENT_TYPE)) { + throw new UnsupportedOperationException(); + } + KeywordFieldMapper.KeywordFieldType kwd = (KeywordFieldMapper.KeywordFieldType) parent; + return kwd.hasDocValues(); + } + + public void testBlockLoaderFromParentColumnReader() throws IOException { + testBlockLoaderFromParent(true, randomBoolean()); + } + + public void testBlockLoaderParentFromRowStrideReader() throws IOException { + testBlockLoaderFromParent(false, randomBoolean()); + } + + private void testBlockLoaderFromParent(boolean columnReader, boolean syntheticSource) throws IOException { + boolean storeParent = randomBoolean(); + KeywordFieldMapperTests.KeywordSyntheticSourceSupport kwdSupport = new KeywordFieldMapperTests.KeywordSyntheticSourceSupport( + null, + storeParent, + null, + false == storeParent + ); + SyntheticSourceExample example = kwdSupport.example(5); + CheckedConsumer buildFields = b -> { + b.startObject("field"); + { + example.mapping().accept(b); + b.startObject("fields").startObject("sub"); + { + b.field("type", "text"); + } + b.endObject().endObject(); + } + b.endObject(); + }; + MapperService mapper = createMapperService(syntheticSource ? 
syntheticSourceMapping(buildFields) : mapping(buildFields)); + testBlockLoader(columnReader, example, mapper, "field.sub"); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldSearchTests.java b/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldSearchTests.java index 20d307a0d4cb1..2c4c620c057b2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldSearchTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldSearchTests.java @@ -74,8 +74,7 @@ public void setUpIndex() throws IOException { } public void testMatchQuery() throws Exception { - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .setSource( XContentFactory.jsonBuilder() @@ -96,8 +95,7 @@ public void testMatchQuery() throws Exception { } public void testMultiMatchQuery() throws Exception { - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .setSource( XContentFactory.jsonBuilder() @@ -117,8 +115,7 @@ public void testMultiMatchQuery() throws Exception { } public void testQueryStringQuery() throws Exception { - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .setSource( XContentFactory.jsonBuilder() @@ -140,8 +137,7 @@ public void testQueryStringQuery() throws Exception { } public void testSimpleQueryStringQuery() throws Exception { - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .setSource( XContentFactory.jsonBuilder() @@ -160,8 +156,7 @@ public void testSimpleQueryStringQuery() throws Exception { } public void testExists() throws Exception { - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .setSource( XContentFactory.jsonBuilder() @@ -202,7 +197,7 @@ public void testCardinalityAggregation() throws IOException { } for (int i = 0; i < 10; i++) { - bulkRequest.add(client().prepareIndex("test").setSource("other_field", "1")); + bulkRequest.add(prepareIndex("test").setSource("other_field", "1")); } BulkResponse bulkResponse = bulkRequest.get(); @@ -331,8 +326,7 @@ private TermsAggregationBuilder createTermsAgg(String field) { } public void testLoadDocValuesFields() throws Exception { - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .setSource( XContentFactory.jsonBuilder() @@ -361,8 +355,7 @@ public void testLoadDocValuesFields() throws Exception { } public void testFieldSort() throws Exception { - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .setSource( XContentFactory.jsonBuilder() @@ -375,8 +368,7 @@ public void testFieldSort() throws Exception { ) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .setSource( XContentFactory.jsonBuilder() @@ -389,8 +381,7 @@ public void testFieldSort() throws Exception { ) .get(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .setSource(XContentFactory.jsonBuilder().startObject().startObject("flattened").field("other_key", "E").endObject().endObject()) .get(); @@ -417,7 +408,7 @@ public void 
testSourceFiltering() { headers.put("origin", "https://www.elastic.co"); Map source = Collections.singletonMap("headers", headers); - client().prepareIndex("test").setId("1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).setSource(source).get(); + prepareIndex("test").setId("1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).setSource(source).get(); SearchResponse response = client().prepareSearch("test").setFetchSource(true).get(); assertThat(response.getHits().getAt(0).getSourceAsMap(), equalTo(source)); diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 8d4aa344519cf..160d1ec83f91f 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -70,7 +70,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBooleanSubQuery; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.hasItems; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; @@ -1427,22 +1426,6 @@ private static IndexMetadata newIndexMeta(String name, Settings oldIndexSettings return IndexMetadata.builder(name).settings(build).build(); } - private void assertQueryWithAllFieldsWildcard(Query query) { - assertEquals(DisjunctionMaxQuery.class, query.getClass()); - DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) query; - int noMatchNoDocsQueries = 0; - for (Query q : disjunctionMaxQuery.getDisjuncts()) { - if (q.getClass() == MatchNoDocsQuery.class) { - noMatchNoDocsQueries++; - } - } - assertEquals(9, noMatchNoDocsQueries); - assertThat( - disjunctionMaxQuery.getDisjuncts(), - hasItems(new TermQuery(new Term(TEXT_FIELD_NAME, "hello")), new TermQuery(new Term(KEYWORD_FIELD_NAME, "hello"))) - ); - } - public void testWhitespaceKeywordQueries() throws IOException { String query = "\"query with spaces\""; QueryStringQueryBuilder b = new QueryStringQueryBuilder(query); diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java index 319c9d4a0e8fd..4fb5084d98fbb 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java @@ -144,7 +144,7 @@ public void testRetentionLeaseBackgroundSyncActionOnReplica() throws WriteStateE retentionLeases ); - final PlainActionFuture listener = PlainActionFuture.newFuture(); + final PlainActionFuture listener = new PlainActionFuture<>(); action.shardOperationOnReplica(request, indexShard, listener); final TransportReplicationAction.ReplicaResult result = listener.actionGet(); // the retention leases on the shard should be updated diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseStatsTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseStatsTests.java index c821dbe654bcf..792081fa27726 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseStatsTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseStatsTests.java @@ -47,7 +47,7 @@ public void 
testRetentionLeaseStats() throws InterruptedException { latch.await(); } - final IndicesStatsResponse indicesStats = client().admin().indices().prepareStats("index").execute().actionGet(); + final IndicesStatsResponse indicesStats = client().admin().indices().prepareStats("index").get(); assertThat(indicesStats.getShards(), arrayWithSize(1)); final RetentionLeaseStats retentionLeaseStats = indicesStats.getShards()[0].getRetentionLeaseStats(); assertThat( diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java index 7041f6db7f29c..692130354e4ed 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java @@ -150,7 +150,7 @@ public void testRetentionLeaseSyncActionOnReplica() throws WriteStateException { final RetentionLeases retentionLeases = mock(RetentionLeases.class); final RetentionLeaseSyncAction.Request request = new RetentionLeaseSyncAction.Request(indexShard.shardId(), retentionLeases); - PlainActionFuture listener = PlainActionFuture.newFuture(); + PlainActionFuture listener = new PlainActionFuture<>(); action.dispatchedShardOperationOnReplica(request, indexShard, listener); final TransportReplicationAction.ReplicaResult result = listener.actionGet(); // the retention leases on the shard should be updated diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 9765618e05e34..4d6e316b4b7d9 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -56,6 +56,7 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -77,6 +78,7 @@ import org.elasticsearch.index.engine.DocIdSeqNoAndSource; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; +import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.engine.InternalEngineFactory; @@ -123,6 +125,7 @@ import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.FieldMaskingReader; import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.junit.annotations.TestIssueLogging; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.store.MockFSDirectoryFactory; import org.elasticsearch.threadpool.ThreadPool; @@ -172,6 +175,7 @@ import static java.util.Collections.emptySet; import static org.elasticsearch.cluster.routing.TestShardRouting.newShardRouting; import static org.elasticsearch.common.lucene.Lucene.cleanLuceneIndex; +import static org.elasticsearch.index.IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.elasticsearch.test.hamcrest.RegexMatcher.matches; import static 
org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; @@ -427,7 +431,7 @@ public void testRejectOperationPermitWithHigherTermWhenNotStarted() throws IOExc indexShard.getPendingPrimaryTerm() + randomIntBetween(1, 100), UNASSIGNED_SEQ_NO, randomNonNegativeLong(), - PlainActionFuture.newFuture() + new PlainActionFuture<>() ) ); closeShards(indexShard); @@ -2619,7 +2623,7 @@ public void testRestoreShard() throws IOException { DiscoveryNode localNode = DiscoveryNodeUtils.builder("foo").roles(emptySet()).build(); target.markAsRecovering("store", new RecoveryState(routing, localNode, null)); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); target.restoreFromRepository(new RestoreOnlyRepository("test") { @Override public void restoreShard( @@ -3028,7 +3032,7 @@ public void testRefreshListenersDuringPeerRecovery() throws IOException { called.set(true); }); - PlainActionFuture listener = PlainActionFuture.newFuture(); + PlainActionFuture listener = new PlainActionFuture<>(); shard.addRefreshListener(10, randomBoolean(), listener); expectThrows(IllegalIndexShardStateException.class, listener::actionGet); }; @@ -3156,13 +3160,13 @@ public void testRecoverFromLocalShard() throws IOException { final IndexShard differentIndex = newShard(new ShardId("index_2", "index_2", 0), true); recoverShardFromStore(differentIndex); expectThrows(IllegalArgumentException.class, () -> { - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); targetShard.recoverFromLocalShards(mappingConsumer, Arrays.asList(sourceShard, differentIndex), future); future.actionGet(); }); closeShards(differentIndex); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); targetShard.recoverFromLocalShards(mappingConsumer, Arrays.asList(sourceShard), future); assertTrue(future.actionGet()); RecoveryState recoveryState = targetShard.recoveryState(); @@ -3535,7 +3539,11 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO IndexShardRecoveryException.class, () -> newStartedShard(p -> corruptedShard, true) ); - assertThat(indexShardRecoveryException.getMessage(), equalTo("failed recovery")); + assertThat(indexShardRecoveryException.getMessage(), equalTo("failed to recover from gateway")); + assertThat( + asInstanceOf(RecoveryFailedException.class, indexShardRecoveryException.getCause()).getMessage(), + containsString("Recovery failed") + ); appender.assertAllExpectationsMatched(); } finally { @@ -3791,7 +3799,7 @@ public void testIsSearchIdle() throws Exception { recoverShardFromStore(primary); indexDoc(primary, "_doc", "0", "{\"foo\" : \"bar\"}"); assertTrue(primary.getEngine().refreshNeeded()); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); primary.scheduledRefresh(future); assertTrue(future.actionGet()); assertFalse(primary.isSearchIdle()); @@ -3832,6 +3840,10 @@ public void testIsSearchIdle() throws Exception { closeShards(primary); } + @TestIssueLogging( + issueUrl = "https://github.com/elastic/elasticsearch/issues/101008", + value = "org.elasticsearch.index.shard.IndexShard:TRACE" + ) public void testScheduledRefresh() throws Exception { // Setup and make shard search idle: Settings settings = indexSettings(IndexVersion.current(), 1, 1).build(); @@ -3841,7 +3853,7 @@ public void testScheduledRefresh() throws Exception { 
recoverShardFromStore(primary); indexDoc(primary, "_doc", "0", "{\"foo\" : \"bar\"}"); assertTrue(primary.getEngine().refreshNeeded()); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); primary.scheduledRefresh(future); assertTrue(future.actionGet()); IndexScopedSettings scopedSettings = primary.indexSettings().getScopedSettings(); @@ -3854,7 +3866,7 @@ public void testScheduledRefresh() throws Exception { assertTrue(primary.getEngine().refreshNeeded()); long lastSearchAccess = primary.getLastSearcherAccess(); // Now since shard is search idle scheduleRefresh(...) shouldn't refresh even if a refresh is needed: - PlainActionFuture future2 = PlainActionFuture.newFuture(); + PlainActionFuture future2 = new PlainActionFuture<>(); primary.scheduledRefresh(future2); assertFalse(future2.actionGet()); assertEquals(lastSearchAccess, primary.getLastSearcherAccess()); @@ -3893,14 +3905,19 @@ public void testScheduledRefresh() throws Exception { latch.await(); // Index a document while shard is search active and ensure scheduleRefresh(...) makes documen visible: + logger.info("--> index doc while shard search active"); indexDoc(primary, "_doc", "2", "{\"foo\" : \"bar\"}"); - PlainActionFuture future4 = PlainActionFuture.newFuture(); + logger.info("--> scheduledRefresh(future4)"); + PlainActionFuture future4 = new PlainActionFuture<>(); primary.scheduledRefresh(future4); assertFalse(future4.actionGet()); + + logger.info("--> ensure search idle"); assertTrue(primary.isSearchIdle()); assertTrue(primary.searchIdleTime() >= TimeValue.ZERO.millis()); primary.flushOnIdle(0); - PlainActionFuture future5 = PlainActionFuture.newFuture(); + logger.info("--> scheduledRefresh(future5)"); + PlainActionFuture future5 = new PlainActionFuture<>(); primary.scheduledRefresh(future5); assertTrue(future5.actionGet()); // make sure we refresh once the shard is inactive try (Engine.Searcher searcher = primary.acquireSearcher("test")) { @@ -3917,7 +3934,7 @@ public void testRefreshIsNeededWithRefreshListeners() throws IOException, Interr recoverShardFromStore(primary); indexDoc(primary, "_doc", "0", "{\"foo\" : \"bar\"}"); assertTrue(primary.getEngine().refreshNeeded()); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); primary.scheduledRefresh(future); assertTrue(future.actionGet()); Engine.IndexResult doc = indexDoc(primary, "_doc", "1", "{\"foo\" : \"bar\"}"); @@ -3929,7 +3946,7 @@ public void testRefreshIsNeededWithRefreshListeners() throws IOException, Interr } assertEquals(1, latch.getCount()); assertTrue(primary.getEngine().refreshNeeded()); - PlainActionFuture future2 = PlainActionFuture.newFuture(); + PlainActionFuture future2 = new PlainActionFuture<>(); primary.scheduledRefresh(future2); assertTrue(future2.actionGet()); latch.await(); @@ -3947,7 +3964,7 @@ public void testRefreshIsNeededWithRefreshListeners() throws IOException, Interr } assertEquals(1, latch1.getCount()); assertTrue(primary.getEngine().refreshNeeded()); - PlainActionFuture future3 = PlainActionFuture.newFuture(); + PlainActionFuture future3 = new PlainActionFuture<>(); primary.scheduledRefresh(future3); assertTrue(future3.actionGet()); latch1.await(); @@ -4138,6 +4155,44 @@ public Engine.Delete preDelete(ShardId shardId, Engine.Delete delete) { closeShards(shard); } + public void testMultiplePeriodicFlushesCanBeTriggeredBeforeTheyAreDurable() throws Exception { + List> pendingListeners = 
Collections.synchronizedList(new ArrayList<>()); + // Ensure that a single document forces a flush after each write + var indexSettings = Settings.builder() + .put(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(Translog.DEFAULT_HEADER_SIZE_IN_BYTES + 1)) + .build(); + var shardStarted = new AtomicBoolean(); + var flushExecutedBarrier = new CyclicBarrier(2); + var shard = newStartedShard(true, indexSettings, config -> new InternalEngine(config) { + @Override + public void flush(boolean force, boolean waitIfOngoing, ActionListener listener) throws EngineException { + if (shardStarted.get()) { + super.flush(force, waitIfOngoing, ActionListener.noop()); + pendingListeners.add(listener); + safeAwait(flushExecutedBarrier); + } else { + super.flush(force, waitIfOngoing, listener); + } + } + }); + shardStarted.set(true); + + int numberOfFlushes = randomIntBetween(5, 10); + for (int i = 0; i < numberOfFlushes; i++) { + indexDoc(shard, "_doc", Integer.toString(i)); + shard.afterWriteOperation(); + safeAwait(flushExecutedBarrier); + } + + assertThat(pendingListeners.size(), is(numberOfFlushes)); + assertThat(shard.flushStats().getPeriodic(), is(equalTo(0L))); + + pendingListeners.forEach(l -> l.onResponse(new Engine.FlushResult(true, 1))); + assertThat(shard.flushStats().getPeriodic(), is(equalTo((long) numberOfFlushes))); + + closeShards(shard); + } + public void testOnCloseStats() throws IOException { final IndexShard indexShard = newStartedShard(true); diff --git a/server/src/test/java/org/elasticsearch/index/shard/SearchOperationListenerTests.java b/server/src/test/java/org/elasticsearch/index/shard/SearchOperationListenerTests.java index 714e6a05cd3e9..2baca5662161d 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/SearchOperationListenerTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/SearchOperationListenerTests.java @@ -137,160 +137,161 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest indexingOperationListeners, logger ); - SearchContext ctx = new TestSearchContext((SearchExecutionContext) null); - compositeListener.onQueryPhase(ctx, timeInNanos.get()); - assertEquals(0, preFetch.get()); - assertEquals(0, preQuery.get()); - assertEquals(0, failedFetch.get()); - assertEquals(0, failedQuery.get()); - assertEquals(2, onQuery.get()); - assertEquals(0, onFetch.get()); - assertEquals(0, newContext.get()); - assertEquals(0, newScrollContext.get()); - assertEquals(0, freeContext.get()); - assertEquals(0, freeScrollContext.get()); - assertEquals(0, validateSearchContext.get()); + try (SearchContext ctx = new TestSearchContext((SearchExecutionContext) null)) { + compositeListener.onQueryPhase(ctx, timeInNanos.get()); + assertEquals(0, preFetch.get()); + assertEquals(0, preQuery.get()); + assertEquals(0, failedFetch.get()); + assertEquals(0, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(0, onFetch.get()); + assertEquals(0, newContext.get()); + assertEquals(0, newScrollContext.get()); + assertEquals(0, freeContext.get()); + assertEquals(0, freeScrollContext.get()); + assertEquals(0, validateSearchContext.get()); - compositeListener.onFetchPhase(ctx, timeInNanos.get()); - assertEquals(0, preFetch.get()); - assertEquals(0, preQuery.get()); - assertEquals(0, failedFetch.get()); - assertEquals(0, failedQuery.get()); - assertEquals(2, onQuery.get()); - assertEquals(2, onFetch.get()); - assertEquals(0, newContext.get()); - assertEquals(0, newScrollContext.get()); - assertEquals(0, 
freeContext.get()); - assertEquals(0, freeScrollContext.get()); - assertEquals(0, validateSearchContext.get()); + compositeListener.onFetchPhase(ctx, timeInNanos.get()); + assertEquals(0, preFetch.get()); + assertEquals(0, preQuery.get()); + assertEquals(0, failedFetch.get()); + assertEquals(0, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(0, newContext.get()); + assertEquals(0, newScrollContext.get()); + assertEquals(0, freeContext.get()); + assertEquals(0, freeScrollContext.get()); + assertEquals(0, validateSearchContext.get()); - compositeListener.onPreQueryPhase(ctx); - assertEquals(0, preFetch.get()); - assertEquals(2, preQuery.get()); - assertEquals(0, failedFetch.get()); - assertEquals(0, failedQuery.get()); - assertEquals(2, onQuery.get()); - assertEquals(2, onFetch.get()); - assertEquals(0, newContext.get()); - assertEquals(0, newScrollContext.get()); - assertEquals(0, freeContext.get()); - assertEquals(0, freeScrollContext.get()); - assertEquals(0, validateSearchContext.get()); + compositeListener.onPreQueryPhase(ctx); + assertEquals(0, preFetch.get()); + assertEquals(2, preQuery.get()); + assertEquals(0, failedFetch.get()); + assertEquals(0, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(0, newContext.get()); + assertEquals(0, newScrollContext.get()); + assertEquals(0, freeContext.get()); + assertEquals(0, freeScrollContext.get()); + assertEquals(0, validateSearchContext.get()); - compositeListener.onPreFetchPhase(ctx); - assertEquals(2, preFetch.get()); - assertEquals(2, preQuery.get()); - assertEquals(0, failedFetch.get()); - assertEquals(0, failedQuery.get()); - assertEquals(2, onQuery.get()); - assertEquals(2, onFetch.get()); - assertEquals(0, newContext.get()); - assertEquals(0, newScrollContext.get()); - assertEquals(0, freeContext.get()); - assertEquals(0, freeScrollContext.get()); - assertEquals(0, validateSearchContext.get()); + compositeListener.onPreFetchPhase(ctx); + assertEquals(2, preFetch.get()); + assertEquals(2, preQuery.get()); + assertEquals(0, failedFetch.get()); + assertEquals(0, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(0, newContext.get()); + assertEquals(0, newScrollContext.get()); + assertEquals(0, freeContext.get()); + assertEquals(0, freeScrollContext.get()); + assertEquals(0, validateSearchContext.get()); - compositeListener.onFailedFetchPhase(ctx); - assertEquals(2, preFetch.get()); - assertEquals(2, preQuery.get()); - assertEquals(2, failedFetch.get()); - assertEquals(0, failedQuery.get()); - assertEquals(2, onQuery.get()); - assertEquals(2, onFetch.get()); - assertEquals(0, newContext.get()); - assertEquals(0, newScrollContext.get()); - assertEquals(0, freeContext.get()); - assertEquals(0, freeScrollContext.get()); - assertEquals(0, validateSearchContext.get()); + compositeListener.onFailedFetchPhase(ctx); + assertEquals(2, preFetch.get()); + assertEquals(2, preQuery.get()); + assertEquals(2, failedFetch.get()); + assertEquals(0, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(0, newContext.get()); + assertEquals(0, newScrollContext.get()); + assertEquals(0, freeContext.get()); + assertEquals(0, freeScrollContext.get()); + assertEquals(0, validateSearchContext.get()); - compositeListener.onFailedQueryPhase(ctx); - assertEquals(2, preFetch.get()); - assertEquals(2, preQuery.get()); - assertEquals(2, failedFetch.get()); - 
assertEquals(2, failedQuery.get()); - assertEquals(2, onQuery.get()); - assertEquals(2, onFetch.get()); - assertEquals(0, newContext.get()); - assertEquals(0, newScrollContext.get()); - assertEquals(0, freeContext.get()); - assertEquals(0, freeScrollContext.get()); - assertEquals(0, validateSearchContext.get()); + compositeListener.onFailedQueryPhase(ctx); + assertEquals(2, preFetch.get()); + assertEquals(2, preQuery.get()); + assertEquals(2, failedFetch.get()); + assertEquals(2, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(0, newContext.get()); + assertEquals(0, newScrollContext.get()); + assertEquals(0, freeContext.get()); + assertEquals(0, freeScrollContext.get()); + assertEquals(0, validateSearchContext.get()); - compositeListener.onNewReaderContext(mock(ReaderContext.class)); - assertEquals(2, preFetch.get()); - assertEquals(2, preQuery.get()); - assertEquals(2, failedFetch.get()); - assertEquals(2, failedQuery.get()); - assertEquals(2, onQuery.get()); - assertEquals(2, onFetch.get()); - assertEquals(2, newContext.get()); - assertEquals(0, newScrollContext.get()); - assertEquals(0, freeContext.get()); - assertEquals(0, freeScrollContext.get()); - assertEquals(0, validateSearchContext.get()); + compositeListener.onNewReaderContext(mock(ReaderContext.class)); + assertEquals(2, preFetch.get()); + assertEquals(2, preQuery.get()); + assertEquals(2, failedFetch.get()); + assertEquals(2, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(2, newContext.get()); + assertEquals(0, newScrollContext.get()); + assertEquals(0, freeContext.get()); + assertEquals(0, freeScrollContext.get()); + assertEquals(0, validateSearchContext.get()); - compositeListener.onNewScrollContext(mock(ReaderContext.class)); - assertEquals(2, preFetch.get()); - assertEquals(2, preQuery.get()); - assertEquals(2, failedFetch.get()); - assertEquals(2, failedQuery.get()); - assertEquals(2, onQuery.get()); - assertEquals(2, onFetch.get()); - assertEquals(2, newContext.get()); - assertEquals(2, newScrollContext.get()); - assertEquals(0, freeContext.get()); - assertEquals(0, freeScrollContext.get()); - assertEquals(0, validateSearchContext.get()); + compositeListener.onNewScrollContext(mock(ReaderContext.class)); + assertEquals(2, preFetch.get()); + assertEquals(2, preQuery.get()); + assertEquals(2, failedFetch.get()); + assertEquals(2, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(2, newContext.get()); + assertEquals(2, newScrollContext.get()); + assertEquals(0, freeContext.get()); + assertEquals(0, freeScrollContext.get()); + assertEquals(0, validateSearchContext.get()); - compositeListener.onFreeReaderContext(mock(ReaderContext.class)); - assertEquals(2, preFetch.get()); - assertEquals(2, preQuery.get()); - assertEquals(2, failedFetch.get()); - assertEquals(2, failedQuery.get()); - assertEquals(2, onQuery.get()); - assertEquals(2, onFetch.get()); - assertEquals(2, newContext.get()); - assertEquals(2, newScrollContext.get()); - assertEquals(2, freeContext.get()); - assertEquals(0, freeScrollContext.get()); - assertEquals(0, validateSearchContext.get()); + compositeListener.onFreeReaderContext(mock(ReaderContext.class)); + assertEquals(2, preFetch.get()); + assertEquals(2, preQuery.get()); + assertEquals(2, failedFetch.get()); + assertEquals(2, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(2, 
newContext.get()); + assertEquals(2, newScrollContext.get()); + assertEquals(2, freeContext.get()); + assertEquals(0, freeScrollContext.get()); + assertEquals(0, validateSearchContext.get()); - compositeListener.onFreeScrollContext(mock(ReaderContext.class)); - assertEquals(2, preFetch.get()); - assertEquals(2, preQuery.get()); - assertEquals(2, failedFetch.get()); - assertEquals(2, failedQuery.get()); - assertEquals(2, onQuery.get()); - assertEquals(2, onFetch.get()); - assertEquals(2, newContext.get()); - assertEquals(2, newScrollContext.get()); - assertEquals(2, freeContext.get()); - assertEquals(2, freeScrollContext.get()); - assertEquals(0, validateSearchContext.get()); + compositeListener.onFreeScrollContext(mock(ReaderContext.class)); + assertEquals(2, preFetch.get()); + assertEquals(2, preQuery.get()); + assertEquals(2, failedFetch.get()); + assertEquals(2, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(2, newContext.get()); + assertEquals(2, newScrollContext.get()); + assertEquals(2, freeContext.get()); + assertEquals(2, freeScrollContext.get()); + assertEquals(0, validateSearchContext.get()); - if (throwingListeners == 0) { - compositeListener.validateReaderContext(mock(ReaderContext.class), Empty.INSTANCE); - } else { - RuntimeException expected = expectThrows( - RuntimeException.class, - () -> compositeListener.validateReaderContext(mock(ReaderContext.class), Empty.INSTANCE) - ); - assertNull(expected.getMessage()); - assertEquals(throwingListeners - 1, expected.getSuppressed().length); - if (throwingListeners > 1) { - assertThat(expected.getSuppressed()[0], not(sameInstance(expected))); + if (throwingListeners == 0) { + compositeListener.validateReaderContext(mock(ReaderContext.class), Empty.INSTANCE); + } else { + RuntimeException expected = expectThrows( + RuntimeException.class, + () -> compositeListener.validateReaderContext(mock(ReaderContext.class), Empty.INSTANCE) + ); + assertNull(expected.getMessage()); + assertEquals(throwingListeners - 1, expected.getSuppressed().length); + if (throwingListeners > 1) { + assertThat(expected.getSuppressed()[0], not(sameInstance(expected))); + } } + assertEquals(2, preFetch.get()); + assertEquals(2, preQuery.get()); + assertEquals(2, failedFetch.get()); + assertEquals(2, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(2, newContext.get()); + assertEquals(2, newScrollContext.get()); + assertEquals(2, freeContext.get()); + assertEquals(2, freeScrollContext.get()); + assertEquals(2, validateSearchContext.get()); } - assertEquals(2, preFetch.get()); - assertEquals(2, preQuery.get()); - assertEquals(2, failedFetch.get()); - assertEquals(2, failedQuery.get()); - assertEquals(2, onQuery.get()); - assertEquals(2, onFetch.get()); - assertEquals(2, newContext.get()); - assertEquals(2, newScrollContext.get()); - assertEquals(2, freeContext.get()); - assertEquals(2, freeScrollContext.get()); - assertEquals(2, validateSearchContext.get()); } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java index b32e9f4db8b77..9e1be4c629b4a 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.xcontent.XContentType; import java.io.IOException; -import 
java.nio.charset.StandardCharsets; import java.util.function.LongSupplier; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; @@ -45,7 +44,7 @@ public void testGetForUpdate() throws IOException { assertTrue(primary.getEngine().refreshNeeded()); GetResult testGet = primary.getService().getForUpdate("0", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); assertFalse(testGet.getFields().containsKey(RoutingFieldMapper.NAME)); - assertEquals(new String(testGet.source(), StandardCharsets.UTF_8), "{\"foo\" : \"bar\"}"); + assertEquals(testGet.sourceRef().utf8ToString(), "{\"foo\" : \"bar\"}"); assertEquals(translogInMemorySegmentCountExpected, translogInMemorySegmentCount.getAsLong()); try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { assertEquals(searcher.getIndexReader().maxDoc(), 1); // we refreshed @@ -54,7 +53,7 @@ public void testGetForUpdate() throws IOException { Engine.IndexResult test1 = indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); assertTrue(primary.getEngine().refreshNeeded()); GetResult testGet1 = primary.getService().getForUpdate("1", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); - assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); + assertEquals(testGet1.sourceRef().utf8ToString(), "{\"foo\" : \"baz\"}"); assertTrue(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); assertEquals("foobar", testGet1.getFields().get(RoutingFieldMapper.NAME).getValue()); assertEquals(translogInMemorySegmentCountExpected, translogInMemorySegmentCount.getAsLong()); @@ -70,14 +69,14 @@ public void testGetForUpdate() throws IOException { Engine.IndexResult test2 = indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); assertTrue(primary.getEngine().refreshNeeded()); testGet1 = primary.getService().getForUpdate("1", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); - assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); + assertEquals(testGet1.sourceRef().utf8ToString(), "{\"foo\" : \"baz\"}"); assertTrue(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); assertEquals("foobar", testGet1.getFields().get(RoutingFieldMapper.NAME).getValue()); assertEquals(translogInMemorySegmentCountExpected, translogInMemorySegmentCount.getAsLong()); final long primaryTerm = primary.getOperationPrimaryTerm(); testGet1 = primary.getService().getForUpdate("1", test2.getSeqNo(), primaryTerm); - assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); + assertEquals(testGet1.sourceRef().utf8ToString(), "{\"foo\" : \"baz\"}"); assertEquals(translogInMemorySegmentCountExpected, translogInMemorySegmentCount.getAsLong()); expectThrows(VersionConflictEngineException.class, () -> primary.getService().getForUpdate("1", test2.getSeqNo() + 1, primaryTerm)); @@ -145,7 +144,7 @@ private void runGetFromTranslogWithOptions( assertFalse(testGet.getFields().containsKey(RoutingFieldMapper.NAME)); assertFalse(testGet.getFields().containsKey("foo")); assertFalse(testGet.getFields().containsKey("bar")); - assertThat(new String(testGet.source() == null ? new byte[0] : testGet.source(), StandardCharsets.UTF_8), equalTo(expectedResult)); + assertThat(testGet.sourceRef() == null ? 
"" : testGet.sourceRef().utf8ToString(), equalTo(expectedResult)); try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { assertEquals(searcher.getIndexReader().maxDoc(), 1); // we refreshed } @@ -153,7 +152,7 @@ private void runGetFromTranslogWithOptions( indexDoc(primary, "1", docToIndex, XContentType.JSON, "foobar"); assertTrue(primary.getEngine().refreshNeeded()); GetResult testGet1 = primary.getService().getForUpdate("1", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); - assertEquals(new String(testGet1.source() == null ? new byte[0] : testGet1.source(), StandardCharsets.UTF_8), expectedResult); + assertEquals(testGet1.sourceRef() == null ? "" : testGet1.sourceRef().utf8ToString(), expectedResult); assertTrue(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); assertFalse(testGet.getFields().containsKey("foo")); assertFalse(testGet.getFields().containsKey("bar")); @@ -174,7 +173,7 @@ private void runGetFromTranslogWithOptions( assertTrue(primary.getEngine().refreshNeeded()); GetResult testGet2 = primary.getService() .get("2", new String[] { "foo" }, true, 1, VersionType.INTERNAL, FetchSourceContext.FETCH_SOURCE, false); - assertEquals(new String(testGet2.source() == null ? new byte[0] : testGet2.source(), StandardCharsets.UTF_8), expectedResult); + assertEquals(testGet2.sourceRef() == null ? "" : testGet2.sourceRef().utf8ToString(), expectedResult); assertTrue(testGet2.getFields().containsKey(RoutingFieldMapper.NAME)); assertTrue(testGet2.getFields().containsKey("foo")); assertEquals(expectedFooVal, testGet2.getFields().get("foo").getValue()); @@ -189,7 +188,7 @@ private void runGetFromTranslogWithOptions( testGet2 = primary.getService() .get("2", new String[] { "foo" }, true, 1, VersionType.INTERNAL, FetchSourceContext.FETCH_SOURCE, false); - assertEquals(new String(testGet2.source() == null ? new byte[0] : testGet2.source(), StandardCharsets.UTF_8), expectedResult); + assertEquals(testGet2.sourceRef() == null ? 
"" : testGet2.sourceRef().utf8ToString(), expectedResult); assertTrue(testGet2.getFields().containsKey(RoutingFieldMapper.NAME)); assertTrue(testGet2.getFields().containsKey("foo")); assertEquals(expectedFooVal, testGet2.getFields().get("foo").getValue()); @@ -246,7 +245,7 @@ public void testGetFromTranslog() throws IOException { assertFalse(LiveVersionMapTestUtils.isUnsafe(map)); // A flush shouldn't change the recorded last unsafe generation for gets - PlainActionFuture flushFuture = PlainActionFuture.newFuture(); + PlainActionFuture flushFuture = new PlainActionFuture<>(); engine.flush(true, true, flushFuture); var flushResult = flushFuture.actionGet(); assertTrue(flushResult.flushPerformed()); diff --git a/server/src/test/java/org/elasticsearch/index/termvectors/TermVectorsServiceTests.java b/server/src/test/java/org/elasticsearch/index/termvectors/TermVectorsServiceTests.java index c7d52e197a1cf..b51c869fc73c4 100644 --- a/server/src/test/java/org/elasticsearch/index/termvectors/TermVectorsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/termvectors/TermVectorsServiceTests.java @@ -48,7 +48,7 @@ public void testTook() throws Exception { createIndex("test", Settings.EMPTY, mapping); ensureGreen(); - client().prepareIndex("test").setId("0").setSource("field", "foo bar").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("0").setSource("field", "foo bar").setRefreshPolicy(IMMEDIATE).get(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService test = indicesService.indexService(resolveIndex("test")); @@ -82,9 +82,7 @@ public void testDocFreqs() throws IOException { int max = between(3, 10); BulkRequestBuilder bulk = client().prepareBulk(); for (int i = 0; i < max; i++) { - bulk.add( - client().prepareIndex("test").setId(Integer.toString(i)).setSource("text", "the quick brown fox jumped over the lazy dog") - ); + bulk.add(prepareIndex("test").setId(Integer.toString(i)).setSource("text", "the quick brown fox jumped over the lazy dog")); } bulk.get(); @@ -123,9 +121,7 @@ public void testWithIndexedPhrases() throws IOException { int max = between(3, 10); BulkRequestBuilder bulk = client().prepareBulk(); for (int i = 0; i < max; i++) { - bulk.add( - client().prepareIndex("test").setId(Integer.toString(i)).setSource("text", "the quick brown fox jumped over the lazy dog") - ); + bulk.add(prepareIndex("test").setId(Integer.toString(i)).setSource("text", "the quick brown fox jumped over the lazy dog")); } bulk.get(); diff --git a/server/src/test/java/org/elasticsearch/indices/ExecutorSelectorTests.java b/server/src/test/java/org/elasticsearch/indices/ExecutorSelectorTests.java index 4fbac946f5967..b27094a6f37f2 100644 --- a/server/src/test/java/org/elasticsearch/indices/ExecutorSelectorTests.java +++ b/server/src/test/java/org/elasticsearch/indices/ExecutorSelectorTests.java @@ -77,15 +77,10 @@ public void testDefaultSystemDataStreamThreadPools() { ".test-data-stream", "a data stream for testing", SystemDataStreamDescriptor.Type.INTERNAL, - new ComposableIndexTemplate( - List.of(".system-data-stream"), - null, - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate() - ), + ComposableIndexTemplate.builder() + .indexPatterns(List.of(".system-data-stream")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(), Map.of(), Collections.singletonList("test"), null @@ -114,15 +109,10 @@ public void testCustomSystemDataStreamThreadPools() { ".test-data-stream", "a data stream for 
testing", SystemDataStreamDescriptor.Type.INTERNAL, - new ComposableIndexTemplate( - List.of(".system-data-stream"), - null, - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate() - ), + ComposableIndexTemplate.builder() + .indexPatterns(List.of(".system-data-stream")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(), Map.of(), Collections.singletonList("test"), new ExecutorNames( diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index 025f5e89e9469..846625fc4f790 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -268,7 +268,7 @@ public void testDeleteIndexStore() throws Exception { assertNull(meta.index("test")); test = createIndex("test"); - client().prepareIndex("test").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); client().admin().indices().prepareFlush("test").get(); assertHitCount(client().prepareSearch("test"), 1); IndexMetadata secondMetadata = clusterService.state().metadata().index("test"); diff --git a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java index 4195c98f32391..c20c9615573d6 100644 --- a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java @@ -62,6 +62,7 @@ public void testThreadedUpdatesToChildBreaker() throws Exception { final AtomicReference breakerRef = new AtomicReference<>(null); final CircuitBreakerService service = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, Settings.EMPTY, Collections.emptyList(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) @@ -79,6 +80,7 @@ public void checkParentLimit(long newBytesReserved, String label) throws Circuit }; final BreakerSettings settings = new BreakerSettings(CircuitBreaker.REQUEST, (BYTES_PER_THREAD * NUM_THREADS) - 1, 1.0); final ChildMemoryCircuitBreaker breaker = new ChildMemoryCircuitBreaker( + CircuitBreakerMetrics.NOOP.getParentTripCountTotal(), settings, logger, (HierarchyCircuitBreakerService) service, @@ -127,6 +129,7 @@ public void testThreadedUpdatesToChildBreakerWithParentLimit() throws Exception final AtomicInteger parentTripped = new AtomicInteger(0); final AtomicReference breakerRef = new AtomicReference<>(null); final CircuitBreakerService service = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, Settings.EMPTY, Collections.emptyList(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) @@ -155,6 +158,7 @@ public void checkParentLimit(long newBytesReserved, String label) throws Circuit }; final BreakerSettings settings = new BreakerSettings(CircuitBreaker.REQUEST, childLimit, 1.0); final ChildMemoryCircuitBreaker breaker = new ChildMemoryCircuitBreaker( + CircuitBreakerMetrics.NOOP.getParentTripCountTotal(), settings, logger, (HierarchyCircuitBreakerService) service, @@ -219,6 +223,7 @@ public void testBorrowingSiblingBreakerMemory() { .build(); try ( CircuitBreakerService service = new HierarchyCircuitBreakerService( + 
CircuitBreakerMetrics.NOOP, clusterSettings, Collections.emptyList(), new ClusterSettings(clusterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) @@ -261,6 +266,7 @@ public void testParentBreaksOnRealMemoryUsage() { AtomicLong memoryUsage = new AtomicLong(); final CircuitBreakerService service = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, clusterSettings, Collections.emptyList(), new ClusterSettings(clusterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) @@ -341,6 +347,7 @@ public void testParentTriggersG1GCBeforeBreaking() throws InterruptedException, AtomicLong time = new AtomicLong(randomLongBetween(Long.MIN_VALUE / 2, Long.MAX_VALUE / 2)); long interval = randomLongBetween(1, 1000); final HierarchyCircuitBreakerService service = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, clusterSettings, Collections.emptyList(), new ClusterSettings(clusterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), @@ -424,6 +431,7 @@ public void testParentDoesOverLimitCheck() { boolean saveTheDay = randomBoolean(); AtomicBoolean overLimitTriggered = new AtomicBoolean(); final HierarchyCircuitBreakerService service = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, clusterSettings, Collections.emptyList(), new ClusterSettings(clusterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), @@ -690,6 +698,7 @@ public void testTrippedCircuitBreakerDurability() { .build(); try ( CircuitBreakerService service = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, clusterSettings, Collections.emptyList(), new ClusterSettings(clusterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) @@ -732,6 +741,7 @@ public void testAllocationBucketsBreaker() { try ( HierarchyCircuitBreakerService service = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, clusterSettings, Collections.emptyList(), new ClusterSettings(clusterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) @@ -763,6 +773,7 @@ public void testRegisterCustomCircuitBreakers_WithDuplicates() { IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, () -> new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, Settings.EMPTY, Collections.singletonList(new BreakerSettings(CircuitBreaker.FIELDDATA, 100, 1.2)), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) @@ -776,6 +787,7 @@ public void testRegisterCustomCircuitBreakers_WithDuplicates() { iae = expectThrows( IllegalArgumentException.class, () -> new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, Settings.EMPTY, Arrays.asList(new BreakerSettings("foo", 100, 1.2), new BreakerSettings("foo", 200, 0.1)), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) @@ -790,6 +802,7 @@ public void testRegisterCustomCircuitBreakers_WithDuplicates() { public void testCustomCircuitBreakers() { try ( CircuitBreakerService service = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, Settings.EMPTY, Arrays.asList(new BreakerSettings("foo", 100, 1.2), new BreakerSettings("bar", 200, 0.1)), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) @@ -811,6 +824,7 @@ private static long mb(long size) { public void testUpdatingUseRealMemory() { try ( HierarchyCircuitBreakerService service = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, Settings.EMPTY, Collections.emptyList(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) @@ -840,6 +854,7 @@ public void 
testApplySettingForUpdatingUseRealMemory() { try ( HierarchyCircuitBreakerService service = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, Settings.EMPTY, Collections.emptyList(), clusterSettings diff --git a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java new file mode 100644 index 0000000000000..961fe2dc15efe --- /dev/null +++ b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java @@ -0,0 +1,239 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.indices.breaker; + +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.RecordingInstruments; +import org.elasticsearch.telemetry.RecordingMeterRegistry; +import org.elasticsearch.telemetry.TestTelemetryPlugin; +import org.elasticsearch.telemetry.metric.LongCounter; +import org.elasticsearch.telemetry.metric.MeterRegistry; +import org.elasticsearch.test.ESIntegTestCase; +import org.hamcrest.Matchers; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Stream; + +import static org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING; +import static org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING; +import static org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING; +import static org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING; +import static org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING; +import static org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING; +import static org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0, supportsDedicatedMasters = true) +public class HierarchyCircuitBreakerTelemetryTests extends ESIntegTestCase { + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return List.of(TestCircuitBreakerTelemetryPlugin.class); + } + + public static class TestCircuitBreakerTelemetryPlugin extends TestTelemetryPlugin { + protected final MeterRegistry meter = new RecordingMeterRegistry() { + private final LongCounter inFlightRequests = new RecordingInstruments.RecordingLongCounter( + CircuitBreakerMetrics.ES_BREAKER_IN_FLIGHT_REQUESTS_TRIP_COUNT_TOTAL, + recorder + ) {
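+ // incrementBy is deliberately unsupported on these recording counters: trip counts are expected to arrive via the plain increment() path, so any unexpected incrementBy call fails fast.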
+ @Override + public void incrementBy(long inc) { + throw new UnsupportedOperationException(); + } + + @Override + public void incrementBy(long inc, Map<String, Object> attributes) { + throw new UnsupportedOperationException(); + } + }; + + private final LongCounter fielddata = new RecordingInstruments.RecordingLongCounter( + CircuitBreakerMetrics.ES_BREAKER_FIELD_DATA_TRIP_COUNT_TOTAL, + recorder + ) { + @Override + public void incrementBy(long inc) { + throw new UnsupportedOperationException(); + } + + @Override + public void incrementBy(long inc, Map<String, Object> attributes) { + throw new UnsupportedOperationException(); + } + }; + + private final LongCounter request = new RecordingInstruments.RecordingLongCounter( + CircuitBreakerMetrics.ES_BREAKER_REQUEST_TRIP_COUNT_TOTAL, + recorder + ) { + @Override + public void incrementBy(long inc) { + throw new UnsupportedOperationException(); + } + + @Override + public void incrementBy(long inc, Map<String, Object> attributes) { + throw new UnsupportedOperationException(); + } + }; + + private final LongCounter parent = new RecordingInstruments.RecordingLongCounter( + CircuitBreakerMetrics.ES_BREAKER_PARENT_TRIP_COUNT_TOTAL, + recorder + ) { + @Override + public void incrementBy(long inc) { + throw new UnsupportedOperationException(); + } + + @Override + public void incrementBy(long inc, Map<String, Object> attributes) { + throw new UnsupportedOperationException(); + } + }; + + @Override + protected LongCounter buildLongCounter(String name, String description, String unit) { + if (name.equals(inFlightRequests.getName())) { + return inFlightRequests; + } else if (name.equals(request.getName())) { + return request; + } else if (name.equals(fielddata.getName())) { + return fielddata; + } else if (name.equals(parent.getName())) { + return parent; + } + throw new IllegalArgumentException("Unknown counter metric name [" + name + "]"); + } + + @Override + public LongCounter registerLongCounter(String name, String description, String unit) { + assertCircuitBreakerName(name); + return super.registerLongCounter(name, description, unit); + } + + @Override + public LongCounter getLongCounter(String name) { + assertCircuitBreakerName(name); + return super.getLongCounter(name); + } + + private void assertCircuitBreakerName(final String name) { + assertThat( + name, + Matchers.oneOf( + CircuitBreakerMetrics.ES_BREAKER_FIELD_DATA_TRIP_COUNT_TOTAL, + CircuitBreakerMetrics.ES_BREAKER_IN_FLIGHT_REQUESTS_TRIP_COUNT_TOTAL, + CircuitBreakerMetrics.ES_BREAKER_PARENT_TRIP_COUNT_TOTAL, + CircuitBreakerMetrics.ES_BREAKER_REQUEST_TRIP_COUNT_TOTAL + ) + ); + } + }; + } + + public void testCircuitBreakerTripCountMetric() { + final Settings circuitBreakerSettings = Settings.builder() + .put(FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), 100, ByteSizeUnit.BYTES) + .put(FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.0) + .put(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), 100, ByteSizeUnit.BYTES) + .put(REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.0) + .put(IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), 100, ByteSizeUnit.BYTES) + .put(IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.0) + .put(TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), 150, ByteSizeUnit.BYTES) + .put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), false) + .build(); + String dataNodeName = null; + String masterNodeName = null; + try { + // NOTE: we start with empty circuitBreakerSettings to allow cluster formation + masterNodeName = internalCluster().startMasterOnlyNode(Settings.EMPTY);
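+ // Once both nodes are up, the persistent-settings update below drops every breaker limit to roughly 100 bytes, so the index creation and indexing that follow are expected to trip a breaker and land in the catch block.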
+ dataNodeName = internalCluster().startDataOnlyNode(Settings.EMPTY); + assertTrue(clusterAdmin().prepareUpdateSettings().setPersistentSettings(circuitBreakerSettings).get().isAcknowledged()); + assertTrue( + client().admin() + .indices() + .prepareCreate("test") + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ) + .get() + .isAcknowledged() + ); + assertEquals( + RestStatus.OK.getStatus(), + client().prepareIndex("test").setWaitForActiveShards(1).setSource("field", "value").get().status().getStatus() + ); + } catch (CircuitBreakingException cbex) { + final List<Measurement> dataNodeMeasurements = getMeasurements(dataNodeName); + final List<Measurement> masterNodeMeasurements = getMeasurements(masterNodeName); + final List<Measurement> allMeasurements = Stream.concat(dataNodeMeasurements.stream(), masterNodeMeasurements.stream()) + .toList(); + assertThat(allMeasurements, Matchers.not(Matchers.empty())); + final Measurement measurement = allMeasurements.get(0); + assertThat(measurement.getLong(), Matchers.equalTo(1L)); + assertThat(measurement.value(), Matchers.equalTo(1L)); + assertThat(measurement.isLong(), Matchers.equalTo(true)); + return; + } + fail("Expected exception not thrown"); + } + + private List<Measurement> getMeasurements(String nodeName) { + final TestTelemetryPlugin telemetryPlugin = internalCluster().getInstance(PluginsService.class, nodeName) + .filterPlugins(TestCircuitBreakerTelemetryPlugin.class) + .toList() + .get(0); + return Measurement.combine( + Stream.of( + telemetryPlugin.getLongCounterMeasurement(CircuitBreakerMetrics.ES_BREAKER_IN_FLIGHT_REQUESTS_TRIP_COUNT_TOTAL) + .stream(), + telemetryPlugin.getLongCounterMeasurement(CircuitBreakerMetrics.ES_BREAKER_FIELD_DATA_TRIP_COUNT_TOTAL).stream(), + telemetryPlugin.getLongCounterMeasurement(CircuitBreakerMetrics.ES_BREAKER_REQUEST_TRIP_COUNT_TOTAL).stream(), + telemetryPlugin.getLongCounterMeasurement(CircuitBreakerMetrics.ES_BREAKER_PARENT_TRIP_COUNT_TOTAL).stream() + ).flatMap(Function.identity()).toList() + ); + } + + // Make sure circuit breaker telemetry on trip count reports the same values as circuit breaker stats + private void assertCircuitBreakerTripCount( + final HierarchyCircuitBreakerService circuitBreakerService, + final String circuitBreakerName, + int firstBytesEstimate, + int secondBytesEstimate, + long expectedTripCountValue + ) { + try { + circuitBreakerService.getBreaker(circuitBreakerName).addEstimateBytesAndMaybeBreak(firstBytesEstimate, randomAlphaOfLength(5)); + circuitBreakerService.getBreaker(circuitBreakerName).addEstimateBytesAndMaybeBreak(secondBytesEstimate, randomAlphaOfLength(5)); + } catch (final CircuitBreakingException cbex) { + final CircuitBreakerStats circuitBreakerStats = Arrays.stream(circuitBreakerService.stats().getAllStats()) + .filter(stats -> circuitBreakerName.equals(stats.getName())) + .findAny() + .get(); + assertThat(circuitBreakerService.getBreaker(circuitBreakerName).getTrippedCount(), Matchers.equalTo(expectedTripCountValue)); + assertThat(circuitBreakerStats.getTrippedCount(), Matchers.equalTo(expectedTripCountValue)); + } + } + +} diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index 6f57707cd9e78..13ecc0841ba55 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++
b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -84,6 +84,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettingProviders; @@ -134,6 +135,7 @@ public class ClusterStateChanges { private final TransportService transportService; private final AllocationService allocationService; private final ClusterService clusterService; + private final FeatureService featureService; private final ShardStateAction.ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor; private final ShardStateAction.ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor; @@ -216,6 +218,8 @@ protected ExecutorService createThreadPoolExecutor() { } // services + featureService = new FeatureService(List.of()); + transportService = new TransportService( SETTINGS, transport, @@ -406,7 +410,7 @@ public ClusterState reroute(ClusterState state, ClusterRerouteRequest request) { public ClusterState addNode(ClusterState clusterState, DiscoveryNode discoveryNode, TransportVersion transportVersion) { return runTasks( - new NodeJoinExecutor(allocationService, (s, p, r) -> {}), + new NodeJoinExecutor(allocationService, (s, p, r) -> {}, featureService), clusterState, List.of( JoinTask.singleNode( @@ -423,7 +427,7 @@ public ClusterState addNode(ClusterState clusterState, DiscoveryNode discoveryNo public ClusterState joinNodesAndBecomeMaster(ClusterState clusterState, List nodes, TransportVersion transportVersion) { return runTasks( - new NodeJoinExecutor(allocationService, (s, p, r) -> {}), + new NodeJoinExecutor(allocationService, (s, p, r) -> {}, featureService), clusterState, List.of( JoinTask.completingElection( diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java index 16832aa07ccd6..0317a6baf040a 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -347,7 +347,7 @@ public void testMarkDoneFailureIsPropagated() throws Exception { shard.markAsRecovering("peer recovery", new RecoveryState(shard.routingEntry(), pNode, rNode)); shard.prepareForIndexRecovery(); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, 0L, null, null, new PeerRecoveryTargetService.RecoveryListener() { @Override public void onRecoveryDone(RecoveryState state, ShardLongFieldRange timestampMillisFieldRange) { @@ -458,7 +458,7 @@ public int getReadSnapshotFileBufferSizeForRepo(String repository) { RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, 0L, snapshotFilesProvider, () -> {}, null); - PlainActionFuture writeSnapshotFileFuture = PlainActionFuture.newFuture(); + PlainActionFuture writeSnapshotFileFuture = new PlainActionFuture<>(); recoveryTarget.restoreFileFromSnapshot(repositoryName, indexId, fileInfo, writeSnapshotFileFuture); writeSnapshotFileFuture.get(); @@ -538,7 +538,7 @@ public int getReadSnapshotFileBufferSizeForRepo(String repository) { SNAPSHOT_FILE_PART_SIZE ); - PlainActionFuture 
writeSnapshotFileFuture = PlainActionFuture.newFuture(); + PlainActionFuture writeSnapshotFileFuture = new PlainActionFuture<>(); recoveryTarget.restoreFileFromSnapshot(repositoryName, indexId, fileInfo, writeSnapshotFileFuture); ExecutionException executionException = expectThrows(ExecutionException.class, writeSnapshotFileFuture::get); @@ -558,7 +558,7 @@ public int getReadSnapshotFileBufferSizeForRepo(String repository) { assertThat(fileDetails.recovered(), equalTo(0L)); // Subsequent writes on the same file can proceed without issues - PlainActionFuture writeChunkFuture = PlainActionFuture.newFuture(); + PlainActionFuture writeChunkFuture = new PlainActionFuture<>(); ReleasableBytesReference bytesRef = ReleasableBytesReference.wrap(new BytesArray(fileData)); recoveryTarget.writeFileChunk(storeFileMetadata, 0, bytesRef, true, 0, writeChunkFuture); writeChunkFuture.get(); @@ -641,7 +641,7 @@ public int getReadSnapshotFileBufferSizeForRepo(String repository) { for (Map.Entry fileInfoEntry : snapshotFiles.entrySet()) { BlobStoreIndexShardSnapshot.FileInfo fileInfo = fileInfoEntry.getKey(); - PlainActionFuture writeSnapshotFileFuture = PlainActionFuture.newFuture(); + PlainActionFuture writeSnapshotFileFuture = new PlainActionFuture<>(); recoveryTarget.restoreFileFromSnapshot(repositoryName, indexId, fileInfo, writeSnapshotFileFuture); // Simulate error, that stops downloading snapshot files @@ -652,7 +652,7 @@ public int getReadSnapshotFileBufferSizeForRepo(String repository) { writeSnapshotFileFuture.get(); } - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); recoveryTarget.receiveFileInfo(emptyList(), emptyList(), emptyList(), emptyList(), 0, future); future.get(); @@ -713,7 +713,7 @@ public int getReadSnapshotFileBufferSizeForRepo(String repository) { recoveryTarget.incRef(); - PlainActionFuture writeSnapshotFileFuture = PlainActionFuture.newFuture(); + PlainActionFuture writeSnapshotFileFuture = new PlainActionFuture<>(); recoveryTarget.restoreFileFromSnapshot(repository, indexId, fileInfo, writeSnapshotFileFuture); writeSnapshotFileFuture.get(); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryRequestTrackerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryRequestTrackerTests.java index 831a9423cee40..6d3d22b91a811 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryRequestTrackerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryRequestTrackerTests.java @@ -47,7 +47,7 @@ public void testIdempotencyIsEnforced() { final long seqNo = j; int iterations = randomIntBetween(2, 5); for (int i = 0; i < iterations; ++i) { - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); Set> set = seqToResult.computeIfAbsent(seqNo, (k) -> ConcurrentCollections.newConcurrentSet()); set.add(future); threadPool.generic().execute(() -> { diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 0285a8c66fffb..fd285ba8b239f 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -1175,7 +1175,7 @@ void recoverFilesFromSourceAndSnapshot( super.recoverFilesFromSourceAndSnapshot(shardRecoveryPlan, store, 
stopWatch, listener); } }; - PlainActionFuture phase1Listener = PlainActionFuture.newFuture(); + PlainActionFuture phase1Listener = new PlainActionFuture<>(); IndexCommit indexCommit = DirectoryReader.listCommits(dir).get(0); handler.phase1(indexCommit, 0, () -> 0, phase1Listener); phase1Listener.get(); @@ -1271,7 +1271,7 @@ void createRetentionLease(long startingSeqNo, ActionListener lis } }; - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); handler.recoverFilesFromSourceAndSnapshot(shardRecoveryPlan, store, mock(StopWatch.class), future); future.actionGet(); @@ -1339,7 +1339,7 @@ void createRetentionLease(long startingSeqNo, ActionListener lis } }; - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); handler.recoverFilesFromSourceAndSnapshot(shardRecoveryPlan, store, mock(StopWatch.class), future); assertBusy(() -> { @@ -1431,7 +1431,7 @@ void createRetentionLease(long startingSeqNo, ActionListener lis } }; - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); handler.recoverFilesFromSourceAndSnapshot(shardRecoveryPlan, store, mock(StopWatch.class), future); downloadSnapshotFileReceived.await(); @@ -1502,7 +1502,7 @@ void createRetentionLease(long startingSeqNo, ActionListener lis } }; - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); handler.recoverFilesFromSourceAndSnapshot(shardRecoveryPlan, store, mock(StopWatch.class), future); downloadSnapshotFileReceived.await(); @@ -1657,7 +1657,7 @@ void createRetentionLease(long startingSeqNo, ActionListener lis } }; - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); handler.recoverFilesFromSourceAndSnapshot(shardRecoveryPlan, store, mock(StopWatch.class), future); downloadSnapshotFileReceived.await(); diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java index 6243131141497..d345197d88a23 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java @@ -2605,12 +2605,32 @@ private static IngestService createWithProcessors( ThreadPool threadPool = mock(ThreadPool.class); when(threadPool.generic()).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE); when(threadPool.executor(anyString())).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE); - return new IngestService(mock(ClusterService.class), threadPool, null, null, null, List.of(new IngestPlugin() { - @Override - public Map getProcessors(final Processor.Parameters parameters) { - return processors; - } - }), client, null, documentParsingObserverSupplier); + IngestService ingestService = new IngestService( + mock(ClusterService.class), + threadPool, + null, + null, + null, + List.of(new IngestPlugin() { + @Override + public Map getProcessors(final Processor.Parameters parameters) { + return processors; + } + }), + client, + null, + documentParsingObserverSupplier + ); + if (randomBoolean()) { + /* + * Testing the copy constructor directly is difficult because there is no equals() method in IngestService, but there is a lot + * of private internal state. 
Here we use the copy constructor half the time in all of the unit tests, with the assumption that + * if none of our tests observe any difference then the copy constructor is working as expected. + */ + return new IngestService(ingestService); + } else { + return ingestService; + } } private CompoundProcessor mockCompoundProcessor() { @@ -2629,10 +2649,6 @@ private class IngestDocumentMatcher implements ArgumentMatcher<IngestDocument> { private final IngestDocument ingestDocument; - IngestDocumentMatcher(String index, String type, String id, Map<String, Object> source) { - this.ingestDocument = new IngestDocument(index, id, 1, null, null, source); - } - IngestDocumentMatcher(String index, String type, String id, long version, VersionType versionType, Map<String, Object> source) { this.ingestDocument = new IngestDocument(index, id, version, null, versionType, source); } diff --git a/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java new file mode 100644 index 0000000000000..793cd21fbfd5b --- /dev/null +++ b/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java @@ -0,0 +1,143 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest; + +import org.elasticsearch.action.bulk.SimulateBulkRequest; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.plugins.IngestPlugin; +import org.elasticsearch.plugins.internal.DocumentParsingObserver; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentType; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SimulateIngestServiceTests extends ESTestCase { + + public void testGetPipeline() { + PipelineConfiguration pipelineConfiguration = new PipelineConfiguration("pipeline1", new BytesArray(""" + {"processors": [{"processor1" : {}}]}"""), XContentType.JSON); + IngestMetadata ingestMetadata = new IngestMetadata(Map.of("pipeline1", pipelineConfiguration)); + Map<String, Processor.Factory> processors = new HashMap<>(); + processors.put( + "processor1", + (factories, tag, description, config) -> new FakeProcessor("processor1", tag, description, ingestDocument -> {}) { + } + ); + processors.put( + "processor2", + (factories, tag, description, config) -> new FakeProcessor("processor2", tag, description, ingestDocument -> {}) { + } + ); + processors.put( + "processor3", + (factories, tag, description, config) -> new FakeProcessor("processor3", tag, description, ingestDocument -> {}) { + } + ); + IngestService ingestService = createWithProcessors(processors); + ingestService.innerUpdatePipelines(ingestMetadata); + { + // First we make sure that if there are no substitutions that we get our original pipeline back: + SimulateBulkRequest simulateBulkRequest = new
SimulateBulkRequest((Map<String, Map<String, Object>>) null); + SimulateIngestService simulateIngestService = new SimulateIngestService(ingestService, simulateBulkRequest); + Pipeline pipeline = simulateIngestService.getPipeline("pipeline1"); + assertThat(pipeline.getProcessors().size(), equalTo(1)); + assertThat(pipeline.getProcessors().get(0).getType(), equalTo("processor1")); + assertNull(simulateIngestService.getPipeline("pipeline2")); + } + { + // Here we make sure that if we have a substitution with the same name as the original pipeline that we get the new one back + Map<String, Map<String, Object>> pipelineSubstitutions = new HashMap<>() { + { + put("pipeline1", new HashMap<>() { + { + put("processors", List.of(new HashMap<>() { + { + put("processor2", new HashMap<>()); + } + }, new HashMap<>() { + { + put("processor3", new HashMap<>()); + } + })); + } + }); + put("pipeline2", new HashMap<>() { + { + put("processors", List.of(new HashMap<>() { + { + put("processor3", new HashMap<>()); + } + })); + } + }); + } + }; + SimulateBulkRequest simulateBulkRequest = new SimulateBulkRequest(pipelineSubstitutions); + SimulateIngestService simulateIngestService = new SimulateIngestService(ingestService, simulateBulkRequest); + Pipeline pipeline1 = simulateIngestService.getPipeline("pipeline1"); + assertThat(pipeline1.getProcessors().size(), equalTo(2)); + assertThat(pipeline1.getProcessors().get(0).getType(), equalTo("processor2")); + assertThat(pipeline1.getProcessors().get(1).getType(), equalTo("processor3")); + Pipeline pipeline2 = simulateIngestService.getPipeline("pipeline2"); + assertThat(pipeline2.getProcessors().size(), equalTo(1)); + assertThat(pipeline2.getProcessors().get(0).getType(), equalTo("processor3")); + } + { + /* + * Here we make sure that if we have a substitution for a new pipeline we still get the original one back (as well as the new + * one).
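+ * In other words, substitutions can add new pipelines or shadow same-named ones for the duration of the simulate request, but they never remove pipelines that already exist in the cluster state.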
+ */ + Map<String, Map<String, Object>> pipelineSubstitutions = new HashMap<>() { + { + put("pipeline2", new HashMap<>() { + { + put("processors", List.of(new HashMap<>() { + { + put("processor3", new HashMap<>()); + } + })); + } + }); + } + }; + SimulateBulkRequest simulateBulkRequest = new SimulateBulkRequest(pipelineSubstitutions); + SimulateIngestService simulateIngestService = new SimulateIngestService(ingestService, simulateBulkRequest); + Pipeline pipeline1 = simulateIngestService.getPipeline("pipeline1"); + assertThat(pipeline1.getProcessors().size(), equalTo(1)); + assertThat(pipeline1.getProcessors().get(0).getType(), equalTo("processor1")); + Pipeline pipeline2 = simulateIngestService.getPipeline("pipeline2"); + assertThat(pipeline2.getProcessors().size(), equalTo(1)); + assertThat(pipeline2.getProcessors().get(0).getType(), equalTo("processor3")); + } + } + + private static IngestService createWithProcessors(Map<String, Processor.Factory> processors) { + Client client = mock(Client.class); + ThreadPool threadPool = mock(ThreadPool.class); + when(threadPool.generic()).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE); + when(threadPool.executor(anyString())).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE); + return new IngestService(mock(ClusterService.class), threadPool, null, null, null, List.of(new IngestPlugin() { + @Override + public Map<String, Processor.Factory> getProcessors(final Processor.Parameters parameters) { + return processors; + } + }), client, null, () -> DocumentParsingObserver.EMPTY_INSTANCE); + } +} diff --git a/server/src/test/java/org/elasticsearch/node/NodeTests.java b/server/src/test/java/org/elasticsearch/node/NodeTests.java index 7a832de05fa60..4af842ca07bb1 100644 --- a/server/src/test/java/org/elasticsearch/node/NodeTests.java +++ b/server/src/test/java/org/elasticsearch/node/NodeTests.java @@ -63,6 +63,7 @@ import java.util.Map; import java.util.Objects; import java.util.Optional; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.TimeUnit; @@ -366,8 +367,8 @@ public void testHeadersToCopyInTaskManagerAreTheSameAsDeclaredInTask() throws IO Settings.Builder settings = baseSettings(); try (Node node = new MockNode(settings.build(), basePlugins())) { final TransportService transportService = node.injector().getInstance(TransportService.class); - final List<String> taskHeaders = transportService.getTaskManager().getTaskHeaders(); - assertThat(taskHeaders, containsInAnyOrder(Task.HEADERS_TO_COPY.toArray(new String[] {}))); + final Set<String> taskHeaders = transportService.getTaskManager().getTaskHeaders(); + assertThat(taskHeaders, containsInAnyOrder(Task.HEADERS_TO_COPY.toArray())); } } diff --git a/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java b/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java index 6ea93cc9c5940..d4668c7824ae4 100644 --- a/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java +++ b/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java @@ -239,7 +239,7 @@ private static NodeInfo createNodeInfo() { indexingBuffer = ByteSizeValue.ofBytes(random().nextLong() & ((1L << 40) - 1)); } return new NodeInfo( - VersionUtils.randomVersion(random()), + randomAlphaOfLengthBetween(6, 32), TransportVersionUtils.randomVersion(random()), IndexVersionUtils.randomVersion(random()), componentVersions, diff --git a/server/src/test/java/org/elasticsearch/plugins/scanners/NamedComponentReaderTests.java
b/server/src/test/java/org/elasticsearch/plugins/scanners/NamedComponentReaderTests.java index 18e26aa78786f..78814e8854df0 100644 --- a/server/src/test/java/org/elasticsearch/plugins/scanners/NamedComponentReaderTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/scanners/NamedComponentReaderTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import java.net.MalformedURLException; import java.net.URL; import java.nio.file.Files; import java.nio.file.Path; @@ -94,11 +93,4 @@ public void testFindNamedComponentInJarWithNamedComponentscacheFile() throws IOE ); } - private URL toURL(Path p) { - try { - return p.toUri().toURL(); - } catch (MalformedURLException e) { - throw new RuntimeException(e); - } - } } diff --git a/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java b/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java index 9e680615019dc..e794752aff15e 100644 --- a/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java +++ b/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.readiness; +import org.apache.logging.log4j.Level; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -29,6 +30,7 @@ import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpStats; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.readiness.ReadinessClientProbe; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -247,7 +249,30 @@ public void testStatusChange() throws Exception { .build(); event = new ClusterChangedEvent("test", newState, previousState); - readinessService.clusterChanged(event); + var mockAppender = new MockLogAppender(); + try (var ignored = mockAppender.capturing(ReadinessService.class)) { + mockAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "node shutting down logged", + ReadinessService.class.getCanonicalName(), + Level.INFO, + "marking node as not ready because it's shutting down" + ) + ); + readinessService.clusterChanged(event); + mockAppender.assertAllExpectationsMatched(); + + mockAppender.addExpectation( + new MockLogAppender.UnseenEventExpectation( + "node shutting down not logged twice", + ReadinessService.class.getCanonicalName(), + Level.INFO, + "marking node as not ready because it's shutting down" + ) + ); + readinessService.clusterChanged(event); + mockAppender.assertAllExpectationsMatched(); + } assertFalse(readinessService.ready()); tcpReadinessProbeFalse(readinessService); diff --git a/server/src/test/java/org/elasticsearch/repositories/SnapshotIndexCommitTests.java b/server/src/test/java/org/elasticsearch/repositories/SnapshotIndexCommitTests.java index d3ca9653432e2..bfe30446a06fa 100644 --- a/server/src/test/java/org/elasticsearch/repositories/SnapshotIndexCommitTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/SnapshotIndexCommitTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; @@ -73,31 +72,6 @@ private void runAbortTest(boolean throwOnClose, @Nullable Exception outerExcepti 
assertOnCompletionBehaviour(throwOnClose, outerException, indexCommitRef); } - private void runConcurrentTest(boolean throwOnClose) throws Exception { - final var isClosed = new AtomicBoolean(); - final var indexCommitRef = getSnapshotIndexCommit(throwOnClose, isClosed); - final var completeFuture = new PlainActionFuture(); - final var closingActionListener = indexCommitRef.closingBefore(completeFuture); - - final var barrier = new CyclicBarrier(2); - final var completeThread = new Thread(() -> { - safeAwait(barrier); - closingActionListener.onResponse("success"); - }); - completeThread.start(); - - final var abortThread = new Thread(() -> { - safeAwait(barrier); - indexCommitRef.onAbort(); - }); - abortThread.start(); - - completeThread.join(); - abortThread.join(); - - assertOnCompletionFuture(throwOnClose, null, completeFuture); - } - private SnapshotIndexCommit getSnapshotIndexCommit(boolean throwOnClose, AtomicBoolean isClosed) { return new SnapshotIndexCommit(new Engine.IndexCommitRef(null, () -> { assertTrue(isClosed.compareAndSet(false, true)); diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java index 907eedbfa7bf6..ef625706ffffe 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -20,7 +20,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Numbers; import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -68,6 +67,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.repositories.RepositoryDataTests.generateRandomRepoData; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; @@ -104,7 +104,7 @@ public void testRetrieveSnapshots() { int numDocs = randomIntBetween(10, 20); for (int i = 0; i < numDocs; i++) { String id = Integer.toString(i); - client().prepareIndex(indexName).setId(id).setSource("text", "sometext").get(); + prepareIndex(indexName).setId(id).setSource("text", "sometext").get(); } indicesAdmin().prepareFlush(indexName).get(); @@ -204,7 +204,7 @@ public void testCorruptIndexLatestFile() throws Exception { for (int i = 0; i < 16; i++) { repository.blobContainer() - .writeBlob(OperationPurpose.SNAPSHOT, BlobStoreRepository.INDEX_LATEST_BLOB, new BytesArray(buffer, 0, i), false); + .writeBlob(randomPurpose(), BlobStoreRepository.INDEX_LATEST_BLOB, new BytesArray(buffer, 0, i), false); if (i == 8) { assertThat(repository.readSnapshotIndexLatestBlob(), equalTo(generation)); } else { @@ -414,7 +414,7 @@ protected void snapshotFile(SnapshotShardContext context, BlobStoreIndexShardSna SnapshotShardContext context = ShardSnapshotTaskRunnerTests.dummyContext(); int noOfFiles = randomIntBetween(10, 100); BlockingQueue files = new LinkedBlockingQueue<>(noOfFiles); - PlainActionFuture listenerCalled = PlainActionFuture.newFuture(); + PlainActionFuture listenerCalled = new PlainActionFuture<>(); ActionListener> 
allFilesUploadListener = ActionListener.running(() -> listenerCalled.onResponse(null)); for (int i = 0; i < noOfFiles; i++) { files.add(ShardSnapshotTaskRunnerTests.dummyFileInfo()); diff --git a/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java index b5361a22226d1..7d7f92181c44f 100644 --- a/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java @@ -117,7 +117,7 @@ public void testSnapshotAndRestore() throws IOException { IndexId indexId = new IndexId(idxSettings.getIndex().getName(), idxSettings.getUUID()); IndexCommit indexCommit = Lucene.getIndexCommit(Lucene.readSegmentInfos(store.directory()), store.directory()); - final PlainActionFuture snapshot1Future = PlainActionFuture.newFuture(); + final PlainActionFuture snapshot1Future = new PlainActionFuture<>(); IndexShardSnapshotStatus snapshotStatus = IndexShardSnapshotStatus.newInitializing(null); repository.snapshotShard( new SnapshotShardContext( @@ -148,7 +148,7 @@ public void testSnapshotAndRestore() throws IOException { ); routing = ShardRoutingHelper.initialize(routing, localNode.getId(), 0); RecoveryState state = new RecoveryState(routing, localNode, null); - final PlainActionFuture restore1Future = PlainActionFuture.newFuture(); + final PlainActionFuture restore1Future = new PlainActionFuture<>(); repository.restoreShard(store, snapshotId, indexId, shardId, state, restore1Future); restore1Future.actionGet(); @@ -160,7 +160,7 @@ public void testSnapshotAndRestore() throws IOException { SnapshotId incSnapshotId = new SnapshotId("test1", "test1"); IndexCommit incIndexCommit = Lucene.getIndexCommit(Lucene.readSegmentInfos(store.directory()), store.directory()); Collection commitFileNames = incIndexCommit.getFileNames(); - final PlainActionFuture snapshot2future = PlainActionFuture.newFuture(); + final PlainActionFuture snapshot2future = new PlainActionFuture<>(); IndexShardSnapshotStatus snapshotStatus2 = IndexShardSnapshotStatus.newInitializing(shardGeneration); repository.snapshotShard( new SnapshotShardContext( @@ -183,7 +183,7 @@ public void testSnapshotAndRestore() throws IOException { // roll back to the first snap and then incrementally restore RecoveryState firstState = new RecoveryState(routing, localNode, null); - final PlainActionFuture restore2Future = PlainActionFuture.newFuture(); + final PlainActionFuture restore2Future = new PlainActionFuture<>(); repository.restoreShard(store, snapshotId, indexId, shardId, firstState, restore2Future); restore2Future.actionGet(); assertEquals( @@ -193,7 +193,7 @@ public void testSnapshotAndRestore() throws IOException { ); RecoveryState secondState = new RecoveryState(routing, localNode, null); - final PlainActionFuture restore3Future = PlainActionFuture.newFuture(); + final PlainActionFuture restore3Future = new PlainActionFuture<>(); repository.restoreShard(store, incSnapshotId, indexId, shardId, secondState, restore3Future); restore3Future.actionGet(); assertEquals(secondState.getIndex().reusedFileCount(), commitFileNames.size() - 2); @@ -294,7 +294,7 @@ protected BlobContainer wrapChild(BlobContainer child) { final SnapshotId snapshotId = new SnapshotId("test", "test"); final IndexId indexId = new IndexId(idxSettings.getIndex().getName(), idxSettings.getUUID()); IndexCommit indexCommit1 = Lucene.getIndexCommit(Lucene.readSegmentInfos(store1.directory()), 
store1.directory()); - final PlainActionFuture snapshot1Future = PlainActionFuture.newFuture(); + final PlainActionFuture snapshot1Future = new PlainActionFuture<>(); IndexShardSnapshotStatus snapshotStatus1 = IndexShardSnapshotStatus.newInitializing(null); // Scenario 1 - Shard data files will be cleaned up if they fail to write @@ -332,7 +332,7 @@ protected BlobContainer wrapChild(BlobContainer child) { final ShardId shardId2 = new ShardId(idxSettings.getIndex(), 2); final Store store2 = new Store(shardId2, idxSettings, directory, new DummyShardLock(shardId2)); final IndexCommit indexCommit2 = Lucene.getIndexCommit(Lucene.readSegmentInfos(store2.directory()), store2.directory()); - final PlainActionFuture snapshot2Future = PlainActionFuture.newFuture(); + final PlainActionFuture snapshot2Future = new PlainActionFuture<>(); canErrorForWriteBlob.set(false); shouldErrorForWriteMetadataBlob.set(true); repository.snapshotShard( diff --git a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java index 06328734e394d..718ba4a0f0e2f 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.http.HttpResponse; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpStats; +import org.elasticsearch.indices.breaker.CircuitBreakerMetrics; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.rest.RestHandler.Route; import org.elasticsearch.rest.action.RestToXContentListener; @@ -96,6 +97,7 @@ public class RestControllerTests extends ESTestCase { @Before public void setup() { circuitBreakerService = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, Settings.builder() .put(HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), BREAKER_LIMIT) // We want to have reproducible results in this test, hence we disable real memory usage accounting diff --git a/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java b/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java index 204596cabdfe3..9c38cd2615355 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.indices.breaker.CircuitBreakerMetrics; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.rest.RestHandler.Route; @@ -72,6 +73,7 @@ public void testUnsupportedMethodResponseHttpHeader() throws Exception { // Initialize test candidate RestController CircuitBreakerService circuitBreakerService = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, Settings.EMPTY, Collections.emptyList(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) diff --git a/server/src/test/java/org/elasticsearch/rest/action/RestCancellableNodeClientTests.java b/server/src/test/java/org/elasticsearch/rest/action/RestCancellableNodeClientTests.java index a21eab1d95911..3f425ac202b6c 100644 --- 
a/server/src/test/java/org/elasticsearch/rest/action/RestCancellableNodeClientTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/RestCancellableNodeClientTests.java @@ -14,9 +14,9 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.settings.Settings; @@ -76,7 +76,7 @@ public void testCompletedTasks() throws Exception { for (int j = 0; j < numTasks; j++) { PlainActionFuture actionFuture = new PlainActionFuture<>(); RestCancellableNodeClient client = new RestCancellableNodeClient(testClient, channel); - threadPool.generic().submit(() -> client.execute(SearchAction.INSTANCE, new SearchRequest(), actionFuture)); + threadPool.generic().submit(() -> client.execute(TransportSearchAction.TYPE, new SearchRequest(), actionFuture)); futures.add(actionFuture); } } @@ -106,7 +106,7 @@ public void testCancelledTasks() throws Exception { totalSearches += numTasks; RestCancellableNodeClient client = new RestCancellableNodeClient(nodeClient, channel); for (int j = 0; j < numTasks; j++) { - client.execute(SearchAction.INSTANCE, new SearchRequest(), null); + client.execute(TransportSearchAction.TYPE, new SearchRequest(), null); } assertEquals(numTasks, RestCancellableNodeClient.getNumTasks(channel)); } @@ -139,7 +139,7 @@ public void testChannelAlreadyClosed() { RestCancellableNodeClient client = new RestCancellableNodeClient(testClient, channel); for (int j = 0; j < numTasks; j++) { // here the channel will be first registered, then straight-away removed from the map as the close listener is invoked - client.execute(SearchAction.INSTANCE, new SearchRequest(), null); + client.execute(TransportSearchAction.TYPE, new SearchRequest(), null); } } assertEquals(initialHttpChannels, RestCancellableNodeClient.getNumChannels()); @@ -177,7 +177,7 @@ public Task exe } return task; } - case SearchAction.NAME -> { + case TransportSearchAction.NAME -> { searchRequests.incrementAndGet(); Task searchTask = request.createTask(counter.getAndIncrement(), "search", action.name(), null, Collections.emptyMap()); if (timeout == false) { diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestPluginsActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestPluginsActionTests.java index e58127e9a6d9b..c5256e0056873 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestPluginsActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestPluginsActionTests.java @@ -8,8 +8,8 @@ package org.elasticsearch.rest.action.cat; +import org.elasticsearch.Build; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules; @@ -64,7 +64,7 @@ private Table buildTable(List pluginDescriptor) { for (int i = 0; i < 3; i++) { nodeInfos.add( new NodeInfo( - Version.CURRENT, + Build.current().version(), 
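+ // NodeInfo now carries the node version as a plain String (which is why NodeInfoStreamingTests above switched to randomAlphaOfLengthBetween(6, 32)); Build.current().version() supplies it here.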
TransportVersion.current(), IndexVersion.current(), Map.of(), diff --git a/server/src/test/java/org/elasticsearch/rest/action/document/RestIndexActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/document/RestIndexActionTests.java index 67c730e868192..c2145991e51fa 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/document/RestIndexActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/document/RestIndexActionTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.document.RestIndexAction.AutoIdHandler; import org.elasticsearch.rest.action.document.RestIndexAction.CreateHandler; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.test.rest.RestActionTestCase; import org.elasticsearch.xcontent.XContentType; @@ -65,13 +64,6 @@ public void testAutoIdDefaultsToOptypeCreate() { checkAutoIdOpType(Version.CURRENT, DocWriteRequest.OpType.CREATE); } - public void testAutoIdDefaultsToOptypeIndexForOlderVersions() { - checkAutoIdOpType( - VersionUtils.randomVersionBetween(random(), null, VersionUtils.getPreviousVersion(Version.V_7_5_0)), - DocWriteRequest.OpType.INDEX - ); - } - private void checkAutoIdOpType(Version minClusterVersion, DocWriteRequest.OpType expectedOpType) { SetOnce executeCalled = new SetOnce<>(); verifyingClient.setExecuteVerifier((actionType, request) -> { diff --git a/server/src/test/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestActionTests.java new file mode 100644 index 0000000000000..a738a13f62c21 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestActionTests.java @@ -0,0 +1,249 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.rest.action.ingest; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.ingest.SimulateIndexResponse; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.rest.AbstractRestChannel; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentType; + +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class RestSimulateIngestActionTests extends ESTestCase { + + public void testConvertToBulkRequestXContentBytes() throws Exception { + { + // No index, no id, which we expect to be fine: + String simulateRequestJson = """ + { + "docs": [ + { + "_source": { + "my-keyword-field": "FOO" + } + }, + { + "_source": { + "my-keyword-field": "BAR" + } + } + ], + "pipeline_substitutions": { + "my-pipeline-2": { + "processors": [ + { + "set": { + "field": "my-new-boolean-field", + "value": true + } + } + ] + } + } + } + """; + String bulkRequestJson = """ + {"index":{}} + {"my-keyword-field":"FOO"} + {"index":{}} + {"my-keyword-field":"BAR"} + """; + testInputJsonConvertsToOutputJson(simulateRequestJson, bulkRequestJson); + } + + { + // index and id: + String simulateRequestJson = """ + { + "docs": [ + { + "_index": "index", + "_id": "123", + "_source": { + "foo": "bar" + } + }, + { + "_index": "index", + "_id": "456", + "_source": { + "foo": "rab" + } + } + ] + } + """; + String bulkRequestJson = """ + {"index":{"_index":"index","_id":"123"}} + {"foo":"bar"} + {"index":{"_index":"index","_id":"456"}} + {"foo":"rab"} + """; + testInputJsonConvertsToOutputJson(simulateRequestJson, bulkRequestJson); + } + + { + // We expect an IllegalArgumentException if there are no docs: + String simulateRequestJson = """ + { + "docs": [ + ] + } + """; + String bulkRequestJson = """ + {"index":{"_index":"index","_id":"123"}} + {"foo":"bar"} + {"index":{"_index":"index","_id":"456"}} + {"foo":"rab"} + """; + expectThrows(IllegalArgumentException.class, () -> testInputJsonConvertsToOutputJson(simulateRequestJson, bulkRequestJson)); + } + + { + // non-trivial source: + String simulateRequestJson = """ + { + "docs": [ + { + "_index": "index", + "_id": "123", + "_source": { + "foo": "bar", + "some_object": { + "prop1": "val1", + "some_array": [1, 2, 3, 4] + } + } + } + ] + } + """; + String bulkRequestJson = """ + {"index":{"_index":"index","_id":"123"}} + {"some_object":{"prop1":"val1","some_array":[1,2,3,4]},"foo":"bar"} + """; + testInputJsonConvertsToOutputJson(simulateRequestJson, bulkRequestJson); + } + } + + private void testInputJsonConvertsToOutputJson(String inputJson, String expectedOutputJson) throws Exception { + Map sourceMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), inputJson, false); + BytesReference bulkXcontentBytes = RestSimulateIngestAction.convertToBulkRequestXContentBytes(sourceMap); + String bulkRequestJson = XContentHelper.convertToJson(bulkXcontentBytes, false, XContentType.JSON); + assertThat(bulkRequestJson, equalTo(expectedOutputJson)); + } + + public void 
testSimulateIngestRestToXContentListener() throws Exception { + // First, make sure it works with success responses: + BulkItemResponse[] responses = new BulkItemResponse[3]; + responses[0] = getSuccessBulkItemResponse("123", "{\"foo\": \"bar\"}"); + responses[1] = getFailureBulkItemResponse("678", "This has failed"); + responses[2] = getSuccessBulkItemResponse("456", "{\"bar\": \"baz\"}"); + BulkResponse bulkResponse = new BulkResponse(responses, randomLongBetween(0, 50000)); + String expectedXContent = """ + { + "docs" : [ + { + "doc" : { + "_id" : "123", + "_index" : "index1", + "_version" : 3, + "_source" : { + "foo" : "bar" + }, + "executed_pipelines" : [ + "pipeline1", + "pipeline2" + ] + } + }, + { + "doc" : { + "_id" : "678", + "_index" : "index1", + "error" : { + "type" : "runtime_exception", + "reason" : "This has failed" + } + } + }, + { + "doc" : { + "_id" : "456", + "_index" : "index1", + "_version" : 3, + "_source" : { + "bar" : "baz" + }, + "executed_pipelines" : [ + "pipeline1", + "pipeline2" + ] + } + } + ] + }"""; + testSimulateIngestRestToXContentListener(bulkResponse, expectedXContent); + } + + private BulkItemResponse getFailureBulkItemResponse(String id, String failureMessage) { + return BulkItemResponse.failure( + randomInt(), + randomFrom(DocWriteRequest.OpType.values()), + new BulkItemResponse.Failure("index1", id, new RuntimeException(failureMessage)) + ); + } + + private BulkItemResponse getSuccessBulkItemResponse(String id, String source) { + ByteBuffer[] sourceByteBuffer = new ByteBuffer[1]; + sourceByteBuffer[0] = ByteBuffer.wrap(source.getBytes(StandardCharsets.UTF_8)); + return BulkItemResponse.success( + randomInt(), + randomFrom(DocWriteRequest.OpType.values()), + new SimulateIndexResponse( + id, + "index1", + 3, + BytesReference.fromByteBuffers(sourceByteBuffer), + XContentType.JSON, + List.of("pipeline1", "pipeline2") + ) + ); + } + + private void testSimulateIngestRestToXContentListener(BulkResponse bulkResponse, String expectedResult) throws Exception { + final FakeRestRequest request = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).build(); + final SetOnce responseSetOnce = new SetOnce<>(); + RestSimulateIngestAction.SimulateIngestRestToXContentListener listener = + new RestSimulateIngestAction.SimulateIngestRestToXContentListener(new AbstractRestChannel(request, true) { + @Override + public void sendResponse(RestResponse response) { + responseSetOnce.set(response); + } + }); + listener.onResponse(bulkResponse); + RestResponse response = responseSetOnce.get(); + String bulkRequestJson = XContentHelper.convertToJson(response.content(), true, true, XContentType.JSON); + assertThat(bulkRequestJson, equalTo(expectedResult)); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java index d90a82f08e4f0..a531a74d956ee 100644 --- a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java @@ -8,17 +8,28 @@ package org.elasticsearch.search; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchNoDocsQuery; import 
org.apache.lucene.search.Query; import org.apache.lucene.search.Sort; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.tests.store.BaseDirectoryWrapper; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; @@ -26,14 +37,23 @@ import org.elasticsearch.index.cache.IndexCache; import org.elasticsearch.index.cache.query.QueryCache; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.fielddata.IndexFieldDataCache; +import org.elasticsearch.index.fielddata.plain.BinaryIndexFieldData; +import org.elasticsearch.index.fielddata.plain.SortedOrdinalsIndexFieldData; import org.elasticsearch.index.mapper.IdLoader; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.MockFieldMapper; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.search.aggregations.bucket.range.DateRangeAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.LegacyReaderContext; import org.elasticsearch.search.internal.ReaderContext; @@ -42,15 +62,20 @@ import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.rescore.RescoreContext; import org.elasticsearch.search.slice.SliceBuilder; +import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortAndFormats; +import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; import java.util.UUID; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.function.Function; import java.util.function.Supplier; +import java.util.function.ToLongFunction; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -147,7 +172,8 @@ protected Engine.Searcher acquireSearcherInternal(String source) { null, false, null, - randomInt(), + randomFrom(SearchService.ResultsType.values()), + randomBoolean(), randomInt() ); contextWithoutScroll.from(300); @@ -178,63 +204,67 @@ protected Engine.Searcher acquireSearcherInternal(String source) { shardSearchRequest, randomNonNegativeLong() ); - DefaultSearchContext context1 = new DefaultSearchContext( - readerContext, - shardSearchRequest, - 
target, - null, - timeout, - null, - false, - null, - randomInt(), - randomInt() - ); - context1.from(300); - exception = expectThrows(IllegalArgumentException.class, context1::preProcess); - assertThat( - exception.getMessage(), - equalTo( - "Batch size is too large, size must be less than or equal to: [" - + maxResultWindow - + "] but was [310]. Scroll batch sizes cost as much memory as result windows so they are " - + "controlled by the [" - + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey() - + "] index level setting." + try ( + DefaultSearchContext context1 = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + null, + timeout, + null, + false, + null, + randomFrom(SearchService.ResultsType.values()), + randomBoolean(), + randomInt() ) - ); - - // resultWindow not greater than maxResultWindow and both rescore and sort are not null - context1.from(0); - DocValueFormat docValueFormat = mock(DocValueFormat.class); - SortAndFormats sortAndFormats = new SortAndFormats(new Sort(), new DocValueFormat[] { docValueFormat }); - context1.sort(sortAndFormats); - - RescoreContext rescoreContext = mock(RescoreContext.class); - when(rescoreContext.getWindowSize()).thenReturn(500); - context1.addRescore(rescoreContext); - - exception = expectThrows(IllegalArgumentException.class, context1::preProcess); - assertThat(exception.getMessage(), equalTo("Cannot use [sort] option in conjunction with [rescore].")); - - // rescore is null but sort is not null and rescoreContext.getWindowSize() exceeds maxResultWindow - context1.sort(null); - exception = expectThrows(IllegalArgumentException.class, context1::preProcess); - - assertThat( - exception.getMessage(), - equalTo( - "Rescore window [" - + rescoreContext.getWindowSize() - + "] is too large. " - + "It must be less than [" - + maxRescoreWindow - + "]. This prevents allocating massive heaps for storing the results " - + "to be rescored. This limit can be set by changing the [" - + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey() - + "] index level setting." - ) - ); + ) { + context1.from(300); + exception = expectThrows(IllegalArgumentException.class, context1::preProcess); + assertThat( + exception.getMessage(), + equalTo( + "Batch size is too large, size must be less than or equal to: [" + + maxResultWindow + + "] but was [310]. Scroll batch sizes cost as much memory as result windows so they are " + + "controlled by the [" + + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey() + + "] index level setting." + ) + ); + + // resultWindow not greater than maxResultWindow and both rescore and sort are not null + context1.from(0); + DocValueFormat docValueFormat = mock(DocValueFormat.class); + SortAndFormats sortAndFormats = new SortAndFormats(new Sort(), new DocValueFormat[] { docValueFormat }); + context1.sort(sortAndFormats); + + RescoreContext rescoreContext = mock(RescoreContext.class); + when(rescoreContext.getWindowSize()).thenReturn(500); + context1.addRescore(rescoreContext); + + exception = expectThrows(IllegalArgumentException.class, context1::preProcess); + assertThat(exception.getMessage(), equalTo("Cannot use [sort] option in conjunction with [rescore].")); + + // rescore is null but sort is not null and rescoreContext.getWindowSize() exceeds maxResultWindow + context1.sort(null); + exception = expectThrows(IllegalArgumentException.class, context1::preProcess); + + assertThat( + exception.getMessage(), + equalTo( + "Rescore window [" + + rescoreContext.getWindowSize() + + "] is too large. 
" + + "It must be less than [" + + maxRescoreWindow + + "]. This prevents allocating massive heaps for storing the results " + + "to be rescored. This limit can be set by changing the [" + + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey() + + "] index level setting." + ) + ); + } readerContext.close(); readerContext = new ReaderContext( @@ -253,90 +283,103 @@ public ScrollContext scrollContext() { } }; // rescore is null but sliceBuilder is not null - DefaultSearchContext context2 = new DefaultSearchContext( - readerContext, - shardSearchRequest, - target, - null, - timeout, - null, - false, - null, - randomInt(), - randomInt() - ); - - SliceBuilder sliceBuilder = mock(SliceBuilder.class); - int numSlices = maxSlicesPerScroll + randomIntBetween(1, 100); - when(sliceBuilder.getMax()).thenReturn(numSlices); - context2.sliceBuilder(sliceBuilder); - - exception = expectThrows(IllegalArgumentException.class, context2::preProcess); - assertThat( - exception.getMessage(), - equalTo( - "The number of slices [" - + numSlices - + "] is too large. It must " - + "be less than [" - + maxSlicesPerScroll - + "]. This limit can be set by changing the [" - + IndexSettings.MAX_SLICES_PER_SCROLL.getKey() - + "] index level setting." + try ( + DefaultSearchContext context2 = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + null, + timeout, + null, + false, + null, + randomFrom(SearchService.ResultsType.values()), + randomBoolean(), + randomInt() ) - ); + ) { - // No exceptions should be thrown - when(shardSearchRequest.getAliasFilter()).thenReturn(AliasFilter.EMPTY); - when(shardSearchRequest.indexBoost()).thenReturn(AbstractQueryBuilder.DEFAULT_BOOST); + SliceBuilder sliceBuilder = mock(SliceBuilder.class); + int numSlices = maxSlicesPerScroll + randomIntBetween(1, 100); + when(sliceBuilder.getMax()).thenReturn(numSlices); + context2.sliceBuilder(sliceBuilder); + + exception = expectThrows(IllegalArgumentException.class, context2::preProcess); + assertThat( + exception.getMessage(), + equalTo( + "The number of slices [" + + numSlices + + "] is too large. It must " + + "be less than [" + + maxSlicesPerScroll + + "]. This limit can be set by changing the [" + + IndexSettings.MAX_SLICES_PER_SCROLL.getKey() + + "] index level setting." 
+ ) + ); + + // No exceptions should be thrown + when(shardSearchRequest.getAliasFilter()).thenReturn(AliasFilter.EMPTY); + when(shardSearchRequest.indexBoost()).thenReturn(AbstractQueryBuilder.DEFAULT_BOOST); + } - DefaultSearchContext context3 = new DefaultSearchContext( - readerContext, - shardSearchRequest, - target, - null, - timeout, - null, - false, - null, - randomInt(), - randomInt() - ); ParsedQuery parsedQuery = ParsedQuery.parsedMatchAllQuery(); - context3.sliceBuilder(null).parsedQuery(parsedQuery).preProcess(); - assertEquals(context3.query(), context3.buildFilteredQuery(parsedQuery.query())); - - when(searchExecutionContext.getFieldType(anyString())).thenReturn(mock(MappedFieldType.class)); - - readerContext.close(); - readerContext = new ReaderContext( - newContextId(), - indexService, - indexShard, - searcherSupplier.get(), - randomNonNegativeLong(), - false - ); - DefaultSearchContext context4 = new DefaultSearchContext( - readerContext, - shardSearchRequest, - target, - null, - timeout, - null, - false, - null, - randomInt(), - randomInt() - ); - context4.sliceBuilder(new SliceBuilder(1, 2)).parsedQuery(parsedQuery).preProcess(); - Query query1 = context4.query(); - context4.sliceBuilder(new SliceBuilder(0, 2)).parsedQuery(parsedQuery).preProcess(); - Query query2 = context4.query(); - assertTrue(query1 instanceof MatchNoDocsQuery || query2 instanceof MatchNoDocsQuery); - - readerContext.close(); - threadPool.shutdown(); + try ( + DefaultSearchContext context3 = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + null, + timeout, + null, + false, + null, + randomFrom(SearchService.ResultsType.values()), + randomBoolean(), + randomInt() + ) + ) { + context3.sliceBuilder(null).parsedQuery(parsedQuery).preProcess(); + assertEquals(context3.query(), context3.buildFilteredQuery(parsedQuery.query())); + + when(searchExecutionContext.getFieldType(anyString())).thenReturn(mock(MappedFieldType.class)); + + readerContext.close(); + readerContext = new ReaderContext( + newContextId(), + indexService, + indexShard, + searcherSupplier.get(), + randomNonNegativeLong(), + false + ); + } + + try ( + DefaultSearchContext context4 = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + null, + timeout, + null, + false, + null, + randomFrom(SearchService.ResultsType.values()), + randomBoolean(), + randomInt() + ) + ) { + context4.sliceBuilder(new SliceBuilder(1, 2)).parsedQuery(parsedQuery).preProcess(); + Query query1 = context4.query(); + context4.sliceBuilder(new SliceBuilder(0, 2)).parsedQuery(parsedQuery).preProcess(); + Query query2 = context4.query(); + assertTrue(query1 instanceof MatchNoDocsQuery || query2 instanceof MatchNoDocsQuery); + + readerContext.close(); + threadPool.shutdown(); + } } } @@ -352,6 +395,8 @@ public void testClearQueryCancellationsOnClose() throws IOException { when(indexShard.getThreadPool()).thenReturn(threadPool); IndexService indexService = mock(IndexService.class); + MapperService mapperService = mock(MapperService.class); + when(indexService.mapperService()).thenReturn(mapperService); try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { @@ -394,7 +439,8 @@ protected Engine.Searcher acquireSearcherInternal(String source) { null, false, null, - randomInt(), + randomFrom(SearchService.ResultsType.values()), + randomBoolean(), randomInt() ); @@ -431,6 +477,321 @@ public void testNewIdLoaderWithTsdb() throws Exception { } } + public void 
testDetermineMaximumNumberOfSlices() { + IndexShard indexShard = mock(IndexShard.class); + when(indexShard.shardId()).thenReturn(new ShardId("index", "uuid", 0)); + ShardSearchRequest parallelReq = new ShardSearchRequest( + OriginalIndices.NONE, + new SearchRequest().allowPartialSearchResults(randomBoolean()), + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1f, + System.currentTimeMillis(), + null + ); + ShardSearchRequest singleSliceReq = new ShardSearchRequest( + OriginalIndices.NONE, + new SearchRequest().allowPartialSearchResults(randomBoolean()) + .source(new SearchSourceBuilder().sort(SortBuilders.fieldSort(FieldSortBuilder.DOC_FIELD_NAME))), + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1f, + System.currentTimeMillis(), + null + ); + int executorPoolSize = randomIntBetween(1, 100); + ExecutorService threadPoolExecutor = EsExecutors.newFixed( + "test", + executorPoolSize, + 0, + Thread::new, + new ThreadContext(Settings.EMPTY), + EsExecutors.TaskTrackingConfig.DO_NOT_TRACK + ); + ExecutorService notThreadPoolExecutor = Executors.newWorkStealingPool(); + ToLongFunction<String> fieldCardinality = name -> -1; + + assertEquals( + executorPoolSize, + DefaultSearchContext.determineMaximumNumberOfSlices( + threadPoolExecutor, + parallelReq, + SearchService.ResultsType.DFS, + true, + fieldCardinality + ) + ); + assertEquals( + executorPoolSize, + DefaultSearchContext.determineMaximumNumberOfSlices( + threadPoolExecutor, + singleSliceReq, + SearchService.ResultsType.DFS, + true, + fieldCardinality + ) + ); + assertEquals( + 1, + DefaultSearchContext.determineMaximumNumberOfSlices(null, parallelReq, SearchService.ResultsType.DFS, true, fieldCardinality) + ); + assertEquals( + executorPoolSize, + DefaultSearchContext.determineMaximumNumberOfSlices( + threadPoolExecutor, + parallelReq, + SearchService.ResultsType.QUERY, + true, + fieldCardinality + ) + ); + assertEquals( + 1, + DefaultSearchContext.determineMaximumNumberOfSlices( + threadPoolExecutor, + singleSliceReq, + SearchService.ResultsType.QUERY, + true, + fieldCardinality + ) + ); + assertEquals( + 1, + DefaultSearchContext.determineMaximumNumberOfSlices( + notThreadPoolExecutor, + parallelReq, + SearchService.ResultsType.DFS, + true, + fieldCardinality + ) + ); + + assertEquals( + executorPoolSize, + DefaultSearchContext.determineMaximumNumberOfSlices( + threadPoolExecutor, + parallelReq, + SearchService.ResultsType.DFS, + false, + fieldCardinality + ) + ); + assertEquals( + 1, + DefaultSearchContext.determineMaximumNumberOfSlices(null, parallelReq, SearchService.ResultsType.DFS, false, fieldCardinality) + ); + assertEquals( + 1, + DefaultSearchContext.determineMaximumNumberOfSlices( + threadPoolExecutor, + parallelReq, + SearchService.ResultsType.QUERY, + false, + fieldCardinality + ) + ); + assertEquals( + 1, + DefaultSearchContext.determineMaximumNumberOfSlices(null, parallelReq, SearchService.ResultsType.QUERY, false, fieldCardinality) + ); + assertEquals( + 1, + DefaultSearchContext.determineMaximumNumberOfSlices( + notThreadPoolExecutor, + parallelReq, + SearchService.ResultsType.DFS, + false, + fieldCardinality + ) + ); + } + + public void testIsParallelCollectionSupportedForResults() { + SearchSourceBuilder searchSourceBuilderOrNull = randomBoolean() ? 
null : new SearchSourceBuilder(); + ToLongFunction<String> fieldCardinality = name -> -1; + for (var resultsType : SearchService.ResultsType.values()) { + switch (resultsType) { + case NONE, FETCH -> assertFalse( + "NONE and FETCH phases do not support parallel collection.", + DefaultSearchContext.isParallelCollectionSupportedForResults( + resultsType, + searchSourceBuilderOrNull, + fieldCardinality, + randomBoolean() + ) + ); + case DFS -> assertTrue( + "DFS phase always supports parallel collection.", + DefaultSearchContext.isParallelCollectionSupportedForResults( + resultsType, + searchSourceBuilderOrNull, + fieldCardinality, + randomBoolean() + ) + ); + case QUERY -> { + SearchSourceBuilder searchSourceBuilderNoAgg = new SearchSourceBuilder(); + assertTrue( + "Parallel collection should be supported for the query phase when no agg is present.", + DefaultSearchContext.isParallelCollectionSupportedForResults( + resultsType, + searchSourceBuilderNoAgg, + fieldCardinality, + true + ) + ); + assertTrue( + "Parallel collection should be supported for the query phase when the source is null.", + DefaultSearchContext.isParallelCollectionSupportedForResults(resultsType, null, fieldCardinality, true) + ); + + SearchSourceBuilder searchSourceAggSupportsParallelCollection = new SearchSourceBuilder(); + searchSourceAggSupportsParallelCollection.aggregation(new DateRangeAggregationBuilder("dateRange")); + assertTrue( + "Parallel collection should be supported for the query phase when enabled && contains supported agg.", + DefaultSearchContext.isParallelCollectionSupportedForResults( + resultsType, + searchSourceAggSupportsParallelCollection, + fieldCardinality, + true + ) + ); + + assertFalse( + "Parallel collection should not be supported for the query phase when disabled.", + DefaultSearchContext.isParallelCollectionSupportedForResults( + resultsType, + searchSourceBuilderNoAgg, + fieldCardinality, + false + ) + ); + assertFalse( + "Parallel collection should not be supported for the query phase when disabled and source is null.", + DefaultSearchContext.isParallelCollectionSupportedForResults(resultsType, null, fieldCardinality, false) + ); + + SearchSourceBuilder searchSourceAggDoesNotSupportParallelCollection = new SearchSourceBuilder(); + searchSourceAggDoesNotSupportParallelCollection.aggregation(new TermsAggregationBuilder("terms")); + assertFalse( + "Parallel collection should not be supported for the query phase when " + + "enabled && does not contain supported agg.", + DefaultSearchContext.isParallelCollectionSupportedForResults( + resultsType, + searchSourceAggDoesNotSupportParallelCollection, + fieldCardinality, + true + ) + ); + + SearchSourceBuilder searchSourceMultiAggDoesNotSupportParallelCollection = new SearchSourceBuilder(); + searchSourceMultiAggDoesNotSupportParallelCollection.aggregation(new TermsAggregationBuilder("terms")); + searchSourceMultiAggDoesNotSupportParallelCollection.aggregation(new DateRangeAggregationBuilder("dateRange")); + assertFalse( + "Parallel collection should not be supported for the query phase when enabled && contains unsupported agg.", + DefaultSearchContext.isParallelCollectionSupportedForResults( + resultsType, + searchSourceMultiAggDoesNotSupportParallelCollection, + fieldCardinality, + true + ) + ); + } + default -> throw new UnsupportedOperationException("Untested ResultsType added, please add new testcases."); + } + } + } + + public void testGetFieldCardinalityNoLeaves() throws IOException { + try (BaseDirectoryWrapper dir = newDirectory()) { 
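+ // The writer below is opened and closed without indexing anything, so the reader + // sees an index with no leaves and the field's cardinality should come back as 0.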
+ IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig()); + writer.close(); + try (DirectoryReader reader = DirectoryReader.open(writer.getDirectory())) { + SortedOrdinalsIndexFieldData high = new SortedOrdinalsIndexFieldData( + new IndexFieldDataCache.None(), + "empty", + CoreValuesSourceType.KEYWORD, + new NoneCircuitBreakerService(), + null + ); + assertEquals(0, DefaultSearchContext.getFieldCardinality(high, reader)); + } + } + } + + public void testGetFieldCardinalityNoLeavesNoGlobalOrdinals() throws IOException { + try (BaseDirectoryWrapper dir = newDirectory()) { + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig()); + writer.close(); + try (DirectoryReader reader = DirectoryReader.open(writer.getDirectory())) { + BinaryIndexFieldData binaryIndexFieldData = new BinaryIndexFieldData("high", CoreValuesSourceType.KEYWORD); + assertEquals(-1, DefaultSearchContext.getFieldCardinality(binaryIndexFieldData, reader)); + } + } + } + + public void testGetFieldCardinality() throws IOException { + try (BaseDirectoryWrapper dir = newDirectory()) { + final int numDocs = scaledRandomIntBetween(100, 200); + try (RandomIndexWriter w = new RandomIndexWriter(random(), dir, new IndexWriterConfig())) { + for (int i = 0; i < numDocs; ++i) { + Document doc = new Document(); + doc.add(new SortedSetDocValuesField("high", new BytesRef(Integer.toString(i)))); + doc.add(new SortedSetDocValuesField("low", new BytesRef(Integer.toString(i % 3)))); + w.addDocument(doc); + } + } + try (DirectoryReader reader = DirectoryReader.open(dir)) { + BinaryIndexFieldData binaryIndexFieldData = new BinaryIndexFieldData("high", CoreValuesSourceType.KEYWORD); + assertEquals(-1, DefaultSearchContext.getFieldCardinality(binaryIndexFieldData, reader)); + SortedOrdinalsIndexFieldData nonExistent = new SortedOrdinalsIndexFieldData( + new IndexFieldDataCache.None(), + "non_existent", + CoreValuesSourceType.KEYWORD, + new NoneCircuitBreakerService(), + null + ); + assertEquals(0, DefaultSearchContext.getFieldCardinality(nonExistent, reader)); + SortedOrdinalsIndexFieldData high = new SortedOrdinalsIndexFieldData( + new IndexFieldDataCache.None(), + "high", + CoreValuesSourceType.KEYWORD, + new NoneCircuitBreakerService(), + null + ); + assertEquals(numDocs, DefaultSearchContext.getFieldCardinality(high, reader)); + SortedOrdinalsIndexFieldData low = new SortedOrdinalsIndexFieldData( + new IndexFieldDataCache.None(), + "low", + CoreValuesSourceType.KEYWORD, + new NoneCircuitBreakerService(), + null + ); + assertEquals(3, DefaultSearchContext.getFieldCardinality(low, reader)); + } + } + } + + public void testGetFieldCardinalityUnmappedField() { + MapperService mapperService = mock(MapperService.class); + IndexService indexService = mock(IndexService.class); + when(indexService.mapperService()).thenReturn(mapperService); + assertEquals(-1, DefaultSearchContext.getFieldCardinality("field", indexService, null)); + } + + public void testGetFieldCardinalityRuntimeField() { + MapperService mapperService = mock(MapperService.class); + when(mapperService.fieldType(anyString())).thenReturn(new MockFieldMapper.FakeFieldType("field")); + IndexService indexService = mock(IndexService.class); + when(indexService.mapperService()).thenReturn(mapperService); + when(indexService.loadFielddata(any(), any())).thenThrow(new RuntimeException()); + assertEquals(-1, DefaultSearchContext.getFieldCardinality("field", indexService, null)); + } + private DefaultSearchContext createDefaultSearchContext(Settings providedIndexSettings) 
throws IOException { TimeValue timeout = new TimeValue(randomIntBetween(1, 100)); ShardSearchRequest shardSearchRequest = mock(ShardSearchRequest.class); @@ -508,7 +869,8 @@ protected Engine.Searcher acquireSearcherInternal(String source) { null, false, null, - randomInt(), + randomFrom(SearchService.ResultsType.values()), + randomBoolean(), randomInt() ); } diff --git a/server/src/test/java/org/elasticsearch/search/MultiValueModeTests.java b/server/src/test/java/org/elasticsearch/search/MultiValueModeTests.java index 310ce21e0618a..488d79559f589 100644 --- a/server/src/test/java/org/elasticsearch/search/MultiValueModeTests.java +++ b/server/src/test/java/org/elasticsearch/search/MultiValueModeTests.java @@ -423,7 +423,6 @@ private void verifySortedNumericDouble( missingValue, rootDocs, new BitSetIterator(innerDocs, 0L), - maxDoc, maxChildren ); int prevRoot = -1; diff --git a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java index 0a9336b93bd2c..57974cff0d03c 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java @@ -7,8 +7,6 @@ */ package org.elasticsearch.search; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.util.CharsRefBuilder; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.CheckedBiConsumer; @@ -28,11 +26,9 @@ import org.elasticsearch.index.query.functionscore.GaussDecayFunctionBuilder; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.aggregations.AggregationBuilder; -import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.BaseAggregationBuilder; -import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.heuristic.ChiSquare; import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder; @@ -56,12 +52,8 @@ import org.elasticsearch.search.rescore.RescoreContext; import org.elasticsearch.search.rescore.RescorerBuilder; import org.elasticsearch.search.suggest.Suggest.Suggestion; -import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry; -import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry.Option; -import org.elasticsearch.search.suggest.Suggester; import org.elasticsearch.search.suggest.SuggestionBuilder; import org.elasticsearch.search.suggest.SuggestionSearchContext; -import org.elasticsearch.search.suggest.SuggestionSearchContext.SuggestionContext; import org.elasticsearch.search.suggest.term.TermSuggestion; import org.elasticsearch.search.suggest.term.TermSuggestionBuilder; import org.elasticsearch.test.ESTestCase; @@ -590,10 +582,6 @@ protected XContentBuilder internalXContent(XContentBuilder builder, Params param return null; } - private static TestPipelineAggregationBuilder fromXContent(String name, XContentParser p) { - return null; - } - @Override protected void validate(ValidationContext context) {} @@ -603,20 +591,6 @@ public TransportVersion getMinimalSupportedVersion() { } } - /** - * Dummy test {@link PipelineAggregator} used to test registering aggregation builders. 
- */ - private static class TestPipelineAggregator extends PipelineAggregator { - TestPipelineAggregator() { - super("test", new String[] {}, null); - } - - @Override - public InternalAggregation reduce(InternalAggregation aggregation, AggregationReduceContext reduceContext) { - return null; - } - } - private static class TestRescorerBuilder extends RescorerBuilder<TestRescorerBuilder> { public static TestRescorerBuilder fromXContent(XContentParser parser) { return null; } @@ -653,28 +627,6 @@ public TransportVersion getMinimalSupportedVersion() { } } - private static class TestSuggester extends Suggester<SuggestionSearchContext.SuggestionContext> { - - @Override - protected Suggestion<? extends Entry<? extends Option>> innerExecute( - String name, - SuggestionSearchContext.SuggestionContext suggestion, - IndexSearcher searcher, - CharsRefBuilder spare - ) throws IOException { - return null; - } - - @Override - protected Suggestion<? extends Entry<? extends Option>> emptySuggestion( - String name, - SuggestionContext suggestion, - CharsRefBuilder spare - ) throws IOException { - return null; - } - } - private static class TestSuggestionBuilder extends SuggestionBuilder<TestSuggestionBuilder> { public static final String SUGGESTION_NAME = "test"; diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 1fc7ddc42d5d0..270ab3003a1f1 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -25,9 +25,7 @@ import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.action.search.ClearScrollRequest; -import org.elasticsearch.action.search.ClosePointInTimeAction; import org.elasticsearch.action.search.ClosePointInTimeRequest; -import org.elasticsearch.action.search.OpenPointInTimeAction; import org.elasticsearch.action.search.OpenPointInTimeRequest; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequest; @@ -35,8 +33,9 @@ import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.SearchShardTask; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.search.TransportClosePointInTimeAction; +import org.elasticsearch.action.search.TransportOpenPointInTimeAction; import org.elasticsearch.action.search.TransportSearchAction; -import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; @@ -48,8 +47,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; @@ -84,7 +81,6 @@ import org.elasticsearch.search.aggregations.MultiBucketConsumerService; import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.range.DateRangeAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import 
org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValueType; @@ -103,8 +99,6 @@ import org.elasticsearch.search.query.NonCountingTermQuery; import org.elasticsearch.search.query.QuerySearchRequest; import org.elasticsearch.search.query.QuerySearchResult; -import org.elasticsearch.search.sort.FieldSortBuilder; -import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.suggest.SuggestBuilder; import org.elasticsearch.tasks.TaskCancelHelper; import org.elasticsearch.tasks.TaskCancelledException; @@ -126,8 +120,6 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; import java.util.concurrent.Semaphore; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.atomic.AtomicBoolean; @@ -248,7 +240,7 @@ protected Settings nodeSettings() { public void testClearOnClose() { createIndex("index"); - client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); SearchResponse searchResponse = client().prepareSearch("index").setSize(1).setScroll("1m").get(); assertThat(searchResponse.getScrollId(), is(notNullValue())); SearchService service = getInstanceFromNode(SearchService.class); @@ -260,7 +252,7 @@ public void testClearOnClose() { public void testClearOnStop() { createIndex("index"); - client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); SearchResponse searchResponse = client().prepareSearch("index").setSize(1).setScroll("1m").get(); assertThat(searchResponse.getScrollId(), is(notNullValue())); SearchService service = getInstanceFromNode(SearchService.class); @@ -272,7 +264,7 @@ public void testClearOnStop() { public void testClearIndexDelete() { createIndex("index"); - client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); SearchResponse searchResponse = client().prepareSearch("index").setSize(1).setScroll("1m").get(); assertThat(searchResponse.getScrollId(), is(notNullValue())); SearchService service = getInstanceFromNode(SearchService.class); @@ -285,7 +277,7 @@ public void testClearIndexDelete() { public void testCloseSearchContextOnRewriteException() { // if refresh happens while checking the exception, the subsequent reference count might not match, so we switch it off createIndex("index", Settings.builder().put("index.refresh_interval", -1).build()); - client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); SearchService service = getInstanceFromNode(SearchService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class); @@ -304,7 +296,7 @@ public void testCloseSearchContextOnRewriteException() { public void testSearchWhileIndexDeleted() throws InterruptedException { createIndex("index"); - client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + 
prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); SearchService service = getInstanceFromNode(SearchService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class); @@ -337,8 +329,7 @@ public void run() { } catch (InterruptedException e) { throw new AssertionError(e); } - client().prepareIndex("index") - .setSource("field", "value") + prepareIndex("index").setSource("field", "value") .setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values())) .execute(new ActionListener() { @Override @@ -380,18 +371,29 @@ public void onFailure(Exception e) { null ), new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()), - result + result.delegateFailure((l, r) -> { + r.incRef(); + l.onResponse(r); + }) ); - SearchPhaseResult searchPhaseResult = result.get(); - List intCursors = new ArrayList<>(1); - intCursors.add(0); - ShardFetchRequest req = new ShardFetchRequest(searchPhaseResult.getContextId(), intCursors, null/* not a scroll */); - PlainActionFuture listener = new PlainActionFuture<>(); - service.executeFetchPhase(req, new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()), listener); - listener.get(); - if (useScroll) { - // have to free context since this test does not remove the index from IndicesService. - service.freeReaderContext(searchPhaseResult.getContextId()); + final SearchPhaseResult searchPhaseResult = result.get(); + try { + List intCursors = new ArrayList<>(1); + intCursors.add(0); + ShardFetchRequest req = new ShardFetchRequest( + searchPhaseResult.getContextId(), + intCursors, + null/* not a scroll */ + ); + PlainActionFuture listener = new PlainActionFuture<>(); + service.executeFetchPhase(req, new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()), listener); + listener.get(); + if (useScroll) { + // have to free context since this test does not remove the index from IndicesService. 
+ service.freeReaderContext(searchPhaseResult.getContextId()); + } + } finally { + searchPhaseResult.decRef(); } } catch (ExecutionException ex) { assertThat(ex.getCause(), instanceOf(RuntimeException.class)); @@ -421,7 +423,7 @@ public void onFailure(Exception e) { public void testSearchWhileIndexDeletedDoesNotLeakSearchContext() throws ExecutionException, InterruptedException { createIndex("index"); - client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); @@ -476,7 +478,7 @@ public void testSearchWhileIndexDeletedDoesNotLeakSearchContext() throws Executi public void testBeforeShardLockDuringShardCreate() { IndexService indexService = createIndex("index", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build()); - client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); SearchResponse searchResponse = client().prepareSearch("index").setSize(1).setScroll("1m").get(); assertThat(searchResponse.getScrollId(), is(notNullValue())); SearchService service = getInstanceFromNode(SearchService.class); @@ -574,7 +576,7 @@ public void testTimeout() throws IOException { public void testMaxDocvalueFieldsSearch() throws IOException { final Settings settings = Settings.builder().put(IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING.getKey(), 1).build(); createIndex("index", settings, null, "field1", "keyword", "field2", "keyword"); - client().prepareIndex("index").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("index").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); final SearchService service = getInstanceFromNode(SearchService.class); final IndicesService indicesService = getInstanceFromNode(IndicesService.class); @@ -628,11 +630,7 @@ public void testMaxDocvalueFieldsSearch() throws IOException { public void testDeduplicateDocValuesFields() throws Exception { createIndex("index", Settings.EMPTY, "_doc", "field1", "type=date", "field2", "type=date"); - client().prepareIndex("index") - .setId("1") - .setSource("field1", "2022-08-03", "field2", "2022-08-04") - .setRefreshPolicy(IMMEDIATE) - .get(); + prepareIndex("index").setId("1").setSource("field1", "2022-08-03", "field2", "2022-08-04").setRefreshPolicy(IMMEDIATE).get(); SearchService service = getInstanceFromNode(SearchService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); @@ -778,7 +776,7 @@ public void testIgnoreScriptfieldIfSizeZero() throws IOException { */ public void testMaxOpenScrollContexts() throws Exception { createIndex("index"); - client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); final SearchService service = getInstanceFromNode(SearchService.class); final IndicesService indicesService = getInstanceFromNode(IndicesService.class); @@ -1015,7 +1013,7 @@ public void testCanMatch() throws Exception { 
).canMatch() ); // the source can match and can be rewritten to a match_none, but not the alias filter - final DocWriteResponse response = client().prepareIndex("index").setSource("id", "1").get(); + final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get(); assertEquals(RestStatus.CREATED, response.status()); searchRequest.indices("alias").source(new SearchSourceBuilder().query(new TermQueryBuilder("id", "1"))); assertFalse( @@ -1097,7 +1095,7 @@ public void testCanRewriteToMatchNone() { ); } - public void testSetSearchThrottled() { + public void testSetSearchThrottled() throws IOException { createIndex("throttled_threadpool_index"); client().execute( InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE, @@ -1110,7 +1108,7 @@ public void testSetSearchThrottled() { final SearchService service = getInstanceFromNode(SearchService.class); Index index = resolveIndex("throttled_threadpool_index"); assertTrue(service.getIndicesService().indexServiceSafe(index).getIndexSettings().isSearchThrottled()); - client().prepareIndex("throttled_threadpool_index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("throttled_threadpool_index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); assertSearchHits( client().prepareSearch("throttled_threadpool_index") .setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED) @@ -1135,21 +1133,6 @@ public void testSetSearchThrottled() { ); assertEquals("can not update private setting [index.search.throttled]; this setting is managed by Elasticsearch", iae.getMessage()); assertFalse(service.getIndicesService().indexServiceSafe(index).getIndexSettings().isSearchThrottled()); - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false); - ShardSearchRequest req = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - new ShardId(index, 0), - 0, - 1, - AliasFilter.EMPTY, - 1f, - -1, - null - ); - Thread currentThread = Thread.currentThread(); - // we still make sure can match is executed on the network thread - service.canMatch(req, ActionTestUtils.assertNoFailureListener(r -> assertSame(Thread.currentThread(), currentThread))); } public void testAggContextGetsMatchAll() throws IOException { @@ -1214,7 +1197,7 @@ public void testExpandSearchThrottled() { ) ).actionGet(); - client().prepareIndex("throttled_threadpool_index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("throttled_threadpool_index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); assertHitCount(client().prepareSearch(), 1L); assertHitCount(client().prepareSearch().setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED), 1L); } @@ -1227,7 +1210,7 @@ public void testExpandSearchFrozen() { new InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.Request(indexName, "index.frozen", "true") ).actionGet(); - client().prepareIndex(indexName).setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(indexName).setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); assertHitCount(client().prepareSearch(), 0L); assertHitCount(client().prepareSearch().setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED), 1L); assertWarnings(TransportSearchAction.FROZEN_INDICES_DEPRECATION_MESSAGE.replace("{}", indexName)); @@ -1439,7 +1422,7 @@ public void testDeleteIndexWhileSearch() throws Exception { createIndex("test"); int 
numDocs = randomIntBetween(1, 20); for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test").setSource("f", "v").get(); + prepareIndex("test").setSource("f", "v").get(); } indicesAdmin().prepareRefresh("test").get(); AtomicBoolean stopped = new AtomicBoolean(false); @@ -1602,6 +1585,7 @@ public void onResponse(SearchPhaseResult searchPhaseResult) { fail("Search not cancelled early"); } finally { service.freeReaderContext(searchPhaseResult.getContextId()); + searchPhaseResult.decRef(); latch3.countDown(); } } @@ -1698,7 +1682,7 @@ public void onFailure(Exception e) { client().clearScroll(clearScrollRequest); } - public void testWaitOnRefresh() { + public void testWaitOnRefresh() throws ExecutionException, InterruptedException { createIndex("index"); final SearchService service = getInstanceFromNode(SearchService.class); final IndicesService indicesService = getInstanceFromNode(IndicesService.class); @@ -1708,11 +1692,10 @@ public void testWaitOnRefresh() { searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueSeconds(30)); searchRequest.setWaitForCheckpoints(Collections.singletonMap("index", new long[] { 0 })); - final DocWriteResponse response = client().prepareIndex("index").setSource("id", "1").get(); + final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get(); assertEquals(RestStatus.CREATED, response.status()); SearchShardTask task = new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()); - PlainActionFuture<SearchPhaseResult> future = PlainActionFuture.newFuture(); ShardSearchRequest request = new ShardSearchRequest( OriginalIndices.NONE, searchRequest, @@ -1726,9 +1709,12 @@ public void testWaitOnRefresh() { null, null ); - service.executeQueryPhase(request, task, future); - SearchPhaseResult searchPhaseResult = future.actionGet(); - assertEquals(1, searchPhaseResult.queryResult().getTotalHits().value); + PlainActionFuture<Void> future = new PlainActionFuture<>(); + service.executeQueryPhase(request, task, future.delegateFailure((l, r) -> { + assertEquals(1, r.queryResult().getTotalHits().value); + l.onResponse(null); + })); + future.get(); } public void testWaitOnRefreshFailsWithRefreshesDisabled() { @@ -1741,11 +1727,11 @@ public void testWaitOnRefreshFailsWithRefreshesDisabled() { searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueSeconds(30)); searchRequest.setWaitForCheckpoints(Collections.singletonMap("index", new long[] { 0 })); - final DocWriteResponse response = client().prepareIndex("index").setSource("id", "1").get(); + final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get(); assertEquals(RestStatus.CREATED, response.status()); SearchShardTask task = new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()); - PlainActionFuture<SearchPhaseResult> future = PlainActionFuture.newFuture(); + PlainActionFuture<SearchPhaseResult> future = new PlainActionFuture<>(); ShardSearchRequest request = new ShardSearchRequest( OriginalIndices.NONE, searchRequest, @@ -1777,11 +1763,11 @@ public void testWaitOnRefreshFailsIfCheckpointNotIndexed() { searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueMillis(randomIntBetween(10, 100))); searchRequest.setWaitForCheckpoints(Collections.singletonMap("index", new long[] { 1 })); - final DocWriteResponse response = client().prepareIndex("index").setSource("id", "1").get(); + final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get(); assertEquals(RestStatus.CREATED, response.status()); SearchShardTask task = new SearchShardTask(123L, "", "", "", null, 
Collections.emptyMap()); - PlainActionFuture<SearchPhaseResult> future = PlainActionFuture.newFuture(); + PlainActionFuture<SearchPhaseResult> future = new PlainActionFuture<>(); ShardSearchRequest request = new ShardSearchRequest( OriginalIndices.NONE, searchRequest, @@ -1814,11 +1800,11 @@ public void testWaitOnRefreshTimeout() { searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueMillis(randomIntBetween(10, 100))); searchRequest.setWaitForCheckpoints(Collections.singletonMap("index", new long[] { 0 })); - final DocWriteResponse response = client().prepareIndex("index").setSource("id", "1").get(); + final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get(); assertEquals(RestStatus.CREATED, response.status()); SearchShardTask task = new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()); - PlainActionFuture<SearchPhaseResult> future = PlainActionFuture.newFuture(); + PlainActionFuture<SearchPhaseResult> future = new PlainActionFuture<>(); ShardSearchRequest request = new ShardSearchRequest( OriginalIndices.NONE, searchRequest, @@ -1842,12 +1828,12 @@ public void testMinimalSearchSourceInShardRequests() { createIndex("test"); int numDocs = between(0, 10); for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test").setSource("id", Integer.toString(i)).get(); + prepareIndex("test").setSource("id", Integer.toString(i)).get(); } indicesAdmin().prepareRefresh("test").get(); String pitId = client().execute( - OpenPointInTimeAction.INSTANCE, + TransportOpenPointInTimeAction.TYPE, new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueMinutes(10)) ).actionGet().getPointInTimeId(); final MockSearchService searchService = (MockSearchService) getInstanceFromNode(SearchService.class); @@ -1862,7 +1848,7 @@ public void testMinimalSearchSourceInShardRequests() { numDocs ); } finally { - client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(pitId)).actionGet(); + client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet(); } assertThat(shardRequests, not(emptyList())); for (ShardSearchRequest shardRequest : shardRequests) { @@ -1874,7 +1860,7 @@ public void testDfsQueryPhaseRewrite() { createIndex("index"); - client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); final SearchService service = getInstanceFromNode(SearchService.class); final IndicesService indicesService = getInstanceFromNode(IndicesService.class); final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); @@ -1892,7 +1878,6 @@ public void testDfsQueryPhaseRewrite() { -1, null ); - PlainActionFuture<QuerySearchResult> plainActionFuture = new PlainActionFuture<>(); final Engine.SearcherSupplier reader = indexShard.acquireSearcherSupplier(); ReaderContext context = service.createAndPutReaderContext( request, @@ -1901,6 +1886,7 @@ public void testDfsQueryPhaseRewrite() { reader, SearchService.KEEPALIVE_INTERVAL_SETTING.get(Settings.EMPTY).millis() ); + PlainActionFuture<QuerySearchResult> plainActionFuture = new PlainActionFuture<>(); service.executeQueryPhase( new QuerySearchRequest(null, context.id(), request, new AggregatedDfs(Map.of(), Map.of(), 10)), new SearchShardTask(42L, "", "", "", null, Collections.emptyMap()), @@ -1930,8 +1916,8 @@ public void testEnableSearchWorkerThreads() throws IOException { try (ReaderContext readerContext = createReaderContext(indexService, indexShard)) { 
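+ // Each SearchContext created below is opened in a try-with-resources block so the + // (now releasable) context is closed even if one of the executor assertions fails.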
SearchService service = getInstanceFromNode(SearchService.class); SearchShardTask task = new SearchShardTask(0, "type", "action", "description", null, emptyMap()); - { - SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean()); + + try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean())) { assertNotNull(searchContext.searcher().getExecutor()); } @@ -1942,8 +1928,7 @@ public void testEnableSearchWorkerThreads() throws IOException { .setPersistentSettings(Settings.builder().put(SEARCH_WORKER_THREADS_ENABLED.getKey(), false).build()) .get(); assertTrue(response.isAcknowledged()); - { - SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean()); + try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean())) { assertNull(searchContext.searcher().getExecutor()); } } finally { @@ -1953,82 +1938,9 @@ public void testEnableSearchWorkerThreads() throws IOException { .prepareUpdateSettings() .setPersistentSettings(Settings.builder().putNull(SEARCH_WORKER_THREADS_ENABLED.getKey()).build()) .get(); - SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean()); - assertNotNull(searchContext.searcher().getExecutor()); - } - } - } - - public void testDetermineMaximumNumberOfSlices() { - IndexService indexService = createIndex("index", Settings.EMPTY); - IndexShard indexShard = indexService.getShard(0); - ShardSearchRequest parallelReq = new ShardSearchRequest( - OriginalIndices.NONE, - new SearchRequest().allowPartialSearchResults(randomBoolean()), - indexShard.shardId(), - 0, - indexService.numberOfShards(), - AliasFilter.EMPTY, - 1f, - System.currentTimeMillis(), - null - ); - ShardSearchRequest singleSliceReq = new ShardSearchRequest( - OriginalIndices.NONE, - new SearchRequest().allowPartialSearchResults(randomBoolean()) - .source(new SearchSourceBuilder().sort(SortBuilders.fieldSort(FieldSortBuilder.DOC_FIELD_NAME))), - indexShard.shardId(), - 0, - indexService.numberOfShards(), - AliasFilter.EMPTY, - 1f, - System.currentTimeMillis(), - null - ); - int executorPoolSize = randomIntBetween(1, 100); - ExecutorService threadPoolExecutor = EsExecutors.newFixed( - "test", - executorPoolSize, - 0, - Thread::new, - new ThreadContext(Settings.EMPTY), - EsExecutors.TaskTrackingConfig.DO_NOT_TRACK - ); - ExecutorService notThreadPoolExecutor = Executors.newWorkStealingPool(); - - SearchService service = getInstanceFromNode(SearchService.class); - { - assertEquals(executorPoolSize, service.determineMaximumNumberOfSlices(threadPoolExecutor, parallelReq, ResultsType.DFS)); - assertEquals(executorPoolSize, service.determineMaximumNumberOfSlices(threadPoolExecutor, singleSliceReq, ResultsType.DFS)); - assertEquals(1, service.determineMaximumNumberOfSlices(null, parallelReq, ResultsType.DFS)); - assertEquals(executorPoolSize, service.determineMaximumNumberOfSlices(threadPoolExecutor, parallelReq, ResultsType.QUERY)); - assertEquals(1, service.determineMaximumNumberOfSlices(threadPoolExecutor, singleSliceReq, ResultsType.QUERY)); - assertEquals(1, service.determineMaximumNumberOfSlices(notThreadPoolExecutor, parallelReq, ResultsType.DFS)); - } - try { - ClusterUpdateSettingsResponse response = client().admin() - .cluster() - .prepareUpdateSettings() - 
.setPersistentSettings(Settings.builder().put(QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey(), false).build()) - .get(); - assertTrue(response.isAcknowledged()); - { - assertEquals(executorPoolSize, service.determineMaximumNumberOfSlices(threadPoolExecutor, parallelReq, ResultsType.DFS)); - assertEquals(1, service.determineMaximumNumberOfSlices(null, parallelReq, ResultsType.DFS)); - assertEquals(1, service.determineMaximumNumberOfSlices(threadPoolExecutor, parallelReq, ResultsType.QUERY)); - assertEquals(1, service.determineMaximumNumberOfSlices(null, parallelReq, ResultsType.QUERY)); - assertEquals(1, service.determineMaximumNumberOfSlices(notThreadPoolExecutor, parallelReq, ResultsType.DFS)); - } - } finally { - // reset original default setting - client().admin() - .cluster() - .prepareUpdateSettings() - .setPersistentSettings(Settings.builder().putNull(QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey()).build()) - .get(); - { - assertEquals(executorPoolSize, service.determineMaximumNumberOfSlices(threadPoolExecutor, parallelReq, ResultsType.DFS)); - assertEquals(executorPoolSize, service.determineMaximumNumberOfSlices(threadPoolExecutor, parallelReq, ResultsType.QUERY)); + try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean())) { + assertNotNull(searchContext.searcher().getExecutor()); + } } } } @@ -2045,7 +1957,7 @@ public void testSlicingBehaviourForParallelCollection() throws Exception { executor.setMaximumPoolSize(configuredMaxPoolSize); // We set this explicitly to be independent of CPU cores. int numDocs = randomIntBetween(50, 100); for (int i = 0; i < numDocs; i++) { - client().prepareIndex("index").setId(String.valueOf(i)).setSource("field", "value").get(); + prepareIndex("index").setId(String.valueOf(i)).setSource("field", "value").get(); if (i % 5 == 0) { indicesAdmin().prepareRefresh("index").get(); } @@ -2068,114 +1980,37 @@ public void testSlicingBehaviourForParallelCollection() throws Exception { try (ReaderContext readerContext = createReaderContext(indexService, indexShard)) { SearchShardTask task = new SearchShardTask(0, "type", "action", "description", null, emptyMap()); { - SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, true); - ContextIndexSearcher searcher = searchContext.searcher(); - assertNotNull(searcher.getExecutor()); - - final int maxPoolSize = executor.getMaximumPoolSize(); - assertEquals( - "Sanity check to ensure this isn't the default of 1 when pool size is unset", - configuredMaxPoolSize, - maxPoolSize - ); - - final int expectedSlices = ContextIndexSearcher.computeSlices(searcher.getIndexReader().leaves(), maxPoolSize, 1).length; - assertNotEquals("Sanity check to ensure this isn't the default of 1 when pool size is unset", 1, expectedSlices); - - final long priorExecutorTaskCount = executor.getCompletedTaskCount(); - searcher.search(termQuery, new TotalHitCountCollectorManager()); - assertBusy( - () -> assertEquals( - "DFS supports parallel collection, so the number of slices should be > 1.", - expectedSlices, - executor.getCompletedTaskCount() - priorExecutorTaskCount - ) - ); - } - { - SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.QUERY, true); - ContextIndexSearcher searcher = searchContext.searcher(); - assertNotNull(searcher.getExecutor()); - - final int maxPoolSize = executor.getMaximumPoolSize(); - assertEquals( - "Sanity check to ensure this isn't the default of 1 when pool size 
is unset", - configuredMaxPoolSize, - maxPoolSize - ); - - final int expectedSlices = ContextIndexSearcher.computeSlices(searcher.getIndexReader().leaves(), maxPoolSize, 1).length; - assertNotEquals("Sanity check to ensure this isn't the default of 1 when pool size is unset", 1, expectedSlices); - - final long priorExecutorTaskCount = executor.getCompletedTaskCount(); - searcher.search(termQuery, new TotalHitCountCollectorManager()); - assertBusy( - () -> assertEquals( - "QUERY supports parallel collection when enabled, so the number of slices should be > 1.", - expectedSlices, - executor.getCompletedTaskCount() - priorExecutorTaskCount - ) - ); - } - { - SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.FETCH, true); - ContextIndexSearcher searcher = searchContext.searcher(); - assertNotNull(searcher.getExecutor()); - final long priorExecutorTaskCount = executor.getCompletedTaskCount(); - searcher.search(termQuery, new TotalHitCountCollectorManager()); - assertBusy( - () -> assertEquals( - "The number of slices should be 1 as FETCH does not support parallel collection.", - 1, - executor.getCompletedTaskCount() - priorExecutorTaskCount - ) - ); - } - { - SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.NONE, true); - ContextIndexSearcher searcher = searchContext.searcher(); - assertNotNull(searcher.getExecutor()); - final long priorExecutorTaskCount = executor.getCompletedTaskCount(); - searcher.search(termQuery, new TotalHitCountCollectorManager()); - assertBusy( - () -> assertEquals( - "The number of slices should be 1 as NONE does not support parallel collection.", - 1, - executor.getCompletedTaskCount() - priorExecutorTaskCount - ) - ); - } - - try { - ClusterUpdateSettingsResponse response = client().admin() - .cluster() - .prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put(QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey(), false).build()) - .get(); - assertTrue(response.isAcknowledged()); - { - SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.QUERY, true); + try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, true)) { ContextIndexSearcher searcher = searchContext.searcher(); assertNotNull(searcher.getExecutor()); + + final int maxPoolSize = executor.getMaximumPoolSize(); + assertEquals( + "Sanity check to ensure this isn't the default of 1 when pool size is unset", + configuredMaxPoolSize, + maxPoolSize + ); + + final int expectedSlices = ContextIndexSearcher.computeSlices( + searcher.getIndexReader().leaves(), + maxPoolSize, + 1 + ).length; + assertNotEquals("Sanity check to ensure this isn't the default of 1 when pool size is unset", 1, expectedSlices); + final long priorExecutorTaskCount = executor.getCompletedTaskCount(); searcher.search(termQuery, new TotalHitCountCollectorManager()); assertBusy( () -> assertEquals( - "The number of slices should be 1 when QUERY parallel collection is disabled.", - 1, + "DFS supports parallel collection, so the number of slices should be > 1.", + expectedSlices, executor.getCompletedTaskCount() - priorExecutorTaskCount ) ); } - } finally { - // Reset to the original default setting and check to ensure it takes effect. 
- client().admin() - .cluster() - .prepareUpdateSettings() - .setPersistentSettings(Settings.builder().putNull(QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey()).build()) - .get(); - { - SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.QUERY, true); + } + { + try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.QUERY, true)) { ContextIndexSearcher searcher = searchContext.searcher(); assertNotNull(searcher.getExecutor()); @@ -2204,73 +2039,96 @@ public void testSlicingBehaviourForParallelCollection() throws Exception { ); } } - } - } - - public void testIsParallelCollectionSupportedForResults() throws Exception { - SearchSourceBuilder searchSourceBuilderOrNull = randomBoolean() ? null : new SearchSourceBuilder(); - for (var resultsType : ResultsType.values()) { - switch (resultsType) { - case NONE, FETCH -> assertFalse( - "NONE and FETCH phases do not support parallel collection.", - SearchService.isParallelCollectionSupportedForResults(resultsType, searchSourceBuilderOrNull, randomBoolean()) - ); - case DFS -> assertTrue( - "DFS phase always supports parallel collection.", - SearchService.isParallelCollectionSupportedForResults(resultsType, searchSourceBuilderOrNull, randomBoolean()) - ); - case QUERY -> { - SearchSourceBuilder searchSourceBuilderNoAgg = new SearchSourceBuilder(); - assertTrue( - "Parallel collection should be supported for the query phase when no agg is present.", - SearchService.isParallelCollectionSupportedForResults(resultsType, searchSourceBuilderNoAgg, true) - ); - assertTrue( - "Parallel collection should be supported for the query phase when the source is null.", - SearchService.isParallelCollectionSupportedForResults(resultsType, null, true) - ); - - SearchSourceBuilder searchSourceAggSupportsParallelCollection = new SearchSourceBuilder(); - searchSourceAggSupportsParallelCollection.aggregation(new DateRangeAggregationBuilder("dateRange")); - assertTrue( - "Parallel collection should be supported for the query phase when when enabled && contains supported agg.", - SearchService.isParallelCollectionSupportedForResults(resultsType, searchSourceAggSupportsParallelCollection, true) - ); - - assertFalse( - "Parallel collection should not be supported for the query phase when disabled.", - SearchService.isParallelCollectionSupportedForResults(resultsType, searchSourceBuilderNoAgg, false) - ); - assertFalse( - "Parallel collection should not be supported for the query phase when disabled and source is null.", - SearchService.isParallelCollectionSupportedForResults(resultsType, null, false) - ); - - SearchSourceBuilder searchSourceAggDoesNotSupportParallelCollection = new SearchSourceBuilder(); - searchSourceAggDoesNotSupportParallelCollection.aggregation(new TermsAggregationBuilder("terms")); - assertFalse( - "Parallel collection should not be supported for the query phase when " - + "enabled && does not contains supported agg.", - SearchService.isParallelCollectionSupportedForResults( - resultsType, - searchSourceAggDoesNotSupportParallelCollection, - true + { + try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.FETCH, true)) { + ContextIndexSearcher searcher = searchContext.searcher(); + assertNotNull(searcher.getExecutor()); + final long priorExecutorTaskCount = executor.getCompletedTaskCount(); + searcher.search(termQuery, new TotalHitCountCollectorManager()); + assertBusy( + () -> assertEquals( + "The number of slices should be 1 
as FETCH does not support parallel collection.", + 1, + executor.getCompletedTaskCount() - priorExecutorTaskCount ) ); - - SearchSourceBuilder searchSourceMultiAggDoesNotSupportParallelCollection = new SearchSourceBuilder(); - searchSourceMultiAggDoesNotSupportParallelCollection.aggregation(new TermsAggregationBuilder("terms")); - searchSourceMultiAggDoesNotSupportParallelCollection.aggregation(new DateRangeAggregationBuilder("dateRange")); - assertFalse( - "Parallel collection should not be supported for the query phase when when enabled && contains unsupported agg.", - SearchService.isParallelCollectionSupportedForResults( - resultsType, - searchSourceMultiAggDoesNotSupportParallelCollection, - true + } + } + { + try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.NONE, true)) { + ContextIndexSearcher searcher = searchContext.searcher(); + assertNotNull(searcher.getExecutor()); + final long priorExecutorTaskCount = executor.getCompletedTaskCount(); + searcher.search(termQuery, new TotalHitCountCollectorManager()); + assertBusy( + () -> assertEquals( + "The number of slices should be 1 as NONE does not support parallel collection.", + 1, + executor.getCompletedTaskCount() - priorExecutorTaskCount ) ); } - default -> throw new UnsupportedOperationException("Untested ResultsType added, please add new testcases."); + } + + try { + ClusterUpdateSettingsResponse response = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey(), false).build()) + .get(); + assertTrue(response.isAcknowledged()); + { + try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.QUERY, true)) { + ContextIndexSearcher searcher = searchContext.searcher(); + assertNotNull(searcher.getExecutor()); + final long priorExecutorTaskCount = executor.getCompletedTaskCount(); + searcher.search(termQuery, new TotalHitCountCollectorManager()); + assertBusy( + () -> assertEquals( + "The number of slices should be 1 when QUERY parallel collection is disabled.", + 1, + executor.getCompletedTaskCount() - priorExecutorTaskCount + ) + ); + } + } + } finally { + // Reset to the original default setting and check to ensure it takes effect. 
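[review note] Both settings-sensitive tests here follow the same discipline: flip the persistent cluster setting, assert the changed behaviour, and restore the default in a finally block so a failing assertion cannot leak the setting into later tests. A condensed sketch with a plain map standing in for persistent cluster settings; the key name is illustrative, the tests use QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey():

import java.util.HashMap;
import java.util.Map;

public class ToggleAndResetDemo {
    static final String KEY = "demo.parallel_collection.enabled"; // hypothetical key

    public static void main(String[] args) {
        Map<String, Object> persistentSettings = new HashMap<>();
        try {
            persistentSettings.put(KEY, false);   // disable: expect single-slice behaviour
            // ... assertions under the disabled setting ...
        } finally {
            persistentSettings.remove(KEY);       // the putNull(...) equivalent: back to default
            // ... assertions that the default (parallel) behaviour is restored ...
        }
    }
}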
+ client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull(QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey()).build()) + .get(); + { + try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.QUERY, true)) { + ContextIndexSearcher searcher = searchContext.searcher(); + assertNotNull(searcher.getExecutor()); + + final int maxPoolSize = executor.getMaximumPoolSize(); + assertEquals( + "Sanity check to ensure this isn't the default of 1 when pool size is unset", + configuredMaxPoolSize, + maxPoolSize + ); + + final int expectedSlices = ContextIndexSearcher.computeSlices( + searcher.getIndexReader().leaves(), + maxPoolSize, + 1 + ).length; + assertNotEquals("Sanity check to ensure this isn't the default of 1 when pool size is unset", 1, expectedSlices); + + final long priorExecutorTaskCount = executor.getCompletedTaskCount(); + searcher.search(termQuery, new TotalHitCountCollectorManager()); + assertBusy( + () -> assertEquals( + "QUERY supports parallel collection when enabled, so the number of slices should be > 1.", + expectedSlices, + executor.getCompletedTaskCount() - priorExecutorTaskCount + ) + ); + } + } } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesTests.java index d67e99e4521f1..d5fb8f1b63e7e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesTests.java @@ -24,11 +24,9 @@ import org.elasticsearch.index.query.WrapperQueryBuilder; import org.elasticsearch.script.Script; import org.elasticsearch.search.SearchModule; -import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder; import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.nested.NestedAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.terms.SignificantTermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.CardinalityAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.BucketScriptPipelineAggregationBuilder; @@ -45,12 +43,12 @@ import java.io.IOException; import java.util.Collection; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Random; import java.util.concurrent.CountDownLatch; import java.util.function.Supplier; +import java.util.function.ToLongFunction; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -318,35 +316,27 @@ public void testBuildPipelineTreeResolvesPipelineOrder() { } public void testSupportsParallelCollection() { + ToLongFunction randomCardinality = name -> randomLongBetween(1, 200); { AggregatorFactories.Builder builder = new AggregatorFactories.Builder(); - assertTrue(builder.supportsParallelCollection()); + assertTrue(builder.supportsParallelCollection(randomCardinality)); builder.addAggregator(new FilterAggregationBuilder("name", new MatchAllQueryBuilder())); - assertTrue(builder.supportsParallelCollection()); - builder.addAggregator(new 
TermsAggregationBuilder("terms")); - assertFalse(builder.supportsParallelCollection()); - } - { - AggregatorFactories.Builder builder = new AggregatorFactories.Builder(); - builder.addAggregator(new TermsAggregationBuilder("terms")); - assertFalse(builder.supportsParallelCollection()); + assertTrue(builder.supportsParallelCollection(randomCardinality)); } { AggregatorFactories.Builder builder = new AggregatorFactories.Builder(); builder.addAggregator(new CardinalityAggregationBuilder("cardinality")); - assertTrue(builder.supportsParallelCollection()); + assertTrue(builder.supportsParallelCollection(randomCardinality)); } { AggregatorFactories.Builder builder = new AggregatorFactories.Builder(); builder.addAggregator(new NestedAggregationBuilder("nested", "path")); - assertTrue(builder.supportsParallelCollection()); + assertTrue(builder.supportsParallelCollection(randomCardinality)); } { AggregatorFactories.Builder builder = new AggregatorFactories.Builder(); - builder.addAggregator( - new CompositeAggregationBuilder("composite", Collections.singletonList(new TermsValuesSourceBuilder("name"))) - ); - assertTrue(builder.supportsParallelCollection()); + builder.addAggregator(new SignificantTermsAggregationBuilder("name")); + assertFalse(builder.supportsParallelCollection(randomCardinality)); } { AggregatorFactories.Builder builder = new AggregatorFactories.Builder(); @@ -356,7 +346,7 @@ public boolean isInSortOrderExecutionRequired() { return true; } }); - assertFalse(builder.supportsParallelCollection()); + assertFalse(builder.supportsParallelCollection(randomCardinality)); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTestCase.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTestCase.java index f5912872e004a..549461418cabf 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTestCase.java @@ -91,8 +91,7 @@ protected void indexData() throws Exception { protected List indexDoc(String shard, String key, int times) throws Exception { IndexRequestBuilder[] builders = new IndexRequestBuilder[times]; for (int i = 0; i < times; i++) { - builders[i] = client().prepareIndex("idx") - .setRouting(shard) + builders[i] = prepareIndex("idx").setRouting(shard) .setSource(jsonBuilder().startObject().field("key", key).field("value", 1).endObject()); } return Arrays.asList(builders); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java index 66978b981e261..4a9e086d72143 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java @@ -20,6 +20,7 @@ import java.util.List; import java.util.SortedSet; import java.util.TreeSet; +import java.util.function.ToLongFunction; public class TermsTests extends BaseAggregationTestCase { @@ -164,4 +165,54 @@ private List randomOrder() { return orders; } + public void testSupportsParallelCollection() { + { + TermsAggregationBuilder terms = new TermsAggregationBuilder("terms").executionHint("map"); + assertFalse(terms.supportsParallelCollection(field -> randomIntBetween(-1, 100))); + } + { + TermsAggregationBuilder terms = new TermsAggregationBuilder("terms").executionHint("global_ordinals"); + 
assertTrue(terms.supportsParallelCollection(field -> 0)); + } + { + TermsAggregationBuilder terms = new TermsAggregationBuilder("terms"); + terms.order(randomBoolean() ? BucketOrder.key(randomBoolean()) : BucketOrder.compound(BucketOrder.key(randomBoolean()))); + if (randomBoolean()) { + terms.shardSize(randomIntBetween(1, 100)); + } + assertTrue(terms.supportsParallelCollection(field -> randomIntBetween(0, 49))); + } + { + TermsAggregationBuilder terms = new TermsAggregationBuilder("terms"); + terms.order(randomBoolean() ? BucketOrder.key(randomBoolean()) : BucketOrder.compound(BucketOrder.key(randomBoolean()))); + if (randomBoolean()) { + terms.shardSize(randomIntBetween(1, 100)); + } + assertFalse(terms.supportsParallelCollection(field -> randomIntBetween(51, 100))); + } + { + TermsAggregationBuilder terms = new TermsAggregationBuilder("terms"); + assertFalse(terms.supportsParallelCollection(field -> -1)); + } + { + TermsAggregationBuilder terms = new TermsAggregationBuilder("terms"); + assertTrue(terms.supportsParallelCollection(field -> 0)); + } + { + TermsAggregationBuilder terms = new TermsAggregationBuilder("terms"); + terms.subAggregation(new TermsAggregationBuilder("name") { + @Override + public boolean supportsParallelCollection(ToLongFunction fieldCardinalityResolver) { + return false; + } + }); + assertFalse(terms.supportsParallelCollection(field -> 0)); + } + { + TermsAggregationBuilder terms = new TermsAggregationBuilder("terms"); + terms.shardSize(10); + assertTrue(terms.supportsParallelCollection(field -> randomIntBetween(1, 10))); + assertFalse(terms.supportsParallelCollection(field -> randomIntBetween(11, 100))); + } + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java index 4d3abd765a2b7..be0963fce8131 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java @@ -13,10 +13,13 @@ import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.sort.SortOrder; import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import java.util.function.ToLongFunction; public class CompositeAggregationBuilderTests extends BaseAggregationTestCase { private DateHistogramValuesSourceBuilder randomDateHistogramSourceBuilder() { @@ -93,11 +96,9 @@ private HistogramValuesSourceBuilder randomHistogramSourceBuilder() { @Override protected CompositeAggregationBuilder createTestAggregatorBuilder() { int numSources = randomIntBetween(1, 10); - numSources = 1; List> sources = new ArrayList<>(); for (int i = 0; i < numSources; i++) { int type = randomIntBetween(0, 3); - type = 3; switch (type) { case 0 -> sources.add(randomTermsSourceBuilder()); case 1 -> sources.add(randomDateHistogramSourceBuilder()); @@ -108,4 +109,51 @@ protected CompositeAggregationBuilder createTestAggregatorBuilder() { } return new CompositeAggregationBuilder(randomAlphaOfLength(10), sources); } + + public void testSupportsParallelCollection() { 
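[review note] The anonymous TermsAggregationBuilder override above pins down the propagation rule: a builder that itself qualifies must still report false if any sub-aggregation does. A compact sketch of that recursion; the interface and record are hypothetical stand-ins for the builder hierarchy:

import java.util.List;
import java.util.function.ToLongFunction;

interface Builder {
    boolean supportsParallelCollection(ToLongFunction<String> resolver);
}

record ParentBuilder(boolean selfSupports, List<Builder> subAggregations) implements Builder {
    @Override
    public boolean supportsParallelCollection(ToLongFunction<String> resolver) {
        return selfSupports
            && subAggregations.stream().allMatch(sub -> sub.supportsParallelCollection(resolver));
    }
}

class PropagationDemo {
    public static void main(String[] args) {
        Builder vetoingChild = resolver -> false;
        Builder parent = new ParentBuilder(true, List.of(vetoingChild));
        System.out.println(parent.supportsParallelCollection(field -> 0)); // false: child vetoes
    }
}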
+ assertTrue( + new CompositeAggregationBuilder(randomAlphaOfLength(10), Collections.singletonList(randomDateHistogramSourceBuilder())) + .supportsParallelCollection(null) + ); + assertTrue( + new CompositeAggregationBuilder(randomAlphaOfLength(10), Collections.singletonList(randomHistogramSourceBuilder())) + .supportsParallelCollection(null) + ); + CompositeAggregationBuilder builder = new CompositeAggregationBuilder( + randomAlphaOfLength(10), + Collections.singletonList(randomGeoTileGridValuesSourceBuilder()) + ); + assertTrue(builder.supportsParallelCollection(null)); + builder.subAggregation(new TermsAggregationBuilder("name") { + @Override + public boolean supportsParallelCollection(ToLongFunction fieldCardinalityResolver) { + return false; + } + }); + assertFalse(builder.supportsParallelCollection(null)); + assertFalse( + new CompositeAggregationBuilder(randomAlphaOfLength(10), Collections.singletonList(new TermsValuesSourceBuilder("name"))) + .supportsParallelCollection(field -> -1) + ); + assertTrue( + new CompositeAggregationBuilder(randomAlphaOfLength(10), Collections.singletonList(new TermsValuesSourceBuilder("name"))) + .supportsParallelCollection(field -> randomIntBetween(0, 50)) + ); + assertFalse( + new CompositeAggregationBuilder(randomAlphaOfLength(10), Collections.singletonList(new TermsValuesSourceBuilder("name"))) + .supportsParallelCollection(field -> randomIntBetween(51, 100)) + ); + assertFalse( + new CompositeAggregationBuilder( + randomAlphaOfLength(10), + Collections.singletonList(new TermsValuesSourceBuilder("name").script(new Script("id"))) + ).supportsParallelCollection(field -> randomIntBetween(-1, 100)) + ); + assertFalse( + new CompositeAggregationBuilder( + randomAlphaOfLength(10), + List.of(randomDateHistogramSourceBuilder(), new TermsValuesSourceBuilder("name")) + ).supportsParallelCollection(field -> randomIntBetween(51, 100)) + ); + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregatorTests.java index 2be452f3065b6..f3fe9c6bc05dd 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregatorTests.java @@ -65,26 +65,14 @@ private static final class TestIpDataHolder { this.time = time; } - public String getIpAddressAsString() { - return ipAddressAsString; - } - public InetAddress getIpAddress() { return ipAddress; } - public InetAddress getSubnet() { - return subnet; - } - public String getSubnetAsString() { return subnetAsString; } - public int getPrefixLength() { - return prefixLength; - } - public long getTime() { return time; } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregatorTests.java index db7d93d1cd05c..5c467893179ee 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregatorTests.java @@ -706,12 +706,6 @@ private void addManyMixedTextDocs(IndexWriter w) throws IOException { } } - private void addFields(Document doc, List createFields) { - for (Field field : createFields) { - doc.add(field); - } - } - public 
String randomExecutionHint() { return randomBoolean() ? null : randomFrom(ExecutionMode.values()).toString(); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index fe5b63ca79f0a..b0d67879b26a1 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -35,12 +35,10 @@ import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; -import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.core.CheckedConsumer; @@ -87,7 +85,6 @@ import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; -import org.elasticsearch.search.aggregations.MultiBucketConsumerService; import org.elasticsearch.search.aggregations.MultiBucketConsumerService.TooManyBucketsException; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.search.aggregations.bucket.filter.Filter; @@ -2248,29 +2245,6 @@ private InternalAggregation buildInternalAggregation(TermsAggregationBuilder bui return searchAndReduce(reader, new AggTestConfig(builder, fieldType)); } - private T reduce(AggregationBuilder builder, Aggregator agg, BigArrays bigArrays) throws IOException { - // now do the final reduce - MultiBucketConsumerService.MultiBucketConsumer reduceBucketConsumer = new MultiBucketConsumerService.MultiBucketConsumer( - Integer.MAX_VALUE, - new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST) - ); - AggregationReduceContext context = new AggregationReduceContext.ForFinal( - bigArrays, - getMockScriptService(), - () -> false, - builder, - reduceBucketConsumer, - PipelineTree.EMPTY - ); - - @SuppressWarnings("unchecked") - T topLevel = (T) agg.buildTopLevel(); - @SuppressWarnings("unchecked") - T result = (T) topLevel.reduce(Collections.singletonList(topLevel), context); - doAssertReducedMultiBucketConsumer(result, reduceBucketConsumer); - return result; - } - @Override protected List objectMappers() { return List.of(NestedAggregatorTests.nestedObject("nested_object")); diff --git a/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java index d5dd265fb1ea0..7c8496d0f4b20 100644 --- a/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java @@ -69,6 +69,7 @@ import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; +import java.util.function.ToLongFunction; import static java.util.Collections.emptyMap; import static org.hamcrest.CoreMatchers.containsString; @@ -899,81 +900,81 @@ public void 
testSupportsParallelCollection() { searchSourceBuilder.profile(false); return searchSourceBuilder; }; + ToLongFunction fieldCardinality = name -> -1; { SearchSourceBuilder searchSourceBuilder = newSearchSourceBuilder.get(); if (searchSourceBuilder.aggregations() == null) { - assertTrue(searchSourceBuilder.supportsParallelCollection()); + assertTrue(searchSourceBuilder.supportsParallelCollection(fieldCardinality)); } else { assertEquals( - searchSourceBuilder.aggregations().supportsParallelCollection(), - searchSourceBuilder.supportsParallelCollection() + searchSourceBuilder.aggregations().supportsParallelCollection(fieldCardinality), + searchSourceBuilder.supportsParallelCollection(fieldCardinality) ); } } { SearchSourceBuilder searchSourceBuilder = newSearchSourceBuilder.get(); searchSourceBuilder.aggregation(new MaxAggregationBuilder("max")); - assertTrue(searchSourceBuilder.supportsParallelCollection()); - + assertTrue(searchSourceBuilder.supportsParallelCollection(fieldCardinality)); } { SearchSourceBuilder searchSourceBuilder = newSearchSourceBuilder.get(); searchSourceBuilder.aggregation(new TermsAggregationBuilder("terms")); - assertFalse(searchSourceBuilder.supportsParallelCollection()); + assertFalse(searchSourceBuilder.supportsParallelCollection(fieldCardinality)); } { SearchSourceBuilder searchSourceBuilder = newSearchSourceBuilder.get(); searchSourceBuilder.collapse(CollapseBuilderTests.randomCollapseBuilder()); - assertFalse(searchSourceBuilder.supportsParallelCollection()); + assertFalse(searchSourceBuilder.supportsParallelCollection(fieldCardinality)); } { SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().collapse(CollapseBuilderTests.randomCollapseBuilder()); - assertFalse(searchSourceBuilder.supportsParallelCollection()); + assertFalse(searchSourceBuilder.supportsParallelCollection(fieldCardinality)); } { SearchSourceBuilder searchSourceBuilder = newSearchSourceBuilder.get(); searchSourceBuilder.sort(SortBuilders.scoreSort().order(randomFrom(SortOrder.values()))); - assertTrue(searchSourceBuilder.supportsParallelCollection()); + assertTrue(searchSourceBuilder.supportsParallelCollection(fieldCardinality)); searchSourceBuilder.sort( SortBuilders.scriptSort( new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "foo", emptyMap()), ScriptSortBuilder.ScriptSortType.NUMBER ).order(randomFrom(SortOrder.values())) ); - assertFalse(searchSourceBuilder.supportsParallelCollection()); + assertFalse(searchSourceBuilder.supportsParallelCollection(fieldCardinality)); } { SearchSourceBuilder searchSourceBuilder = newSearchSourceBuilder.get(); searchSourceBuilder.sort(SortBuilders.scoreSort().order(randomFrom(SortOrder.values()))); - assertTrue(searchSourceBuilder.supportsParallelCollection()); + assertTrue(searchSourceBuilder.supportsParallelCollection(fieldCardinality)); searchSourceBuilder.sort(SortBuilders.fieldSort("field")); - assertFalse(searchSourceBuilder.supportsParallelCollection()); + assertFalse(searchSourceBuilder.supportsParallelCollection(fieldCardinality)); } { SearchSourceBuilder searchSourceBuilder = newSearchSourceBuilder.get(); searchSourceBuilder.sort(SortBuilders.scoreSort().order(randomFrom(SortOrder.values()))); - assertTrue(searchSourceBuilder.supportsParallelCollection()); + assertTrue(searchSourceBuilder.supportsParallelCollection(fieldCardinality)); searchSourceBuilder.sort(SortBuilders.geoDistanceSort("field", 0, 0)); - assertFalse(searchSourceBuilder.supportsParallelCollection()); + 
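[review note] Read together, the assertions in this test sketch the top-level gate: a score sort keeps parallel collection available, while script, field, geo-distance, _doc and PIT-tiebreaker sorts, field collapsing, and profiling each switch it off (aggregations delegate to their own check). A condensed restatement of that decision table, an assumed summary of the assertions rather than the real implementation:

import java.util.EnumSet;
import java.util.Set;

public class SourceGateDemo {
    enum Feature { SCORE_SORT, FIELD_SORT, GEO_SORT, SCRIPT_SORT, DOC_SORT, PIT_TIEBREAKER, COLLAPSE, PROFILE }

    // Features the assertions show to be incompatible with parallel collection.
    static final Set<Feature> VETOES = EnumSet.of(
        Feature.FIELD_SORT, Feature.GEO_SORT, Feature.SCRIPT_SORT,
        Feature.DOC_SORT, Feature.PIT_TIEBREAKER, Feature.COLLAPSE, Feature.PROFILE
    );

    static boolean supportsParallelCollection(Set<Feature> used) {
        return used.stream().noneMatch(VETOES::contains);
    }

    public static void main(String[] args) {
        System.out.println(supportsParallelCollection(EnumSet.of(Feature.SCORE_SORT)));                   // true
        System.out.println(supportsParallelCollection(EnumSet.of(Feature.SCORE_SORT, Feature.COLLAPSE))); // false
    }
}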
assertFalse(searchSourceBuilder.supportsParallelCollection(fieldCardinality)); } { SearchSourceBuilder searchSourceBuilder = newSearchSourceBuilder.get(); searchSourceBuilder.sort(SortBuilders.scoreSort().order(randomFrom(SortOrder.values()))); - assertTrue(searchSourceBuilder.supportsParallelCollection()); + assertTrue(searchSourceBuilder.supportsParallelCollection(fieldCardinality)); searchSourceBuilder.sort(SortBuilders.pitTiebreaker()); - assertFalse(searchSourceBuilder.supportsParallelCollection()); + assertFalse(searchSourceBuilder.supportsParallelCollection(fieldCardinality)); } { SearchSourceBuilder searchSourceBuilder = newSearchSourceBuilder.get(); searchSourceBuilder.sort(SortBuilders.scoreSort().order(randomFrom(SortOrder.values()))); - assertTrue(searchSourceBuilder.supportsParallelCollection()); + assertTrue(searchSourceBuilder.supportsParallelCollection(fieldCardinality)); searchSourceBuilder.sort(SortBuilders.fieldSort(FieldSortBuilder.DOC_FIELD_NAME)); - assertFalse(searchSourceBuilder.supportsParallelCollection()); + assertFalse(searchSourceBuilder.supportsParallelCollection(fieldCardinality)); } { SearchSourceBuilder searchSourceBuilder = newSearchSourceBuilder.get(); searchSourceBuilder.profile(true); - assertFalse(searchSourceBuilder.supportsParallelCollection()); + assertFalse(searchSourceBuilder.supportsParallelCollection(fieldCardinality)); } } diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoPointShapeQueryTests.java b/server/src/test/java/org/elasticsearch/search/geo/GeoPointShapeQueryTests.java index 139e88aa5aebd..cfa0087731b60 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoPointShapeQueryTests.java +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoPointShapeQueryTests.java @@ -73,8 +73,7 @@ public void testFieldAlias() throws IOException { ensureGreen(); Point point = GeometryTestUtils.randomPoint(false); - client().prepareIndex(defaultIndexName) - .setId("1") + prepareIndex(defaultIndexName).setId("1") .setSource(jsonBuilder().startObject().field(defaultFieldName, WellKnownText.toWKT(point)).endObject()) .setRefreshPolicy(IMMEDIATE) .get(); diff --git a/server/src/test/java/org/elasticsearch/search/profile/SearchProfileResultsBuilderTests.java b/server/src/test/java/org/elasticsearch/search/profile/SearchProfileResultsBuilderTests.java index 9d801f0303386..66879e5a90a3f 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/SearchProfileResultsBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/profile/SearchProfileResultsBuilderTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.TransportMessage; import java.util.List; import java.util.Map; @@ -30,24 +31,32 @@ public void testFetchWithoutQuery() { randomValueOtherThanMany(searchPhase::containsKey, SearchProfileResultsBuilderTests::randomTarget), null ); - Exception e = expectThrows(IllegalStateException.class, () -> builder(searchPhase).build(List.of(fetchPhase))); - assertThat( - e.getMessage(), - matchesPattern( - "Profile returned fetch phase information for .+ but didn't return query phase information\\. 
Query phase keys were .+" - ) - ); + try { + Exception e = expectThrows(IllegalStateException.class, () -> builder(searchPhase).build(List.of(fetchPhase))); + assertThat( + e.getMessage(), + matchesPattern( + "Profile returned fetch phase information for .+ but didn't return query phase information\\. Query phase keys were .+" + ) + ); + } finally { + fetchPhase.decRef(); + } } public void testQueryWithoutAnyFetch() { Map searchPhase = randomSearchPhaseResults(between(1, 2)); FetchSearchResult fetchPhase = fetchResult(searchPhase.keySet().iterator().next(), null); - SearchProfileResults result = builder(searchPhase).build(List.of(fetchPhase)); - assertThat( - result.getShardResults().values().stream().filter(r -> r.getQueryPhase() != null).count(), - equalTo((long) searchPhase.size()) - ); - assertThat(result.getShardResults().values().stream().filter(r -> r.getFetchPhase() != null).count(), equalTo(0L)); + try { + SearchProfileResults result = builder(searchPhase).build(List.of(fetchPhase)); + assertThat( + result.getShardResults().values().stream().filter(r -> r.getQueryPhase() != null).count(), + equalTo((long) searchPhase.size()) + ); + assertThat(result.getShardResults().values().stream().filter(r -> r.getFetchPhase() != null).count(), equalTo(0L)); + } finally { + fetchPhase.decRef(); + } } public void testQueryAndFetch() { @@ -56,15 +65,19 @@ public void testQueryAndFetch() { .stream() .map(e -> fetchResult(e.getKey(), new ProfileResult("fetch", "", Map.of(), Map.of(), 1, List.of()))) .collect(toList()); - SearchProfileResults result = builder(searchPhase).build(fetchPhase); - assertThat( - result.getShardResults().values().stream().filter(r -> r.getQueryPhase() != null).count(), - equalTo((long) searchPhase.size()) - ); - assertThat( - result.getShardResults().values().stream().filter(r -> r.getFetchPhase() != null).count(), - equalTo((long) searchPhase.size()) - ); + try { + SearchProfileResults result = builder(searchPhase).build(fetchPhase); + assertThat( + result.getShardResults().values().stream().filter(r -> r.getQueryPhase() != null).count(), + equalTo((long) searchPhase.size()) + ); + assertThat( + result.getShardResults().values().stream().filter(r -> r.getFetchPhase() != null).count(), + equalTo((long) searchPhase.size()) + ); + } finally { + fetchPhase.forEach(TransportMessage::decRef); + } } private static Map randomSearchPhaseResults(int size) { diff --git a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java index 9569bd982363e..fafe66c743ce8 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java @@ -135,13 +135,14 @@ private TestSearchContext createContext(ContextIndexSearcher searcher, Query que private void countTestCase(Query query, IndexReader reader, boolean shouldCollectSearch, boolean shouldCollectCount) throws Exception { ContextIndexSearcher searcher = shouldCollectSearch ? newContextSearcher(reader) : noCollectionContextSearcher(reader); - TestSearchContext context = createContext(searcher, query); - context.setSize(0); + try (TestSearchContext context = createContext(searcher, query)) { + context.setSize(0); - QueryPhase.addCollectorsAndSearch(context); + QueryPhase.addCollectorsAndSearch(context); - ContextIndexSearcher countSearcher = shouldCollectCount ? 
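[review note] The SearchProfileResultsBuilderTests hunks above wrap the assertions in try/finally so the ref-counted FetchSearchResult is always decRef'd, even when an assertion throws. A self-contained sketch of the acquire/assert/release discipline; DemoRefCounted is a hypothetical stand-in for Elasticsearch's ref counting:

import java.util.concurrent.atomic.AtomicInteger;

final class DemoRefCounted {
    private final AtomicInteger refs = new AtomicInteger(1); // acquired on construction
    void decRef() {
        if (refs.decrementAndGet() == 0) {
            // last reference gone: release pooled buffers here
        }
    }
}

public class DecRefDemo {
    public static void main(String[] args) {
        DemoRefCounted fetchResult = new DemoRefCounted();
        try {
            // ... build the profile results and run assertions that may throw ...
        } finally {
            fetchResult.decRef(); // runs on both success and failure, so nothing leaks
        }
    }
}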
newContextSearcher(reader) : noCollectionContextSearcher(reader); - assertEquals(countSearcher.count(query), context.queryResult().topDocs().topDocs.totalHits.value); + ContextIndexSearcher countSearcher = shouldCollectCount ? newContextSearcher(reader) : noCollectionContextSearcher(reader); + assertEquals(countSearcher.count(query), context.queryResult().topDocs().topDocs.totalHits.value); + } } private void countTestCase(boolean withDeletions) throws Exception { @@ -226,34 +227,30 @@ private int indexDocs(IndexWriterConfig iwc) throws IOException { public void testPostFilterDisablesHitCountShortcut() throws Exception { int numDocs = indexDocs(); - { - TestSearchContext context = createContext(noCollectionContextSearcher(reader), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(noCollectionContextSearcher(reader), new MatchAllDocsQuery())) { context.setSize(0); QueryPhase.addCollectorsAndSearch(context); assertEquals(numDocs, context.queryResult().topDocs().topDocs.totalHits.value); assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); } - { + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 10), new MatchAllDocsQuery())) { // shortcutTotalHitCount makes us not track total hits as part of the top docs collection, hence size is the threshold - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 10), new MatchAllDocsQuery()); context.setSize(10); QueryPhase.addCollectorsAndSearch(context); assertEquals(numDocs, context.queryResult().topDocs().topDocs.totalHits.value); assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); } - { + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { // QueryPhaseCollector does not propagate Weight#count when a post_filter is provided, hence it forces collection despite // the inner TotalHitCountCollector can shortcut - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); context.setSize(0); context.parsedPostFilter(new ParsedQuery(new MatchNoDocsQuery())); QueryPhase.executeQuery(context); assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value); assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); } - { + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { // shortcutTotalHitCount is disabled for filter collectors, hence we collect until track_total_hits - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); context.setSize(10); context.parsedPostFilter(new ParsedQuery(new MatchNoDocsQuery())); QueryPhase.addCollectorsAndSearch(context); @@ -264,46 +261,43 @@ public void testPostFilterDisablesHitCountShortcut() throws Exception { public void testTerminateAfterWithFilter() throws Exception { indexDocs(); - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); - context.terminateAfter(1); - context.setSize(10); - context.parsedPostFilter(new ParsedQuery(new TermQuery(new Term("foo", "bar")))); - QueryPhase.addCollectorsAndSearch(context); - assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value); - assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); - 
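[review note] Several comments in these hunks hinge on Lucene's hit-count shortcut: for counting-friendly queries, Weight#count can answer from index statistics without scoring a single document, and IndexSearcher#count uses it; per the comments above, wrapping collection behind a post_filter or min_score forfeits that shortcut. A self-contained illustration of the shortcut itself:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.ByteBuffersDirectory;

public class CountShortcutDemo {
    public static void main(String[] args) throws Exception {
        ByteBuffersDirectory dir = new ByteBuffersDirectory();
        try (IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            for (int i = 0; i < 100; i++) {
                Document doc = new Document();
                doc.add(new StringField("foo", "bar", Field.Store.NO));
                w.addDocument(doc);
            }
        }
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
            // With no deletions and a single term, this can be answered from the
            // term's docFreq via Weight#count, without visiting documents.
            int count = new IndexSearcher(reader).count(new TermQuery(new Term("foo", "bar")));
            System.out.println(count); // 100
        }
    }
}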
assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { + context.terminateAfter(1); + context.setSize(10); + context.parsedPostFilter(new ParsedQuery(new TermQuery(new Term("foo", "bar")))); + QueryPhase.addCollectorsAndSearch(context); + assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value); + assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + } } public void testMinScoreDisablesHitCountShortcut() throws Exception { int numDocs = indexDocs(); - { - TestSearchContext context = createContext(noCollectionContextSearcher(reader), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(noCollectionContextSearcher(reader), new MatchAllDocsQuery())) { context.setSize(0); QueryPhase.addCollectorsAndSearch(context); assertEquals(numDocs, context.queryResult().topDocs().topDocs.totalHits.value); assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); } - { + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 10), new MatchAllDocsQuery())) { // shortcutTotalHitCount makes us not track total hits as part of the top docs collection, hence size is the threshold - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 10), new MatchAllDocsQuery()); context.setSize(10); QueryPhase.addCollectorsAndSearch(context); assertEquals(numDocs, context.queryResult().topDocs().topDocs.totalHits.value); assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); } - { + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { // QueryPhaseCollector does not propagate Weight#count when min_score is provided, hence it forces collection despite // the inner TotalHitCountCollector can shortcut - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); context.setSize(0); context.minimumScore(100); QueryPhase.addCollectorsAndSearch(context); assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value); assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); } - { + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { // shortcutTotalHitCount is disabled for filter collectors, hence we collect until track_total_hits - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); context.setSize(10); context.minimumScore(100); QueryPhase.executeQuery(context); @@ -314,11 +308,12 @@ public void testMinScoreDisablesHitCountShortcut() throws Exception { public void testQueryCapturesThreadPoolStats() throws Exception { indexDocs(); - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); - QueryPhase.addCollectorsAndSearch(context); - QuerySearchResult results = context.queryResult(); - assertThat(results.serviceTimeEWMA(), greaterThanOrEqualTo(0L)); - assertThat(results.nodeQueueSize(), greaterThanOrEqualTo(0)); + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { + QueryPhase.addCollectorsAndSearch(context); + QuerySearchResult results = 
context.queryResult(); + assertThat(results.serviceTimeEWMA(), greaterThanOrEqualTo(0L)); + assertThat(results.nodeQueueSize(), greaterThanOrEqualTo(0)); + } } public void testInOrderScrollOptimization() throws Exception { @@ -327,29 +322,30 @@ public void testInOrderScrollOptimization() throws Exception { int numDocs = indexDocs(iwc); ScrollContext scrollContext = new ScrollContext(); - TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader), scrollContext); - context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); - context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); - context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); - scrollContext.lastEmittedDoc = null; - scrollContext.maxScore = Float.NaN; - scrollContext.totalHits = null; - int size = randomIntBetween(2, 5); - context.setSize(size); - - QueryPhase.addCollectorsAndSearch(context); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); - assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); - assertNull(context.queryResult().terminatedEarly()); - assertThat(context.terminateAfter(), equalTo(0)); - assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); - - context.setSearcher(earlyTerminationContextSearcher(reader, size)); - QueryPhase.addCollectorsAndSearch(context); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); - assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); - assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); - assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0].doc, greaterThanOrEqualTo(size)); + try (TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader), scrollContext)) { + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); + scrollContext.lastEmittedDoc = null; + scrollContext.maxScore = Float.NaN; + scrollContext.totalHits = null; + int size = randomIntBetween(2, 5); + context.setSize(size); + + QueryPhase.addCollectorsAndSearch(context); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.terminateAfter(), equalTo(0)); + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + + context.setSearcher(earlyTerminationContextSearcher(reader, size)); + QueryPhase.addCollectorsAndSearch(context); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0].doc, greaterThanOrEqualTo(size)); + } } /** @@ -360,8 +356,7 @@ public void testInOrderScrollOptimization() throws Exception { */ public void testTerminateAfterSize0HitCountShortcut() throws Exception { int numDocs = indexDocs(); - { - TestSearchContext 
context = createContext(noCollectionContextSearcher(reader), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(noCollectionContextSearcher(reader), new MatchAllDocsQuery())) { context.terminateAfter(1); context.setSize(0); QueryPhase.addCollectorsAndSearch(context); @@ -371,8 +366,7 @@ public void testTerminateAfterSize0HitCountShortcut() throws Exception { assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); } // test interaction between trackTotalHits and terminateAfter - { - TestSearchContext context = createContext(noCollectionContextSearcher(reader), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(noCollectionContextSearcher(reader), new MatchAllDocsQuery())) { context.terminateAfter(10); context.setSize(0); context.trackTotalHitsUpTo(-1); @@ -382,8 +376,7 @@ public void testTerminateAfterSize0HitCountShortcut() throws Exception { assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); } - { - TestSearchContext context = createContext(noCollectionContextSearcher(reader), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(noCollectionContextSearcher(reader), new MatchAllDocsQuery())) { context.terminateAfter(10); context.setSize(0); // terminate_after is not honored, no matter the value of track_total_hits. @@ -406,8 +399,7 @@ public void testTerminateAfterSize0HitCountShortcut() throws Exception { public void testTerminateAfterSize0NoHitCountShortcut() throws Exception { indexDocs(); Query query = new NonCountingTermQuery(new Term("foo", "bar")); - { - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 1), query); + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 1), query)) { context.terminateAfter(1); context.setSize(0); QueryPhase.executeQuery(context); @@ -417,8 +409,7 @@ public void testTerminateAfterSize0NoHitCountShortcut() throws Exception { assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); } // test interaction between trackTotalHits and terminateAfter - { - TestSearchContext context = createContext(noCollectionContextSearcher(reader), query); + try (TestSearchContext context = createContext(noCollectionContextSearcher(reader), query)) { context.terminateAfter(10); context.setSize(0); // not tracking total hits makes the hit count collection early terminate, in which case terminate_after can't be honored @@ -434,18 +425,21 @@ public void testTerminateAfterSize0NoHitCountShortcut() throws Exception { // we don't use 9 (terminate_after - 1) because it makes the test unpredictable depending on the number of segments and // documents distribution: terminate_after may be honored at time due to the check before pulling each leaf collector. 
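[review note] The totalHits assertions in these tests all reduce to how Lucene encodes early-terminated counts: once tracking stops (the size-0 shortcut is exhausted, terminate_after kicks in, or the track_total_hits threshold is reached), the relation degrades from EQUAL_TO to GREATER_THAN_OR_EQUAL_TO and value is only a lower bound. A tiny illustration of reading that encoding:

import org.apache.lucene.search.TotalHits;

public class TotalHitsDemo {
    public static void main(String[] args) {
        TotalHits exact = new TotalHits(42, TotalHits.Relation.EQUAL_TO);
        TotalHits bound = new TotalHits(10, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO);
        System.out.println("exactly " + exact.value);   // full count was collected
        System.out.println("at least " + bound.value);  // collection terminated early
    }
}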
int trackTotalHits = randomIntBetween(1, 8); - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, trackTotalHits), query); - context.terminateAfter(10); - context.setSize(0); - context.trackTotalHitsUpTo(trackTotalHits); - QueryPhase.executeQuery(context); - assertFalse(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) trackTotalHits)); - assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO)); - assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, trackTotalHits), query)) { + context.terminateAfter(10); + context.setSize(0); + context.trackTotalHitsUpTo(trackTotalHits); + QueryPhase.executeQuery(context); + assertFalse(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) trackTotalHits)); + assertThat( + context.queryResult().topDocs().topDocs.totalHits.relation, + equalTo(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO) + ); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); + } } - { - TestSearchContext context = createContext(newContextSearcher(reader), query); + try (TestSearchContext context = createContext(newContextSearcher(reader), query)) { context.terminateAfter(10); context.setSize(0); // track total hits is higher than terminate_after, in which case collection effectively terminates after 10 documents @@ -466,8 +460,7 @@ public void testTerminateAfterSize0NoHitCountShortcut() throws Exception { */ public void testTerminateAfterWithHitsHitCountShortcut() throws Exception { int numDocs = indexDocs(); - { - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { context.terminateAfter(numDocs); context.setSize(10); QueryPhase.executeQuery(context); @@ -475,8 +468,7 @@ public void testTerminateAfterWithHitsHitCountShortcut() throws Exception { assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(10)); } - { - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 1), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 1), new MatchAllDocsQuery())) { context.terminateAfter(1); // default track_total_hits, size 1: terminate_after kicks in first context.setSize(1); @@ -487,8 +479,7 @@ public void testTerminateAfterWithHitsHitCountShortcut() throws Exception { assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); } // test interaction between trackTotalHits and terminateAfter - { - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), new MatchAllDocsQuery())) { context.terminateAfter(7); // total hits tracking disabled but 10 hits need to be collected, terminate_after is lower than size, so it kicks in first context.setSize(10); @@ -499,8 +490,7 @@ public void testTerminateAfterWithHitsHitCountShortcut() throws Exception { 
assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(7)); } - { - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), new MatchAllDocsQuery())) { context.terminateAfter(7); // size is greater than terminate_after (track_total_hits does not matter): terminate_after kicks in first context.setSize(10); @@ -513,18 +503,19 @@ public void testTerminateAfterWithHitsHitCountShortcut() throws Exception { } { int size = randomIntBetween(1, 6); - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, size), new MatchAllDocsQuery()); - context.terminateAfter(7); - // size is lower than terminate_after, track_total_hits does not matter: depending on docs distribution we may or may not be - // able to honor terminate_after. low scoring hits are skipped via setMinCompetitiveScore, which bypasses terminate_after - // until the next leaf collector is pulled, when that happens. - context.setSize(size); - context.trackTotalHitsUpTo(randomIntBetween(1, Integer.MAX_VALUE)); - QueryPhase.executeQuery(context); - assertThat(context.queryResult().terminatedEarly(), either(is(true)).or(is(false))); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); - assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.EQUAL_TO)); - assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(size)); + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, size), new MatchAllDocsQuery())) { + context.terminateAfter(7); + // size is lower than terminate_after, track_total_hits does not matter: depending on docs distribution we may or may not be + // able to honor terminate_after. low scoring hits are skipped via setMinCompetitiveScore, which bypasses terminate_after + // until the next leaf collector is pulled, when that happens. 
+ context.setSize(size); + context.trackTotalHitsUpTo(randomIntBetween(1, Integer.MAX_VALUE)); + QueryPhase.executeQuery(context); + assertThat(context.queryResult().terminatedEarly(), either(is(true)).or(is(false))); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(size)); + } } } @@ -536,8 +527,7 @@ public void testTerminateAfterWithHitsHitCountShortcut() throws Exception { public void testTerminateAfterWithHitsNoHitCountShortcut() throws Exception { indexDocs(); TermQuery query = new NonCountingTermQuery(new Term("foo", "bar")); - { - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 1), query); + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 1), query)) { context.terminateAfter(1); context.setSize(1); QueryPhase.addCollectorsAndSearch(context); @@ -547,8 +537,7 @@ public void testTerminateAfterWithHitsNoHitCountShortcut() throws Exception { assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); } // test interaction between trackTotalHits and terminateAfter - { - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), query); + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), query)) { context.terminateAfter(7); context.setSize(10); context.trackTotalHitsUpTo(-1); @@ -558,8 +547,7 @@ public void testTerminateAfterWithHitsNoHitCountShortcut() throws Exception { assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(7)); } - { - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), query); + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), query)) { context.terminateAfter(7); // size is greater than terminate_after context.setSize(10); @@ -571,8 +559,7 @@ public void testTerminateAfterWithHitsNoHitCountShortcut() throws Exception { assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.EQUAL_TO)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(7)); } - { - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), query); + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), query)) { context.terminateAfter(7); // size is lower than terminate_after context.setSize(5); @@ -586,8 +573,7 @@ public void testTerminateAfterWithHitsNoHitCountShortcut() throws Exception { assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(5)); } - { - TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), query); + try (TestSearchContext context = createContext(earlyTerminationContextSearcher(reader, 7), query)) { context.terminateAfter(7); // size is greater than terminate_after context.setSize(10); @@ -606,8 +592,7 @@ public void testIndexSortingEarlyTermination() throws Exception { final Sort sort = new Sort(new SortField("rank", SortField.Type.INT)); IndexWriterConfig iwc 
= newIndexWriterConfig().setIndexSort(sort); int numDocs = indexDocs(iwc); - { - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { context.setSize(1); context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); QueryPhase.addCollectorsAndSearch(context); @@ -618,8 +603,7 @@ public void testIndexSortingEarlyTermination() throws Exception { FieldDoc fieldDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]; assertThat(fieldDoc.fields[0], equalTo(1)); } - { - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { context.setSize(1); context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); context.parsedPostFilter(new ParsedQuery(new MinDocQuery(1))); @@ -631,8 +615,7 @@ public void testIndexSortingEarlyTermination() throws Exception { FieldDoc fieldDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]; assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); } - { - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { context.setSize(1); context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); QueryPhase.executeQuery(context); @@ -643,8 +626,7 @@ public void testIndexSortingEarlyTermination() throws Exception { FieldDoc fieldDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]; assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); } - { - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { context.setSize(1); context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); context.setSearcher(earlyTerminationContextSearcher(reader, 1)); @@ -656,8 +638,7 @@ public void testIndexSortingEarlyTermination() throws Exception { FieldDoc fieldDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]; assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); } - { - TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery()); + try (TestSearchContext context = createContext(newContextSearcher(reader), new MatchAllDocsQuery())) { context.setSize(1); context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW })); QueryPhase.addCollectorsAndSearch(context); @@ -692,42 +673,43 @@ public void testIndexSortScrollOptimization() throws Exception { searchSortAndFormats.add(new SortAndFormats(new Sort(indexSort.getSort()[0]), new DocValueFormat[] { DocValueFormat.RAW })); for (SortAndFormats searchSortAndFormat : searchSortAndFormats) { ScrollContext scrollContext = new ScrollContext(); - TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader), scrollContext); - context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); - scrollContext.lastEmittedDoc = null; - scrollContext.maxScore = Float.NaN; - scrollContext.totalHits = null; - context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); - context.setSize(10); - 
context.sort(searchSortAndFormat); - - QueryPhase.addCollectorsAndSearch(context); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); - assertNull(context.queryResult().terminatedEarly()); - assertThat(context.terminateAfter(), equalTo(0)); - assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); - int sizeMinus1 = context.queryResult().topDocs().topDocs.scoreDocs.length - 1; - FieldDoc lastDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[sizeMinus1]; - - context.setSearcher(earlyTerminationContextSearcher(reader, 10)); - QueryPhase.addCollectorsAndSearch(context); - assertNull(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); - assertThat(context.terminateAfter(), equalTo(0)); - assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); - FieldDoc firstDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]; - for (int i = 0; i < searchSortAndFormat.sort.getSort().length; i++) { - @SuppressWarnings("unchecked") - FieldComparator comparator = (FieldComparator) searchSortAndFormat.sort.getSort()[i].getComparator( - 1, - i == 0 - ); - int cmp = comparator.compareValues(firstDoc.fields[i], lastDoc.fields[i]); - if (cmp == 0) { - continue; + try (TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader), scrollContext)) { + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); + scrollContext.lastEmittedDoc = null; + scrollContext.maxScore = Float.NaN; + scrollContext.totalHits = null; + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.setSize(10); + context.sort(searchSortAndFormat); + + QueryPhase.addCollectorsAndSearch(context); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.terminateAfter(), equalTo(0)); + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + int sizeMinus1 = context.queryResult().topDocs().topDocs.scoreDocs.length - 1; + FieldDoc lastDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[sizeMinus1]; + + context.setSearcher(earlyTerminationContextSearcher(reader, 10)); + QueryPhase.addCollectorsAndSearch(context); + assertNull(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.terminateAfter(), equalTo(0)); + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + FieldDoc firstDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]; + for (int i = 0; i < searchSortAndFormat.sort.getSort().length; i++) { + @SuppressWarnings("unchecked") + FieldComparator comparator = (FieldComparator) searchSortAndFormat.sort.getSort()[i].getComparator( + 1, + i == 0 + ); + int cmp = comparator.compareValues(firstDoc.fields[i], lastDoc.fields[i]); + if (cmp == 0) { + continue; + } + assertThat(cmp, equalTo(1)); + break; } - assertThat(cmp, equalTo(1)); - break; } } } @@ -751,8 +733,7 @@ public void testDisableTopScoreCollection() throws Exception { Query q = new SpanNearQuery.Builder("title", true).addClause(new SpanTermQuery(new Term("title", "foo"))) .addClause(new SpanTermQuery(new Term("title", "bar"))) .build(); - { - TestSearchContext context = 
createContext(newContextSearcher(reader), q); + try (TestSearchContext context = createContext(newContextSearcher(reader), q)) { context.setSize(3); context.trackTotalHitsUpTo(3); CollectorManager collectorManager = QueryPhaseCollectorManager.createQueryPhaseCollectorManager( @@ -767,8 +748,7 @@ public void testDisableTopScoreCollection() throws Exception { assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.EQUAL_TO); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3)); } - { - TestSearchContext context = createContext(newContextSearcher(reader), q); + try (TestSearchContext context = createContext(newContextSearcher(reader), q)) { context.setSize(3); context.trackTotalHitsUpTo(3); context.sort( @@ -834,8 +814,7 @@ public void testNumericSortOptimization() throws Exception { Query q = LongPoint.newRangeQuery(fieldNameLong, startLongValue, startLongValue + numDocs); // 1. Test sort optimization on long field - { - TestSearchContext searchContext = createContext(newContextSearcher(reader), q); + try (TestSearchContext searchContext = createContext(newContextSearcher(reader), q)) { searchContext.sort(formatsLong); searchContext.trackTotalHitsUpTo(10); searchContext.setSize(10); @@ -845,8 +824,7 @@ public void testNumericSortOptimization() throws Exception { } // 2. Test sort optimization on long field with after - { - TestSearchContext searchContext = createContext(newContextSearcher(reader), q); + try (TestSearchContext searchContext = createContext(newContextSearcher(reader), q)) { int afterDoc = (int) randomLongBetween(0, 30); long afterValue = startLongValue + afterDoc; FieldDoc after = new FieldDoc(afterDoc, Float.NaN, new Long[] { afterValue }); @@ -863,8 +841,7 @@ public void testNumericSortOptimization() throws Exception { } // 3. Test sort optimization on long field + date field - { - TestSearchContext searchContext = createContext(newContextSearcher(reader), q); + try (TestSearchContext searchContext = createContext(newContextSearcher(reader), q)) { searchContext.sort(formatsLongDate); searchContext.trackTotalHitsUpTo(10); searchContext.setSize(10); @@ -874,8 +851,7 @@ public void testNumericSortOptimization() throws Exception { } // 4. Test sort optimization on date field - { - TestSearchContext searchContext = createContext(newContextSearcher(reader), q); + try (TestSearchContext searchContext = createContext(newContextSearcher(reader), q)) { searchContext.sort(formatsDate); searchContext.trackTotalHitsUpTo(10); searchContext.setSize(10); @@ -885,8 +861,7 @@ public void testNumericSortOptimization() throws Exception { } // 5. Test sort optimization on date field + long field - { - TestSearchContext searchContext = createContext(newContextSearcher(reader), q); + try (TestSearchContext searchContext = createContext(newContextSearcher(reader), q)) { searchContext.sort(formatsDateLong); searchContext.trackTotalHitsUpTo(10); searchContext.setSize(10); @@ -896,8 +871,7 @@ public void testNumericSortOptimization() throws Exception { } // 6. Test sort optimization on when from > 0 and size = 0 - { - TestSearchContext searchContext = createContext(newContextSearcher(reader), q); + try (TestSearchContext searchContext = createContext(newContextSearcher(reader), q)) { searchContext.sort(formatsLong); searchContext.trackTotalHitsUpTo(10); searchContext.from(5); @@ -910,8 +884,7 @@ public void testNumericSortOptimization() throws Exception { } // 7. 
Test that sort optimization doesn't break a case where from = 0 and size= 0 - { - TestSearchContext searchContext = createContext(newContextSearcher(reader), q); + try (TestSearchContext searchContext = createContext(newContextSearcher(reader), q)) { searchContext.sort(formatsLong); searchContext.setSize(0); QueryPhase.addCollectorsAndSearch(searchContext); @@ -1009,13 +982,14 @@ public void testMinScore() throws Exception { BooleanQuery booleanQuery = new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.MUST) .add(new TermQuery(new Term("filter", "f1")), Occur.SHOULD) .build(); - TestSearchContext context = createContext(newContextSearcher(reader), booleanQuery); - context.minimumScore(0.01f); - context.setSize(1); - context.trackTotalHitsUpTo(5); + try (TestSearchContext context = createContext(newContextSearcher(reader), booleanQuery)) { + context.minimumScore(0.01f); + context.setSize(1); + context.trackTotalHitsUpTo(5); - QueryPhase.addCollectorsAndSearch(context); - assertEquals(10, context.queryResult().topDocs().topDocs.totalHits.value); + QueryPhase.addCollectorsAndSearch(context); + assertEquals(10, context.queryResult().topDocs().topDocs.totalHits.value); + } } public void testCancellationDuringRewrite() throws IOException { @@ -1030,12 +1004,13 @@ public void testCancellationDuringRewrite() throws IOException { reader = DirectoryReader.open(dir); PrefixQuery prefixQuery = new PrefixQuery(new Term("foo", "a"), MultiTermQuery.SCORING_BOOLEAN_REWRITE); - TestSearchContext context = createContext(newContextSearcher(reader), prefixQuery); - SearchShardTask task = new SearchShardTask(randomLong(), "transport", "", "", TaskId.EMPTY_TASK_ID, Collections.emptyMap()); - TaskCancelHelper.cancel(task, "simulated"); - context.setTask(task); - context.searcher().addQueryCancellation(task::ensureNotCancelled); - expectThrows(TaskCancelledException.class, context::rewrittenQuery); + try (TestSearchContext context = createContext(newContextSearcher(reader), prefixQuery)) { + SearchShardTask task = new SearchShardTask(randomLong(), "transport", "", "", TaskId.EMPTY_TASK_ID, Collections.emptyMap()); + TaskCancelHelper.cancel(task, "simulated"); + context.setTask(task); + context.searcher().addQueryCancellation(task::ensureNotCancelled); + expectThrows(TaskCancelledException.class, context::rewrittenQuery); + } } public void testRank() throws IOException { @@ -1065,7 +1040,7 @@ public T search(Query query, CollectorManager col } }; - SearchContext context = new TestSearchContext(null, indexShard, searcher) { + try (SearchContext context = new TestSearchContext(null, indexShard, searcher) { @Override public Query buildFilteredQuery(Query query) { return query; @@ -1075,37 +1050,38 @@ public Query buildFilteredQuery(Query query) { public ReaderContext readerContext() { return new ReaderContext(new ShardSearchContextId("test", 1L), null, indexShard, null, 0L, false); } - }; + }) { - List queries = List.of(new TermQuery(new Term("field0", "term")), new TermQuery(new Term("field1", "term0"))); - context.parsedQuery( - new ParsedQuery(new BooleanQuery.Builder().add(queries.get(0), Occur.SHOULD).add(queries.get(1), Occur.SHOULD).build()) - ); - context.rankShardContext(new RankShardContext(queries, 0, 100) { - @Override - public RankShardResult combine(List rankResults) { - return null; - } - }); - - context.trackTotalHitsUpTo(SearchContext.TRACK_TOTAL_HITS_DISABLED); - context.aggregations(null); - QueryPhase.executeRank(context); - assertEquals(queries, executed); - - 
executed.clear(); - context.trackTotalHitsUpTo(100); - context.aggregations(null); - QueryPhase.executeRank(context); - assertEquals(context.rewrittenQuery(), executed.get(0)); - assertEquals(queries, executed.subList(1, executed.size())); - - executed.clear(); - context.trackTotalHitsUpTo(SearchContext.TRACK_TOTAL_HITS_DISABLED); - context.aggregations(new SearchContextAggregations(AggregatorFactories.EMPTY, () -> null)); - QueryPhase.executeRank(context); - assertEquals(context.rewrittenQuery(), executed.get(0)); - assertEquals(queries, executed.subList(1, executed.size())); + List<Query> queries = List.of(new TermQuery(new Term("field0", "term")), new TermQuery(new Term("field1", "term0"))); + context.parsedQuery( + new ParsedQuery(new BooleanQuery.Builder().add(queries.get(0), Occur.SHOULD).add(queries.get(1), Occur.SHOULD).build()) + ); + context.rankShardContext(new RankShardContext(queries, 0, 100) { + @Override + public RankShardResult combine(List<TopDocs> rankResults) { + return null; + } + }); + + context.trackTotalHitsUpTo(SearchContext.TRACK_TOTAL_HITS_DISABLED); + context.aggregations(null); + QueryPhase.executeRank(context); + assertEquals(queries, executed); + + executed.clear(); + context.trackTotalHitsUpTo(100); + context.aggregations(null); + QueryPhase.executeRank(context); + assertEquals(context.rewrittenQuery(), executed.get(0)); + assertEquals(queries, executed.subList(1, executed.size())); + + executed.clear(); + context.trackTotalHitsUpTo(SearchContext.TRACK_TOTAL_HITS_DISABLED); + context.aggregations(new SearchContextAggregations(AggregatorFactories.EMPTY, () -> null)); + QueryPhase.executeRank(context); + assertEquals(context.rewrittenQuery(), executed.get(0)); + assertEquals(queries, executed.subList(1, executed.size())); + } } private static final QueryCachingPolicy NEVER_CACHE_POLICY = new QueryCachingPolicy() { diff --git a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTimeoutTests.java b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTimeoutTests.java index f0e3c9ac28f00..9c1bdb236c031 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTimeoutTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTimeoutTests.java @@ -128,20 +128,22 @@ public void testScorerTimeoutPoints() throws IOException { private void scorerTimeoutTest(int size, CheckedConsumer timeoutTrigger) throws IOException { { TimeoutQuery query = newMatchAllScorerTimeoutQuery(timeoutTrigger, false); - SearchContext context = createSearchContext(query, size); - QueryPhase.executeQuery(context); - assertFalse(context.queryResult().searchTimedOut()); - assertEquals(numDocs, context.queryResult().topDocs().topDocs.totalHits.value); - assertEquals(size, context.queryResult().topDocs().topDocs.scoreDocs.length); + try (SearchContext context = createSearchContext(query, size)) { + QueryPhase.executeQuery(context); + assertFalse(context.queryResult().searchTimedOut()); + assertEquals(numDocs, context.queryResult().topDocs().topDocs.totalHits.value); + assertEquals(size, context.queryResult().topDocs().topDocs.scoreDocs.length); + } } { TimeoutQuery query = newMatchAllScorerTimeoutQuery(timeoutTrigger, true); - SearchContext context = createSearchContextWithTimeout(query, size); - QueryPhase.executeQuery(context); - assertTrue(context.queryResult().searchTimedOut()); - int firstSegmentMaxDoc = reader.leaves().get(0).reader().maxDoc(); - assertEquals(Math.min(2048, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.totalHits.value);
- assertEquals(Math.min(size, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.scoreDocs.length); + try (SearchContext context = createSearchContextWithTimeout(query, size)) { + QueryPhase.executeQuery(context); + assertTrue(context.queryResult().searchTimedOut()); + int firstSegmentMaxDoc = reader.leaves().get(0).reader().maxDoc(); + assertEquals(Math.min(2048, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.totalHits.value); + assertEquals(Math.min(size, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.scoreDocs.length); + } } } @@ -174,20 +176,22 @@ public void testBulkScorerTimeout() throws IOException { int size = randomBoolean() ? 0 : randomIntBetween(100, 500); { TimeoutQuery query = newMatchAllBulkScorerTimeoutQuery(false); - SearchContext context = createSearchContext(query, size); - QueryPhase.executeQuery(context); - assertFalse(context.queryResult().searchTimedOut()); - assertEquals(numDocs, context.queryResult().topDocs().topDocs.totalHits.value); - assertEquals(size, context.queryResult().topDocs().topDocs.scoreDocs.length); + try (SearchContext context = createSearchContext(query, size)) { + QueryPhase.executeQuery(context); + assertFalse(context.queryResult().searchTimedOut()); + assertEquals(numDocs, context.queryResult().topDocs().topDocs.totalHits.value); + assertEquals(size, context.queryResult().topDocs().topDocs.scoreDocs.length); + } } { TimeoutQuery query = newMatchAllBulkScorerTimeoutQuery(true); - SearchContext context = createSearchContextWithTimeout(query, size); - QueryPhase.executeQuery(context); - assertTrue(context.queryResult().searchTimedOut()); - int firstSegmentMaxDoc = reader.leaves().get(0).reader().maxDoc(); - assertEquals(Math.min(2048, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.totalHits.value); - assertEquals(Math.min(size, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.scoreDocs.length); + try (SearchContext context = createSearchContextWithTimeout(query, size)) { + QueryPhase.executeQuery(context); + assertTrue(context.queryResult().searchTimedOut()); + int firstSegmentMaxDoc = reader.leaves().get(0).reader().maxDoc(); + assertEquals(Math.min(2048, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.totalHits.value); + assertEquals(Math.min(size, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.scoreDocs.length); + } } } diff --git a/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java b/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java index c728bed5ed7bb..516ffeb9418bd 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java @@ -97,28 +97,36 @@ private static QuerySearchResult createTestInstance() throws Exception { public void testSerialization() throws Exception { QuerySearchResult querySearchResult = createTestInstance(); - boolean delayed = randomBoolean(); - QuerySearchResult deserialized = copyWriteable( - querySearchResult, - namedWriteableRegistry, - delayed ? 
in -> new QuerySearchResult(in, true) : QuerySearchResult::new, - TransportVersion.current() - ); - assertEquals(querySearchResult.getContextId().getId(), deserialized.getContextId().getId()); - assertNull(deserialized.getSearchShardTarget()); - assertEquals(querySearchResult.topDocs().maxScore, deserialized.topDocs().maxScore, 0f); - assertEquals(querySearchResult.topDocs().topDocs.totalHits, deserialized.topDocs().topDocs.totalHits); - assertEquals(querySearchResult.from(), deserialized.from()); - assertEquals(querySearchResult.size(), deserialized.size()); - assertEquals(querySearchResult.hasAggs(), deserialized.hasAggs()); - if (deserialized.hasAggs()) { - assertThat(deserialized.aggregations().isSerialized(), is(delayed)); - Aggregations aggs = querySearchResult.consumeAggs(); - Aggregations deserializedAggs = deserialized.consumeAggs(); - assertEquals(aggs.asList(), deserializedAggs.asList()); - assertThat(deserialized.aggregations(), is(nullValue())); + try { + boolean delayed = randomBoolean(); + QuerySearchResult deserialized = copyWriteable( + querySearchResult, + namedWriteableRegistry, + delayed ? in -> new QuerySearchResult(in, true) : QuerySearchResult::new, + TransportVersion.current() + ); + try { + assertEquals(querySearchResult.getContextId().getId(), deserialized.getContextId().getId()); + assertNull(deserialized.getSearchShardTarget()); + assertEquals(querySearchResult.topDocs().maxScore, deserialized.topDocs().maxScore, 0f); + assertEquals(querySearchResult.topDocs().topDocs.totalHits, deserialized.topDocs().topDocs.totalHits); + assertEquals(querySearchResult.from(), deserialized.from()); + assertEquals(querySearchResult.size(), deserialized.size()); + assertEquals(querySearchResult.hasAggs(), deserialized.hasAggs()); + if (deserialized.hasAggs()) { + assertThat(deserialized.aggregations().isSerialized(), is(delayed)); + Aggregations aggs = querySearchResult.consumeAggs(); + Aggregations deserializedAggs = deserialized.consumeAggs(); + assertEquals(aggs.asList(), deserializedAggs.asList()); + assertThat(deserialized.aggregations(), is(nullValue())); + } + assertEquals(querySearchResult.terminatedEarly(), deserialized.terminatedEarly()); + } finally { + deserialized.decRef(); + } + } finally { + querySearchResult.decRef(); } - assertEquals(querySearchResult.terminatedEarly(), deserialized.terminatedEarly()); } public void testNullResponse() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/search/vectors/KnnSearchRequestParserTests.java b/server/src/test/java/org/elasticsearch/search/vectors/KnnSearchRequestParserTests.java index 37fbb6c5fcea1..5271289e37b7f 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/KnnSearchRequestParserTests.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/KnnSearchRequestParserTests.java @@ -8,9 +8,9 @@ package org.elasticsearch.search.vectors; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilder; @@ -224,7 +224,7 @@ private SearchRequestBuilder parseSearchRequest(XContentBuilder builder, Map blobs = blobContainer.listBlobsByPrefix(OperationPurpose.SNAPSHOT, "blob-"); + Map blobs = blobContainer.listBlobsByPrefix(randomPurpose(), "blob-"); 
assertEquals(blobs.size(), 2); assertThat(blobs.get("blob-not-comp").length(), greaterThan(blobs.get("blob-comp").length())); } @@ -147,8 +147,8 @@ protected BlobStore createTestBlobStore() throws IOException { } protected void randomCorruption(BlobContainer blobContainer, String blobName) throws IOException { - final byte[] buffer = new byte[(int) blobContainer.listBlobsByPrefix(OperationPurpose.SNAPSHOT, blobName).get(blobName).length()]; - try (InputStream inputStream = blobContainer.readBlob(OperationPurpose.SNAPSHOT, blobName)) { + final byte[] buffer = new byte[(int) blobContainer.listBlobsByPrefix(randomPurpose(), blobName).get(blobName).length()]; + try (InputStream inputStream = blobContainer.readBlob(randomPurpose(), blobName)) { Streams.readFully(inputStream, buffer); } final BytesArray corruptedBytes; @@ -164,7 +164,7 @@ protected void randomCorruption(BlobContainer blobContainer, String blobName) th // another sequence of 8 zero bytes anywhere in the file, let alone such a sequence followed by a correct checksum. corruptedBytes = new BytesArray(buffer, 0, location); } - blobContainer.writeBlob(OperationPurpose.SNAPSHOT, blobName, corruptedBytes, false); + blobContainer.writeBlob(randomPurpose(), blobName, corruptedBytes, false); } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index f46517b848117..6cf4430bfd962 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -58,7 +58,6 @@ import org.elasticsearch.action.bulk.TransportShardBulkAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.resync.TransportResyncReplicationAction; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchExecutionStatsCollector; import org.elasticsearch.action.search.SearchPhaseController; import org.elasticsearch.action.search.SearchRequest; @@ -71,6 +70,7 @@ import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -129,12 +129,12 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; -import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.gateway.MetaStateService; import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards; import org.elasticsearch.index.Index; @@ -146,8 +146,10 @@ import org.elasticsearch.index.seqno.RetentionLeaseSyncer; import org.elasticsearch.index.shard.PrimaryReplicaSyncer; import org.elasticsearch.indices.EmptySystemIndices; +import org.elasticsearch.indices.IndicesFeatures; import 
org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.IndicesServiceBuilder; import org.elasticsearch.indices.ShardLimitValidator; import org.elasticsearch.indices.TestIndexNameExpressionResolver; import org.elasticsearch.indices.analysis.AnalysisModule; @@ -244,8 +246,8 @@ public void verifyReposThenStopServices() { try { clearDisruptionsAndAwaitSync(); - final ListenableFuture<CleanupRepositoryResponse> cleanupResponse = new ListenableFuture<>(); - final ListenableFuture<CreateSnapshotResponse> createSnapshotResponse = new ListenableFuture<>(); + final SubscribableListener<CleanupRepositoryResponse> cleanupResponse = new SubscribableListener<>(); + final SubscribableListener<CreateSnapshotResponse> createSnapshotResponse = new SubscribableListener<>(); // Create another snapshot and then clean up the repository to verify that the repository works correctly no matter the // failures seen during the previous test. client().admin() @@ -290,7 +292,7 @@ public void testSuccessfulSnapshotAndRestore() { testClusterNodes.nodes.values().iterator().next().clusterService.state() ); - final ListenableFuture<CreateSnapshotResponse> createSnapshotResponseListener = new ListenableFuture<>(); + final SubscribableListener<CreateSnapshotResponse> createSnapshotResponseListener = new SubscribableListener<>(); continueOrDie(createRepoAndIndex(repoName, index, shards), createIndexResponse -> { final Runnable afterIndexing = () -> client().admin() @@ -305,7 +307,7 @@ public void testSuccessfulSnapshotAndRestore() { for (int i = 0; i < documents; ++i) { bulkRequest.add(new IndexRequest(index).source(Collections.singletonMap("foo", "bar" + i))); } - final ListenableFuture<BulkResponse> bulkResponseStepListener = new ListenableFuture<>(); + final SubscribableListener<BulkResponse> bulkResponseStepListener = new SubscribableListener<>(); client().bulk(bulkRequest, bulkResponseStepListener); continueOrDie(bulkResponseStepListener, bulkResponse -> { assertFalse("Failures in bulk response: " + bulkResponse.buildFailureMessage(), bulkResponse.hasFailures()); @@ -315,14 +317,14 @@ } }); - final ListenableFuture<AcknowledgedResponse> deleteIndexListener = new ListenableFuture<>(); + final SubscribableListener<AcknowledgedResponse> deleteIndexListener = new SubscribableListener<>(); continueOrDie( createSnapshotResponseListener, createSnapshotResponse -> client().admin().indices().delete(new DeleteIndexRequest(index), deleteIndexListener) ); - final ListenableFuture<RestoreSnapshotResponse> restoreSnapshotResponseListener = new ListenableFuture<>(); + final SubscribableListener<RestoreSnapshotResponse> restoreSnapshotResponseListener = new SubscribableListener<>(); continueOrDie( deleteIndexListener, ignored -> client().admin() @@ -333,7 +335,7 @@ public void testSuccessfulSnapshotAndRestore() { ) ); - final ListenableFuture<SearchResponse> searchResponseListener = new ListenableFuture<>(); + final SubscribableListener<SearchResponse> searchResponseListener = new SubscribableListener<>(); continueOrDie(restoreSnapshotResponseListener, restoreSnapshotResponse -> { assertEquals(shards, restoreSnapshotResponse.getRestoreInfo().totalShards()); client().search( @@ -349,8 +351,8 @@ }); runUntil(documentCountVerified::get, TimeUnit.MINUTES.toMillis(5L)); - assertNotNull(createSnapshotResponseListener.result()); - assertNotNull(restoreSnapshotResponseListener.result()); + assertNotNull(safeResult(createSnapshotResponseListener)); + assertNotNull(safeResult(restoreSnapshotResponseListener)); assertTrue(documentCountVerified.get()); assertTrue(SnapshotsInProgress.get(masterNode.clusterService.state()).isEmpty()); final Repository repository =
masterNode.repositoriesService.repository(repoName); @@ -365,10 +367,10 @@ } private SnapshotInfo getSnapshotInfo(Repository repository, SnapshotId snapshotId) { - final ListenableFuture<SnapshotInfo> listener = new ListenableFuture<>(); + final SubscribableListener<SnapshotInfo> listener = new SubscribableListener<>(); repository.getSnapshotInfo(snapshotId, listener); deterministicTaskQueue.runAllRunnableTasks(); - return listener.result(); + return safeResult(listener); } public void testSnapshotWithNodeDisconnects() { @@ -381,7 +383,7 @@ final String index = "test"; final int shards = randomIntBetween(1, 10); - final ListenableFuture<CreateSnapshotResponse> createSnapshotResponseStepListener = new ListenableFuture<>(); + final SubscribableListener<CreateSnapshotResponse> createSnapshotResponseStepListener = new SubscribableListener<>(); final boolean partial = randomBoolean(); continueOrDie(createRepoAndIndex(repoName, index, shards), createIndexResponse -> { @@ -459,7 +461,7 @@ public void testSnapshotDeleteWithMasterFailover() { final int shards = randomIntBetween(1, 10); final boolean waitForSnapshot = randomBoolean(); - final ListenableFuture<CreateSnapshotResponse> createSnapshotResponseStepListener = new ListenableFuture<>(); + final SubscribableListener<CreateSnapshotResponse> createSnapshotResponseStepListener = new SubscribableListener<>(); continueOrDie( createRepoAndIndex(repoName, index, shards), createIndexResponse -> testClusterNodes.randomMasterNodeSafe().client.admin() @@ -510,7 +512,7 @@ public void testConcurrentSnapshotCreateAndDelete() { testClusterNodes.nodes.values().iterator().next().clusterService.state() ); - final ListenableFuture<CreateSnapshotResponse> createSnapshotResponseStepListener = new ListenableFuture<>(); + final SubscribableListener<CreateSnapshotResponse> createSnapshotResponseStepListener = new SubscribableListener<>(); continueOrDie( createRepoAndIndex(repoName, index, shards), @@ -520,7 +522,7 @@ .execute(createSnapshotResponseStepListener) ); - final ListenableFuture<AcknowledgedResponse> deleteSnapshotStepListener = new ListenableFuture<>(); + final SubscribableListener<AcknowledgedResponse> deleteSnapshotStepListener = new SubscribableListener<>(); masterNode.clusterService.addListener(new ClusterStateListener() { @Override @@ -532,7 +534,7 @@ public void clusterChanged(ClusterChangedEvent event) { } }); - final ListenableFuture<CreateSnapshotResponse> createAnotherSnapshotResponseStepListener = new ListenableFuture<>(); + final SubscribableListener<CreateSnapshotResponse> createAnotherSnapshotResponseStepListener = new SubscribableListener<>(); continueOrDie( deleteSnapshotStepListener, @@ -549,8 +551,8 @@ public void clusterChanged(ClusterChangedEvent event) { deterministicTaskQueue.runAllRunnableTasks(); - assertNotNull(createSnapshotResponseStepListener.result()); - assertNotNull(createAnotherSnapshotResponseStepListener.result()); + assertNotNull(safeResult(createSnapshotResponseStepListener)); + assertNotNull(safeResult(createAnotherSnapshotResponseStepListener)); assertTrue(masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY).isEmpty()); final Repository repository = masterNode.repositoriesService.repository(repoName); Collection<SnapshotId> snapshotIds = getRepositoryData(repository).getSnapshotIds(); @@ -575,7 +577,7 @@ public void testConcurrentSnapshotCreateAndDeleteOther() { testClusterNodes.nodes.values().iterator().next().clusterService.state() ); - final ListenableFuture<CreateSnapshotResponse> createSnapshotResponseStepListener = new
SubscribableListener<>(); continueOrDie( createRepoAndIndex(repoName, index, shards), @@ -586,7 +588,7 @@ public void testConcurrentSnapshotCreateAndDeleteOther() { .execute(createSnapshotResponseStepListener) ); - final ListenableFuture<CreateSnapshotResponse> createOtherSnapshotResponseStepListener = new ListenableFuture<>(); + final SubscribableListener<CreateSnapshotResponse> createOtherSnapshotResponseStepListener = new SubscribableListener<>(); continueOrDie( createSnapshotResponseStepListener, @@ -596,7 +598,7 @@ public void testConcurrentSnapshotCreateAndDeleteOther() { .execute(createOtherSnapshotResponseStepListener) ); - final ListenableFuture<AcknowledgedResponse> deleteSnapshotStepListener = new ListenableFuture<>(); + final SubscribableListener<AcknowledgedResponse> deleteSnapshotStepListener = new SubscribableListener<>(); continueOrDie( createOtherSnapshotResponseStepListener, @@ -606,7 +608,7 @@ public void testConcurrentSnapshotCreateAndDeleteOther() { .execute(deleteSnapshotStepListener) ); - final ListenableFuture<CreateSnapshotResponse> createAnotherSnapshotResponseStepListener = new ListenableFuture<>(); + final SubscribableListener<CreateSnapshotResponse> createAnotherSnapshotResponseStepListener = new SubscribableListener<>(); continueOrDie(deleteSnapshotStepListener, deleted -> { client().admin() @@ -649,7 +651,7 @@ public void testBulkSnapshotDeleteWithAbort() { testClusterNodes.nodes.values().iterator().next().clusterService.state() ); - final ListenableFuture<CreateSnapshotResponse> createSnapshotResponseStepListener = new ListenableFuture<>(); + final SubscribableListener<CreateSnapshotResponse> createSnapshotResponseStepListener = new SubscribableListener<>(); continueOrDie( createRepoAndIndex(repoName, index, shards), @@ -661,7 +663,7 @@ public void testBulkSnapshotDeleteWithAbort() { ); final int inProgressSnapshots = randomIntBetween(1, 5); - final ListenableFuture<Collection<CreateSnapshotResponse>> createOtherSnapshotResponseStepListener = new ListenableFuture<>(); + final var createOtherSnapshotResponseStepListener = new SubscribableListener<Collection<CreateSnapshotResponse>>(); final ActionListener<CreateSnapshotResponse> createSnapshotListener = new GroupedActionListener<>( inProgressSnapshots, createOtherSnapshotResponseStepListener ); @@ -673,7 +675,7 @@ } }); - final ListenableFuture<AcknowledgedResponse> deleteSnapshotStepListener = new ListenableFuture<>(); + final SubscribableListener<AcknowledgedResponse> deleteSnapshotStepListener = new SubscribableListener<>(); continueOrDie( createOtherSnapshotResponseStepListener, @@ -703,7 +705,7 @@ public void testConcurrentSnapshotRestoreAndDeleteOther() { testClusterNodes.nodes.values().iterator().next().clusterService.state() ); - final ListenableFuture<CreateSnapshotResponse> createSnapshotResponseStepListener = new ListenableFuture<>(); + final SubscribableListener<CreateSnapshotResponse> createSnapshotResponseStepListener = new SubscribableListener<>(); final int documentsFirstSnapshot = randomIntBetween(0, 100); @@ -722,7 +724,7 @@ final int documentsSecondSnapshot = randomIntBetween(0, 100); - final ListenableFuture<CreateSnapshotResponse> createOtherSnapshotResponseStepListener = new ListenableFuture<>(); + final SubscribableListener<CreateSnapshotResponse> createOtherSnapshotResponseStepListener = new SubscribableListener<>(); final String secondSnapshotName = "snapshot-2"; continueOrDie( @@ -738,8 +740,8 @@ ) ); - final ListenableFuture<AcknowledgedResponse> deleteSnapshotStepListener = new ListenableFuture<>(); - final ListenableFuture<RestoreSnapshotResponse> restoreSnapshotResponseListener = new ListenableFuture<>(); + final SubscribableListener<AcknowledgedResponse> deleteSnapshotStepListener = new SubscribableListener<>(); + final SubscribableListener<RestoreSnapshotResponse> restoreSnapshotResponseListener = new
SubscribableListener<>(); continueOrDie(createOtherSnapshotResponseStepListener, createSnapshotResponse -> { scheduleNow(() -> client().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName).execute(deleteSnapshotStepListener)); @@ -755,7 +757,7 @@ ); }); - final ListenableFuture<SearchResponse> searchResponseListener = new ListenableFuture<>(); + final SubscribableListener<SearchResponse> searchResponseListener = new SubscribableListener<>(); continueOrDie(restoreSnapshotResponseListener, restoreSnapshotResponse -> { assertEquals(shards, restoreSnapshotResponse.getRestoreInfo().totalShards()); client().search( @@ -768,14 +770,14 @@ assertEquals( documentsFirstSnapshot + documentsSecondSnapshot, - Objects.requireNonNull(searchResponseListener.result().getHits().getTotalHits()).value + Objects.requireNonNull(safeResult(searchResponseListener).getHits().getTotalHits()).value ); - assertThat(deleteSnapshotStepListener.result().isAcknowledged(), is(true)); - assertThat(restoreSnapshotResponseListener.result().getRestoreInfo().failedShards(), is(0)); + assertThat(safeResult(deleteSnapshotStepListener).isAcknowledged(), is(true)); + assertThat(safeResult(restoreSnapshotResponseListener).getRestoreInfo().failedShards(), is(0)); final Repository repository = masterNode.repositoriesService.repository(repoName); Collection<SnapshotId> snapshotIds = getRepositoryData(repository).getSnapshotIds(); - assertThat(snapshotIds, contains(createOtherSnapshotResponseStepListener.result().getSnapshotInfo().snapshotId())); + assertThat(snapshotIds, contains(safeResult(createOtherSnapshotResponseStepListener).getSnapshotInfo().snapshotId())); for (SnapshotId snapshotId : snapshotIds) { final SnapshotInfo snapshotInfo = getSnapshotInfo(repository, snapshotId); @@ -795,7 +797,7 @@ private void indexNDocuments(int documents, String index, Runnable afterIndexing for (int i = 0; i < documents; ++i) { bulkRequest.add(new IndexRequest(index).source(Collections.singletonMap("foo", "bar" + i))); } - final ListenableFuture<BulkResponse> bulkResponseStepListener = new ListenableFuture<>(); + final SubscribableListener<BulkResponse> bulkResponseStepListener = new SubscribableListener<>(); client().bulk(bulkRequest, bulkResponseStepListener); continueOrDie(bulkResponseStepListener, bulkResponse -> { assertFalse("Failures in bulk response: " + bulkResponse.buildFailureMessage(), bulkResponse.hasFailures()); @@ -815,7 +817,7 @@ public void testConcurrentSnapshotDeleteAndDeleteIndex() throws IOException { testClusterNodes.nodes.values().iterator().next().clusterService.state() ); - final ListenableFuture<Collection<CreateIndexResponse>> createIndicesListener = new ListenableFuture<>(); + final SubscribableListener<Collection<CreateIndexResponse>> createIndicesListener = new SubscribableListener<>(); final int indices = randomIntBetween(5, 20); final SetOnce<String> firstIndex = new SetOnce<>(); @@ -829,7 +831,7 @@ } }); - final ListenableFuture<CreateSnapshotResponse> createSnapshotResponseStepListener = new ListenableFuture<>(); + final SubscribableListener<CreateSnapshotResponse> createSnapshotResponseStepListener = new SubscribableListener<>(); final boolean partialSnapshot = randomBoolean(); @@ -906,7 +908,7 @@ public void testConcurrentDeletes() { testClusterNodes.nodes.values().iterator().next().clusterService.state() ); - final ListenableFuture<CreateSnapshotResponse> createSnapshotResponseStepListener = new ListenableFuture<>(); + final SubscribableListener<CreateSnapshotResponse> createSnapshotResponseStepListener = new SubscribableListener<>();
continueOrDie( createRepoAndIndex(repoName, index, shards), @@ -917,15 +919,15 @@ .execute(createSnapshotResponseStepListener) ); - final Collection<ListenableFuture<Boolean>> deleteSnapshotStepListeners = List.of( - new ListenableFuture<>(), - new ListenableFuture<>() + final Collection<SubscribableListener<Boolean>> deleteSnapshotStepListeners = List.of( + new SubscribableListener<>(), + new SubscribableListener<>() ); final AtomicInteger successfulDeletes = new AtomicInteger(0); continueOrDie(createSnapshotResponseStepListener, createSnapshotResponse -> { - for (ListenableFuture<Boolean> deleteListener : deleteSnapshotStepListeners) { + for (SubscribableListener<Boolean> deleteListener : deleteSnapshotStepListeners) { client().admin() .cluster() .prepareDeleteSnapshot(repoName, snapshotName) @@ -941,7 +943,7 @@ } }); - for (ListenableFuture<Boolean> deleteListener : deleteSnapshotStepListeners) { + for (SubscribableListener<Boolean> deleteListener : deleteSnapshotStepListeners) { continueOrDie(deleteListener, deleted -> { if (deleted) { successfulDeletes.incrementAndGet(); @@ -982,7 +984,7 @@ final AtomicBoolean createdSnapshot = new AtomicBoolean(); final AdminClient masterAdminClient = masterNode.client.admin(); - final ListenableFuture<ClusterStateResponse> clusterStateResponseStepListener = new ListenableFuture<>(); + final SubscribableListener<ClusterStateResponse> clusterStateResponseStepListener = new SubscribableListener<>(); continueOrDie( createRepoAndIndex(repoName, index, shards), @@ -997,7 +999,7 @@ scheduleNow(new Runnable() { @Override public void run() { - final ListenableFuture<ClusterStateResponse> updatedClusterStateResponseStepListener = new ListenableFuture<>(); + final SubscribableListener<ClusterStateResponse> updatedClusterStateResponseStepListener = new SubscribableListener<>(); masterAdminClient.cluster().state(new ClusterStateRequest(), updatedClusterStateResponseStepListener); continueOrDie(updatedClusterStateResponseStepListener, updatedClusterState -> { final ShardRouting shardRouting = updatedClusterState.getState() @@ -1069,7 +1071,7 @@ public void testSuccessfulSnapshotWithConcurrentDynamicMappingUpdates() { testClusterNodes.nodes.values().iterator().next().clusterService.state() ); - final ListenableFuture<CreateSnapshotResponse> createSnapshotResponseStepListener = new ListenableFuture<>(); + final SubscribableListener<CreateSnapshotResponse> createSnapshotResponseStepListener = new SubscribableListener<>(); continueOrDie(createRepoAndIndex(repoName, index, shards), createIndexResponse -> { final AtomicBoolean initiatedSnapshot = new AtomicBoolean(false); @@ -1094,7 +1096,7 @@ final String restoredIndex = "restored"; - final ListenableFuture<RestoreSnapshotResponse> restoreSnapshotResponseStepListener = new ListenableFuture<>(); + final SubscribableListener<RestoreSnapshotResponse> restoreSnapshotResponseStepListener = new SubscribableListener<>(); continueOrDie( createSnapshotResponseStepListener, @@ -1108,7 +1110,7 @@ ) ); - final ListenableFuture<SearchResponse> searchResponseStepListener = new ListenableFuture<>(); + final SubscribableListener<SearchResponse> searchResponseStepListener = new SubscribableListener<>(); continueOrDie(restoreSnapshotResponseStepListener, restoreSnapshotResponse -> { assertEquals(shards, restoreSnapshotResponse.getRestoreInfo().totalShards()); @@ -1139,8 +1141,8 @@ runUntil(documentCountVerified::get, TimeUnit.MINUTES.toMillis(5L)); -
assertNotNull(createSnapshotResponseStepListener.result()); - assertNotNull(restoreSnapshotResponseStepListener.result()); + assertNotNull(safeResult(createSnapshotResponseStepListener)); + assertNotNull(safeResult(restoreSnapshotResponseStepListener)); assertTrue(masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY).isEmpty()); final Repository repository = masterNode.repositoriesService.repository(repoName); Collection<SnapshotId> snapshotIds = getRepositoryData(repository).getSnapshotIds(); @@ -1166,7 +1168,7 @@ public void testRunConcurrentSnapshots() { testClusterNodes.nodes.values().iterator().next().clusterService.state() ); - final ListenableFuture<Collection<CreateSnapshotResponse>> allSnapshotsListener = new ListenableFuture<>(); + final SubscribableListener<Collection<CreateSnapshotResponse>> allSnapshotsListener = new SubscribableListener<>(); final ActionListener<CreateSnapshotResponse> snapshotListener = new GroupedActionListener<>( snapshotNames.size(), allSnapshotsListener ); @@ -1186,7 +1188,7 @@ for (int i = 0; i < documents; ++i) { bulkRequest.add(new IndexRequest(index).source(Collections.singletonMap("foo", "bar" + i))); } - final ListenableFuture<BulkResponse> bulkResponseStepListener = new ListenableFuture<>(); + final SubscribableListener<BulkResponse> bulkResponseStepListener = new SubscribableListener<>(); client().bulk(bulkRequest, bulkResponseStepListener); continueOrDie(bulkResponseStepListener, bulkResponse -> { assertFalse("Failures in bulk response: " + bulkResponse.buildFailureMessage(), bulkResponse.hasFailures()); @@ -1220,15 +1222,15 @@ } private RepositoryData getRepositoryData(Repository repository) { - final PlainActionFuture<RepositoryData> res = PlainActionFuture.newFuture(); + final PlainActionFuture<RepositoryData> res = new PlainActionFuture<>(); repository.getRepositoryData(deterministicTaskQueue::scheduleNow, res); deterministicTaskQueue.runAllRunnableTasks(); assertTrue(res.isDone()); return res.actionGet(); } - private ListenableFuture<CreateIndexResponse> createRepoAndIndex(String repoName, String index, int shards) { - final ListenableFuture<AcknowledgedResponse> createRepositoryListener = new ListenableFuture<>(); + private SubscribableListener<CreateIndexResponse> createRepoAndIndex(String repoName, String index, int shards) { + final SubscribableListener<AcknowledgedResponse> createRepositoryListener = new SubscribableListener<>(); client().admin() .cluster() @@ -1237,7 +1239,7 @@ private ListenableFuture<CreateIndexResponse> createRepoAndIndex(String repoName .setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) .execute(createRepositoryListener); - final ListenableFuture<CreateIndexResponse> createIndexResponseStepListener = new ListenableFuture<>(); + final SubscribableListener<CreateIndexResponse> createIndexResponseStepListener = new SubscribableListener<>(); continueOrDie( createRepositoryListener, @@ -1366,7 +1368,7 @@ private static Settings defaultIndexSettings(int shards) { .build(); } - private static <T> void continueOrDie(ListenableFuture<T> listener, CheckedConsumer<T, Exception> onResponse) { + private static <T> void continueOrDie(SubscribableListener<T> listener, CheckedConsumer<T, Exception> onResponse) { listener.addListener(ActionTestUtils.assertNoFailureListener(onResponse)); } @@ -1754,43 +1756,39 @@ protected void assertSnapshotOrGenericThread() { IndexScopedSettings.BUILT_IN_INDEX_SETTINGS ); final MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry(); - indicesService = new IndicesService( - settings, - mock(PluginsService.class), - nodeEnv, - namedXContentRegistry, - new AnalysisRegistry( - environment, - emptyMap(), - emptyMap(), - emptyMap(), - emptyMap(), - emptyMap(), -
emptyMap(), - emptyMap(), - emptyMap(), - emptyMap() - ), - indexNameExpressionResolver, - mapperRegistry, - namedWriteableRegistry, - threadPool, - indexScopedSettings, - new NoneCircuitBreakerService(), - bigArrays, - scriptService, - clusterService, - client, - new MetaStateService(nodeEnv, namedXContentRegistry), - Collections.emptyList(), - emptyMap(), - null, - emptyMap(), - List.of(), - emptyMap(), - null, - () -> DocumentParsingObserver.EMPTY_INSTANCE - ); + + indicesService = new IndicesServiceBuilder().settings(settings) + .pluginsService(mock(PluginsService.class)) + .nodeEnvironment(nodeEnv) + .xContentRegistry(namedXContentRegistry) + .analysisRegistry( + new AnalysisRegistry( + environment, + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap() + ) + ) + .indexNameExpressionResolver(indexNameExpressionResolver) + .mapperRegistry(mapperRegistry) + .namedWriteableRegistry(namedWriteableRegistry) + .threadPool(threadPool) + .indexScopedSettings(indexScopedSettings) + .circuitBreakerService(new NoneCircuitBreakerService()) + .bigArrays(bigArrays) + .scriptService(scriptService) + .clusterService(clusterService) + .client(client) + .featureService(new FeatureService(List.of(new IndicesFeatures()))) + .metaStateService(new MetaStateService(nodeEnv, namedXContentRegistry)) + .documentParsingObserverSupplier(() -> DocumentParsingObserver.EMPTY_INSTANCE) + .build(); final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); snapshotShardsService = new SnapshotShardsService( settings, @@ -2001,7 +1999,7 @@ protected void assertSnapshotOrGenericThread() { SearchPhaseController searchPhaseController = new SearchPhaseController(searchService::aggReduceContextBuilder); actions.put( - SearchAction.INSTANCE, + TransportSearchAction.TYPE, new TransportSearchAction( threadPool, new NoneCircuitBreakerService(), @@ -2201,7 +2199,7 @@ public void start(ClusterState initialState) { LeaderHeartbeatService.NO_OP, StatefulPreVoteCollector::new, CompatibilityVersionsUtils.staticCurrent(), - Set.of() + new FeatureService(List.of()) ); masterService.setClusterStatePublisher(coordinator); coordinator.start(); @@ -2215,4 +2213,9 @@ public void start(ClusterState initialState) { } } } + + private static <T> T safeResult(SubscribableListener<T> listener) { + assertTrue("listener is not complete", listener.isDone()); + return safeAwait(listener); + } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java index b5ab6be586670..74277954b8002 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java @@ -523,8 +523,15 @@ public void testXContent() throws IOException { } public static State randomState(Map<ShardId, SnapshotsInProgress.ShardSnapshotStatus> shards) { - return SnapshotsInProgress.completed(shards.values()) - ?
randomFrom(State.SUCCESS, State.FAILED) - : randomFrom(State.STARTED, State.INIT, State.ABORTED); + if (SnapshotsInProgress.completed(shards.values())) { + return randomFrom(State.SUCCESS, State.FAILED); + } + if (shards.values() + .stream() + .map(SnapshotsInProgress.ShardSnapshotStatus::state) + .allMatch(st -> st.completed() || st == ShardState.ABORTED)) { + return State.ABORTED; + } + return randomFrom(State.STARTED, State.INIT); } } diff --git a/server/src/test/java/org/elasticsearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java b/server/src/test/java/org/elasticsearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java index 57fe0bfe3a9e3..dd93eb6b51ea6 100644 --- a/server/src/test/java/org/elasticsearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java +++ b/server/src/test/java/org/elasticsearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java @@ -50,7 +50,7 @@ public static void aggregateAndCheckFromSeveralShards(ESIntegTestCase testCase) private static void checkSignificantTermsAggregationCorrect(ESIntegTestCase testCase) { SearchResponse response = prepareSearch(INDEX_NAME).addAggregation( terms("class").field(CLASS_FIELD).subAggregation(significantTerms("sig_terms").field(TEXT_FIELD)) - ).execute().actionGet(); + ).get(); assertNoFailures(response); StringTerms classes = response.getAggregations().get("class"); Assert.assertThat(classes.getBuckets().size(), equalTo(2)); diff --git a/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java b/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java index 712f44bfa1115..2612f69034903 100644 --- a/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.Level; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -755,11 +754,6 @@ public DiscoveryNode getNode() { return node; } - @Override - public Version getVersion() { - return node.getVersion(); - } - @Override public TransportVersion getTransportVersion() { return TransportVersion.current(); diff --git a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java index 191ce130805a8..6ace25021348c 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java @@ -137,7 +137,7 @@ public void testRequestAndResponse() throws Exception { AtomicReference exceptionCaptor = new AtomicReference<>(); AtomicReference channelCaptor = new AtomicReference<>(); - long requestId = responseHandlers.add(new Transport.ResponseContext<>(new TransportResponseHandler<TestResponse>() { + long requestId = responseHandlers.add(new TransportResponseHandler<TestResponse>() { @Override public Executor executor(ThreadPool threadPool) { return TransportResponseHandler.TRANSPORT_WORKER; @@ -157,7 +157,7 @@ public void handleException(TransportException exp) { public TestResponse read(StreamInput in) throws IOException { return new TestResponse(in); } - }, null, action)); + }, null, action).requestId(); RequestHandlerRegistry<TestRequest> registry = new
RequestHandlerRegistry<>( action, TestRequest::new, diff --git a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java index 9896dbf4a861b..631a7e662105e 100644 --- a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java @@ -335,7 +335,7 @@ public void testSlowLogOutboundMessage() throws Exception { try { final int length = randomIntBetween(1, 100); - final PlainActionFuture f = PlainActionFuture.newFuture(); + final PlainActionFuture f = new PlainActionFuture<>(); handler.sendBytes(new FakeTcpChannel() { @Override public void sendMessage(BytesReference reference, ActionListener listener) { diff --git a/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java index 49e0ab1653432..ead43d0bac05e 100644 --- a/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java @@ -142,7 +142,7 @@ public void testProxyStrategyWillOpenExpectedNumberOfConnectionsToAddress() { ) { assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address1))); - PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + PlainActionFuture connectFuture = new PlainActionFuture<>(); strategy.connect(connectFuture); connectFuture.actionGet(); @@ -203,7 +203,7 @@ public void testProxyStrategyWillOpenNewConnectionsOnDisconnect() throws Excepti assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address1))); assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address2))); - PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + PlainActionFuture connectFuture = new PlainActionFuture<>(); strategy.connect(connectFuture); connectFuture.actionGet(); @@ -274,7 +274,7 @@ public void testConnectFailsWithIncompatibleNodes() { ) ) { - PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + PlainActionFuture connectFuture = new PlainActionFuture<>(); strategy.connect(connectFuture); final NoSeedNodeLeftException exception = expectThrows(NoSeedNodeLeftException.class, connectFuture::actionGet); assertThat( @@ -339,7 +339,7 @@ public void testConnectFailsWithNonRetryableException() { ) ) { - PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + PlainActionFuture connectFuture = new PlainActionFuture<>(); strategy.connect(connectFuture); final ElasticsearchException exception = expectThrows(ElasticsearchException.class, connectFuture::actionGet); assertThat(exception.getMessage(), containsString("non-retryable")); @@ -403,7 +403,7 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address1))); assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address2))); - PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + PlainActionFuture connectFuture = new PlainActionFuture<>(); strategy.connect(connectFuture); connectFuture.actionGet(); @@ -471,7 +471,7 @@ public void testProxyStrategyWillResolveAddressesEachConnect() throws Exception null ) ) { - PlainActionFuture 
connectFuture = PlainActionFuture.newFuture(); + PlainActionFuture connectFuture = new PlainActionFuture<>(); strategy.connect(connectFuture); connectFuture.actionGet(); @@ -521,7 +521,7 @@ public void onNodeConnected(DiscoveryNode node, Transport.Connection connection) address.toString() ) ) { - final PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + final PlainActionFuture connectFuture = new PlainActionFuture<>(); strategy.connect(connectFuture); // Should see no error and the connection size is 0 connectFuture.actionGet(); @@ -565,7 +565,7 @@ public void testProxyStrategyWillNeedToBeRebuiltIfNumOfSocketsOrAddressesOrServe "server-name" ) ) { - PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + PlainActionFuture connectFuture = new PlainActionFuture<>(); strategy.connect(connectFuture); connectFuture.actionGet(); @@ -685,7 +685,7 @@ public void testServerNameAttributes() { ) { assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address1))); - PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + PlainActionFuture connectFuture = new PlainActionFuture<>(); strategy.connect(connectFuture); connectFuture.actionGet(); diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java index e606da040bab4..b745756eece0e 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java @@ -10,9 +10,9 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.SearchShardsAction; import org.elasticsearch.action.search.SearchShardsRequest; import org.elasticsearch.action.search.SearchShardsResponse; +import org.elasticsearch.action.search.TransportSearchShardsAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -104,7 +104,7 @@ public void testSearchShards() throws Exception { ); final SearchShardsResponse searchShardsResponse = PlainActionFuture.get( future -> client.execute( - SearchShardsAction.INSTANCE, + TransportSearchShardsAction.TYPE, searchShardsRequest, ActionListener.runBefore( future, @@ -169,7 +169,7 @@ public void testSearchShardsThreadContextHeader() { null ); client.execute( - SearchShardsAction.INSTANCE, + TransportSearchShardsAction.TYPE, searchShardsRequest, ActionListener.runBefore( future, diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java index 3e05743741f73..51b81cf1862e2 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java @@ -162,7 +162,7 @@ public void testEnsureWeReconnect() throws Exception { assertBusy(remoteClusterConnection::assertNoRunningConnections); ConnectionManager connectionManager = remoteClusterConnection.getConnectionManager(); Transport.Connection connection = connectionManager.getConnection(remoteNode); - PlainActionFuture closeFuture = PlainActionFuture.newFuture(); + PlainActionFuture closeFuture = new PlainActionFuture<>(); connection.addCloseListener(closeFuture); 
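// The recurring edit in these hunks swaps the retired static factory for the plain constructor.
// PlainActionFuture is both an ActionListener and a Future, which is why it can be registered as
// the close listener above and then blocked on below. A minimal sketch of the pattern (the <Void>
// type parameter is an assumption; the hunks in this patch elide generics):
//   PlainActionFuture<Void> closeFuture = new PlainActionFuture<>(); // was PlainActionFuture.newFuture()
//   connection.addCloseListener(closeFuture); // completed when the connection closes
//   closeFuture.get();                        // the test blocks here until then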
connectionManager.disconnectFromNode(remoteNode); closeFuture.get(); diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 44b48b60a9c5e..d4f03f1027838 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -15,13 +15,13 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchShardsAction; import org.elasticsearch.action.search.SearchShardsRequest; import org.elasticsearch.action.search.SearchShardsResponse; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchShardsAction; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -125,7 +125,7 @@ public static MockTransportService startTransport( MockTransportService newService = MockTransportService.createNewService(s, version, transportVersion, threadPool, null); try { newService.registerRequestHandler( - SearchShardsAction.NAME, + TransportSearchShardsAction.TYPE.name(), EsExecutors.DIRECT_EXECUTOR_SERVICE, SearchShardsRequest::new, (request, channel, task) -> { @@ -137,7 +137,7 @@ public static MockTransportService startTransport( } ); newService.registerRequestHandler( - SearchAction.NAME, + TransportSearchAction.TYPE.name(), EsExecutors.DIRECT_EXECUTOR_SERVICE, SearchRequest::new, (request, channel, task) -> { diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index bc5709c77b74d..1a530a1602b18 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -480,7 +480,7 @@ public void testIncrementallyAddClusters() throws IOException { "cluster_1", Collections.singletonList(cluster1Seed.getAddress().toString()) ); - PlainActionFuture clusterAdded = PlainActionFuture.newFuture(); + PlainActionFuture clusterAdded = new PlainActionFuture<>(); // Add the cluster on a different thread to test that we wait for a new cluster to // connect before returning. 
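// A hedged sketch of the shape the following lines take (the updateRemoteCluster call itself sits
// outside this hunk, so the service and settings names here are assumptions):
//   new Thread(() -> service.updateRemoteCluster("cluster_1", remoteSettings, clusterAdded)).start();
//   clusterAdded.actionGet(); // returns only once the new remote connection is fully established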
new Thread(() -> { diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionManagerTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionManagerTests.java index 800b0041676de..839138d3c7c34 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionManagerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionManagerTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.transport; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -18,6 +17,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.RemoteConnectionManager.ProxyConnection; import org.mockito.Mockito; import java.io.IOException; @@ -27,8 +27,8 @@ import java.util.concurrent.ExecutionException; import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_HANDSHAKE_ACTION_NAME; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasItems; import static org.hamcrest.core.IsInstanceOf.instanceOf; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; @@ -61,15 +61,15 @@ public void setUp() throws Exception { public void testGetConnection() { DiscoveryNode node1 = DiscoveryNodeUtils.create("node-1", address); - PlainActionFuture future1 = PlainActionFuture.newFuture(); + PlainActionFuture future1 = new PlainActionFuture<>(); remoteConnectionManager.connectToRemoteClusterNode(node1, validator, future1); assertTrue(future1.isDone()); // Add duplicate connect attempt to ensure that we do not get duplicate connections in the round robin - remoteConnectionManager.connectToRemoteClusterNode(node1, validator, PlainActionFuture.newFuture()); + remoteConnectionManager.connectToRemoteClusterNode(node1, validator, new PlainActionFuture<>()); - DiscoveryNode node2 = DiscoveryNodeUtils.create("node-2", address, Version.CURRENT.minimumCompatibilityVersion()); - PlainActionFuture future2 = PlainActionFuture.newFuture(); + DiscoveryNode node2 = DiscoveryNodeUtils.create("node-2", address); + PlainActionFuture future2 = new PlainActionFuture<>(); remoteConnectionManager.connectToRemoteClusterNode(node2, validator, future2); assertTrue(future2.isDone()); @@ -77,29 +77,28 @@ public void testGetConnection() { assertEquals(node2, remoteConnectionManager.getConnection(node2).getNode()); DiscoveryNode node4 = DiscoveryNodeUtils.create("node-4", address); - assertThat(remoteConnectionManager.getConnection(node4), instanceOf(RemoteConnectionManager.ProxyConnection.class)); + assertThat(remoteConnectionManager.getConnection(node4), instanceOf(ProxyConnection.class)); // Test round robin - Set versions = new HashSet<>(); - versions.add(remoteConnectionManager.getConnection(node4).getVersion()); - versions.add(remoteConnectionManager.getConnection(node4).getVersion()); + Set proxyNodes = new HashSet<>(); + proxyNodes.add(((ProxyConnection) remoteConnectionManager.getConnection(node4)).getConnection().getNode().getId()); + proxyNodes.add(((ProxyConnection) remoteConnectionManager.getConnection(node4)).getConnection().getNode().getId()); - assertThat(versions, hasItems(Version.CURRENT, 
Version.CURRENT.minimumCompatibilityVersion())); + assertThat(proxyNodes, containsInAnyOrder("node-1", "node-2")); // Test that the connection is cleared from the round robin list when it is closed remoteConnectionManager.getConnection(node1).close(); - versions.clear(); - versions.add(remoteConnectionManager.getConnection(node4).getVersion()); - versions.add(remoteConnectionManager.getConnection(node4).getVersion()); + proxyNodes.clear(); + proxyNodes.add(((ProxyConnection) remoteConnectionManager.getConnection(node4)).getConnection().getNode().getId()); + proxyNodes.add(((ProxyConnection) remoteConnectionManager.getConnection(node4)).getConnection().getNode().getId()); - assertThat(versions, hasItems(Version.CURRENT.minimumCompatibilityVersion())); - assertEquals(1, versions.size()); + assertThat(proxyNodes, containsInAnyOrder("node-2")); } public void testResolveRemoteClusterAlias() throws ExecutionException, InterruptedException { DiscoveryNode remoteNode1 = DiscoveryNodeUtils.create("remote-node-1", address); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); remoteConnectionManager.connectToRemoteClusterNode(remoteNode1, validator, future); assertTrue(future.isDone()); @@ -111,10 +110,10 @@ public void testResolveRemoteClusterAlias() throws ExecutionException, Interrupt DiscoveryNode remoteNode2 = DiscoveryNodeUtils.create("remote-node-2", address); Transport.Connection proxyConnection = remoteConnectionManager.getConnection(remoteNode2); - assertThat(proxyConnection, instanceOf(RemoteConnectionManager.ProxyConnection.class)); + assertThat(proxyConnection, instanceOf(ProxyConnection.class)); assertThat(RemoteConnectionManager.resolveRemoteClusterAlias(proxyConnection).get(), equalTo("remote-cluster")); - PlainActionFuture future2 = PlainActionFuture.newFuture(); + PlainActionFuture future2 = new PlainActionFuture<>(); remoteConnectionManager.openConnection(remoteNode1, null, future2); assertThat(RemoteConnectionManager.resolveRemoteClusterAlias(future2.get()).get(), equalTo("remote-cluster")); } @@ -156,11 +155,6 @@ public DiscoveryNode getNode() { return node; } - @Override - public Version getVersion() { - return node.getVersion(); - } - @Override public TransportVersion getTransportVersion() { return TransportVersion.current(); diff --git a/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java index 31096a53e67c0..3c955258d45c8 100644 --- a/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java @@ -204,7 +204,7 @@ public void testSniffStrategyWillConnectToAndDiscoverNodes() { seedNodes(seedNode) ) ) { - PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + PlainActionFuture connectFuture = new PlainActionFuture<>(); strategy.connect(connectFuture); connectFuture.actionGet(); @@ -275,7 +275,7 @@ public void testSniffStrategyWillResolveDiscoveryNodesEachConnect() throws Excep Collections.singletonList(seedNodeSupplier) ) ) { - PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + PlainActionFuture connectFuture = new PlainActionFuture<>(); strategy.connect(connectFuture); connectFuture.actionGet(); @@ -348,7 +348,7 @@ public void testSniffStrategyWillConnectToMaxAllowedNodesAndOpenNewConnectionsOn seedNodes(seedNode) ) ) { - PlainActionFuture connectFuture = 
PlainActionFuture.newFuture(); + PlainActionFuture connectFuture = new PlainActionFuture<>(); strategy.connect(connectFuture); connectFuture.actionGet(); @@ -436,7 +436,7 @@ public void testDiscoverWithSingleIncompatibleSeedNode() { seedNodes(seedNode) ) ) { - PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + PlainActionFuture connectFuture = new PlainActionFuture<>(); strategy.connect(connectFuture); connectFuture.actionGet(); @@ -498,7 +498,7 @@ public void testConnectFailsWithIncompatibleNodes() { seedNodes(incompatibleSeedNode) ) ) { - PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + PlainActionFuture connectFuture = new PlainActionFuture<>(); strategy.connect(connectFuture); expectThrows(Exception.class, connectFuture::actionGet); @@ -561,7 +561,7 @@ public void testFilterNodesWithNodePredicate() { seedNodes(seedNode) ) ) { - PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + PlainActionFuture connectFuture = new PlainActionFuture<>(); strategy.connect(connectFuture); connectFuture.actionGet(); @@ -629,7 +629,7 @@ public void testConnectFailsIfNoConnectionsOpened() { seedNodes(seedNode) ) ) { - PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + PlainActionFuture connectFuture = new PlainActionFuture<>(); strategy.connect(connectFuture); final IllegalStateException ise = expectThrows(IllegalStateException.class, connectFuture::actionGet); assertEquals("Unable to open any connections to remote cluster [cluster-alias]", ise.getMessage()); @@ -706,7 +706,7 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro seedNodes(seedNode, otherSeedNode) ) ) { - PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + PlainActionFuture connectFuture = new PlainActionFuture<>(); strategy.connect(connectFuture); connectFuture.actionGet(); @@ -718,7 +718,7 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro assertBusy(strategy::assertNoRunningConnections); - PlainActionFuture newConnect = PlainActionFuture.newFuture(); + PlainActionFuture newConnect = new PlainActionFuture<>(); strategy.connect(newConnect); IllegalStateException ise = expectThrows(IllegalStateException.class, newConnect::actionGet); assertThat( @@ -799,7 +799,7 @@ public void testMultipleCallsToConnectEnsuresConnection() { assertFalse(connectionManager.nodeConnected(discoverableNode)); assertTrue(strategy.assertNoRunningConnections()); - PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + PlainActionFuture connectFuture = new PlainActionFuture<>(); strategy.connect(connectFuture); connectFuture.actionGet(); @@ -808,7 +808,7 @@ public void testMultipleCallsToConnectEnsuresConnection() { assertTrue(strategy.assertNoRunningConnections()); // exec again we are already connected - PlainActionFuture ensureConnectFuture = PlainActionFuture.newFuture(); + PlainActionFuture ensureConnectFuture = new PlainActionFuture<>(); strategy.connect(ensureConnectFuture); ensureConnectFuture.actionGet(); @@ -911,7 +911,7 @@ public void testConfiguredProxyAddressModeWillReplaceNodeAddress() { assertFalse(connectionManager.nodeConnected(discoverableNode)); assertTrue(strategy.assertNoRunningConnections()); - PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + PlainActionFuture connectFuture = new PlainActionFuture<>(); strategy.connect(connectFuture); connectFuture.actionGet(); @@ -976,7 +976,7 @@ public void testSniffStrategyWillNeedToBeRebuiltIfNumOfConnectionsOrSeedsOrProxy 
seedNodes(seedNode) ) ) { - PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + PlainActionFuture connectFuture = new PlainActionFuture<>(); strategy.connect(connectFuture); connectFuture.actionGet(); diff --git a/server/src/test/java/org/elasticsearch/transport/TransportHandshakerTests.java b/server/src/test/java/org/elasticsearch/transport/TransportHandshakerTests.java index c6a9d93942707..e474ce7312ad0 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportHandshakerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportHandshakerTests.java @@ -59,7 +59,7 @@ public void tearDown() throws Exception { } public void testHandshakeRequestAndResponse() throws IOException { - PlainActionFuture versionFuture = PlainActionFuture.newFuture(); + PlainActionFuture versionFuture = new PlainActionFuture<>(); long reqId = randomLongBetween(1, 10); handshaker.sendHandshake(reqId, node, channel, new TimeValue(30, TimeUnit.SECONDS), versionFuture); @@ -71,7 +71,7 @@ public void testHandshakeRequestAndResponse() throws IOException { BytesStreamOutput bytesStreamOutput = new BytesStreamOutput(); handshakeRequest.writeTo(bytesStreamOutput); StreamInput input = bytesStreamOutput.bytes().streamInput(); - final PlainActionFuture responseFuture = PlainActionFuture.newFuture(); + final PlainActionFuture responseFuture = new PlainActionFuture<>(); final TestTransportChannel channel = new TestTransportChannel(responseFuture); handshaker.handleHandshake(channel, reqId, input); @@ -84,7 +84,7 @@ public void testHandshakeRequestAndResponse() throws IOException { public void testHandshakeRequestFutureVersionsCompatibility() throws IOException { long reqId = randomLongBetween(1, 10); - handshaker.sendHandshake(reqId, node, channel, new TimeValue(30, TimeUnit.SECONDS), PlainActionFuture.newFuture()); + handshaker.sendHandshake(reqId, node, channel, new TimeValue(30, TimeUnit.SECONDS), new PlainActionFuture<>()); verify(requestSender).sendRequest(node, channel, reqId, TransportHandshaker.REQUEST_HANDSHAKE_VERSION); @@ -107,7 +107,7 @@ public void testHandshakeRequestFutureVersionsCompatibility() throws IOException // Otherwise, we need to update the test. 
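// A sketch of the forward-compatibility contract the assertions below pin down, assuming the
// length-prefixed handshake layout used here (illustrative names, not the production code):
//   out.writeBytesReference(payload);                 // sender: transport version plus any future fields
//   BytesReference payload = in.readBytesReference(); // receiver: consumes the whole blob...
//   TransportVersion version = TransportVersion.readVersion(payload.streamInput());
//   // ...and ignores any trailing bytes appended by a future release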
assertEquals(currentHandshakeBytes.bytes().length(), lengthCheckingHandshake.bytes().length()); assertEquals(1031, futureHandshakeStream.available()); - final PlainActionFuture responseFuture = PlainActionFuture.newFuture(); + final PlainActionFuture responseFuture = new PlainActionFuture<>(); final TestTransportChannel channel = new TestTransportChannel(responseFuture); handshaker.handleHandshake(channel, reqId, futureHandshakeStream); assertEquals(0, futureHandshakeStream.available()); @@ -118,7 +118,7 @@ public void testHandshakeRequestFutureVersionsCompatibility() throws IOException } public void testHandshakeError() throws IOException { - PlainActionFuture versionFuture = PlainActionFuture.newFuture(); + PlainActionFuture versionFuture = new PlainActionFuture<>(); long reqId = randomLongBetween(1, 10); handshaker.sendHandshake(reqId, node, channel, new TimeValue(30, TimeUnit.SECONDS), versionFuture); @@ -135,7 +135,7 @@ public void testHandshakeError() throws IOException { } public void testSendRequestThrowsException() throws IOException { - PlainActionFuture versionFuture = PlainActionFuture.newFuture(); + PlainActionFuture versionFuture = new PlainActionFuture<>(); long reqId = randomLongBetween(1, 10); doThrow(new IOException("boom")).when(requestSender) .sendRequest(node, channel, reqId, TransportHandshaker.REQUEST_HANDSHAKE_VERSION); @@ -149,7 +149,7 @@ public void testSendRequestThrowsException() throws IOException { } public void testHandshakeTimeout() throws IOException { - PlainActionFuture versionFuture = PlainActionFuture.newFuture(); + PlainActionFuture versionFuture = new PlainActionFuture<>(); long reqId = randomLongBetween(1, 10); handshaker.sendHandshake(reqId, node, channel, new TimeValue(100, TimeUnit.MILLISECONDS), versionFuture); diff --git a/server/src/test/java/org/elasticsearch/transport/TransportInfoTests.java b/server/src/test/java/org/elasticsearch/transport/TransportInfoTests.java index 83ac8f883dae1..8e23f0e3984b9 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportInfoTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportInfoTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.transport; -import org.elasticsearch.Version; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -33,10 +32,6 @@ private TransportInfo createTransportInfo(InetAddress address, int port, boolean return new TransportInfo(boundAddress, profiles, cnameInPublishAddressProperty); } - public void testDoNotForgetToRemoveProperty() { - assertTrue("Remove es.transport.cname_in_publish_address property from TransportInfo in 9.0.0", Version.CURRENT.major < 9); - } - public void testCorrectlyDisplayPublishedCname() throws Exception { InetAddress address = InetAddress.getByName("localhost"); int port = 9200; diff --git a/settings.gradle b/settings.gradle index 09aaef7ede189..74315c6516653 100644 --- a/settings.gradle +++ b/settings.gradle @@ -106,7 +106,8 @@ List projects = [ 'test:logger-usage', 'test:test-clusters', 'test:x-content', - 'test:yaml-rest-runner' + 'test:yaml-rest-runner', + 'test:metadata-extractor' ] /** diff --git a/test/external-modules/apm-integration/src/javaRestTest/java/org/elasticsearch/test/apmintegration/MetricsApmIT.java b/test/external-modules/apm-integration/src/javaRestTest/java/org/elasticsearch/test/apmintegration/MetricsApmIT.java index 0c33cd4984d86..41e6a818a62da 100644 --- 
a/test/external-modules/apm-integration/src/javaRestTest/java/org/elasticsearch/test/apmintegration/MetricsApmIT.java +++ b/test/external-modules/apm-integration/src/javaRestTest/java/org/elasticsearch/test/apmintegration/MetricsApmIT.java @@ -60,8 +60,10 @@ protected String getTestRestCluster() { public void testApmIntegration() throws Exception { Map>> sampleAssertions = new HashMap<>( Map.ofEntries( - assertion("testDoubleCounter", m -> (Double) m.get("value"), closeTo(1.0, 0.001)), + assertion(TestMeterUsages.VERY_LONG_NAME, m -> (Double) m.get("value"), closeTo(1.0, 0.001)), assertion("testLongCounter", m -> (Double) m.get("value"), closeTo(1.0, 0.001)), + assertion("testAsyncDoubleCounter", m -> (Double) m.get("value"), closeTo(1.0, 0.001)), + assertion("testAsyncLongCounter", m -> (Integer) m.get("value"), equalTo(1)), assertion("testDoubleGauge", m -> (Double) m.get("value"), closeTo(1.0, 0.001)), assertion("testLongGauge", m -> (Integer) m.get("value"), equalTo(1)), assertion( diff --git a/test/external-modules/apm-integration/src/main/java/org/elasticsearch/test/apmintegration/TestMeterUsages.java b/test/external-modules/apm-integration/src/main/java/org/elasticsearch/test/apmintegration/TestMeterUsages.java index 22d06d31b4193..8a71738a0b420 100644 --- a/test/external-modules/apm-integration/src/main/java/org/elasticsearch/test/apmintegration/TestMeterUsages.java +++ b/test/external-modules/apm-integration/src/main/java/org/elasticsearch/test/apmintegration/TestMeterUsages.java @@ -26,14 +26,18 @@ public class TestMeterUsages { private final LongHistogram longHistogram; private final AtomicReference doubleWithAttributes = new AtomicReference<>(); private final AtomicReference longWithAttributes = new AtomicReference<>(); + public static String VERY_LONG_NAME = "a1234567890123456789012345678901234567890123456789012345678901234567890"; public TestMeterUsages(MeterRegistry meterRegistry) { - this.doubleCounter = meterRegistry.registerDoubleCounter("testDoubleCounter", "test", "unit"); + this.doubleCounter = meterRegistry.registerDoubleCounter(VERY_LONG_NAME, "test", "unit"); this.longCounter = meterRegistry.registerDoubleCounter("testLongCounter", "test", "unit"); this.doubleHistogram = meterRegistry.registerDoubleHistogram("testDoubleHistogram", "test", "unit"); this.longHistogram = meterRegistry.registerLongHistogram("testLongHistogram", "test", "unit"); meterRegistry.registerDoubleGauge("testDoubleGauge", "test", "unit", doubleWithAttributes::get); meterRegistry.registerLongGauge("testLongGauge", "test", "unit", longWithAttributes::get); + + meterRegistry.registerLongAsyncCounter("testAsyncLongCounter", "test", "unit", longWithAttributes::get); + meterRegistry.registerDoubleAsyncCounter("testAsyncDoubleCounter", "test", "unit", doubleWithAttributes::get); } public void testUponRequest() { @@ -43,6 +47,8 @@ public void testUponRequest() { doubleHistogram.record(2.0); longHistogram.record(1); longHistogram.record(2); + + // triggers gauges and async counters doubleWithAttributes.set(new DoubleWithAttributes(1.0, Map.of())); longWithAttributes.set(new LongWithAttributes(1, Map.of())); } diff --git a/test/external-modules/die-with-dignity/src/javaRestTest/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/test/external-modules/die-with-dignity/src/javaRestTest/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index 291e4124e17ea..b313b87fc2153 100644 --- 
a/test/external-modules/die-with-dignity/src/javaRestTest/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/test/external-modules/die-with-dignity/src/javaRestTest/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -24,8 +24,6 @@ import java.io.InputStream; import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -144,12 +142,6 @@ private boolean containsAll(String line, String... subStrings) { return true; } - private void debugLogs(Path path) throws IOException { - try (BufferedReader reader = Files.newBufferedReader(path)) { - reader.lines().forEach(line -> logger.info(line)); - } - } - @Override protected boolean preserveClusterUponCompletion() { // as the cluster is dead its state can not be wiped successfully so we have to bypass wiping the cluster diff --git a/test/external-modules/latency-simulating-directory/src/internalClusterTest/java/org/elasticsearch/test/simulatedlatencyrepo/LatencySimulatingBlobStoreRepositoryTests.java b/test/external-modules/latency-simulating-directory/src/internalClusterTest/java/org/elasticsearch/test/simulatedlatencyrepo/LatencySimulatingBlobStoreRepositoryTests.java index b442a6158c471..7e1d1ac376cf0 100644 --- a/test/external-modules/latency-simulating-directory/src/internalClusterTest/java/org/elasticsearch/test/simulatedlatencyrepo/LatencySimulatingBlobStoreRepositoryTests.java +++ b/test/external-modules/latency-simulating-directory/src/internalClusterTest/java/org/elasticsearch/test/simulatedlatencyrepo/LatencySimulatingBlobStoreRepositoryTests.java @@ -109,7 +109,7 @@ public void testRetrieveSnapshots() throws Exception { int numDocs = randomIntBetween(10, 20); for (int i = 0; i < numDocs; i++) { String id = Integer.toString(i); - client().prepareIndex(indexName).setId(id).setSource("text", "sometext").get(); + prepareIndex(indexName).setId(id).setSource("text", "sometext").get(); } indicesAdmin().prepareFlush(indexName).get(); diff --git a/test/external-modules/seek-tracking-directory/src/internalClusterTest/java/org/elasticsearch/test/seektracker/SeekTrackerPluginIT.java b/test/external-modules/seek-tracking-directory/src/internalClusterTest/java/org/elasticsearch/test/seektracker/SeekTrackerPluginIT.java index d9c239905f343..54e6583d5f483 100644 --- a/test/external-modules/seek-tracking-directory/src/internalClusterTest/java/org/elasticsearch/test/seektracker/SeekTrackerPluginIT.java +++ b/test/external-modules/seek-tracking-directory/src/internalClusterTest/java/org/elasticsearch/test/seektracker/SeekTrackerPluginIT.java @@ -41,7 +41,7 @@ public void testSeekTrackerPlugin() throws InterruptedException { assertAcked(indicesAdmin().prepareCreate("index")); List docs = new ArrayList<>(); for (int i = 0; i < 100; i++) { - docs.add(client().prepareIndex("index").setSource("field", "term" + i % 5)); + docs.add(prepareIndex("index").setSource("field", "term" + i % 5)); } indexRandom(true, docs); diff --git a/test/fixtures/minio-fixture/build.gradle b/test/fixtures/minio-fixture/build.gradle index 0823482331e84..8673c51d46038 100644 --- a/test/fixtures/minio-fixture/build.gradle +++ b/test/fixtures/minio-fixture/build.gradle @@ -6,6 +6,27 @@ * Side Public License, v 1. 
*/ apply plugin: 'elasticsearch.test.fixtures' +apply plugin: 'java' +apply plugin: 'elasticsearch.java' description = 'Fixture for MinIO Storage service' +configurations.all { + transitive = false +} + +dependencies { + testImplementation project(':test:framework') + + api "junit:junit:${versions.junit}" + api "org.testcontainers:testcontainers:${versions.testcontainer}" + implementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + implementation "org.slf4j:slf4j-api:${versions.slf4j}" + implementation "com.github.docker-java:docker-java-api:${versions.dockerJava}" + runtimeOnly "com.github.docker-java:docker-java-transport-zerodep:${versions.dockerJava}" + runtimeOnly "com.github.docker-java:docker-java-transport:${versions.dockerJava}" + runtimeOnly "com.github.docker-java:docker-java-core:${versions.dockerJava}" + runtimeOnly "org.apache.commons:commons-compress:${versions.commonsCompress}" + runtimeOnly "org.rnorth.duct-tape:duct-tape:${versions.ductTape}" +} diff --git a/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/minio/MinioTestContainer.java b/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/minio/MinioTestContainer.java new file mode 100644 index 0000000000000..fcb95890ace31 --- /dev/null +++ b/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/minio/MinioTestContainer.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.fixtures.minio; + +import org.elasticsearch.test.fixtures.testcontainers.DockerEnvironmentAwareTestContainer; +import org.junit.rules.TestRule; +import org.testcontainers.images.builder.ImageFromDockerfile; + +public final class MinioTestContainer extends DockerEnvironmentAwareTestContainer implements TestRule { + + private static final int servicePort = 9000; + private final boolean enabled; + + public MinioTestContainer() { + this(true); + } + + public MinioTestContainer(boolean enabled) { + super( + new ImageFromDockerfile().withDockerfileFromBuilder( + builder -> builder.from("minio/minio:RELEASE.2021-03-01T04-20-55Z") + .env("MINIO_ACCESS_KEY", "s3_test_access_key") + .env("MINIO_SECRET_KEY", "s3_test_secret_key") + .run("mkdir -p /minio/data/bucket") + .cmd("server", "/minio/data") + .build() + ) + ); + if (enabled) { + addExposedPort(servicePort); + } + this.enabled = enabled; + } + + @Override + public void start() { + if (enabled) { + super.start(); + } + } + + public String getAddress() { + return "http://127.0.0.1:" + getMappedPort(servicePort); + } +} diff --git a/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/testcontainers/DockerEnvironmentAwareTestContainer.java b/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/testcontainers/DockerEnvironmentAwareTestContainer.java new file mode 100644 index 0000000000000..c0fb83e5206f4 --- /dev/null +++ b/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/testcontainers/DockerEnvironmentAwareTestContainer.java @@ -0,0 +1,132 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.fixtures.testcontainers; + +import org.junit.Assume; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testcontainers.DockerClientFactory; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.images.builder.ImageFromDockerfile; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +public class DockerEnvironmentAwareTestContainer extends GenericContainer { + protected static final Logger LOGGER = LoggerFactory.getLogger(DockerEnvironmentAwareTestContainer.class); + + private static final String DOCKER_ON_LINUX_EXCLUSIONS_FILE = ".ci/dockerOnLinuxExclusions"; + + private static final boolean CI = Boolean.parseBoolean(System.getProperty("CI", "false")); + private static final boolean EXCLUDED_OS = isExcludedOs(); + private static final boolean DOCKER_PROBING_SUCCESSFUL = isDockerAvailable(); + + /** + * see https://github.com/elastic/elasticsearch/issues/102532 + */ + private static boolean isDockerAvailable() { + try { + LOGGER.info("Probing docker environment..."); + DockerClientFactory.instance().client(); + LOGGER.info("Probing docker environment successful"); + return true; + } catch (Throwable ex) { + LOGGER.warn("Probing docker has failed; disabling test", ex); + return false; + } + } + + public DockerEnvironmentAwareTestContainer(ImageFromDockerfile imageFromDockerfile) { + super(imageFromDockerfile); + } + + @Override + public void start() { + Assume.assumeFalse("Docker support excluded on OS", EXCLUDED_OS); + Assume.assumeTrue("Docker probing successful", DOCKER_PROBING_SUCCESSFUL); + super.start(); + } + + static String deriveId(Map values) { + return values.get("ID") + "-" + values.get("VERSION_ID"); + } + + private static boolean isExcludedOs() { + if (CI == false) { + // we don't exclude any OS outside of a CI environment + return false; + } + if (System.getProperty("os.name").toLowerCase().startsWith("windows")) { + return true; + } + final Path osRelease = Paths.get("/etc/os-release"); + if (Files.exists(osRelease)) { + Map values; + + try { + final List osReleaseLines = Files.readAllLines(osRelease); + values = parseOsRelease(osReleaseLines); + } catch (IOException e) { + throw new RuntimeException("Failed to read /etc/os-release", e); + } + + final String id = deriveId(values); + final boolean excluded = getLinuxExclusionList().contains(id); + + if (excluded) { + LOGGER.warn("Linux OS id [{}] is present in the Docker exclude list. 
Tasks requiring Docker will be disabled.", id); + } + + return excluded; + } + + return false; + } + + private static List getLinuxExclusionList() { + File exclusionsFile = new File(DOCKER_ON_LINUX_EXCLUSIONS_FILE); + if (exclusionsFile.exists()) { + try { + return Files.readAllLines(exclusionsFile.toPath()) + .stream() + .map(String::trim) + .filter(line -> (line.isEmpty() || line.startsWith("#")) == false) + .collect(Collectors.toList()); + } catch (IOException e) { + throw new RuntimeException("Failed to read " + exclusionsFile.getAbsolutePath(), e); + } + } else { + return Collections.emptyList(); + } + } + + // visible for testing + static Map parseOsRelease(final List osReleaseLines) { + final Map values = new HashMap<>(); + + osReleaseLines.stream().map(String::trim).filter(line -> (line.isEmpty() || line.startsWith("#")) == false).forEach(line -> { + final String[] parts = line.split("=", 2); + final String key = parts[0]; + // remove optional leading and trailing quotes and whitespace + final String value = parts[1].replaceAll("^['\"]?\\s*", "").replaceAll("\\s*['\"]?$", ""); + + values.put(key, value.toLowerCase()); + }); + + return values; + } +} diff --git a/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/testcontainers/TestContainersThreadFilter.java b/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/testcontainers/TestContainersThreadFilter.java new file mode 100644 index 0000000000000..1b0dacbacfd1a --- /dev/null +++ b/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/testcontainers/TestContainersThreadFilter.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.fixtures.testcontainers; + +import com.carrotsearch.randomizedtesting.ThreadFilter; + +/** + * Test containers spawn extra threads, which causes our thread leak + * detection to fail. Filter these threads out since we can't clean them up. + */ +public class TestContainersThreadFilter implements ThreadFilter { + @Override + public boolean reject(Thread t) { + return t.getName().startsWith("testcontainers-") || t.getName().startsWith("ducttape"); + } +} diff --git a/test/fixtures/s3-fixture/Dockerfile b/test/fixtures/s3-fixture/Dockerfile deleted file mode 100644 index 97b56ee40a28c..0000000000000 --- a/test/fixtures/s3-fixture/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -FROM openjdk:17.0.2 - -ARG fixtureClass -ARG port -ARG bucket -ARG basePath -ARG accessKey -ARG sessionToken - -ENV S3_FIXTURE_CLASS=${fixtureClass} -ENV S3_FIXTURE_PORT=${port} -ENV S3_FIXTURE_BUCKET=${bucket} -ENV S3_FIXTURE_BASE_PATH=${basePath} -ENV S3_FIXTURE_ACCESS_KEY=${accessKey} -ENV S3_FIXTURE_SESSION_TOKEN=${sessionToken} - -ENTRYPOINT exec java -classpath "/fixture/shared/*" \ - $S3_FIXTURE_CLASS 0.0.0.0 "$S3_FIXTURE_PORT" "$S3_FIXTURE_BUCKET" "$S3_FIXTURE_BASE_PATH" "$S3_FIXTURE_ACCESS_KEY" "$S3_FIXTURE_SESSION_TOKEN" - -EXPOSE $port diff --git a/test/fixtures/s3-fixture/build.gradle b/test/fixtures/s3-fixture/build.gradle index a46af72de5cab..0f031ee029f75 100644 --- a/test/fixtures/s3-fixture/build.gradle +++ b/test/fixtures/s3-fixture/build.gradle @@ -6,24 +6,14 @@ * Side Public License, v 1. 
*/ apply plugin: 'elasticsearch.java' -apply plugin: 'elasticsearch.test.fixtures' description = 'Fixture for S3 Storage service' -tasks.named("test").configure { enabled = false } +//tasks.named("test").configure { enabled = false } dependencies { api project(':server') - testImplementation project(':test:framework') -} - -tasks.named("preProcessFixture").configure { - dependsOn "jar", configurations.runtimeClasspath - doLast { - file("${testFixturesDir}/shared").mkdirs() - project.copy { - from jar - from configurations.runtimeClasspath - into "${testFixturesDir}/shared" - } + api("junit:junit:${versions.junit}") { + transitive = false } + testImplementation project(':test:framework') } diff --git a/test/fixtures/s3-fixture/docker-compose.yml b/test/fixtures/s3-fixture/docker-compose.yml deleted file mode 100644 index b07fe378158b4..0000000000000 --- a/test/fixtures/s3-fixture/docker-compose.yml +++ /dev/null @@ -1,141 +0,0 @@ -version: '3' -services: - s3-fixture: - build: - context: . - args: - fixtureClass: fixture.s3.S3HttpFixture - port: 80 - bucket: "bucket" - basePath: "base_path_integration_tests" - accessKey: "s3_test_access_key" - dockerfile: Dockerfile - volumes: - - ./testfixtures_shared/shared:/fixture/shared - ports: - - "80" - - s3-fixture-other: - build: - context: . - args: - fixtureClass: fixture.s3.S3HttpFixture - port: 80 - bucket: "bucket" - basePath: "base_path" - accessKey: "s3_test_access_key" - dockerfile: Dockerfile - volumes: - - ./testfixtures_shared/shared:/fixture/shared - ports: - - "80" - - s3-fixture-repositories-metering: - build: - context: . - args: - fixtureClass: fixture.s3.S3HttpFixture - port: 80 - bucket: "bucket" - basePath: "base_path" - accessKey: "s3_test_access_key" - dockerfile: Dockerfile - volumes: - - ./testfixtures_shared/shared:/fixture/shared - ports: - - "80" - - s3-fixture-repository-test-kit: - build: - context: . - args: - fixtureClass: fixture.s3.S3HttpFixture - port: 80 - bucket: "bucket" - basePath: "base_path" - accessKey: "s3_test_access_key" - dockerfile: Dockerfile - volumes: - - ./testfixtures_shared/shared:/fixture/shared - ports: - - "80" - - s3-snapshot-based-recoveries: - build: - context: . - args: - fixtureClass: fixture.s3.S3HttpFixture - port: 80 - bucket: "bucket" - basePath: "base_path" - accessKey: "s3_test_access_key" - dockerfile: Dockerfile - volumes: - - ./testfixtures_shared/shared:/fixture/shared - ports: - - "80" - - s3-fixture-with-session-token: - build: - context: . - args: - fixtureClass: fixture.s3.S3HttpFixtureWithSessionToken - port: 80 - bucket: "session_token_bucket" - basePath: "session_token_base_path_integration_tests" - accessKey: "session_token_access_key" - sessionToken: "session_token" - dockerfile: Dockerfile - volumes: - - ./testfixtures_shared/shared:/fixture/shared - ports: - - "80" - - s3-fixture-with-ec2: - build: - context: . - args: - fixtureClass: fixture.s3.S3HttpFixtureWithEC2 - port: 80 - bucket: "ec2_bucket" - basePath: "ec2_base_path" - accessKey: "ec2_access_key" - sessionToken: "ec2_session_token" - dockerfile: Dockerfile - volumes: - - ./testfixtures_shared/shared:/fixture/shared - ports: - - "80" - - s3-fixture-with-ecs: - build: - context: . - args: - fixtureClass: fixture.s3.S3HttpFixtureWithECS - port: 80 - bucket: "ecs_bucket" - basePath: "ecs_base_path" - accessKey: "ecs_access_key" - sessionToken: "ecs_session_token" - dockerfile: Dockerfile - volumes: - - ./testfixtures_shared/shared:/fixture/shared - ports: - - "80" - - s3-fixture-with-sts: - build: - context: . 
- args: - fixtureClass: fixture.s3.S3HttpFixtureWithSTS - port: 80 - bucket: "sts_bucket" - basePath: "sts_base_path" - accessKey: "sts_access_key" - sessionToken: "sts_session_token" - webIdentityToken: "Atza|IQEBLjAsAhRFiXuWpUXuRvQ9PZL3GMFcYevydwIUFAHZwXZXXXXXXXXJnrulxKDHwy87oGKPznh0D6bEQZTSCzyoCtL_8S07pLpr0zMbn6w1lfVZKNTBdDansFBmtGnIsIapjI6xKR02Yc_2bQ8LZbUXSGm6Ry6_BG7PrtLZtj_dfCTj92xNGed-CrKqjG7nPBjNIL016GGvuS5gSvPRUxWES3VYfm1wl7WTI7jn-Pcb6M-buCgHhFOzTQxod27L9CqnOLio7N3gZAGpsp6n1-AJBOCJckcyXe2c6uD0srOJeZlKUm2eTDVMf8IehDVI0r1QOnTV6KzzAI3OY87Vd_cVMQ" - dockerfile: sts/Dockerfile - volumes: - - ./testfixtures_shared/shared:/fixture/shared - ports: - - "80" diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixture.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixture.java index c8127646f7e79..539905b4a815f 100644 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixture.java +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixture.java @@ -12,36 +12,39 @@ import com.sun.net.httpserver.HttpServer; import org.elasticsearch.rest.RestStatus; +import org.junit.rules.ExternalResource; import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.net.UnknownHostException; import java.util.Objects; -public class S3HttpFixture { +public class S3HttpFixture extends ExternalResource { - private final HttpServer server; + private HttpServer server; - S3HttpFixture(final String[] args) throws Exception { - this.server = HttpServer.create(new InetSocketAddress(InetAddress.getByName(args[0]), Integer.parseInt(args[1])), 0); - this.server.createContext("/", Objects.requireNonNull(createHandler(args))); + private boolean enabled; + private final String bucket; + private final String basePath; + protected final String accessKey; + + public S3HttpFixture() { + this(true); } - final void start() throws Exception { - try { - server.start(); - // wait to be killed - Thread.sleep(Long.MAX_VALUE); - } finally { - server.stop(0); - } + public S3HttpFixture(boolean enabled) { + this(enabled, "bucket", "base_path_integration_tests", "s3_test_access_key"); } - protected HttpHandler createHandler(final String[] args) { - final String bucket = Objects.requireNonNull(args[2]); - final String basePath = args[3]; - final String accessKey = Objects.requireNonNull(args[4]); + public S3HttpFixture(boolean enabled, String bucket, String basePath, String accessKey) { + this.enabled = enabled; + this.bucket = bucket; + this.basePath = basePath; + this.accessKey = accessKey; + } + protected HttpHandler createHandler() { return new S3HttpHandler(bucket, basePath) { @Override public void handle(final HttpExchange exchange) throws IOException { @@ -55,11 +58,36 @@ public void handle(final HttpExchange exchange) throws IOException { }; } - public static void main(final String[] args) throws Exception { - if (args == null || args.length < 5) { - throw new IllegalArgumentException("S3HttpFixture expects 5 arguments [address, port, bucket, base path, access key]"); + public String getAddress() { + return "http://" + server.getAddress().getHostString() + ":" + server.getAddress().getPort(); + } + + public void stop(int delay) { + server.stop(delay); + } + + protected void before() throws Throwable { + if (enabled) { + InetSocketAddress inetSocketAddress = resolveAddress("localhost", 0); + this.server = HttpServer.create(inetSocketAddress, 0); + HttpHandler handler = createHandler(); + this.server.createContext("/", 
Objects.requireNonNull(handler)); + server.start(); + } + } + + @Override + protected void after() { + if (enabled) { + stop(0); + } + } + + private static InetSocketAddress resolveAddress(String address, int port) { + try { + return new InetSocketAddress(InetAddress.getByName(address), port); + } catch (UnknownHostException e) { + throw new RuntimeException(e); } - final S3HttpFixture fixture = new S3HttpFixture(args); - fixture.start(); } } diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEC2.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEC2.java index 05b931817fea4..0d8cf9c3e4513 100644 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEC2.java +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEC2.java @@ -15,22 +15,27 @@ import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; import java.util.Locale; -import java.util.Objects; public class S3HttpFixtureWithEC2 extends S3HttpFixtureWithSessionToken { private static final String EC2_PATH = "/latest/meta-data/iam/security-credentials/"; private static final String EC2_PROFILE = "ec2Profile"; - S3HttpFixtureWithEC2(final String[] args) throws Exception { - super(args); + public S3HttpFixtureWithEC2() { + this(true); + } + + public S3HttpFixtureWithEC2(boolean enabled) { + this(enabled, "ec2_bucket", "ec2_base_path", "ec2_access_key", "ec2_session_token"); + } + + public S3HttpFixtureWithEC2(boolean enabled, String bucket, String basePath, String accessKey, String sessionToken) { + super(enabled, bucket, basePath, accessKey, sessionToken); } @Override - protected HttpHandler createHandler(final String[] args) { - final String ec2AccessKey = Objects.requireNonNull(args[4]); - final String ec2SessionToken = Objects.requireNonNull(args[5], "session token is missing"); - final HttpHandler delegate = super.createHandler(args); + protected HttpHandler createHandler() { + final HttpHandler delegate = super.createHandler(); return exchange -> { final String path = exchange.getRequestURI().getPath(); @@ -45,7 +50,7 @@ protected HttpHandler createHandler(final String[] args) { return; } else if (path.equals(EC2_PATH + EC2_PROFILE)) { - final byte[] response = buildCredentialResponse(ec2AccessKey, ec2SessionToken).getBytes(StandardCharsets.UTF_8); + final byte[] response = buildCredentialResponse(accessKey, sessionToken).getBytes(StandardCharsets.UTF_8); exchange.getResponseHeaders().add("Content-Type", "application/json"); exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length); exchange.getResponseBody().write(response); @@ -75,14 +80,4 @@ protected static String buildCredentialResponse(final String ec2AccessKey, final "Token": "%s" }""", ec2AccessKey, ZonedDateTime.now().plusDays(1L).format(DateTimeFormatter.ISO_DATE_TIME), ec2SessionToken); } - - public static void main(final String[] args) throws Exception { - if (args == null || args.length < 6) { - throw new IllegalArgumentException( - "S3HttpFixtureWithEC2 expects 6 arguments " + "[address, port, bucket, base path, ec2 access id, ec2 session token]" - ); - } - final S3HttpFixtureWithEC2 fixture = new S3HttpFixtureWithEC2(args); - fixture.start(); - } } diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithECS.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithECS.java index 579411207cce0..ac3f635dd4899 100644 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithECS.java +++ 
b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithECS.java @@ -12,24 +12,29 @@ import org.elasticsearch.rest.RestStatus; import java.nio.charset.StandardCharsets; -import java.util.Objects; public class S3HttpFixtureWithECS extends S3HttpFixtureWithEC2 { - private S3HttpFixtureWithECS(final String[] args) throws Exception { - super(args); + public S3HttpFixtureWithECS() { + this(true); + } + + public S3HttpFixtureWithECS(boolean enabled) { + this(enabled, "ecs_bucket", "ecs_base_path", "ecs_access_key", "ecs_session_token"); + } + + public S3HttpFixtureWithECS(boolean enabled, String bucket, String basePath, String accessKey, String sessionToken) { + super(enabled, bucket, basePath, accessKey, sessionToken); } @Override - protected HttpHandler createHandler(final String[] args) { - final String ecsAccessKey = Objects.requireNonNull(args[4]); - final String ecsSessionToken = Objects.requireNonNull(args[5], "session token is missing"); - final HttpHandler delegate = super.createHandler(args); + protected HttpHandler createHandler() { + final HttpHandler delegate = super.createHandler(); return exchange -> { // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html if ("GET".equals(exchange.getRequestMethod()) && exchange.getRequestURI().getPath().equals("/ecs_credentials_endpoint")) { - final byte[] response = buildCredentialResponse(ecsAccessKey, ecsSessionToken).getBytes(StandardCharsets.UTF_8); + final byte[] response = buildCredentialResponse(accessKey, sessionToken).getBytes(StandardCharsets.UTF_8); exchange.getResponseHeaders().add("Content-Type", "application/json"); exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length); exchange.getResponseBody().write(response); @@ -39,14 +44,4 @@ protected HttpHandler createHandler(final String[] args) { delegate.handle(exchange); }; } - - public static void main(final String[] args) throws Exception { - if (args == null || args.length < 6) { - throw new IllegalArgumentException( - "S3HttpFixtureWithECS expects 6 arguments [address, port, bucket, base path, ecs access id, ecs session token]" - ); - } - final S3HttpFixtureWithECS fixture = new S3HttpFixtureWithECS(args); - fixture.start(); - } } diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSTS.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSTS.java index 26b8f17dfd76f..1846f7dffd805 100644 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSTS.java +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSTS.java @@ -18,24 +18,46 @@ import java.util.Arrays; import java.util.Locale; import java.util.Map; -import java.util.Objects; import java.util.stream.Collectors; public class S3HttpFixtureWithSTS extends S3HttpFixture { private static final String ROLE_ARN = "arn:aws:iam::123456789012:role/FederatedWebIdentityRole"; private static final String ROLE_NAME = "sts-fixture-test"; + private final String sessionToken; + private final String webIdentityToken; - private S3HttpFixtureWithSTS(final String[] args) throws Exception { - super(args); + public S3HttpFixtureWithSTS() { + this(true); + } + + public S3HttpFixtureWithSTS(boolean enabled) { + this( + enabled, + "sts_bucket", + "sts_base_path", + "sts_access_key", + "sts_session_token", + 
"Atza|IQEBLjAsAhRFiXuWpUXuRvQ9PZL3GMFcYevydwIUFAHZwXZXXXXXXXXJnrulxKDHwy87oGKPznh0D6bEQZTSCzyoCtL_8S07pLpr0zMbn6w1lfVZKNTBdDansFBmtGnIsIapjI6xKR02Yc_2bQ8LZbUXSGm6Ry6_BG7PrtLZtj_dfCTj92xNGed-CrKqjG7nPBjNIL016GGvuS5gSvPRUxWES3VYfm1wl7WTI7jn-Pcb6M-buCgHhFOzTQxod27L9CqnOLio7N3gZAGpsp6n1-AJBOCJckcyXe2c6uD0srOJeZlKUm2eTDVMf8IehDVI0r1QOnTV6KzzAI3OY87Vd_cVMQ" + ); + } + + public S3HttpFixtureWithSTS( + boolean enabled, + String bucket, + String basePath, + String accessKey, + String sessionToken, + String webIdentityToken + ) { + super(enabled, bucket, basePath, accessKey); + this.sessionToken = sessionToken; + this.webIdentityToken = webIdentityToken; } @Override - protected HttpHandler createHandler(final String[] args) { - String accessKey = Objects.requireNonNull(args[4]); - String sessionToken = Objects.requireNonNull(args[5], "session token is missing"); - String webIdentityToken = Objects.requireNonNull(args[6], "web identity token is missing"); - final HttpHandler delegate = super.createHandler(args); + protected HttpHandler createHandler() { + final HttpHandler delegate = super.createHandler(); return exchange -> { // https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html @@ -98,14 +120,4 @@ protected HttpHandler createHandler(final String[] args) { delegate.handle(exchange); }; } - - public static void main(final String[] args) throws Exception { - if (args == null || args.length < 7) { - throw new IllegalArgumentException( - "S3HttpFixtureWithSTS expects 7 arguments [address, port, bucket, base path, sts access id, sts session token, web identity token]" - ); - } - final S3HttpFixtureWithSTS fixture = new S3HttpFixtureWithSTS(args); - fixture.start(); - } } diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSessionToken.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSessionToken.java index f514ccd66b555..ce3b96c960384 100644 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSessionToken.java +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSessionToken.java @@ -11,20 +11,28 @@ import org.elasticsearch.rest.RestStatus; -import java.util.Objects; - import static fixture.s3.S3HttpHandler.sendError; public class S3HttpFixtureWithSessionToken extends S3HttpFixture { - S3HttpFixtureWithSessionToken(final String[] args) throws Exception { - super(args); + protected final String sessionToken; + + public S3HttpFixtureWithSessionToken() { + this(true); + } + + public S3HttpFixtureWithSessionToken(boolean enabled) { + this(enabled, "session_token_bucket", "session_token_base_path_integration_tests", "session_token_access_key", "session_token"); + } + + public S3HttpFixtureWithSessionToken(boolean enabled, String bucket, String basePath, String accessKey, String sessionToken) { + super(enabled, bucket, basePath, accessKey); + this.sessionToken = sessionToken; } @Override - protected HttpHandler createHandler(final String[] args) { - final String sessionToken = Objects.requireNonNull(args[5], "session token is missing"); - final HttpHandler delegate = super.createHandler(args); + protected HttpHandler createHandler() { + final HttpHandler delegate = super.createHandler(); return exchange -> { final String securityToken = exchange.getRequestHeaders().getFirst("x-amz-security-token"); if (securityToken == null) { @@ -38,14 +46,4 @@ protected HttpHandler createHandler(final String[] args) { delegate.handle(exchange); }; } - - public static void main(final String[] args) 
throws Exception { - if (args == null || args.length < 6) { - throw new IllegalArgumentException( - "S3HttpFixtureWithSessionToken expects 6 arguments [address, port, bucket, base path, access key, session token]" - ); - } - final S3HttpFixtureWithSessionToken fixture = new S3HttpFixtureWithSessionToken(args); - fixture.start(); - } } diff --git a/test/fixtures/s3-fixture/sts/Dockerfile b/test/fixtures/s3-fixture/sts/Dockerfile deleted file mode 100644 index 203ba1780286c..0000000000000 --- a/test/fixtures/s3-fixture/sts/Dockerfile +++ /dev/null @@ -1,22 +0,0 @@ -FROM openjdk:17.0.2 - -ARG fixtureClass -ARG port -ARG bucket -ARG basePath -ARG accessKey -ARG sessionToken -ARG webIdentityToken - -ENV S3_FIXTURE_CLASS=${fixtureClass} -ENV S3_FIXTURE_PORT=${port} -ENV S3_FIXTURE_BUCKET=${bucket} -ENV S3_FIXTURE_BASE_PATH=${basePath} -ENV S3_FIXTURE_ACCESS_KEY=${accessKey} -ENV S3_FIXTURE_SESSION_TOKEN=${sessionToken} -ENV S3_WEB_IDENTITY_TOKEN=${webIdentityToken} - -ENTRYPOINT exec java -classpath "/fixture/shared/*" \ - $S3_FIXTURE_CLASS 0.0.0.0 "$S3_FIXTURE_PORT" "$S3_FIXTURE_BUCKET" "$S3_FIXTURE_BASE_PATH" "$S3_FIXTURE_ACCESS_KEY" "$S3_FIXTURE_SESSION_TOKEN" "$S3_WEB_IDENTITY_TOKEN" - -EXPOSE $port diff --git a/test/framework/src/main/java/org/elasticsearch/action/support/ActionTestUtils.java b/test/framework/src/main/java/org/elasticsearch/action/support/ActionTestUtils.java index 393c326e6fcf5..187a8b6e4eab2 100644 --- a/test/framework/src/main/java/org/elasticsearch/action/support/ActionTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/action/support/ActionTestUtils.java @@ -65,6 +65,14 @@ public static v action.execute(task, request, listener); } + public static void execute( + TransportAction action, + Request request, + ActionListener listener + ) { + action.execute(request.createTask(1L, "direct", action.actionName, TaskId.EMPTY_TASK_ID, Map.of()), request, listener); + } + public static ActionListener assertNoFailureListener(CheckedConsumer consumer) { return ActionListener.wrap(consumer, ESTestCase::fail); } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/DiskUsageIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/DiskUsageIntegTestCase.java index 19eb072cb0c3f..32e6602d73fc3 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/DiskUsageIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/DiskUsageIntegTestCase.java @@ -12,7 +12,6 @@ import org.apache.lucene.tests.mockfile.FilterFileSystemProvider; import org.apache.lucene.tests.mockfile.FilterPath; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.PathUtilsForTesting; import org.elasticsearch.env.Environment; @@ -231,11 +230,5 @@ TestFileStore getTestFileStore(Path path) { return trackedPaths.get(containingPaths.iterator().next()); } - void clearTrackedPaths() throws IOException { - for (Path path : trackedPaths.keySet()) { - IOUtils.rm(path); - } - trackedPaths.clear(); - } } } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java index 138ab77035b43..e1949d78e86c2 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java @@ -43,6 +43,7 @@ import 
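Note on the fixture refactor above: both S3 fixtures now receive their configuration through typed constructor parameters instead of a positional args array parsed in main(), which is why the argument-count checks and the args-driven Dockerfile entrypoint are deleted. A minimal usage sketch, using only the constructor and default values shown in this patch (how the fixture is started is an assumption, not taken from the diff):

// Construct the fixture directly; no CLI arguments or main() required.
S3HttpFixtureWithSessionToken fixture = new S3HttpFixtureWithSessionToken(
    true,                                        // enabled
    "session_token_bucket",                      // bucket
    "session_token_base_path_integration_tests", // base path
    "session_token_access_key",                  // access key
    "session_token"                              // expected x-amz-security-token value
);
fixture.start(); // assumed entry point, inherited from the fixture base class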
org.elasticsearch.index.IndexVersions; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; import org.elasticsearch.snapshots.SnapshotsInfoService; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.gateway.TestGatewayAllocator; @@ -161,7 +162,8 @@ private static DesiredBalanceShardsAllocator createDesiredBalanceShardsAllocator new BalancedShardsAllocator(settings), queue.getThreadPool(), clusterService, - null + null, + TelemetryProvider.NOOP ) { private RoutingAllocation lastAllocation; diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java index a5c17b32173a5..611f2ab9f5749 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -68,6 +68,7 @@ import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.SeedHostsProvider; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.gateway.ClusterStateUpdaters; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.gateway.MockGatewayMetaState; @@ -952,6 +953,7 @@ public final class ClusterNode { private AckedFakeThreadPoolMasterService masterService; private DisruptableClusterApplierService clusterApplierService; private ClusterService clusterService; + private FeatureService featureService; TransportService transportService; private MasterHistoryService masterHistoryService; CoordinationDiagnosticsService coordinationDiagnosticsService; @@ -1114,6 +1116,7 @@ public RecyclerBytesStreamOutput newNetworkBytesStream() { threadPool ); clusterService = new ClusterService(settings, clusterSettings, masterService, clusterApplierService); + featureService = new FeatureService(List.of()); masterHistoryService = new MasterHistoryService(transportService, threadPool, clusterService); clusterService.setNodeConnectionsService( new NodeConnectionsService(clusterService.getSettings(), threadPool, transportService) @@ -1152,7 +1155,7 @@ public RecyclerBytesStreamOutput newNetworkBytesStream() { coordinationServices.getLeaderHeartbeatService(), coordinationServices.getPreVoteCollectorFactory(), CompatibilityVersionsUtils.staticCurrent(), - Set.of() + featureService ); coordinationDiagnosticsService = new CoordinationDiagnosticsService( clusterService, diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/CoordinationStateTestCluster.java b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/CoordinationStateTestCluster.java index 026e1b7b975e5..7ea207c12104a 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/CoordinationStateTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/CoordinationStateTestCluster.java @@ -243,7 +243,7 @@ public void runRandomly() { if (rarely() && nextTerm < maxTerm) { final long term = rarely() ? 
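Two small wiring changes above are easy to miss: DesiredBalanceShardsAllocator gains a TelemetryProvider parameter, and the test Coordinator now takes a FeatureService rather than a bare feature set. A sketch of the no-op defaults the harness passes, per the diff (the surrounding constructor arguments are elided):

// No registered feature specifications in the test harness.
FeatureService featureService = new FeatureService(List.of());
// Allocator telemetry is stubbed out with the no-op provider.
TelemetryProvider telemetry = TelemetryProvider.NOOP;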
randomLongBetween(0, maxTerm + 1) : nextTerm++; final StartJoinRequest startJoinRequest = new StartJoinRequest(randomFrom(clusterNodes).localNode, term); - broadcast(startJoinRequest.getSourceNode(), startJoinRequest); + broadcast(startJoinRequest.getMasterCandidateNode(), startJoinRequest); } else if (rarely()) { randomFrom(clusterNodes).setInitialState(initialConfiguration, initialValue); } else if (rarely() && rarely()) { diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/LinearizabilityChecker.java b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/LinearizabilityChecker.java index 223b0dc5a546b..6c43eff24be21 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/LinearizabilityChecker.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/LinearizabilityChecker.java @@ -17,6 +17,9 @@ import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; +import java.io.IOException; +import java.io.StringWriter; +import java.io.Writer; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -35,7 +38,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BooleanSupplier; -import java.util.function.Consumer; import java.util.function.Function; /** @@ -308,25 +310,38 @@ private static boolean isLinearizable(SequentialSpec spec, List history, * Return a visual representation of the history */ public static String visualize(SequentialSpec spec, History history, Function missingResponseGenerator) { + final var writer = new StringWriter(); + writeVisualisation(spec, history, missingResponseGenerator, writer); + return writer.toString(); + } + + /** + * Write a visual representation of the history to the given writer + */ + public static void writeVisualisation( + SequentialSpec spec, + History history, + Function missingResponseGenerator, + Writer writer + ) { history = history.clone(); history.complete(missingResponseGenerator); final Collection> partitions = spec.partition(history.copyEvents()); - StringBuilder builder = new StringBuilder(); - partitions.forEach(new Consumer>() { + try { int index = 0; - - @Override - public void accept(List events) { - builder.append("Partition ").append(index++).append("\n"); - builder.append(visualizePartition(events)); + for (List partition : partitions) { + writer.write("Partition "); + writer.write(Integer.toString(index++)); + writer.append('\n'); + visualizePartition(partition, writer); } - }); - - return builder.toString(); + } catch (IOException e) { + logger.error("unexpected writeVisualisation failure", e); + assert false : e; // not really doing any IO + } } - private static String visualizePartition(List events) { - StringBuilder builder = new StringBuilder(); + private static void visualizePartition(List events, Writer writer) throws IOException { Entry entry = createLinkedEntries(events).next; Map, Integer> eventToPosition = new HashMap<>(); for (Event event : events) { @@ -334,28 +349,30 @@ private static String visualizePartition(List events) { } while (entry != null) { if (entry.match != null) { - builder.append(visualizeEntry(entry, eventToPosition)).append("\n"); + visualizeEntry(entry, eventToPosition, writer); + writer.append('\n'); } entry = entry.next; } - return builder.toString(); } - private static String visualizeEntry(Entry entry, Map, Integer> eventToPosition) { + private static void 
visualizeEntry(Entry entry, Map, Integer> eventToPosition, Writer writer) + throws IOException { + String input = String.valueOf(entry.event.value); String output = String.valueOf(entry.match.event.value); int id = entry.event.id; int beginIndex = eventToPosition.get(Tuple.tuple(EventType.INVOCATION, id)); int endIndex = eventToPosition.get(Tuple.tuple(EventType.RESPONSE, id)); input = input.substring(0, Math.min(beginIndex + 25, input.length())); - return Strings.padStart(input, beginIndex + 25, ' ') - + " " - + Strings.padStart("", endIndex - beginIndex, 'X') - + " " - + output - + " (" - + entry.event.id - + ")"; + writer.write(Strings.padStart(input, beginIndex + 25, ' ')); + writer.write(" "); + writer.write(Strings.padStart("", endIndex - beginIndex, 'X')); + writer.write(" "); + writer.write(output); + writer.write(" ("); + writer.write(Integer.toString(entry.event.id)); + writer.write(")"); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/ClusterChangedEventUtils.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/ClusterChangedEventUtils.java new file mode 100644 index 0000000000000..f62c1db4ceb54 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/ClusterChangedEventUtils.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class ClusterChangedEventUtils { + public static List indicesCreated(final ClusterChangedEvent event) { + if (event.metadataChanged() == false) { + return Collections.emptyList(); + } + final ClusterState state = event.state(); + final ClusterState previousState = event.previousState(); + final List created = new ArrayList<>(); + for (Map.Entry cursor : state.metadata().indices().entrySet()) { + final String index = cursor.getKey(); + if (previousState.metadata().hasIndex(index)) { + final IndexMetadata currIndexMetadata = cursor.getValue(); + final IndexMetadata prevIndexMetadata = previousState.metadata().index(index); + if (currIndexMetadata.getIndexUUID().equals(prevIndexMetadata.getIndexUUID()) == false) { + created.add(index); + } + } else { + created.add(index); + } + } + return Collections.unmodifiableList(created); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java index 825325c00a70b..9017e88f430b5 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java @@ -113,7 +113,7 @@ public static DataStream newInstance( boolean replicated, @Nullable DataStreamLifecycle lifecycle ) { - return new DataStream(name, indices, generation, metadata, false, replicated, false, false, null, lifecycle); + return new DataStream(name, indices, generation, metadata, false, replicated, false, false, null, 
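On the LinearizabilityChecker change above: visualize(...) is kept as a convenience, but rendering now streams through a Writer instead of accumulating in a StringBuilder, so large histories need not be fully buffered. A sketch of calling the new entry point directly, assuming spec, history, and missingResponseGenerator come from an existing test:

StringWriter writer = new StringWriter();
LinearizabilityChecker.writeVisualisation(spec, history, missingResponseGenerator, writer);
String rendered = writer.toString(); // same text visualize(...) would have returned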
lifecycle, false, List.of()); } public static String getLegacyDefaultBackingIndexName( @@ -244,6 +244,11 @@ public static DataStream randomInstance(String dataStreamName, LongSupplier time if (randomBoolean()) { metadata = Map.of("key", "value"); } + List failureIndices = List.of(); + boolean failureStore = randomBoolean(); + if (failureStore) { + failureIndices = randomIndexInstances(); + } return new DataStream( dataStreamName, @@ -256,7 +261,9 @@ public static DataStream randomInstance(String dataStreamName, LongSupplier time timeProvider, randomBoolean(), randomBoolean() ? IndexMode.STANDARD : null, // IndexMode.TIME_SERIES triggers validation that many unit tests doesn't pass - randomBoolean() ? DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build() : null + randomBoolean() ? DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build() : null, + failureStore, + failureIndices ); } @@ -329,7 +336,10 @@ public static void getClusterStateWithDataStreams( ) { builder.put( "template_1", - new ComposableIndexTemplate(List.of("*"), null, null, null, null, null, new ComposableIndexTemplate.DataStreamTemplate()) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("*")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); List allIndices = new ArrayList<>(); diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java index 8306fded6c29d..89c8546d6b7d2 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java @@ -23,6 +23,7 @@ import static org.elasticsearch.test.ESTestCase.randomBoolean; import static org.elasticsearch.test.ESTestCase.randomFrom; import static org.elasticsearch.test.ESTestCase.randomIntBetween; +import static org.junit.Assert.assertNotEquals; /** * A helper that allows to create shard routing instances within tests, while not requiring to expose @@ -35,6 +36,7 @@ public static ShardRouting newShardRouting(String index, int shardId, String cur } public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, boolean primary, ShardRoutingState state) { + assertNotEquals(ShardRoutingState.RELOCATING, state); return new ShardRouting( shardId, currentNodeId, diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldTypeTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldTypeTestCase.java index 56ad35bee83d5..1e6401d79d3fc 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldTypeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldTypeTestCase.java @@ -383,11 +383,13 @@ public final void testCacheable() throws IOException { } } - protected final List blockLoaderReadValues(DirectoryReader reader, MappedFieldType fieldType) throws IOException { + protected final List blockLoaderReadValuesFromColumnAtATimeReader(DirectoryReader reader, MappedFieldType fieldType) + throws IOException { BlockLoader loader = fieldType.blockLoader(blContext()); List all = new ArrayList<>(); for (LeafReaderContext ctx : reader.leaves()) { - TestBlock block = (TestBlock) loader.reader(ctx).readValues(TestBlock.FACTORY, TestBlock.docs(ctx)); + TestBlock block = (TestBlock) 
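The template setup above also moves off the long positional ComposableIndexTemplate constructor. The builder form used in this patch reads:

ComposableIndexTemplate template = ComposableIndexTemplate.builder()
    .indexPatterns(List.of("*"))
    .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate())
    .build();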
loader.columnAtATimeReader(ctx) + .read(TestBlock.factory(ctx.reader().numDocs()), TestBlock.docs(ctx)); for (int i = 0; i < block.size(); i++) { all.add(block.get(i)); } @@ -395,15 +397,17 @@ protected final List blockLoaderReadValues(DirectoryReader reader, Mappe return all; } - protected final List blockLoaderReadValuesFromSingleDoc(DirectoryReader reader, MappedFieldType fieldType) throws IOException { + protected final List blockLoaderReadValuesFromRowStrideReader(DirectoryReader reader, MappedFieldType fieldType) + throws IOException { BlockLoader loader = fieldType.blockLoader(blContext()); List all = new ArrayList<>(); for (LeafReaderContext ctx : reader.leaves()) { - BlockDocValuesReader blockReader = loader.reader(ctx); - TestBlock block = (TestBlock) blockReader.builder(TestBlock.FACTORY, ctx.reader().numDocs()); + BlockLoader.RowStrideReader blockReader = loader.rowStrideReader(ctx); + BlockLoader.Builder builder = loader.builder(TestBlock.factory(ctx.reader().numDocs()), ctx.reader().numDocs()); for (int i = 0; i < ctx.reader().numDocs(); i++) { - blockReader.readValuesFromSingleDoc(i, block); + blockReader.read(i, null, builder); } + TestBlock block = (TestBlock) builder.build(); for (int i = 0; i < block.size(); i++) { all.add(block.get(i)); } @@ -427,6 +431,11 @@ public SearchLookup lookup() { public Set sourcePaths(String name) { throw new UnsupportedOperationException(); } + + @Override + public String parentField(String field) { + throw new UnsupportedOperationException(); + } }; } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java index e34072fbf1668..44e28132beec0 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java @@ -31,7 +31,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.CheckedConsumer; -import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -1240,26 +1239,32 @@ public final void testSyntheticEmptyListNoDocValuesLoader() throws IOException { assertNoDocValueLoader(b -> b.startArray("field").endArray()); } - public final void testBlockLoaderReadValues() throws IOException { - testBlockLoader(blockReader -> (TestBlock) blockReader.readValues(TestBlock.FACTORY, TestBlock.docs(0))); + public final void testBlockLoaderFromColumnReader() throws IOException { + testBlockLoader(true); } - public final void testBlockLoaderReadValuesFromSingleDoc() throws IOException { - testBlockLoader(blockReader -> { - TestBlock block = (TestBlock) blockReader.builder(TestBlock.FACTORY, 1); - blockReader.readValuesFromSingleDoc(0, block); - return block; - }); + public final void testBlockLoaderFromRowStrideReader() throws IOException { + testBlockLoader(false); + } + + protected boolean supportsColumnAtATimeReader(MapperService mapper, MappedFieldType ft) { + return ft.hasDocValues(); } - private void testBlockLoader(CheckedFunction body) throws IOException { + private void testBlockLoader(boolean columnReader) throws IOException { SyntheticSourceExample example = syntheticSourceSupport(false).example(5); - MapperService mapper = createMapperService(syntheticSourceMapping(b -> { + MapperService mapper = 
createMapperService(syntheticSourceMapping(b -> { // TODO randomly use syntheticSourceMapping or normal b.startObject("field"); example.mapping().accept(b); b.endObject(); })); - BlockLoader loader = mapper.fieldType("field").blockLoader(new MappedFieldType.BlockLoaderContext() { + testBlockLoader(columnReader, example, mapper, "field"); + } + + protected final void testBlockLoader(boolean columnReader, SyntheticSourceExample example, MapperService mapper, String loaderFieldName) + throws IOException { + SearchLookup searchLookup = new SearchLookup(mapper.mappingLookup().fieldTypesLookup()::get, null, null); + BlockLoader loader = mapper.fieldType(loaderFieldName).blockLoader(new MappedFieldType.BlockLoaderContext() { @Override public String indexName() { throw new UnsupportedOperationException(); @@ -1267,13 +1272,18 @@ public String indexName() { @Override public SearchLookup lookup() { - throw new UnsupportedOperationException(); + return searchLookup; } @Override public Set sourcePaths(String name) { return mapper.mappingLookup().sourcePaths(name); } + + @Override + public String parentField(String field) { + return mapper.mappingLookup().parentField(field); + } }); Function valuesConvert = loadBlockExpected(); if (valuesConvert == null) { @@ -1289,7 +1299,26 @@ public Set sourcePaths(String name) { iw.addDocument(doc); iw.close(); try (DirectoryReader reader = DirectoryReader.open(directory)) { - TestBlock block = body.apply(loader.reader(reader.leaves().get(0))); + LeafReaderContext ctx = reader.leaves().get(0); + TestBlock block; + if (columnReader) { + if (supportsColumnAtATimeReader(mapper, mapper.fieldType(loaderFieldName))) { + block = (TestBlock) loader.columnAtATimeReader(ctx) + .read(TestBlock.factory(ctx.reader().numDocs()), TestBlock.docs(0)); + } else { + assertNull(loader.columnAtATimeReader(ctx)); + return; + } + } else { + BlockLoaderStoredFieldsFromLeafLoader storedFieldsLoader = new BlockLoaderStoredFieldsFromLeafLoader( + StoredFieldLoader.fromSpec(loader.rowStrideStoredFieldSpec()).getLoader(ctx, null), + loader.rowStrideStoredFieldSpec().requiresSource() ? SourceLoader.FROM_STORED_SOURCE.leaf(ctx.reader(), null) : null + ); + storedFieldsLoader.advanceTo(0); + BlockLoader.Builder builder = loader.builder(TestBlock.factory(ctx.reader().numDocs()), 1); + loader.rowStrideReader(ctx).read(0, storedFieldsLoader, builder); + block = (TestBlock) builder.build(); + } Object inBlock = block.get(0); if (inBlock != null) { if (inBlock instanceof List l) { @@ -1298,6 +1327,7 @@ public Set sourcePaths(String name) { inBlock = valuesConvert.apply(inBlock); } } + // If we're reading from _source we expect the order to be preserved, otherwise it's jumbled. Object expected = loader instanceof BlockSourceReader ? example.expectedParsed() : example.expectedParsedBlockLoader(); if (List.of().equals(expected)) { assertThat(inBlock, nullValue()); @@ -1319,7 +1349,7 @@ public Set sourcePaths(String name) { } /** - * Matcher for {@link #testBlockLoaderReadValues} and {@link #testBlockLoaderReadValuesFromSingleDoc}. + * Matcher for {@link #testBlockLoaderFromColumnReader} and {@link #testBlockLoaderFromRowStrideReader}. 
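The renamed tests above make the two BlockLoader read paths explicit: a column-at-a-time reader fills a whole block from a list of doc ids in one call, while a row-stride reader appends one document at a time into a builder. A condensed sketch of both paths, using only calls that appear in this patch:

// Column-at-a-time: a single read() produces the whole block.
TestBlock columnBlock = (TestBlock) loader.columnAtATimeReader(ctx)
    .read(TestBlock.factory(ctx.reader().numDocs()), TestBlock.docs(ctx));

// Row-stride: drive the reader document by document, then build.
BlockLoader.Builder builder = loader.builder(TestBlock.factory(ctx.reader().numDocs()), ctx.reader().numDocs());
BlockLoader.RowStrideReader rowReader = loader.rowStrideReader(ctx);
for (int i = 0; i < ctx.reader().numDocs(); i++) {
    rowReader.read(i, null, builder); // null: no stored-fields loader needed for doc values
}
TestBlock rowBlock = (TestBlock) builder.build();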
*/ protected Matcher blockItemMatcher(Object expected) { return equalTo(expected); diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/TestBlock.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/TestBlock.java index 298acb9519532..928ae8f4f45f0 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/TestBlock.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/TestBlock.java @@ -11,7 +11,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.core.Nullable; import java.io.IOException; import java.io.UncheckedIOException; @@ -21,76 +20,134 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; -public class TestBlock - implements - BlockLoader.BooleanBuilder, - BlockLoader.BytesRefBuilder, - BlockLoader.DoubleBuilder, - BlockLoader.IntBuilder, - BlockLoader.LongBuilder, - BlockLoader.SingletonOrdinalsBuilder, - BlockLoader.Block { - public static BlockLoader.BuilderFactory FACTORY = new BlockLoader.BuilderFactory() { - @Override - public BlockLoader.BooleanBuilder booleansFromDocValues(int expectedCount) { - return new TestBlock(null); - } +public class TestBlock implements BlockLoader.Block { + public static BlockLoader.BlockFactory factory(int pageSize) { + return new BlockLoader.BlockFactory() { + @Override + public BlockLoader.BooleanBuilder booleansFromDocValues(int expectedCount) { + return booleans(expectedCount); + } - @Override - public BlockLoader.BooleanBuilder booleans(int expectedCount) { - return new TestBlock(null); - } + @Override + public BlockLoader.BooleanBuilder booleans(int expectedCount) { + class BooleansBuilder extends TestBlock.Builder implements BlockLoader.BooleanBuilder { + @Override + public BooleansBuilder appendBoolean(boolean value) { + add(value); + return this; + } + } + return new BooleansBuilder(); + } - @Override - public BlockLoader.BytesRefBuilder bytesRefsFromDocValues(int expectedCount) { - return new TestBlock(null); - } + @Override + public BlockLoader.BytesRefBuilder bytesRefsFromDocValues(int expectedCount) { + return bytesRefs(expectedCount); + } - @Override - public BlockLoader.BytesRefBuilder bytesRefs(int expectedCount) { - return new TestBlock(null); - } + @Override + public BlockLoader.BytesRefBuilder bytesRefs(int expectedCount) { + class BytesRefsBuilder extends TestBlock.Builder implements BlockLoader.BytesRefBuilder { + @Override + public BytesRefsBuilder appendBytesRef(BytesRef value) { + add(BytesRef.deepCopyOf(value)); + return this; + } + } + return new BytesRefsBuilder(); + } - @Override - public BlockLoader.DoubleBuilder doublesFromDocValues(int expectedCount) { - return new TestBlock(null); - } + @Override + public BlockLoader.DoubleBuilder doublesFromDocValues(int expectedCount) { + return doubles(expectedCount); + } - @Override - public BlockLoader.DoubleBuilder doubles(int expectedCount) { - return new TestBlock(null); - } + @Override + public BlockLoader.DoubleBuilder doubles(int expectedCount) { + class DoublesBuilder extends TestBlock.Builder implements BlockLoader.DoubleBuilder { + @Override + public DoublesBuilder appendDouble(double value) { + add(value); + return this; + } + } + return new DoublesBuilder(); + } - @Override - public BlockLoader.IntBuilder intsFromDocValues(int expectedCount) { - return new TestBlock(null); - } + @Override + public BlockLoader.IntBuilder intsFromDocValues(int 
expectedCount) { + return ints(expectedCount); + } - @Override - public BlockLoader.IntBuilder ints(int expectedCount) { - return new TestBlock(null); - } + @Override + public BlockLoader.IntBuilder ints(int expectedCount) { + class IntsBuilder extends TestBlock.Builder implements BlockLoader.IntBuilder { + @Override + public IntsBuilder appendInt(int value) { + add(value); + return this; + } + } + return new IntsBuilder(); + } - @Override - public BlockLoader.LongBuilder longsFromDocValues(int expectedCount) { - return new TestBlock(null); - } + @Override + public BlockLoader.LongBuilder longsFromDocValues(int expectedCount) { + return longs(expectedCount); + } - @Override - public BlockLoader.LongBuilder longs(int expectedCount) { - return new TestBlock(null); - } + @Override + public BlockLoader.LongBuilder longs(int expectedCount) { + class LongsBuilder extends TestBlock.Builder implements BlockLoader.LongBuilder { + @Override + public LongsBuilder appendLong(long value) { + add(value); + return this; + } + } + return new LongsBuilder(); + } - @Override - public BlockLoader.Builder nulls(int expectedCount) { - return new TestBlock(null); - } + @Override + public BlockLoader.Builder nulls(int expectedCount) { + return longs(expectedCount); + } - @Override - public BlockLoader.SingletonOrdinalsBuilder singletonOrdinalsBuilder(SortedDocValues ordinals, int count) { - return new TestBlock(ordinals); - } - }; + @Override + public BlockLoader.Block constantNulls() { + BlockLoader.LongBuilder builder = longs(pageSize); + for (int i = 0; i < pageSize; i++) { + builder.appendNull(); + } + return builder.build(); + } + + @Override + public BlockLoader.Block constantBytes(BytesRef value) { + BlockLoader.BytesRefBuilder builder = bytesRefs(pageSize); + for (int i = 0; i < pageSize; i++) { + builder.appendBytesRef(value); + } + return builder.build(); + } + + @Override + public BlockLoader.SingletonOrdinalsBuilder singletonOrdinalsBuilder(SortedDocValues ordinals, int count) { + class SingletonOrdsBuilder extends TestBlock.Builder implements BlockLoader.SingletonOrdinalsBuilder { + @Override + public SingletonOrdsBuilder appendOrd(int value) { + try { + add(ordinals.lookupOrd(value)); + return this; + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + } + return new SingletonOrdsBuilder(); + } + }; + } public static final BlockLoader.Docs docs(int... 
docs) { return new BlockLoader.Docs() { @@ -120,13 +177,10 @@ public int get(int i) { }; } - private final SortedDocValues sortedDocValues; - private final List values = new ArrayList<>(); + private final List values; - private List currentPosition = null; - - private TestBlock(@Nullable SortedDocValues sortedDocValues) { - this.sortedDocValues = sortedDocValues; + private TestBlock(List values) { + this.values = values; } public Object get(int i) { @@ -138,73 +192,49 @@ public int size() { } @Override - public TestBlock appendNull() { - assertNull(currentPosition); - values.add(null); - return this; - } - - @Override - public TestBlock beginPositionEntry() { - assertNull(currentPosition); - currentPosition = new ArrayList<>(); - values.add(currentPosition); - return this; - } - - @Override - public TestBlock endPositionEntry() { - assertNotNull(currentPosition); - currentPosition = null; - return this; + public void close() { + // TODO assert that we close the test blocks } - @Override - public TestBlock appendBoolean(boolean value) { - return add(value); - } + private abstract static class Builder implements BlockLoader.Builder { + private final List values = new ArrayList<>(); - @Override - public TestBlock appendBytesRef(BytesRef value) { - return add(BytesRef.deepCopyOf(value)); - } + private List currentPosition = null; - @Override - public TestBlock appendDouble(double value) { - return add(value); - } - - @Override - public TestBlock appendInt(int value) { - return add(value); - } + @Override + public Builder appendNull() { + assertNull(currentPosition); + values.add(null); + return this; + } - @Override - public TestBlock appendLong(long value) { - return add(value); - } + @Override + public Builder beginPositionEntry() { + assertNull(currentPosition); + currentPosition = new ArrayList<>(); + values.add(currentPosition); + return this; + } - @Override - public TestBlock appendOrd(int value) { - try { - return add(sortedDocValues.lookupOrd(value)); - } catch (IOException e) { - throw new UncheckedIOException(e); + @Override + public Builder endPositionEntry() { + assertNotNull(currentPosition); + currentPosition = null; + return this; } - } - @Override - public TestBlock build() { - return this; - } + protected void add(Object value) { + (currentPosition == null ? values : currentPosition).add(value); + } - private TestBlock add(Object value) { - (currentPosition == null ? 
values : currentPosition).add(value); - return this; - } + @Override + public TestBlock build() { + return new TestBlock(values); + } - @Override - public void close() { - // TODO assert that we close the test blocks + @Override + public void close() { + // TODO assert that we close the test block builders + } } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index beaa800b72588..fb222f67eef69 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -57,6 +57,7 @@ import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.indices.breaker.CircuitBreakerMetrics; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.indices.recovery.AsyncRecoveryTarget; @@ -512,6 +513,7 @@ protected IndexShard newShard( final Engine.Warmer warmer = createTestWarmer(indexSettings); ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); CircuitBreakerService breakerService = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, nodeSettings, Collections.emptyList(), clusterSettings @@ -1064,7 +1066,7 @@ protected void flushShard(IndexShard shard, boolean force) { } public static boolean recoverFromStore(IndexShard newShard) { - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); newShard.recoverFromStore(future); return future.actionGet(); } @@ -1083,7 +1085,7 @@ protected void recoverShardFromSnapshot(final IndexShard shard, final Snapshot s ); final ShardRouting shardRouting = newShardRouting(shardId, node.getId(), true, ShardRoutingState.INITIALIZING, recoverySource); shard.markAsRecovering("from snapshot", new RecoveryState(shardRouting, node, null)); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); repository.restoreShard(shard.store(), snapshot.getSnapshotId(), indexId, shard.shardId(), shard.recoveryState(), future); future.actionGet(); } @@ -1102,7 +1104,7 @@ protected ShardGeneration snapshotShard(final IndexShard shard, final Snapshot s .shardGenerations() .getShardGen(indexId, shard.shardId().getId()) ); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); final ShardGeneration shardGen; try (Engine.IndexCommitRef indexCommitRef = shard.acquireLastIndexCommit(true)) { repository.snapshotShard( diff --git a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java index 6fc6b349fc989..b12bcd8b55880 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java @@ -122,14 +122,14 @@ protected void checkTransientErrorsDuringRecoveryAreRetried(String recoveryActio // is a mix of file chunks and translog ops int 
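With the TestBlock rewrite above, the block itself becomes an immutable list of values and every append path goes through a small Builder subclass handed out by the factory. A sketch of building a block with one single-valued and one multi-valued position, assuming the factory from this patch:

BlockLoader.BlockFactory factory = TestBlock.factory(10); // page size, used by constantNulls/constantBytes
BlockLoader.LongBuilder longs = factory.longs(2);
longs.appendLong(1L);          // position 0: single value
longs.beginPositionEntry();    // position 1: multi-valued
longs.appendLong(2L);
longs.appendLong(3L);
longs.endPositionEntry();
TestBlock block = (TestBlock) longs.build(); // block.size() == 2; block.get(1) is a List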
threeFourths = (int) (numDocs * 0.75); for (int i = 0; i < threeFourths; i++) { - requests.add(client().prepareIndex(indexName).setSource("{}", XContentType.JSON)); + requests.add(prepareIndex(indexName).setSource("{}", XContentType.JSON)); } indexRandom(true, requests); flush(indexName); requests.clear(); for (int i = threeFourths; i < numDocs; i++) { - requests.add(client().prepareIndex(indexName).setSource("{}", XContentType.JSON)); + requests.add(prepareIndex(indexName).setSource("{}", XContentType.JSON)); } indexRandom(true, requests); ensureSearchable(indexName); @@ -225,7 +225,7 @@ public void checkDisconnectsWhileRecovering(String recoveryActionToBlock) throws List requests = new ArrayList<>(); int numDocs = scaledRandomIntBetween(25, 250); for (int i = 0; i < numDocs; i++) { - requests.add(client().prepareIndex(indexName).setSource("{}", XContentType.JSON)); + requests.add(prepareIndex(indexName).setSource("{}", XContentType.JSON)); } indexRandom(true, requests); ensureSearchable(indexName); @@ -326,7 +326,7 @@ public void checkDisconnectsDuringRecovery(boolean useSnapshotBasedRecoveries) t List requests = new ArrayList<>(); int numDocs = scaledRandomIntBetween(25, 250); for (int i = 0; i < numDocs; i++) { - requests.add(client().prepareIndex(indexName).setSource("{}", XContentType.JSON)); + requests.add(prepareIndex(indexName).setSource("{}", XContentType.JSON)); } indexRandom(true, requests); ensureSearchable(indexName); diff --git a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java index 60ddb4d2eeb5c..ef29f9fca4f93 100644 --- a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java +++ b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java @@ -54,7 +54,6 @@ import java.nio.file.Path; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.Map; import java.util.function.Function; import java.util.function.LongSupplier; @@ -191,7 +190,7 @@ protected TransportService newTransportService( interceptor, localNodeFactory, clusterSettings, - new HashSet<>(taskManager.getTaskHeaders()) + taskManager.getTaskHeaders() ); } } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java index c0377f8ef1b9a..15f33131fa114 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java @@ -14,10 +14,12 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.blobstore.support.BlobMetadata; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Streams; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; import org.elasticsearch.snapshots.SnapshotState; @@ -25,13 +27,17 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.ByteArrayInputStream; +import 
java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.concurrent.Executor; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; @@ -68,10 +74,8 @@ public void tearDown() throws Exception { private void deleteAndAssertEmpty(BlobPath path) { final BlobStoreRepository repo = getRepository(); - final PlainActionFuture future = PlainActionFuture.newFuture(); - repo.threadPool() - .generic() - .execute(ActionRunnable.run(future, () -> repo.blobStore().blobContainer(path).delete(OperationPurpose.SNAPSHOT))); + final PlainActionFuture future = new PlainActionFuture<>(); + repo.threadPool().generic().execute(ActionRunnable.run(future, () -> repo.blobStore().blobContainer(path).delete(randomPurpose()))); future.actionGet(); final BlobPath parent = path.parent(); if (parent == null) { @@ -89,9 +93,9 @@ public void testCreateSnapshot() { logger.info("--> indexing some data"); for (int i = 0; i < 100; i++) { - client().prepareIndex("test-idx-1").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); - client().prepareIndex("test-idx-2").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); - client().prepareIndex("test-idx-3").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); + prepareIndex("test-idx-1").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); + prepareIndex("test-idx-2").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); + prepareIndex("test-idx-3").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); } client().admin().indices().prepareRefresh().get(); @@ -118,35 +122,23 @@ public void testCreateSnapshot() { public void testListChildren() throws Exception { final BlobStoreRepository repo = getRepository(); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); final Executor genericExec = repo.threadPool().generic(); final int testBlobLen = randomIntBetween(1, 100); genericExec.execute(ActionRunnable.run(future, () -> { final BlobStore blobStore = repo.blobStore(); blobStore.blobContainer(repo.basePath().add("foo")) .writeBlob( - OperationPurpose.SNAPSHOT, + randomPurpose(), "nested-blob", new ByteArrayInputStream(randomByteArrayOfLength(testBlobLen)), testBlobLen, false ); blobStore.blobContainer(repo.basePath().add("foo").add("nested")) - .writeBlob( - OperationPurpose.SNAPSHOT, - "bar", - new ByteArrayInputStream(randomByteArrayOfLength(testBlobLen)), - testBlobLen, - false - ); + .writeBlob(randomPurpose(), "bar", new ByteArrayInputStream(randomByteArrayOfLength(testBlobLen)), testBlobLen, false); blobStore.blobContainer(repo.basePath().add("foo").add("nested2")) - .writeBlob( - OperationPurpose.SNAPSHOT, - "blub", - new ByteArrayInputStream(randomByteArrayOfLength(testBlobLen)), - testBlobLen, - false - ); + .writeBlob(randomPurpose(), "blub", new ByteArrayInputStream(randomByteArrayOfLength(testBlobLen)), testBlobLen, false); })); future.actionGet(); assertChildren(repo.basePath(), Collections.singleton("foo")); @@ -174,9 +166,9 @@ public void testCleanup() throws Exception { logger.info("--> indexing some data"); for (int i = 0; i < 100; i++) { - 
client().prepareIndex("test-idx-1").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); - client().prepareIndex("test-idx-2").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); - client().prepareIndex("test-idx-3").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); + prepareIndex("test-idx-1").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); + prepareIndex("test-idx-2").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); + prepareIndex("test-idx-3").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); } client().admin().indices().prepareRefresh().get(); @@ -218,31 +210,83 @@ public void testCleanup() throws Exception { assertCleanupResponse(response, 3L, 1L); } + public void testIndexLatest() throws Exception { + // This test verifies that every completed snapshot operation updates a blob called literally 'index.latest' (by default at least), + // which is important because some external systems use the freshness of this specific blob as an indicator of whether a repository + // is in use. Most notably, ESS checks this blob as an extra layer of protection against a bug in the delete-old-repositories + // process incorrectly deleting repositories that have seen recent writes. It's possible that some future development might change + // the meaning of this blob, and that's ok, but we must continue to update it to keep those external systems working. + + createIndex("test-idx-1"); + for (int i = 0; i < 100; i++) { + client().prepareIndex("test-idx-1").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); + } + + final var repository = getRepository(); + final var blobContents = new HashSet(); + + final var createSnapshot1Response = clusterAdmin().prepareCreateSnapshot(TEST_REPO_NAME, randomIdentifier()) + .setWaitForCompletion(true) + .get(); + assertTrue(blobContents.add(readIndexLatest(repository))); + + clusterAdmin().prepareGetSnapshots(TEST_REPO_NAME).get(); + assertFalse(blobContents.add(readIndexLatest(repository))); + + final var createSnapshot2Response = clusterAdmin().prepareCreateSnapshot(TEST_REPO_NAME, randomIdentifier()) + .setWaitForCompletion(true) + .get(); + assertTrue(blobContents.add(readIndexLatest(repository))); + + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REPO_NAME, createSnapshot1Response.getSnapshotInfo().snapshotId().getName())); + assertTrue(blobContents.add(readIndexLatest(repository))); + + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REPO_NAME, createSnapshot2Response.getSnapshotInfo().snapshotId().getName())); + assertTrue(blobContents.add(readIndexLatest(repository))); + } + + private static BytesReference readIndexLatest(BlobStoreRepository repository) throws IOException { + try (var baos = new BytesStreamOutput()) { + Streams.copy( + repository.blobStore() + .blobContainer(repository.basePath()) + .readBlob( + randomPurpose(), + // Deliberately not using BlobStoreRepository#INDEX_LATEST_BLOB here, it's important for external systems that a + // blob with literally this name is updated on each write: + "index.latest" + ), + baos + ); + return baos.bytes(); + } + } + protected void assertCleanupResponse(CleanupRepositoryResponse response, long bytes, long blobs) { assertThat(response.result().blobs(), equalTo(1L + 2L)); assertThat(response.result().bytes(), equalTo(3L + 2 * 3L)); } private static void createDanglingIndex(final BlobStoreRepository repo, final Executor genericExec) throws Exception { - final PlainActionFuture future = PlainActionFuture.newFuture(); + 
final PlainActionFuture future = new PlainActionFuture<>(); genericExec.execute(ActionRunnable.run(future, () -> { final BlobStore blobStore = repo.blobStore(); blobStore.blobContainer(repo.basePath().add("indices").add("foo")) - .writeBlob(OperationPurpose.SNAPSHOT, "bar", new ByteArrayInputStream(new byte[3]), 3, false); + .writeBlob(randomPurpose(), "bar", new ByteArrayInputStream(new byte[3]), 3, false); for (String prefix : Arrays.asList("snap-", "meta-")) { blobStore.blobContainer(repo.basePath()) - .writeBlob(OperationPurpose.SNAPSHOT, prefix + "foo.dat", new ByteArrayInputStream(new byte[3]), 3, false); + .writeBlob(randomPurpose(), prefix + "foo.dat", new ByteArrayInputStream(new byte[3]), 3, false); } })); future.get(); - final PlainActionFuture corruptionFuture = PlainActionFuture.newFuture(); + final PlainActionFuture corruptionFuture = new PlainActionFuture<>(); genericExec.execute(ActionRunnable.supply(corruptionFuture, () -> { final BlobStore blobStore = repo.blobStore(); - return blobStore.blobContainer(repo.basePath().add("indices")).children(OperationPurpose.SNAPSHOT).containsKey("foo") - && blobStore.blobContainer(repo.basePath().add("indices").add("foo")).blobExists(OperationPurpose.SNAPSHOT, "bar") - && blobStore.blobContainer(repo.basePath()).blobExists(OperationPurpose.SNAPSHOT, "meta-foo.dat") - && blobStore.blobContainer(repo.basePath()).blobExists(OperationPurpose.SNAPSHOT, "snap-foo.dat"); + return blobStore.blobContainer(repo.basePath().add("indices")).children(randomPurpose()).containsKey("foo") + && blobStore.blobContainer(repo.basePath().add("indices").add("foo")).blobExists(randomPurpose(), "bar") + && blobStore.blobContainer(repo.basePath()).blobExists(randomPurpose(), "meta-foo.dat") + && blobStore.blobContainer(repo.basePath()).blobExists(randomPurpose(), "snap-foo.dat"); })); assertTrue(corruptionFuture.get()); } @@ -258,13 +302,11 @@ private void assertChildren(BlobPath path, Collection children) { } private Set listChildren(BlobPath path) { - final PlainActionFuture> future = PlainActionFuture.newFuture(); + final PlainActionFuture> future = new PlainActionFuture<>(); final BlobStoreRepository repository = getRepository(); repository.threadPool() .generic() - .execute( - ActionRunnable.supply(future, () -> repository.blobStore().blobContainer(path).children(OperationPurpose.SNAPSHOT).keySet()) - ); + .execute(ActionRunnable.supply(future, () -> repository.blobStore().blobContainer(path).children(randomPurpose()).keySet())); return future.actionGet(); } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java index e23b26c73a811..4110472e8ef76 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java @@ -14,7 +14,6 @@ import org.apache.http.ConnectionClosedException; import org.apache.http.HttpStatus; import org.elasticsearch.common.blobstore.BlobContainer; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.unit.ByteSizeValue; @@ -42,6 +41,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import static 
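A mechanical migration repeated throughout these test files: the PlainActionFuture.newFuture() static factory is replaced by a direct constructor call. The two forms are equivalent; only the spelling changes (the type parameter here is illustrative, since the diff lines render without generics):

// Before: final PlainActionFuture<Void> future = PlainActionFuture.newFuture();
final PlainActionFuture<Void> future = new PlainActionFuture<>();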
org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; @@ -94,9 +94,9 @@ public void testReadNonexistentBlobThrowsNoSuchFileException() { final int length = randomIntBetween(1, Math.toIntExact(Math.min(Integer.MAX_VALUE, MAX_RANGE_VAL - position))); final Exception exception = expectThrows(NoSuchFileException.class, () -> { if (randomBoolean()) { - Streams.readFully(blobContainer.readBlob(OperationPurpose.SNAPSHOT, "read_nonexistent_blob")); + Streams.readFully(blobContainer.readBlob(randomPurpose(), "read_nonexistent_blob")); } else { - Streams.readFully(blobContainer.readBlob(OperationPurpose.SNAPSHOT, "read_nonexistent_blob", 0, 1)); + Streams.readFully(blobContainer.readBlob(randomPurpose(), "read_nonexistent_blob", 0, 1)); } }); final String fullBlobPath = blobContainer.path().buildAsString() + "read_nonexistent_blob"; @@ -104,7 +104,7 @@ public void testReadNonexistentBlobThrowsNoSuchFileException() { assertThat( expectThrows( NoSuchFileException.class, - () -> Streams.readFully(blobContainer.readBlob(OperationPurpose.SNAPSHOT, "read_nonexistent_blob", position, length)) + () -> Streams.readFully(blobContainer.readBlob(randomPurpose(), "read_nonexistent_blob", position, length)) ).getMessage().toLowerCase(Locale.ROOT), containsString("blob object [" + fullBlobPath + "] not found") ); @@ -146,7 +146,7 @@ public void testReadBlobWithRetries() throws Exception { } }); - try (InputStream inputStream = blobContainer.readBlob(OperationPurpose.SNAPSHOT, "read_blob_max_retries")) { + try (InputStream inputStream = blobContainer.readBlob(randomPurpose(), "read_blob_max_retries")) { final int readLimit; final InputStream wrappedStream; if (randomBoolean()) { @@ -212,7 +212,7 @@ public void testReadRangeBlobWithRetries() throws Exception { final int position = randomIntBetween(0, bytes.length - 1); final int length = randomIntBetween(0, randomBoolean() ? bytes.length : Integer.MAX_VALUE); - try (InputStream inputStream = blobContainer.readBlob(OperationPurpose.SNAPSHOT, "read_range_blob_max_retries", position, length)) { + try (InputStream inputStream = blobContainer.readBlob(randomPurpose(), "read_range_blob_max_retries", position, length)) { final int readLimit; final InputStream wrappedStream; if (randomBoolean()) { @@ -252,7 +252,7 @@ public void testReadBlobWithReadTimeouts() { Exception exception = expectThrows( unresponsiveExceptionType(), - () -> Streams.readFully(blobContainer.readBlob(OperationPurpose.SNAPSHOT, "read_blob_unresponsive")) + () -> Streams.readFully(blobContainer.readBlob(randomPurpose(), "read_blob_unresponsive")) ); assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("read timed out")); assertThat(exception.getCause(), instanceOf(SocketTimeoutException.class)); @@ -269,8 +269,8 @@ public void testReadBlobWithReadTimeouts() { exception = expectThrows(Exception.class, () -> { try ( InputStream stream = randomBoolean() - ? blobContainer.readBlob(OperationPurpose.SNAPSHOT, "read_blob_incomplete") - : blobContainer.readBlob(OperationPurpose.SNAPSHOT, "read_blob_incomplete", position, length) + ? 
blobContainer.readBlob(randomPurpose(), "read_blob_incomplete") + : blobContainer.readBlob(randomPurpose(), "read_blob_incomplete", position, length) ) { Streams.readFully(stream); } @@ -298,9 +298,9 @@ public void testReadBlobWithNoHttpResponse() { Exception exception = expectThrows(unresponsiveExceptionType(), () -> { if (randomBoolean()) { - Streams.readFully(blobContainer.readBlob(OperationPurpose.SNAPSHOT, "read_blob_no_response")); + Streams.readFully(blobContainer.readBlob(randomPurpose(), "read_blob_no_response")); } else { - Streams.readFully(blobContainer.readBlob(OperationPurpose.SNAPSHOT, "read_blob_no_response", 0, 1)); + Streams.readFully(blobContainer.readBlob(randomPurpose(), "read_blob_no_response", 0, 1)); } }); assertThat( @@ -323,8 +323,8 @@ public void testReadBlobWithPrematureConnectionClose() { final Exception exception = expectThrows(Exception.class, () -> { try ( InputStream stream = randomBoolean() - ? blobContainer.readBlob(OperationPurpose.SNAPSHOT, "read_blob_incomplete", 0, 1) - : blobContainer.readBlob(OperationPurpose.SNAPSHOT, "read_blob_incomplete") + ? blobContainer.readBlob(randomPurpose(), "read_blob_incomplete", 0, 1) + : blobContainer.readBlob(randomPurpose(), "read_blob_incomplete") ) { Streams.readFully(stream); } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java index cbc6b58cfdb28..383c2b3c2d13b 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java @@ -65,6 +65,7 @@ import java.util.stream.Collectors; import static org.apache.lucene.tests.util.LuceneTestCase.random; +import static org.elasticsearch.test.ESTestCase.randomFrom; import static org.elasticsearch.test.ESTestCase.randomIntBetween; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; @@ -99,12 +100,12 @@ public static void assertConsistency(BlobStoreRepository repository) { * Same as {@link #assertConsistency(BlobStoreRepository)} but async so it can be used in tests that don't allow blocking. 
*/ public static PlainActionFuture assertConsistencyAsync(BlobStoreRepository repository) { - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); repository.threadPool().generic().execute(ActionRunnable.wrap(future, listener -> { try { final BlobContainer blobContainer = repository.blobContainer(); final long latestGen; - try (DataInputStream inputStream = new DataInputStream(blobContainer.readBlob(OperationPurpose.SNAPSHOT, "index.latest"))) { + try (DataInputStream inputStream = new DataInputStream(blobContainer.readBlob(randomPurpose(), "index.latest"))) { latestGen = inputStream.readLong(); } catch (NoSuchFileException e) { throw new AssertionError("Could not find index.latest blob for repo [" + repository + "]"); @@ -112,7 +113,7 @@ public static PlainActionFuture assertConsistencyAsync(BlobStore assertIndexGenerations(blobContainer, latestGen); final RepositoryData repositoryData; try ( - InputStream blob = blobContainer.readBlob(OperationPurpose.SNAPSHOT, BlobStoreRepository.INDEX_FILE_PREFIX + latestGen); + InputStream blob = blobContainer.readBlob(randomPurpose(), BlobStoreRepository.INDEX_FILE_PREFIX + latestGen); XContentParser parser = XContentType.JSON.xContent() .createParser(XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), blob) ) { @@ -153,7 +154,7 @@ public void onFailure(Exception e) { } private static void assertIndexGenerations(BlobContainer repoRoot, long latestGen) throws IOException { - final long[] indexGenerations = repoRoot.listBlobsByPrefix(OperationPurpose.SNAPSHOT, BlobStoreRepository.INDEX_FILE_PREFIX) + final long[] indexGenerations = repoRoot.listBlobsByPrefix(randomPurpose(), BlobStoreRepository.INDEX_FILE_PREFIX) .keySet() .stream() .map(s -> s.replace(BlobStoreRepository.INDEX_FILE_PREFIX, "")) @@ -165,12 +166,12 @@ private static void assertIndexGenerations(BlobContainer repoRoot, long latestGe } private static void assertShardIndexGenerations(BlobContainer repoRoot, ShardGenerations shardGenerations) throws IOException { - final BlobContainer indicesContainer = repoRoot.children(OperationPurpose.SNAPSHOT).get("indices"); + final BlobContainer indicesContainer = repoRoot.children(randomPurpose()).get("indices"); for (IndexId index : shardGenerations.indices()) { final List gens = shardGenerations.getGens(index); if (gens.isEmpty() == false) { - final BlobContainer indexContainer = indicesContainer.children(OperationPurpose.SNAPSHOT).get(index.getId()); - final Map shardContainers = indexContainer.children(OperationPurpose.SNAPSHOT); + final BlobContainer indexContainer = indicesContainer.children(randomPurpose()).get(index.getId()); + final Map shardContainers = indexContainer.children(randomPurpose()); for (int i = 0; i < gens.size(); i++) { final ShardGeneration generation = gens.get(i); assertThat(generation, not(ShardGenerations.DELETED_SHARD_GEN)); @@ -178,8 +179,7 @@ private static void assertShardIndexGenerations(BlobContainer repoRoot, ShardGen final String shardId = Integer.toString(i); assertThat(shardContainers, hasKey(shardId)); assertThat( - shardContainers.get(shardId) - .listBlobsByPrefix(OperationPurpose.SNAPSHOT, BlobStoreRepository.INDEX_FILE_PREFIX), + shardContainers.get(shardId).listBlobsByPrefix(randomPurpose(), BlobStoreRepository.INDEX_FILE_PREFIX), hasKey(BlobStoreRepository.INDEX_FILE_PREFIX + generation) ); } @@ -190,13 +190,13 @@ private static void assertShardIndexGenerations(BlobContainer repoRoot, ShardGen 
private static void assertIndexUUIDs(BlobStoreRepository repository, RepositoryData repositoryData) throws IOException { final List expectedIndexUUIDs = repositoryData.getIndices().values().stream().map(IndexId::getId).toList(); - final BlobContainer indicesContainer = repository.blobContainer().children(OperationPurpose.SNAPSHOT).get("indices"); + final BlobContainer indicesContainer = repository.blobContainer().children(randomPurpose()).get("indices"); final List foundIndexUUIDs; if (indicesContainer == null) { foundIndexUUIDs = Collections.emptyList(); } else { // Skip Lucene MockFS extraN directory - foundIndexUUIDs = indicesContainer.children(OperationPurpose.SNAPSHOT) + foundIndexUUIDs = indicesContainer.children(randomPurpose()) .keySet() .stream() .filter(s -> s.startsWith("extra") == false) @@ -204,9 +204,9 @@ private static void assertIndexUUIDs(BlobStoreRepository repository, RepositoryD } assertThat(foundIndexUUIDs, containsInAnyOrder(expectedIndexUUIDs.toArray(Strings.EMPTY_ARRAY))); for (String indexId : foundIndexUUIDs) { - final Set indexMetaGenerationsFound = indicesContainer.children(OperationPurpose.SNAPSHOT) + final Set indexMetaGenerationsFound = indicesContainer.children(randomPurpose()) .get(indexId) - .listBlobsByPrefix(OperationPurpose.SNAPSHOT, BlobStoreRepository.METADATA_PREFIX) + .listBlobsByPrefix(randomPurpose(), BlobStoreRepository.METADATA_PREFIX) .keySet() .stream() .map(p -> p.replace(BlobStoreRepository.METADATA_PREFIX, "").replace(".dat", "")) @@ -231,7 +231,7 @@ private static void assertSnapshotUUIDs( final Collection snapshotIds = repositoryData.getSnapshotIds(); final List expectedSnapshotUUIDs = snapshotIds.stream().map(SnapshotId::getUUID).toList(); for (String prefix : new String[] { BlobStoreRepository.SNAPSHOT_PREFIX, BlobStoreRepository.METADATA_PREFIX }) { - final Collection foundSnapshotUUIDs = repoRoot.listBlobs(OperationPurpose.SNAPSHOT) + final Collection foundSnapshotUUIDs = repoRoot.listBlobs(randomPurpose()) .keySet() .stream() .filter(p -> p.startsWith(prefix)) @@ -240,12 +240,12 @@ private static void assertSnapshotUUIDs( assertThat(foundSnapshotUUIDs, containsInAnyOrder(expectedSnapshotUUIDs.toArray(Strings.EMPTY_ARRAY))); } - final BlobContainer indicesContainer = repository.getBlobContainer().children(OperationPurpose.SNAPSHOT).get("indices"); + final BlobContainer indicesContainer = repository.getBlobContainer().children(randomPurpose()).get("indices"); final Map indices; if (indicesContainer == null) { indices = Collections.emptyMap(); } else { - indices = indicesContainer.children(OperationPurpose.SNAPSHOT); + indices = indicesContainer.children(randomPurpose()); } if (snapshotIds.isEmpty()) { listener.onResponse(null); @@ -298,7 +298,7 @@ private static void assertSnapshotInfosConsistency( assertThat(indices, hasKey(indexId.getId())); final BlobContainer indexContainer = indices.get(indexId.getId()); assertThat( - indexContainer.listBlobs(OperationPurpose.SNAPSHOT), + indexContainer.listBlobs(randomPurpose()), hasKey( String.format( Locale.ROOT, @@ -308,7 +308,7 @@ private static void assertSnapshotInfosConsistency( ) ); final IndexMetadata indexMetadata = repository.getSnapshotIndexMetaData(repositoryData, snapshotId, indexId); - for (Map.Entry entry : indexContainer.children(OperationPurpose.SNAPSHOT).entrySet()) { + for (Map.Entry entry : indexContainer.children(randomPurpose()).entrySet()) { // Skip Lucene MockFS extraN directory if (entry.getKey().startsWith("extra")) { continue; @@ -322,10 +322,7 @@ private static void 
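`assertIndexUUIDs` and `assertSnapshotUUIDs` above share a shape worth calling out: derive the "found" set by stripping the naming-scheme prefix/suffix from listed blob names, then compare it order-insensitively against what `RepositoryData` promises. A compact standalone version of that derive-and-compare step (the `snap-`/`.dat` scheme follows the prefix constants visible in these hunks; the concrete names below are invented):

```java
// Sketch: recover IDs from prefixed blob names, then compare as sets
// (the Hamcrest containsInAnyOrder(...) of the original).
import java.util.Set;
import java.util.stream.Collectors;

public class UuidConsistencySketch {
    public static void main(String[] args) {
        Set<String> blobNames = Set.of("snap-abc.dat", "snap-def.dat", "index.latest");
        Set<String> expected = Set.of("abc", "def"); // hypothetically, from RepositoryData

        Set<String> found = blobNames.stream()
            .filter(n -> n.startsWith("snap-"))
            .map(n -> n.replace("snap-", "").replace(".dat", ""))
            .collect(Collectors.toSet());

        if (!found.equals(expected)) {
            throw new AssertionError("expected " + expected + " but found " + found);
        }
        System.out.println("snapshot UUIDs consistent: " + found);
    }
}
```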
assertSnapshotInfosConsistency( final BlobContainer shardContainer = entry.getValue(); // TODO: we shouldn't be leaking empty shard directories when a shard (but not all of the index it belongs to) // becomes unreferenced. We should fix that and remove this conditional once its fixed. - if (shardContainer.listBlobs(OperationPurpose.SNAPSHOT) - .keySet() - .stream() - .anyMatch(blob -> blob.startsWith("extra") == false)) { + if (shardContainer.listBlobs(randomPurpose()).keySet().stream().anyMatch(blob -> blob.startsWith("extra") == false)) { final int impliedCount = shardId - 1; maxShardCountsSeen.compute( indexId, @@ -336,7 +333,7 @@ private static void assertSnapshotInfosConsistency( && snapshotInfo.shardFailures() .stream() .noneMatch(shardFailure -> shardFailure.index().equals(index) && shardFailure.shardId() == shardId)) { - final Map shardPathContents = shardContainer.listBlobs(OperationPurpose.SNAPSHOT); + final Map shardPathContents = shardContainer.listBlobs(randomPurpose()); assertThat( shardPathContents, hasKey(String.format(Locale.ROOT, BlobStoreRepository.SNAPSHOT_NAME_FORMAT, snapshotId.getUUID())) @@ -372,14 +369,11 @@ private static void assertSnapshotInfosConsistency( } public static void assertBlobsByPrefix(BlobStoreRepository repository, BlobPath path, String prefix, Map blobs) { - final PlainActionFuture> future = PlainActionFuture.newFuture(); + final PlainActionFuture> future = new PlainActionFuture<>(); repository.threadPool() .generic() .execute( - ActionRunnable.supply( - future, - () -> repository.blobStore().blobContainer(path).listBlobsByPrefix(OperationPurpose.SNAPSHOT, prefix) - ) + ActionRunnable.supply(future, () -> repository.blobStore().blobContainer(path).listBlobsByPrefix(randomPurpose(), prefix)) ); Map foundBlobs = future.actionGet(); if (blobs.isEmpty()) { @@ -464,4 +458,8 @@ private static ClusterService mockClusterService(ClusterState initialState) { when(clusterApplierService.threadPool()).thenReturn(threadPool); return clusterService; } + + public static OperationPurpose randomPurpose() { + return randomFrom(OperationPurpose.values()); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java index ec9b9da967917..578a7898bcd1e 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.blobstore.support.BlobMetadata; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; @@ -63,6 +62,7 @@ import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.READONLY_SETTING_KEY; import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.SNAPSHOT_INDEX_NAME_FORMAT; import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.SNAPSHOT_NAME_FORMAT; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static 
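Two more recurring swaps end here: `PlainActionFuture.newFuture()` becomes a plain `new PlainActionFuture<>()` (same object; the static factory appears to be on its way out now that diamond inference covers it), and `assertBlobsByPrefix` keeps its submit-then-block structure, handing a supplier to the generic pool via `ActionRunnable.supply` and blocking the test thread on the future. A JDK-only stand-in for that structure, with `CompletableFuture` playing the role of `PlainActionFuture`:

```java
// Sketch of the offload-and-block shape in assertBlobsByPrefix: run a supplier
// on a pool thread, complete a future with its result or failure, block the caller.
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class OffloadSketch {
    public static void main(String[] args) {
        ExecutorService generic = Executors.newSingleThreadExecutor();
        try {
            CompletableFuture<Map<String, Long>> future = new CompletableFuture<>();
            generic.execute(() -> {
                try {
                    // stand-in for blobContainer.listBlobsByPrefix(randomPurpose(), prefix)
                    future.complete(Map.of("index-0", 123L));
                } catch (Exception e) {
                    future.completeExceptionally(e); // ActionRunnable.supply wires this up for free
                }
            });
            System.out.println(future.join()); // the future.actionGet() of the original
        } finally {
            generic.shutdown();
        }
    }
}
```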
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; @@ -124,7 +124,7 @@ public void testReadNonExistingPath() throws IOException { try (BlobStore store = newBlobStore()) { final BlobContainer container = store.blobContainer(BlobPath.EMPTY); expectThrows(NoSuchFileException.class, () -> { - try (InputStream is = container.readBlob(OperationPurpose.SNAPSHOT, "non-existing")) { + try (InputStream is = container.readBlob(randomPurpose(), "non-existing")) { is.read(); } }); @@ -141,7 +141,7 @@ public void testWriteRead() throws IOException { data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16))); writeBlob(container, "foobar", new BytesArray(data), false); } - try (InputStream stream = container.readBlob(OperationPurpose.SNAPSHOT, "foobar")) { + try (InputStream stream = container.readBlob(randomPurpose(), "foobar")) { BytesRefBuilder target = new BytesRefBuilder(); while (target.length() < data.length) { byte[] buffer = new byte[scaledRandomIntBetween(1, data.length - target.length())]; @@ -156,14 +156,14 @@ public void testWriteRead() throws IOException { assertEquals(data.length, target.length()); assertArrayEquals(data, Arrays.copyOfRange(target.bytes(), 0, target.length())); } - container.delete(OperationPurpose.SNAPSHOT); + container.delete(randomPurpose()); } } public void testList() throws IOException { try (BlobStore store = newBlobStore()) { final BlobContainer container = store.blobContainer(BlobPath.EMPTY); - assertThat(container.listBlobs(OperationPurpose.SNAPSHOT).size(), CoreMatchers.equalTo(0)); + assertThat(container.listBlobs(randomPurpose()).size(), CoreMatchers.equalTo(0)); int numberOfFooBlobs = randomIntBetween(0, 10); int numberOfBarBlobs = randomIntBetween(3, 20); Map generatedBlobs = new HashMap<>(); @@ -184,7 +184,7 @@ public void testList() throws IOException { generatedBlobs.put(name, (long) length); writeRandomBlob(container, name, length); - Map blobs = container.listBlobs(OperationPurpose.SNAPSHOT); + Map blobs = container.listBlobs(randomPurpose()); assertThat(blobs.size(), CoreMatchers.equalTo(numberOfFooBlobs + numberOfBarBlobs)); for (Map.Entry generated : generatedBlobs.entrySet()) { BlobMetadata blobMetadata = blobs.get(generated.getKey()); @@ -193,10 +193,10 @@ public void testList() throws IOException { assertThat(blobMetadata.length(), CoreMatchers.equalTo(blobLengthFromContentLength(generated.getValue()))); } - assertThat(container.listBlobsByPrefix(OperationPurpose.SNAPSHOT, "foo-").size(), CoreMatchers.equalTo(numberOfFooBlobs)); - assertThat(container.listBlobsByPrefix(OperationPurpose.SNAPSHOT, "bar-").size(), CoreMatchers.equalTo(numberOfBarBlobs)); - assertThat(container.listBlobsByPrefix(OperationPurpose.SNAPSHOT, "baz-").size(), CoreMatchers.equalTo(0)); - container.delete(OperationPurpose.SNAPSHOT); + assertThat(container.listBlobsByPrefix(randomPurpose(), "foo-").size(), CoreMatchers.equalTo(numberOfFooBlobs)); + assertThat(container.listBlobsByPrefix(randomPurpose(), "bar-").size(), CoreMatchers.equalTo(numberOfBarBlobs)); + assertThat(container.listBlobsByPrefix(randomPurpose(), "baz-").size(), CoreMatchers.equalTo(0)); + container.delete(randomPurpose()); } } @@ -204,17 +204,17 @@ public void testDeleteBlobs() throws IOException { try (BlobStore store = newBlobStore()) { final List blobNames = Arrays.asList("foobar", "barfoo"); final BlobContainer container = store.blobContainer(BlobPath.EMPTY); - 
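`testWriteRead` above deliberately drains the blob in randomly sized chunks rather than one `readAllBytes()`, which exercises short reads and the end-of-stream contract as well as the happy path. The same loop in isolation, with a `ByteArrayInputStream` standing in for `container.readBlob(...)`:

```java
// Sketch: random-chunk drain of an InputStream plus a round-trip check,
// mirroring the BytesRefBuilder loop in testWriteRead.
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import java.util.concurrent.ThreadLocalRandom;

public class ChunkedReadSketch {
    public static void main(String[] args) throws IOException {
        byte[] data = new byte[4096];
        ThreadLocalRandom.current().nextBytes(data);

        byte[] target = new byte[data.length];
        int offset = 0;
        try (InputStream in = new ByteArrayInputStream(data)) {
            while (offset < data.length) {
                int chunk = 1 + ThreadLocalRandom.current().nextInt(data.length - offset);
                int read = in.read(target, offset, chunk);
                if (read < 0) {
                    throw new AssertionError("premature end of stream at " + offset);
                }
                offset += read;
            }
        }
        if (!Arrays.equals(data, target)) {
            throw new AssertionError("round trip corrupted the data");
        }
        System.out.println("round trip ok");
    }
}
```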
container.deleteBlobsIgnoringIfNotExists(OperationPurpose.SNAPSHOT, blobNames.iterator()); // does not raise when blobs + container.deleteBlobsIgnoringIfNotExists(randomPurpose(), blobNames.iterator()); // does not raise when blobs // don't exist byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16))); final BytesArray bytesArray = new BytesArray(data); for (String blobName : blobNames) { writeBlob(container, blobName, bytesArray, randomBoolean()); } - assertEquals(container.listBlobs(OperationPurpose.SNAPSHOT).size(), 2); - container.deleteBlobsIgnoringIfNotExists(OperationPurpose.SNAPSHOT, blobNames.iterator()); - assertTrue(container.listBlobs(OperationPurpose.SNAPSHOT).isEmpty()); - container.deleteBlobsIgnoringIfNotExists(OperationPurpose.SNAPSHOT, blobNames.iterator()); // does not raise when blobs + assertEquals(container.listBlobs(randomPurpose()).size(), 2); + container.deleteBlobsIgnoringIfNotExists(randomPurpose(), blobNames.iterator()); + assertTrue(container.listBlobs(randomPurpose()).isEmpty()); + container.deleteBlobsIgnoringIfNotExists(randomPurpose(), blobNames.iterator()); // does not raise when blobs // don't exist } } @@ -226,9 +226,9 @@ public static void writeBlob( boolean failIfAlreadyExists ) throws IOException { if (randomBoolean()) { - container.writeBlob(OperationPurpose.SNAPSHOT, blobName, bytesArray, failIfAlreadyExists); + container.writeBlob(randomPurpose(), blobName, bytesArray, failIfAlreadyExists); } else { - container.writeBlobAtomic(OperationPurpose.SNAPSHOT, blobName, bytesArray, failIfAlreadyExists); + container.writeBlobAtomic(randomPurpose(), blobName, bytesArray, failIfAlreadyExists); } } @@ -244,10 +244,10 @@ public void testContainerCreationAndDeletion() throws IOException { assertArrayEquals(readBlobFully(containerFoo, "test", data1.length), data1); assertArrayEquals(readBlobFully(containerBar, "test", data2.length), data2); - assertTrue(containerFoo.blobExists(OperationPurpose.SNAPSHOT, "test")); - assertTrue(containerBar.blobExists(OperationPurpose.SNAPSHOT, "test")); - containerBar.delete(OperationPurpose.SNAPSHOT); - containerFoo.delete(OperationPurpose.SNAPSHOT); + assertTrue(containerFoo.blobExists(randomPurpose(), "test")); + assertTrue(containerBar.blobExists(randomPurpose(), "test")); + containerBar.delete(randomPurpose()); + containerFoo.delete(randomPurpose()); } } @@ -259,7 +259,7 @@ public static byte[] writeRandomBlob(BlobContainer container, String name, int l public static byte[] readBlobFully(BlobContainer container, String name, int length) throws IOException { byte[] data = new byte[length]; - try (InputStream inputStream = container.readBlob(OperationPurpose.SNAPSHOT, name)) { + try (InputStream inputStream = container.readBlob(randomPurpose(), name)) { assertThat(Streams.readFully(inputStream, data), CoreMatchers.equalTo(length)); assertThat(inputStream.read(), CoreMatchers.equalTo(-1)); } @@ -275,7 +275,7 @@ public static byte[] randomBytes(int length) { } protected static void writeBlob(BlobContainer container, String blobName, BytesArray bytesArray) throws IOException { - container.writeBlob(OperationPurpose.SNAPSHOT, blobName, bytesArray, true); + container.writeBlob(randomPurpose(), blobName, bytesArray, true); } protected BlobStore newBlobStore() { @@ -366,10 +366,7 @@ protected void testSnapshotAndRestore(boolean recreateRepositoryBeforeRestore) t logger.info("--> delete snapshot {}:{}", repoName, snapshotName); assertAcked(clusterAdmin().prepareDeleteSnapshot(repoName, 
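`testDeleteBlobs` above encodes an idempotence contract: `deleteBlobsIgnoringIfNotExists` must succeed before the blobs exist, after they exist, and again after they are gone. That three-call shape, reduced to a JDK sketch with a `Set` standing in for the container:

```java
// Sketch: delete-ignoring-missing must be a no-op on absent names, never an error.
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;

public class IgnoreMissingDeleteSketch {
    static void deleteIgnoringIfNotExists(Set<String> store, Iterator<String> names) {
        while (names.hasNext()) {
            store.remove(names.next()); // Set.remove already ignores absent members
        }
    }

    public static void main(String[] args) {
        Set<String> store = new HashSet<>();
        List<String> names = List.of("foobar", "barfoo");

        deleteIgnoringIfNotExists(store, names.iterator()); // nothing there yet: no exception
        store.addAll(names);
        deleteIgnoringIfNotExists(store, names.iterator()); // actually deletes
        deleteIgnoringIfNotExists(store, names.iterator()); // second delete is also a no-op
        if (!store.isEmpty()) {
            throw new AssertionError("store should be empty: " + store);
        }
        System.out.println("idempotent delete ok");
    }
}
```

The companion `writeBlob` helper keeps its coin flip between `writeBlob` and `writeBlobAtomic`, so each run also covers a different write path alongside the random purpose.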
snapshotName).get()); - expectThrows( - SnapshotMissingException.class, - () -> clusterAdmin().prepareGetSnapshots(repoName).setSnapshots(snapshotName).execute().actionGet() - ); + expectThrows(SnapshotMissingException.class, () -> clusterAdmin().prepareGetSnapshots(repoName).setSnapshots(snapshotName).get()); expectThrows(SnapshotMissingException.class, () -> clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName).get()); @@ -483,7 +480,7 @@ public void testIndicesDeletedFromRepository() throws Exception { BlobStoreRepository repository = (BlobStoreRepository) repositoriesSvc.repository(repoName); final SetOnce indicesBlobContainer = new SetOnce<>(); - final PlainActionFuture repositoryData = PlainActionFuture.newFuture(); + final PlainActionFuture repositoryData = new PlainActionFuture<>(); threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { indicesBlobContainer.set(repository.blobStore().blobContainer(repository.basePath().add("indices"))); repository.getRepositoryData(EsExecutors.DIRECT_EXECUTOR_SERVICE, repositoryData); @@ -491,7 +488,7 @@ public void testIndicesDeletedFromRepository() throws Exception { for (IndexId indexId : repositoryData.actionGet().getIndices().values()) { if (indexId.getName().equals("test-idx-3")) { - assertFalse(indicesBlobContainer.get().blobExists(OperationPurpose.SNAPSHOT, indexId.getId())); // deleted index + assertFalse(indicesBlobContainer.get().blobExists(randomPurpose(), indexId.getId())); // deleted index } } @@ -510,7 +507,7 @@ public void testBlobStoreBulkDeletion() throws Exception { for (int j = 0; j < numberOfBlobsPerContainer; j++) { byte[] bytes = randomBytes(randomInt(100)); String blobName = randomAlphaOfLength(10); - container.writeBlob(OperationPurpose.SNAPSHOT, blobName, new BytesArray(bytes), false); + container.writeBlob(randomPurpose(), blobName, new BytesArray(bytes), false); if (randomBoolean()) { blobsToDelete.add(containerPath.buildAsString() + blobName); } else { @@ -519,14 +516,14 @@ public void testBlobStoreBulkDeletion() throws Exception { } } - store.deleteBlobsIgnoringIfNotExists(OperationPurpose.SNAPSHOT, blobsToDelete.iterator()); + store.deleteBlobsIgnoringIfNotExists(randomPurpose(), blobsToDelete.iterator()); for (var containerEntry : expectedBlobsPerContainer.entrySet()) { BlobContainer blobContainer = store.blobContainer(containerEntry.getKey()); - Map blobsInContainer = blobContainer.listBlobs(OperationPurpose.SNAPSHOT); + Map blobsInContainer = blobContainer.listBlobs(randomPurpose()); for (String expectedBlob : containerEntry.getValue()) { assertThat(blobsInContainer, hasKey(expectedBlob)); } - blobContainer.delete(OperationPurpose.SNAPSHOT); + blobContainer.delete(randomPurpose()); } } } @@ -559,7 +556,7 @@ public void testDanglingShardLevelBlobCleanup() throws Exception { // Create an extra dangling blob as if from an earlier snapshot that failed to clean up shardContainer.writeBlob( - OperationPurpose.SNAPSHOT, + randomPurpose(), BlobStoreRepository.UPLOADED_DATA_BLOB_PREFIX + UUIDs.randomBase64UUID(random()), BytesArray.EMPTY, true @@ -583,7 +580,7 @@ public void testDanglingShardLevelBlobCleanup() throws Exception { assertAcked(client.admin().cluster().prepareDeleteSnapshot(repoName, "snapshot-1")); // Retrieve the blobs actually present - final var actualBlobs = shardContainer.listBlobs(OperationPurpose.SNAPSHOT) + final var actualBlobs = shardContainer.listBlobs(randomPurpose()) .keySet() .stream() .filter(f -> ExtrasFS.isExtra(f) == false) @@ -624,8 +621,7 @@ public void 
testDanglingShardLevelBlobCleanup() throws Exception { protected void addRandomDocuments(String name, int numDocs) throws InterruptedException { IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - indexRequestBuilders[i] = client().prepareIndex(name) - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex(name).setId(Integer.toString(i)) .setRouting(randomAlphaOfLength(randomIntBetween(1, 10))) .setSource("field", "value"); } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESFsBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESFsBasedRepositoryIntegTestCase.java index 8e94b3fa41fcf..43b0fb7025bd8 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESFsBasedRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESFsBasedRepositoryIntegTestCase.java @@ -11,7 +11,6 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.IOUtils; @@ -24,6 +23,7 @@ import java.util.stream.Stream; import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.READONLY_SETTING_KEY; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.instanceOf; @@ -114,7 +114,7 @@ public void testReadOnly() throws Exception { byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16))); writeBlob(container, "test", new BytesArray(data)); assertArrayEquals(readBlobFully(container, "test", data.length), data); - assertTrue(container.blobExists(OperationPurpose.SNAPSHOT, "test")); + assertTrue(container.blobExists(randomPurpose(), "test")); } } } diff --git a/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java b/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java index ebc5ca4cd0fd3..c1c4d70e0b906 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java +++ b/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java @@ -132,7 +132,12 @@ protected SearchContext createContext( boolean includeAggregations ) throws IOException { SearchContext searchContext = super.createContext(readerContext, request, task, resultsType, includeAggregations); - onCreateSearchContext.accept(searchContext); + try { + onCreateSearchContext.accept(searchContext); + } catch (Exception e) { + searchContext.close(); + throw e; + } return searchContext; } diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index c5a9a9ae7c6de..9e669e772b605 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -17,11 +17,17 @@ import 
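The `MockSearchService` hunk above is a genuine behavioral fix rather than a rename: `createContext` allocates a closeable `SearchContext` and then invokes a test hook, and if the hook threw, the context leaked because the caller never received it. The fix, close on failed handover and rethrow, generalizes to any `AutoCloseable`; a sketch:

```java
// Sketch of the close-on-callback-failure pattern from MockSearchService.createContext:
// ownership only transfers to the caller on success; on failure we close and rethrow.
import java.util.function.Consumer;

public class HandoverSketch {
    static <T extends AutoCloseable> T createAndNotify(T resource, Consumer<T> onCreate) throws Exception {
        try {
            onCreate.accept(resource);
        } catch (Exception e) {
            resource.close(); // otherwise the resource leaks: the caller never sees it
            throw e;
        }
        return resource;
    }

    public static void main(String[] args) throws Exception {
        AutoCloseable ctx = () -> System.out.println("context closed");
        try {
            createAndNotify(ctx, c -> { throw new IllegalStateException("hook failed"); });
        } catch (IllegalStateException e) {
            System.out.println("rethrown after close: " + e.getMessage());
        }
    }
}
```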
org.apache.lucene.document.StoredField; import org.apache.lucene.index.CompositeReaderContext; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.OrdinalMap; +import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.TermsEnum; import org.apache.lucene.sandbox.document.HalfFloatPoint; import org.apache.lucene.search.Collector; import org.apache.lucene.search.IndexSearcher; @@ -41,6 +47,7 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; +import org.apache.lucene.util.packed.PackedInts; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -143,6 +150,7 @@ import org.junit.Before; import java.io.IOException; +import java.io.UncheckedIOException; import java.net.InetAddress; import java.util.ArrayList; import java.util.Arrays; @@ -179,7 +187,7 @@ */ public abstract class AggregatorTestCase extends ESTestCase { private NamedWriteableRegistry namedWriteableRegistry; - private final List releasables = new ArrayList<>(); + private final List releasables = new ArrayList<>(); protected ValuesSourceRegistry valuesSourceRegistry; private AnalysisModule analysisModule; @@ -328,7 +336,7 @@ private AggregationContext createAggregationContext( int maxBucket, boolean isInSortOrderExecutionRequired, MappedFieldType... fieldTypes - ) throws IOException { + ) { MappingLookup mappingLookup = MappingLookup.fromMappers( Mapping.EMPTY, Arrays.stream(fieldTypes).map(this::buildMockFieldMapper).collect(toList()), @@ -416,7 +424,7 @@ protected List objectMappers() { /** * Build a {@link SubSearchContext}s to power {@code top_hits}. */ - private static SubSearchContext buildSubSearchContext( + private SubSearchContext buildSubSearchContext( IndexSettings indexSettings, SearchExecutionContext searchExecutionContext, BitsetFilterCache bitsetFilterCache @@ -455,7 +463,9 @@ private static SubSearchContext buildSubSearchContext( when(ctx.indexShard()).thenReturn(indexShard); when(ctx.newSourceLoader()).thenAnswer(inv -> searchExecutionContext.newSourceLoader(false)); when(ctx.newIdLoader()).thenReturn(IdLoader.fromLeafStoredFieldLoader()); - return new SubSearchContext(ctx); + var res = new SubSearchContext(ctx); + releasables.add(res); // TODO: nasty workaround for not getting the standard resource handling behavior of a real search context + return res; } protected IndexSettings createIndexSettings() { @@ -484,9 +494,11 @@ protected ScriptService getMockScriptService() { * It runs the aggregation as well using a circuit breaker that randomly throws {@link CircuitBreakingException} * in order to mak sure the implementation does not leak. 
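Related resource-safety work in `AggregatorTestCase`: `buildSubSearchContext` stops being static so every context it builds can be registered in the instance-level `releasables` list (note the TODO admitting this is a workaround) and released at teardown. The harness shape, sketched with `AutoCloseable` in place of the framework's `Releasable`; the real cleanup presumably goes through something like `Releasables.close(...)`, which also aggregates failures:

```java
// Sketch: a test-scoped registry of resources, closed together at teardown.
import java.util.ArrayList;
import java.util.List;

public class ReleasablesSketch {
    private final List<AutoCloseable> releasables = new ArrayList<>();

    <T extends AutoCloseable> T track(T resource) {
        releasables.add(resource); // every tracked resource is owned by the test
        return resource;
    }

    void tearDown() throws Exception {
        for (AutoCloseable c : releasables) {
            c.close(); // a production harness would aggregate exceptions instead of stopping at the first
        }
        releasables.clear();
    }

    public static void main(String[] args) throws Exception {
        ReleasablesSketch test = new ReleasablesSketch();
        test.track(() -> System.out.println("closing sub-search context 1"));
        test.track(() -> System.out.println("closing sub-search context 2"));
        test.tearDown();
    }
}
```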
*/ - protected A searchAndReduce(IndexReader reader, AggTestConfig aggTestConfig) - throws IOException { - IndexSearcher searcher = newIndexSearcher(reader, aggTestConfig.builder.supportsParallelCollection()); + protected A searchAndReduce(IndexReader reader, AggTestConfig aggTestConfig) throws IOException { + IndexSearcher searcher = newIndexSearcher( + reader, + aggTestConfig.builder.supportsParallelCollection(field -> getCardinality(reader, field)) + ); IndexSettings indexSettings = createIndexSettings(); // First run it to find circuit breaker leaks on the aggregator runWithCrankyCircuitBreaker(indexSettings, searcher, aggTestConfig); @@ -791,7 +803,10 @@ protected void debugTestCase( MappedFieldType... fieldTypes ) throws IOException { // Don't use searchAndReduce because we only want a single aggregator. - IndexSearcher searcher = newIndexSearcher(reader, aggregationBuilder.supportsParallelCollection()); + IndexSearcher searcher = newIndexSearcher( + reader, + aggregationBuilder.supportsParallelCollection(field -> getCardinality(reader, field)) + ); if (queryCachingPolicy != null) { searcher.setQueryCachingPolicy(queryCachingPolicy); } @@ -1330,6 +1345,40 @@ protected final NamedWriteableRegistry writableRegistry() { return namedWriteableRegistry; } + long getCardinality(IndexReader reader, String field) { + try { + final TermsEnum[] subs = new TermsEnum[reader.leaves().size()]; + final long[] weights = new long[reader.leaves().size()]; + for (int i = 0; i < reader.leaves().size(); i++) { + LeafReaderContext context = reader.leaves().get(i); + FieldInfos fieldInfos = context.reader().getFieldInfos(); + FieldInfo fieldInfo = fieldInfos.fieldInfo(field); + if (fieldInfo == null) { + return -1; + } + switch (fieldInfo.getDocValuesType()) { + case SORTED -> { + SortedDocValues sortedDocValues = context.reader().getSortedDocValues(field); + subs[i] = sortedDocValues.termsEnum(); + weights[i] = sortedDocValues.getValueCount(); + } + case SORTED_SET -> { + SortedSetDocValues sortedDocValues = context.reader().getSortedSetDocValues(field); + subs[i] = sortedDocValues.termsEnum(); + weights[i] = sortedDocValues.getValueCount(); + } + default -> { + return -1; + } + } + } + final OrdinalMap ordinalMap = OrdinalMap.build(null, subs, weights, PackedInts.DEFAULT); + return ordinalMap.getValueCount(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + /** * Request an aggregation that returns the {@link CardinalityUpperBound} * that was passed to its ctor. 
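The most substantive change in this file: `supportsParallelCollection` now takes a field-to-cardinality resolver, and the test base class answers it with the new `getCardinality`, which unions each segment's doc-values terms through an `OrdinalMap` and returns the global value count (or -1 when the field is missing or not SORTED/SORTED_SET). The same computation as a self-contained Lucene program (Lucene 9.x APIs; the per-document commits are only there to force multiple segments so the `OrdinalMap` has something to merge):

```java
// Standalone demo of the OrdinalMap-based cardinality in getCardinality above.
import org.apache.lucene.document.Document;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.OrdinalMap;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.packed.PackedInts;

public class CardinalitySketch {
    public static void main(String[] args) throws Exception {
        try (var dir = new ByteBuffersDirectory(); var writer = new IndexWriter(dir, new IndexWriterConfig())) {
            for (String value : new String[] { "a", "b", "a", "c" }) {
                Document doc = new Document();
                doc.add(new SortedSetDocValuesField("field", new BytesRef(value)));
                writer.addDocument(doc);
                writer.commit(); // one tiny segment per document
            }
            try (DirectoryReader reader = DirectoryReader.open(writer)) {
                TermsEnum[] subs = new TermsEnum[reader.leaves().size()];
                long[] weights = new long[reader.leaves().size()];
                for (int i = 0; i < reader.leaves().size(); i++) {
                    SortedSetDocValues dv = reader.leaves().get(i).reader().getSortedSetDocValues("field");
                    subs[i] = dv.termsEnum();
                    weights[i] = dv.getValueCount();
                }
                OrdinalMap ordinalMap = OrdinalMap.build(null, subs, weights, PackedInts.DEFAULT);
                System.out.println("global cardinality = " + ordinalMap.getValueCount()); // 3 (a, b, c)
            }
        }
    }
}
```

Wrapping the checked `IOException` in `UncheckedIOException`, as `getCardinality` does, is what keeps the resolver usable as a plain `field -> long` lambda in the `supportsParallelCollection(...)` calls.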
diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java index 0a4af80c93a34..c03058f22da5d 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java @@ -59,12 +59,13 @@ public void testFromXContent() throws IOException { } public void testSupportsConcurrentExecution() { + int cardinality = randomIntBetween(-1, 100); AB builder = createTestAggregatorBuilder(); - boolean supportsConcurrency = builder.supportsParallelCollection(); + boolean supportsConcurrency = builder.supportsParallelCollection(field -> cardinality); AggregationBuilder bucketBuilder = new HistogramAggregationBuilder("test"); - assertTrue(bucketBuilder.supportsParallelCollection()); + assertTrue(bucketBuilder.supportsParallelCollection(field -> cardinality)); bucketBuilder.subAggregation(builder); - assertThat(bucketBuilder.supportsParallelCollection(), equalTo(supportsConcurrency)); + assertThat(bucketBuilder.supportsParallelCollection(field -> cardinality), equalTo(supportsConcurrency)); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java index 2138c0f750ac2..7fc1826952477 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java @@ -121,24 +121,23 @@ public void setupSuiteScopeCluster() throws Exception { allMultiVal[2 * i] = multiValues[i % numUniqueGeoPoints]; allMultiVal[2 * i + 1] = multiValues[(i + 1) % numUniqueGeoPoints]; builders.add( - client().prepareIndex(IDX_NAME) - .setSource( - jsonBuilder().startObject() - .array(SINGLE_VALUED_FIELD_NAME, allSingleVal[i].getX(), allSingleVal[i].getY()) - .startArray(MULTI_VALUED_FIELD_NAME) - .startArray() - .value(allMultiVal[2 * i].getX()) - .value(allMultiVal[2 * i].getY()) - .endArray() - .startArray() - .value(allMultiVal[2 * i + 1].getX()) - .value(allMultiVal[2 * i + 1].getY()) - .endArray() - .endArray() - .field(NUMBER_FIELD_NAME, i) - .field("tag", "tag" + i) - .endObject() - ) + prepareIndex(IDX_NAME).setSource( + jsonBuilder().startObject() + .array(SINGLE_VALUED_FIELD_NAME, allSingleVal[i].getX(), allSingleVal[i].getY()) + .startArray(MULTI_VALUED_FIELD_NAME) + .startArray() + .value(allMultiVal[2 * i].getX()) + .value(allMultiVal[2 * i].getY()) + .endArray() + .startArray() + .value(allMultiVal[2 * i + 1].getX()) + .value(allMultiVal[2 * i + 1].getY()) + .endArray() + .endArray() + .field(NUMBER_FIELD_NAME, i) + .field("tag", "tag" + i) + .endObject() + ) ); } singleCentroid = computeCentroid(allSingleVal); @@ -167,14 +166,13 @@ public void setupSuiteScopeCluster() throws Exception { for (int i = 0; i < 5; i++) { builders.add( - client().prepareIndex(DATELINE_IDX_NAME) - .setSource( - jsonBuilder().startObject() - .array(SINGLE_VALUED_FIELD_NAME, geoValues[i].getX(), geoValues[i].getY()) - .field(NUMBER_FIELD_NAME, i) - .field("tag", "tag" + i) - .endObject() - ) + prepareIndex(DATELINE_IDX_NAME).setSource( + jsonBuilder().startObject() + .array(SINGLE_VALUED_FIELD_NAME, geoValues[i].getX(), geoValues[i].getY()) + 
.field(NUMBER_FIELD_NAME, i) + .field("tag", "tag" + i) + .endObject() + ) ); } assertAcked( @@ -194,31 +192,29 @@ public void setupSuiteScopeCluster() throws Exception { for (int i = 0; i < 2000; i++) { SpatialPoint singleVal = singleValues[i % numUniqueGeoPoints]; builders.add( - client().prepareIndex(HIGH_CARD_IDX_NAME) - .setSource( - jsonBuilder().startObject() - .array(SINGLE_VALUED_FIELD_NAME, singleVal.getX(), singleVal.getY()) - .startArray(MULTI_VALUED_FIELD_NAME) - .startArray() - .value(multiValues[i % numUniqueGeoPoints].getX()) - .value(multiValues[i % numUniqueGeoPoints].getY()) - .endArray() - .startArray() - .value(multiValues[(i + 1) % numUniqueGeoPoints].getX()) - .value(multiValues[(i + 1) % numUniqueGeoPoints].getY()) - .endArray() - .endArray() - .field(NUMBER_FIELD_NAME, i) - .field("tag", "tag" + i) - .endObject() - ) + prepareIndex(HIGH_CARD_IDX_NAME).setSource( + jsonBuilder().startObject() + .array(SINGLE_VALUED_FIELD_NAME, singleVal.getX(), singleVal.getY()) + .startArray(MULTI_VALUED_FIELD_NAME) + .startArray() + .value(multiValues[i % numUniqueGeoPoints].getX()) + .value(multiValues[i % numUniqueGeoPoints].getY()) + .endArray() + .startArray() + .value(multiValues[(i + 1) % numUniqueGeoPoints].getX()) + .value(multiValues[(i + 1) % numUniqueGeoPoints].getY()) + .endArray() + .endArray() + .field(NUMBER_FIELD_NAME, i) + .field("tag", "tag" + i) + .endObject() + ) ); updateGeohashBucketsCentroid(singleVal); } builders.add( - client().prepareIndex(IDX_ZERO_NAME) - .setSource(jsonBuilder().startObject().array(SINGLE_VALUED_FIELD_NAME, 0.0, 1.0).endObject()) + prepareIndex(IDX_ZERO_NAME).setSource(jsonBuilder().startObject().array(SINGLE_VALUED_FIELD_NAME, 0.0, 1.0).endObject()) ); assertAcked(prepareCreate(IDX_ZERO_NAME).setMapping(SINGLE_VALUED_FIELD_NAME, "type=" + fieldTypeName())); diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java index ac0e14aa50103..a41c60cba4a4a 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java @@ -29,8 +29,7 @@ public void setupSuiteScopeCluster() throws Exception { final int numDocs = 10; for (int i = 0; i < numDocs; i++) { // TODO randomize the size and the params in here? builders.add( - client().prepareIndex("idx") - .setId(String.valueOf(i)) + prepareIndex("idx").setId(String.valueOf(i)) .setSource( jsonBuilder().startObject() .field("value", i + 1) @@ -52,12 +51,11 @@ public void setupSuiteScopeCluster() throws Exception { // two docs {value: 0} and {value : 2}, then building a histogram agg with interval 1 and with empty // buckets computed.. the empty bucket is the one associated with key "1". then each test will have // to check that this bucket exists with the appropriate sub aggregations. 
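From here down to the snapshot test case the changes are one mechanical refactor: `client().prepareIndex(index)` becomes `prepareIndex(index)` (and `.execute().actionGet()` collapses to `.get()`), a convenience the integration-test base class exposes so call sites stop repeating `client().`; the indentation churn in the geo tests is just the formatter reflowing the shorter chains. A hypothetical sketch of the delegation, with invented stand-ins for the builder types:

```java
// Hypothetical sketch of the prepareIndex(...) shorthand used in these hunks.
public abstract class IntegTestBaseSketch {
    interface Client {
        IndexRequestBuilderSketch prepareIndex(String index);
    }

    static class IndexRequestBuilderSketch {
        IndexRequestBuilderSketch setId(String id) { return this; }
        IndexRequestBuilderSketch setSource(Object... source) { return this; }
        void get() { /* execute and block, like the real request builder */ }
    }

    protected abstract Client client();

    // The whole refactor: one method names client(), call sites no longer do.
    protected IndexRequestBuilderSketch prepareIndex(String index) {
        return client().prepareIndex(index);
    }
}
```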
- prepareCreate("empty_bucket_idx").setMapping("value", "type=integer").execute().actionGet(); + prepareCreate("empty_bucket_idx").setMapping("value", "type=integer").get(); builders = new ArrayList<>(); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId(String.valueOf(i)) + prepareIndex("empty_bucket_idx").setId(String.valueOf(i)) .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) ); } diff --git a/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java index ed6f0e1c87f2a..3f394c1384432 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java @@ -85,8 +85,7 @@ public void testNullShape() throws Exception { createMapping(defaultIndexName, defaultFieldName); ensureGreen(); - client().prepareIndex(defaultIndexName) - .setId("aNullshape") + prepareIndex(defaultIndexName).setId("aNullshape") .setSource("{\"geo\": null}", XContentType.JSON) .setRefreshPolicy(IMMEDIATE) .get(); @@ -98,14 +97,12 @@ public void testIndexPointsFilterRectangle() throws Exception { createMapping(defaultIndexName, defaultFieldName); ensureGreen(); - client().prepareIndex(defaultIndexName) - .setId("1") + prepareIndex(defaultIndexName).setId("1") .setSource(jsonBuilder().startObject().field("name", "Document 1").field(defaultFieldName, "POINT(-30 -30)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); - client().prepareIndex(defaultIndexName) - .setId("2") + prepareIndex(defaultIndexName).setId("2") .setSource(jsonBuilder().startObject().field("name", "Document 2").field(defaultFieldName, "POINT(-45 -50)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -135,14 +132,12 @@ public void testIndexPointsCircle() throws Exception { createMapping(defaultIndexName, defaultFieldName); ensureGreen(); - client().prepareIndex(defaultIndexName) - .setId("1") + prepareIndex(defaultIndexName).setId("1") .setSource(jsonBuilder().startObject().field("name", "Document 1").field(defaultFieldName, "POINT(-30 -30)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); - client().prepareIndex(defaultIndexName) - .setId("2") + prepareIndex(defaultIndexName).setId("2") .setSource(jsonBuilder().startObject().field("name", "Document 2").field(defaultFieldName, "POINT(-45 -50)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -165,14 +160,12 @@ public void testIndexPointsPolygon() throws Exception { createMapping(defaultIndexName, defaultFieldName); ensureGreen(); - client().prepareIndex(defaultIndexName) - .setId("1") + prepareIndex(defaultIndexName).setId("1") .setSource(jsonBuilder().startObject().field(defaultFieldName, "POINT(-30 -30)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); - client().prepareIndex(defaultIndexName) - .setId("2") + prepareIndex(defaultIndexName).setId("2") .setSource(jsonBuilder().startObject().field(defaultFieldName, "POINT(-45 -50)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -194,20 +187,17 @@ public void testIndexPointsMultiPolygon() throws Exception { createMapping(defaultIndexName, defaultFieldName); ensureGreen(); - client().prepareIndex(defaultIndexName) - .setId("1") + prepareIndex(defaultIndexName).setId("1") .setSource(jsonBuilder().startObject().field("name", "Document 1").field(defaultFieldName, "POINT(-30 -30)").endObject()) 
.setRefreshPolicy(IMMEDIATE) .get(); - client().prepareIndex(defaultIndexName) - .setId("2") + prepareIndex(defaultIndexName).setId("2") .setSource(jsonBuilder().startObject().field("name", "Document 2").field(defaultFieldName, "POINT(-40 -40)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); - client().prepareIndex(defaultIndexName) - .setId("3") + prepareIndex(defaultIndexName).setId("3") .setSource(jsonBuilder().startObject().field("name", "Document 3").field(defaultFieldName, "POINT(-50 -50)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -265,14 +255,12 @@ public void testIndexPointsRectangle() throws Exception { createMapping(defaultIndexName, defaultFieldName); ensureGreen(); - client().prepareIndex(defaultIndexName) - .setId("1") + prepareIndex(defaultIndexName).setId("1") .setSource(jsonBuilder().startObject().field("name", "Document 1").field(defaultFieldName, "POINT(-30 -30)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); - client().prepareIndex(defaultIndexName) - .setId("2") + prepareIndex(defaultIndexName).setId("2") .setSource(jsonBuilder().startObject().field("name", "Document 2").field(defaultFieldName, "POINT(-45 -50)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -294,14 +282,12 @@ public void testIndexPointsIndexedRectangle() throws Exception { createMapping(defaultIndexName, defaultFieldName); ensureGreen(); - client().prepareIndex(defaultIndexName) - .setId("point1") + prepareIndex(defaultIndexName).setId("point1") .setSource(jsonBuilder().startObject().field(defaultFieldName, "POINT(-30 -30)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); - client().prepareIndex(defaultIndexName) - .setId("point2") + prepareIndex(defaultIndexName).setId("point2") .setSource(jsonBuilder().startObject().field(defaultFieldName, "POINT(-45 -50)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -321,14 +307,12 @@ public void testIndexPointsIndexedRectangle() throws Exception { client().admin().indices().prepareCreate(indexedShapeIndex).setMapping(queryShapesMapping).get(); ensureGreen(); - client().prepareIndex(indexedShapeIndex) - .setId("shape1") + prepareIndex(indexedShapeIndex).setId("shape1") .setSource(jsonBuilder().startObject().field(indexedShapePath, "BBOX(-50, -40, -45, -55)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); - client().prepareIndex(indexedShapeIndex) - .setId("shape2") + prepareIndex(indexedShapeIndex).setId("shape2") .setSource(jsonBuilder().startObject().field(indexedShapePath, "BBOX(-60, -50, -50, -60)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -416,8 +400,7 @@ public void testQueryPoint() throws Exception { createMapping(defaultIndexName, defaultFieldName); ensureGreen(); - client().prepareIndex(defaultIndexName) - .setId("1") + prepareIndex(defaultIndexName).setId("1") .setSource(jsonBuilder().startObject().field(defaultFieldName, "POINT(-35 -25)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -452,8 +435,7 @@ public void testQueryMultiPoint() throws Exception { createMapping(defaultIndexName, defaultFieldName); ensureGreen(); - client().prepareIndex(defaultIndexName) - .setId("1") + prepareIndex(defaultIndexName).setId("1") .setSource(jsonBuilder().startObject().field(defaultFieldName, "POINT(-35 -25)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -547,8 +529,7 @@ public void testQueryPointFromMultiPoint() throws Exception { Point pointC = new Point(35, 25); Point pointD = new Point(45, 35); Object[] points = samplePointDataMultiFormat(pointA, pointB, pointC, pointD); - 
client().prepareIndex(defaultIndexName) - .setId("1") + prepareIndex(defaultIndexName).setId("1") .setSource(jsonBuilder().startObject().field(defaultFieldName, points).endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -598,9 +579,9 @@ public void testIndexPointsFromLine() throws Exception { ); for (int i = 0; i < line.length(); i++) { Point point = new Point(line.getLon(i), line.getLat(i)); - client().prepareIndex(defaultIndexName) - .setSource(jsonBuilder().startObject().field(defaultFieldName, WellKnownText.toWKT(point)).endObject()) - .get(); + prepareIndex(defaultIndexName).setSource( + jsonBuilder().startObject().field(defaultFieldName, WellKnownText.toWKT(point)).endObject() + ).get(); } client().admin().indices().prepareRefresh(defaultIndexName).get(); // all points from a line intersect with the line @@ -623,9 +604,9 @@ public void testIndexPointsFromPolygon() throws Exception { LinearRing linearRing = polygon.getPolygon(); for (int i = 0; i < linearRing.length(); i++) { Point point = new Point(linearRing.getLon(i), linearRing.getLat(i)); - client().prepareIndex(defaultIndexName) - .setSource(jsonBuilder().startObject().field(defaultFieldName, WellKnownText.toWKT(point)).endObject()) - .get(); + prepareIndex(defaultIndexName).setSource( + jsonBuilder().startObject().field(defaultFieldName, WellKnownText.toWKT(point)).endObject() + ).get(); } client().admin().indices().prepareRefresh(defaultIndexName).get(); // all points from a polygon intersect with the polygon diff --git a/test/framework/src/main/java/org/elasticsearch/search/geo/BaseShapeIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/geo/BaseShapeIntegTestCase.java index 97e68507f0c55..58328671c58e8 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/geo/BaseShapeIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/geo/BaseShapeIntegTestCase.java @@ -240,7 +240,7 @@ public void testDisallowExpensiveQueries() throws InterruptedException, IOExcept } }"""; - indexRandom(true, client().prepareIndex("test").setId("0").setSource(source, XContentType.JSON)); + indexRandom(true, prepareIndex("test").setId("0").setSource(source, XContentType.JSON)); refresh(); try { @@ -299,7 +299,7 @@ public void testShapeRelations() throws Exception { jsonBuilder().startObject().field("area", WellKnownText.toWKT(new MultiPolygon(polygons))).endObject() ); - client().prepareIndex("shapes").setId("1").setSource(data, XContentType.JSON).get(); + prepareIndex("shapes").setId("1").setSource(data, XContentType.JSON).get(); client().admin().indices().prepareRefresh().get(); // Point in polygon @@ -363,7 +363,7 @@ public void testShapeRelations() throws Exception { ); data = BytesReference.bytes(jsonBuilder().startObject().field("area", WellKnownText.toWKT(inverse)).endObject()); - client().prepareIndex("shapes").setId("2").setSource(data, XContentType.JSON).get(); + prepareIndex("shapes").setId("2").setSource(data, XContentType.JSON).get(); client().admin().indices().prepareRefresh().get(); // re-check point on polygon hole @@ -384,7 +384,7 @@ public void testShapeRelations() throws Exception { Polygon crossing = new Polygon(new LinearRing(new double[] { 170, 190, 190, 170, 170 }, new double[] { -10, -10, 10, 10, -10 })); data = BytesReference.bytes(jsonBuilder().startObject().field("area", WellKnownText.toWKT(crossing)).endObject()); - client().prepareIndex("shapes").setId("1").setSource(data, XContentType.JSON).get(); + prepareIndex("shapes").setId("1").setSource(data, 
XContentType.JSON).get(); client().admin().indices().prepareRefresh().get(); // Create a polygon crossing longitude 180 with hole. @@ -394,7 +394,7 @@ public void testShapeRelations() throws Exception { ); data = BytesReference.bytes(jsonBuilder().startObject().field("area", WellKnownText.toWKT(crossing)).endObject()); - client().prepareIndex("shapes").setId("1").setSource(data, XContentType.JSON).get(); + prepareIndex("shapes").setId("1").setSource(data, XContentType.JSON).get(); client().admin().indices().prepareRefresh().get(); assertHitCount( diff --git a/test/framework/src/main/java/org/elasticsearch/search/geo/BaseShapeQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/geo/BaseShapeQueryTestCase.java index 5da9103d49771..406625b33813f 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/geo/BaseShapeQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/geo/BaseShapeQueryTestCase.java @@ -87,8 +87,7 @@ public void testFieldAlias() throws IOException { ensureGreen(); MultiPoint multiPoint = GeometryTestUtils.randomMultiPoint(false); - client().prepareIndex(defaultIndexName) - .setId("1") + prepareIndex(defaultIndexName).setId("1") .setSource(GeoJson.toXContent(multiPoint, jsonBuilder().startObject().field(defaultFieldName), null).endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -103,11 +102,10 @@ public void testShapeFetchingPath() throws Exception { String geo = """ "geo" : {"type":"polygon", "coordinates":[[[-10,-10],[10,-10],[10,10],[-10,10],[-10,-10]]]}"""; - client().prepareIndex("shapes").setId("1").setSource(Strings.format(""" + prepareIndex("shapes").setId("1").setSource(Strings.format(""" { %s, "1" : { %s, "2" : { %s, "3" : { %s } }} } """, geo, geo, geo, geo), XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex(defaultIndexName) - .setId("1") + prepareIndex(defaultIndexName).setId("1") .setSource( jsonBuilder().startObject() .startObject(defaultFieldName) @@ -193,7 +191,7 @@ public void testRandomGeoCollectionQuery() throws Exception { ensureGreen(); XContentBuilder docSource = GeoJson.toXContent(gcb, jsonBuilder().startObject().field(defaultFieldName), null).endObject(); - client().prepareIndex(defaultIndexName).setId("1").setSource(docSource).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(defaultIndexName).setId("1").setSource(docSource).setRefreshPolicy(IMMEDIATE).get(); // Create a random geometry collection to query GeometryCollection randomQueryCollection = makeRandomGeometryCollection(); @@ -314,8 +312,7 @@ public void testEdgeCases() throws Exception { client().admin().indices().prepareCreate(defaultIndexName).setMapping(mapping).get(); ensureGreen(); - client().prepareIndex(defaultIndexName) - .setId("blakely") + prepareIndex(defaultIndexName).setId("blakely") .setSource( jsonBuilder().startObject() .field("name", "Blakely Island") @@ -369,8 +366,7 @@ public void testIndexedShapeReferenceSourceDisabled() throws Exception { Rectangle shape = new Rectangle(-45, 45, 45, -45); - client().prepareIndex("shapes") - .setId("Big_Rectangle") + prepareIndex("shapes").setId("Big_Rectangle") .setSource(jsonBuilder().startObject().field("shape", WellKnownText.toWKT(shape)).endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -396,7 +392,7 @@ public void testPointQuery() throws Exception { XContentBuilder docSource = GeoJson.toXContent(gcb, jsonBuilder().startObject().field(defaultFieldName), ToXContent.EMPTY_PARAMS) .endObject(); - 
client().prepareIndex(defaultIndexName).setId("1").setSource(docSource).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(defaultIndexName).setId("1").setSource(docSource).setRefreshPolicy(IMMEDIATE).get(); assertHitCountAndNoFailures( client().prepareSearch(defaultIndexName).setQuery(queryBuilder().intersectionQuery(defaultFieldName, point)), @@ -410,7 +406,7 @@ public void testContainsShapeQuery() throws Exception { createMapping(defaultIndexName, defaultFieldName); XContentBuilder docSource = GeoJson.toXContent(polygon, jsonBuilder().startObject().field(defaultFieldName), null).endObject(); - client().prepareIndex(defaultIndexName).setId("1").setSource(docSource).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(defaultIndexName).setId("1").setSource(docSource).setRefreshPolicy(IMMEDIATE).get(); QueryBuilder filter = queryBuilder().shapeQuery(defaultFieldName, innerPolygon).relation(ShapeRelation.CONTAINS); assertHitCountAndNoFailures(client().prepareSearch(defaultIndexName).setQuery(filter), 1L); @@ -424,7 +420,7 @@ public void testExistsQuery() throws Exception { createMapping(defaultIndexName, defaultFieldName); XContentBuilder docSource = GeoJson.toXContent(gcb, jsonBuilder().startObject().field(defaultFieldName), null).endObject(); - client().prepareIndex(defaultIndexName).setId("1").setSource(docSource).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(defaultIndexName).setId("1").setSource(docSource).setRefreshPolicy(IMMEDIATE).get(); ExistsQueryBuilder eqb = existsQuery(defaultFieldName); assertHitCountAndNoFailures(client().prepareSearch(defaultIndexName).setQuery(eqb), 1L); @@ -437,13 +433,11 @@ public void testIndexedShapeReference() throws Exception { Rectangle shape = new Rectangle(-45, 45, 45, -45); - client().prepareIndex("shapes") - .setId("Big_Rectangle") + prepareIndex("shapes").setId("Big_Rectangle") .setSource(GeoJson.toXContent(shape, jsonBuilder().startObject().field("shape"), null).endObject()) .setRefreshPolicy(IMMEDIATE) .get(); - client().prepareIndex(defaultIndexName) - .setId("1") + prepareIndex(defaultIndexName).setId("1") .setSource( jsonBuilder().startObject() .field("name", "Document 1") @@ -489,7 +483,7 @@ public void testQueryRandomGeoCollection() throws Exception { ensureGreen(); XContentBuilder docSource = GeoJson.toXContent(gcb, jsonBuilder().startObject().field(defaultFieldName), null).endObject(); - client().prepareIndex(defaultIndexName).setId("1").setSource(docSource).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(defaultIndexName).setId("1").setSource(docSource).setRefreshPolicy(IMMEDIATE).get(); assertHitCountAndNoFailures( client().prepareSearch(defaultIndexName).setQuery(queryBuilder().intersectionQuery(defaultFieldName, polygon)), @@ -528,7 +522,7 @@ public void testShapeFilterWithDefinedGeoCollection() throws Exception { .endArray() .endObject() .endObject(); - client().prepareIndex(defaultIndexName).setId("1").setSource(docSource).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(defaultIndexName).setId("1").setSource(docSource).setRefreshPolicy(IMMEDIATE).get(); Polygon polygon1 = new Polygon( new LinearRing(new double[] { 99.0, 99.0, 103.0, 103.0, 99.0 }, new double[] { -1.0, 3.0, 3.0, -1.0, -1.0 }) @@ -616,8 +610,7 @@ public void testIndexLineQueryPoints() throws Exception { Line line = makeRandomLine(); - client().prepareIndex(defaultIndexName) - .setSource(jsonBuilder().startObject().field(defaultFieldName, WellKnownText.toWKT(line)).endObject()) + 
prepareIndex(defaultIndexName).setSource(jsonBuilder().startObject().field(defaultFieldName, WellKnownText.toWKT(line)).endObject()) .setRefreshPolicy(IMMEDIATE) .get(); // all points from a line intersect with the line @@ -638,10 +631,9 @@ public void testIndexPolygonQueryPoints() throws Exception { Polygon polygon = makeRandomPolygon(); - client().prepareIndex(defaultIndexName) - .setSource(jsonBuilder().startObject().field(defaultFieldName, WellKnownText.toWKT(polygon)).endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); + prepareIndex(defaultIndexName).setSource( + jsonBuilder().startObject().field(defaultFieldName, WellKnownText.toWKT(polygon)).endObject() + ).setRefreshPolicy(IMMEDIATE).get(); // all points from a polygon intersect with the polygon LinearRing linearRing = polygon.getPolygon(); @@ -673,8 +665,7 @@ public void testNeighbours() throws Exception { }; for (String polygon : polygons) { - client().prepareIndex(defaultIndexName) - .setSource(jsonBuilder().startObject().field(defaultFieldName, polygon).endObject()) + prepareIndex(defaultIndexName).setSource(jsonBuilder().startObject().field(defaultFieldName, polygon).endObject()) .setRefreshPolicy(IMMEDIATE) .get(); } diff --git a/test/framework/src/main/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryIntegTestCase.java index 7cc53560e8403..fe5c7b8572b35 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryIntegTestCase.java @@ -55,50 +55,43 @@ public void testSimpleBoundingBoxTest() throws Exception { assertAcked(prepareCreate("test").setSettings(settings).setMapping(xContentBuilder)); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("name", "New York").field("location", "POINT(-74.0059731 40.7143528)").endObject()) .get(); // to NY: 5.286 km - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject().field("name", "Times Square").field("location", "POINT(-73.9844722 40.759011)").endObject() ) .get(); // to NY: 0.4621 km - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource(jsonBuilder().startObject().field("name", "Tribeca").field("location", "POINT(-74.007819 40.718266)").endObject()) .get(); // to NY: 1.055 km - client().prepareIndex("test") - .setId("4") + prepareIndex("test").setId("4") .setSource( jsonBuilder().startObject().field("name", "Wall Street").field("location", "POINT(-74.0088305 40.7051157)").endObject() ) .get(); // to NY: 1.258 km - client().prepareIndex("test") - .setId("5") + prepareIndex("test").setId("5") .setSource(jsonBuilder().startObject().field("name", "Soho").field("location", "POINT(-74 40.7247222)").endObject()) .get(); // to NY: 2.029 km - client().prepareIndex("test") - .setId("6") + prepareIndex("test").setId("6") .setSource( jsonBuilder().startObject().field("name", "Greenwich Village").field("location", "POINT(-73.9962255 40.731033)").endObject() ) .get(); // to NY: 8.572 km - client().prepareIndex("test") - .setId("7") + prepareIndex("test").setId("7") .setSource(jsonBuilder().startObject().field("name", "Brooklyn").field("location", "POINT(-73.95 40.65)").endObject()) .get(); @@ -185,8 +178,7 @@ public void testLimit2BoundingBox() throws Exception { 
assertAcked(prepareCreate("test").setSettings(settings).setMapping(xContentBuilder)); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("userid", 880) @@ -197,8 +189,7 @@ public void testLimit2BoundingBox() throws Exception { .setRefreshPolicy(IMMEDIATE) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("userid", 534) @@ -339,8 +330,7 @@ public void testCompleteLonRange() throws Exception { assertAcked(prepareCreate("test").setSettings(settings).setMapping(xContentBuilder)); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("userid", 880) @@ -351,8 +341,7 @@ public void testCompleteLonRange() throws Exception { .setRefreshPolicy(IMMEDIATE) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("userid", 534) diff --git a/test/framework/src/main/java/org/elasticsearch/search/geo/GeoShapeIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/geo/GeoShapeIntegTestCase.java index 8397dece4f537..5bab4907f45a9 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/geo/GeoShapeIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/geo/GeoShapeIntegTestCase.java @@ -55,7 +55,7 @@ public void testIndexPolygonDateLine() throws Exception { "shape": "POLYGON((179 0, -179 0, -179 2, 179 2, 179 0))" }"""; - indexRandom(true, client().prepareIndex("test").setId("0").setSource(source, XContentType.JSON)); + indexRandom(true, prepareIndex("test").setId("0").setSource(source, XContentType.JSON)); assertHitCount(prepareSearch("test").setQuery(geoShapeQuery("shape", new Point(-179.75, 1))), 1L); assertHitCount(prepareSearch("test").setQuery(geoShapeQuery("shape", new Point(90, 1))), 0L); diff --git a/test/framework/src/main/java/org/elasticsearch/search/geo/GeoShapeQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/geo/GeoShapeQueryTestCase.java index 5bd3a3ba69f2f..1da562da3dd0e 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/geo/GeoShapeQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/geo/GeoShapeQueryTestCase.java @@ -155,7 +155,7 @@ public void testIndexRectangleSpanningDateLine() throws Exception { Rectangle envelope = new Rectangle(178, -178, 10, -10); XContentBuilder docSource = GeoJson.toXContent(envelope, jsonBuilder().startObject().field(defaultFieldName), null).endObject(); - client().prepareIndex(defaultIndexName).setId("1").setSource(docSource).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(defaultIndexName).setId("1").setSource(docSource).setRefreshPolicy(IMMEDIATE).get(); Point filterShape = new Point(179, 0); diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 94c40b2d53b00..570d583335a12 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -196,9 +196,14 @@ public static long getFailureCount(String repository) { } public static void assertFileCount(Path dir, int expectedCount) throws IOException { + final List found = 
diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java
index 94c40b2d53b00..570d583335a12 100644
--- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java
@@ -196,9 +196,14 @@ public static long getFailureCount(String repository) {
    }

    public static void assertFileCount(Path dir, int expectedCount) throws IOException {
+        final List<Path> found = getAllFilesInDirectoryAndDescendants(dir);
+        assertEquals("Unexpected file count, found: [" + found + "].", expectedCount, found.size());
+    }
+
+    protected static List<Path> getAllFilesInDirectoryAndDescendants(Path dir) throws IOException {
        final List<Path> found = new ArrayList<>();
        forEachFileRecursively(dir, ((path, basicFileAttributes) -> found.add(path)));
-        assertEquals("Unexpected file count, found: [" + found + "].", expectedCount, found.size());
+        return found;
    }

    protected void stopNode(final String node) throws IOException {
@@ -489,7 +494,7 @@ protected void indexRandomDocs(String index, int numdocs) throws InterruptedExce
        logger.info("--> indexing [{}] documents into [{}]", numdocs, index);
        IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
        for (int i = 0; i < builders.length; i++) {
-            builders[i] = client().prepareIndex(index).setId(Integer.toString(i)).setSource("field1", "bar " + i);
+            builders[i] = prepareIndex(index).setId(Integer.toString(i)).setSource("field1", "bar " + i);
        }
        indexRandom(true, builders);
        flushAndRefresh(index);
@@ -659,7 +664,7 @@ protected ActionFuture<AcknowledgedResponse> startDeleteSnapshots(String repoNam
    }

    protected static void updateClusterState(final Function<ClusterState, ClusterState> updater) throws Exception {
-        final PlainActionFuture<Void> future = PlainActionFuture.newFuture();
+        final PlainActionFuture<Void> future = new PlainActionFuture<>();
        final ClusterService clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class);
        clusterService.submitUnbatchedStateUpdateTask("test", new ClusterStateUpdateTask() {
            @Override
@@ -704,7 +709,7 @@ protected List<String> createNSnapshots(String repoName, int count) throws Excep
    }

    public static List<String> createNSnapshots(Logger logger, String repoName, int count) throws Exception {
-        final PlainActionFuture<Collection<CreateSnapshotResponse>> allSnapshotsDone = PlainActionFuture.newFuture();
+        final PlainActionFuture<Collection<CreateSnapshotResponse>> allSnapshotsDone = new PlainActionFuture<>();
        final ActionListener<CreateSnapshotResponse> snapshotsListener = new GroupedActionListener<>(count, allSnapshotsDone);
        final List<String> snapshotNames = new ArrayList<>(count);
        final String prefix = RANDOM_SNAPSHOT_NAME_PREFIX + UUIDs.randomBase64UUID(random()).toLowerCase(Locale.ROOT) + "-";
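[Note] The extracted getAllFilesInDirectoryAndDescendants helper lets subclasses inspect repository contents directly instead of only asserting a count. A hedged usage sketch (the repository path and the particular assertion are illustrative):

    // List every file under the repo and check a property of the set,
    // rather than just its size.
    List<Path> files = getAllFilesInDirectoryAndDescendants(repoPath);
    assertTrue(files.stream().anyMatch(p -> p.getFileName().toString().startsWith("index-")));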
diff --git a/test/framework/src/main/java/org/elasticsearch/telemetry/InstrumentType.java b/test/framework/src/main/java/org/elasticsearch/telemetry/InstrumentType.java
index 3930adf1af638..53cc4995fa831 100644
--- a/test/framework/src/main/java/org/elasticsearch/telemetry/InstrumentType.java
+++ b/test/framework/src/main/java/org/elasticsearch/telemetry/InstrumentType.java
@@ -8,11 +8,13 @@

 package org.elasticsearch.telemetry;

+import org.elasticsearch.telemetry.metric.DoubleAsyncCounter;
 import org.elasticsearch.telemetry.metric.DoubleCounter;
 import org.elasticsearch.telemetry.metric.DoubleGauge;
 import org.elasticsearch.telemetry.metric.DoubleHistogram;
 import org.elasticsearch.telemetry.metric.DoubleUpDownCounter;
 import org.elasticsearch.telemetry.metric.Instrument;
+import org.elasticsearch.telemetry.metric.LongAsyncCounter;
 import org.elasticsearch.telemetry.metric.LongCounter;
 import org.elasticsearch.telemetry.metric.LongGauge;
 import org.elasticsearch.telemetry.metric.LongHistogram;
@@ -27,6 +29,8 @@ public enum InstrumentType {
    DOUBLE_COUNTER(true),
    LONG_COUNTER(false),
+    LONG_ASYNC_COUNTER(false),
+    DOUBLE_ASYNC_COUNTER(true),
    DOUBLE_UP_DOWN_COUNTER(true),
    LONG_UP_DOWN_COUNTER(false),
    DOUBLE_HISTOGRAM(true),
@@ -48,6 +52,10 @@ public static InstrumentType fromInstrument(Instrument instrument) {
            return InstrumentType.DOUBLE_COUNTER;
        } else if (instrument instanceof LongCounter) {
            return InstrumentType.LONG_COUNTER;
+        } else if (instrument instanceof LongAsyncCounter) {
+            return InstrumentType.LONG_ASYNC_COUNTER;
+        } else if (instrument instanceof DoubleAsyncCounter) {
+            return InstrumentType.DOUBLE_ASYNC_COUNTER;
        } else if (instrument instanceof DoubleUpDownCounter) {
            return InstrumentType.DOUBLE_UP_DOWN_COUNTER;
        } else if (instrument instanceof LongUpDownCounter) {
diff --git a/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingInstruments.java b/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingInstruments.java
index 7067c390ef5ae..35417c16e7e1c 100644
--- a/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingInstruments.java
+++ b/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingInstruments.java
@@ -10,12 +10,14 @@

 import org.elasticsearch.common.util.concurrent.ReleasableLock;
 import org.elasticsearch.core.Tuple;
+import org.elasticsearch.telemetry.metric.DoubleAsyncCounter;
 import org.elasticsearch.telemetry.metric.DoubleCounter;
 import org.elasticsearch.telemetry.metric.DoubleGauge;
 import org.elasticsearch.telemetry.metric.DoubleHistogram;
 import org.elasticsearch.telemetry.metric.DoubleUpDownCounter;
 import org.elasticsearch.telemetry.metric.DoubleWithAttributes;
 import org.elasticsearch.telemetry.metric.Instrument;
+import org.elasticsearch.telemetry.metric.LongAsyncCounter;
 import org.elasticsearch.telemetry.metric.LongCounter;
 import org.elasticsearch.telemetry.metric.LongGauge;
 import org.elasticsearch.telemetry.metric.LongHistogram;
@@ -168,6 +170,28 @@ public void incrementBy(long inc, Map<String, Object> attributes) {
        }
    }

+    public static class RecordingAsyncLongCounter extends CallbackRecordingInstrument implements LongAsyncCounter {
+
+        public RecordingAsyncLongCounter(String name, Supplier<LongWithAttributes> observer, MetricRecorder<Instrument> recorder) {
+            super(name, () -> {
+                var observation = observer.get();
+                return new Tuple<>(observation.value(), observation.attributes());
+            }, recorder);
+        }
+
+    }
+
+    public static class RecordingAsyncDoubleCounter extends CallbackRecordingInstrument implements DoubleAsyncCounter {
+
+        public RecordingAsyncDoubleCounter(String name, Supplier<DoubleWithAttributes> observer, MetricRecorder<Instrument> recorder) {
+            super(name, () -> {
+                var observation = observer.get();
+                return new Tuple<>(observation.value(), observation.attributes());
+            }, recorder);
+        }
+
+    }
+
    public static class RecordingLongGauge extends CallbackRecordingInstrument implements LongGauge {

        public RecordingLongGauge(String name, Supplier<LongWithAttributes> observer, MetricRecorder<Instrument> recorder) {
diff --git a/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingMeterRegistry.java b/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingMeterRegistry.java
index f552b2d001b42..86bfd9bf38c26 100644
--- a/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingMeterRegistry.java
+++ b/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingMeterRegistry.java
@@ -8,12 +8,14 @@

 package org.elasticsearch.telemetry;

+import org.elasticsearch.telemetry.metric.DoubleAsyncCounter;
 import org.elasticsearch.telemetry.metric.DoubleCounter;
 import org.elasticsearch.telemetry.metric.DoubleGauge;
 import org.elasticsearch.telemetry.metric.DoubleHistogram;
 import org.elasticsearch.telemetry.metric.DoubleUpDownCounter;
 import org.elasticsearch.telemetry.metric.DoubleWithAttributes;
 import org.elasticsearch.telemetry.metric.Instrument;
+import org.elasticsearch.telemetry.metric.LongAsyncCounter;
 import org.elasticsearch.telemetry.metric.LongCounter;
 import org.elasticsearch.telemetry.metric.LongGauge;
 import org.elasticsearch.telemetry.metric.LongHistogram;
@@ -106,6 +108,36 @@ public LongCounter registerLongCounter(String name, String description, String u
        return instrument;
    }

+    @Override
+    public LongAsyncCounter registerLongAsyncCounter(String name, String description, String unit, Supplier<LongWithAttributes> observer) {
+        LongAsyncCounter instrument = new RecordingInstruments.RecordingAsyncLongCounter(name, observer, recorder);
+        recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit);
+        return instrument;
+    }
+
+    @Override
+    public LongAsyncCounter getLongAsyncCounter(String name) {
+        return (LongAsyncCounter) recorder.getInstrument(InstrumentType.LONG_ASYNC_COUNTER, name);
+    }
+
+    @Override
+    public DoubleAsyncCounter registerDoubleAsyncCounter(
+        String name,
+        String description,
+        String unit,
+        Supplier<DoubleWithAttributes> observer
+    ) {
+        DoubleAsyncCounter instrument = new RecordingInstruments.RecordingAsyncDoubleCounter(name, observer, recorder);
+        recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit);
+        return instrument;
+    }
+
+    @Override
+    public DoubleAsyncCounter getDoubleAsyncCounter(String name) {
+        return (DoubleAsyncCounter) recorder.getInstrument(InstrumentType.DOUBLE_ASYNC_COUNTER, name);
+
+    }
+
    @Override
    public LongCounter getLongCounter(String name) {
        return (LongCounter) recorder.getInstrument(InstrumentType.LONG_COUNTER, name);
diff --git a/test/framework/src/main/java/org/elasticsearch/telemetry/TestTelemetryPlugin.java b/test/framework/src/main/java/org/elasticsearch/telemetry/TestTelemetryPlugin.java
index 53aef542f0d1a..e237f6c9bbb4b 100644
--- a/test/framework/src/main/java/org/elasticsearch/telemetry/TestTelemetryPlugin.java
+++ b/test/framework/src/main/java/org/elasticsearch/telemetry/TestTelemetryPlugin.java
@@ -65,6 +65,10 @@ public List<Measurement> getLongHistogramMeasurement(String name) {
        return meter.getRecorder().getMeasurements(InstrumentType.LONG_HISTOGRAM, name);
    }

+    public void resetMeter() {
+        meter.getRecorder().resetCalls();
+    }
+
    @Override
    public TelemetryProvider getTelemetryProvider(Settings settings) {
        return new TelemetryProvider() {
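[Note] With these additions, async counters can be exercised in tests like any other instrument. A minimal sketch, assuming the LongWithAttributes record constructor shown elsewhere in the telemetry API; the metric name and observed value are made up for illustration:

    // Register an async counter whose value is pulled from a callback,
    // then check the test registry hands back the same instrument.
    RecordingMeterRegistry registry = new RecordingMeterRegistry();
    LongAsyncCounter counter = registry.registerLongAsyncCounter(
        "es.test.total",          // hypothetical metric name
        "a test-only counter",
        "count",
        () -> new LongWithAttributes(42L, Map.of())
    );
    assertSame(counter, registry.getLongAsyncCounter("es.test.total"));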
diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractMultiClustersTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractMultiClustersTestCase.java
index 4037ed796011f..7dc9374da02ea 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/AbstractMultiClustersTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractMultiClustersTestCase.java
@@ -239,7 +239,7 @@ Set<String> clusterAliases() {

        @Override
        public void close() throws IOException {
-            IOUtils.close(clusters.values());
+            IOUtils.close(CloseableTestClusterWrapper.wrap(clusters.values()));
        }
    }
diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractSearchCancellationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractSearchCancellationTestCase.java
index 3422241450e9e..7049954dc43fa 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/AbstractSearchCancellationTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractSearchCancellationTestCase.java
@@ -74,7 +74,7 @@ protected void indexTestData() {
            // Make sure we have a few segments
            BulkRequestBuilder bulkRequestBuilder = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
            for (int j = 0; j < 20; j++) {
-                bulkRequestBuilder.add(client().prepareIndex("test").setId(Integer.toString(i * 5 + j)).setSource("field", "value"));
+                bulkRequestBuilder.add(prepareIndex("test").setId(Integer.toString(i * 5 + j)).setSource("field", "value"));
            }
            assertNoFailures(bulkRequestBuilder.get());
        }
diff --git a/test/framework/src/main/java/org/elasticsearch/test/CloseableTestClusterWrapper.java b/test/framework/src/main/java/org/elasticsearch/test/CloseableTestClusterWrapper.java
new file mode 100644
index 0000000000000..60d35cfe2b3b1
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/CloseableTestClusterWrapper.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.test;
+
+import org.elasticsearch.common.collect.Iterators;
+
+import java.io.Closeable;
+import java.io.IOException;
+
+/**
+ * Adapter to make one or more {@link TestCluster} instances compatible with things like try-with-resources blocks and IOUtils.
+ */
+// NB it is deliberate that TestCluster does not implement AutoCloseable or Closeable, because if we do that then IDEs tell us that we
+// should be using a try-with-resources block everywhere and that is almost never correct. The lifecycle of these clusters is managed by
+// the test framework itself and should not be touched by most test code. This class provides adapters for the few cases where you do
+// want to auto-close these things.
+public record CloseableTestClusterWrapper(TestCluster testCluster) implements Closeable {
+    @Override
+    public void close() throws IOException {
+        testCluster().close();
+    }
+
+    public static Iterable<Closeable> wrap(Iterable<? extends TestCluster> clusters) {
+        return () -> Iterators.map(clusters.iterator(), CloseableTestClusterWrapper::new);
+    }
+
+    public static Iterable<Closeable> wrap(TestCluster... clusters) {
+        return wrap(() -> Iterators.forArray(clusters));
+    }
+}
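[Note] The wrapper restores auto-close ergonomics in the handful of places that want them. A hedged sketch (the cluster variables are illustrative):

    // Close several clusters via IOUtils without TestCluster itself being Closeable.
    IOUtils.close(CloseableTestClusterWrapper.wrap(clusterA, clusterB));

    // Or scope a single cluster to a block.
    try (var ignored = new CloseableTestClusterWrapper(testCluster)) {
        // exercise the cluster
    }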
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java b/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java
index 8ef8e9ea78be7..c3287bc88c322 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java
@@ -198,7 +198,7 @@ public static void setAllElapsedMillis(ClusterStatePublicationEvent clusterState

    public static void awaitClusterState(Logger logger, Predicate<ClusterState> statePredicate, ClusterService clusterService)
        throws Exception {
-        final PlainActionFuture<Void> future = PlainActionFuture.newFuture();
+        final PlainActionFuture<Void> future = new PlainActionFuture<>();
        ClusterStateObserver.waitForState(
            clusterService,
            clusterService.getClusterApplierService().threadPool().getThreadContext(),
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
index 37e4176e1818d..2f6286092b535 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
@@ -430,7 +430,7 @@ private void randomIndexTemplate() {
                .setPatterns(Collections.singletonList("*"))
                .setOrder(0)
                .setSettings(randomSettingsBuilder);
-            assertAcked(putTemplate.execute().actionGet());
+            assertAcked(putTemplate.get());
        }
    }
@@ -521,16 +521,18 @@ private TestCluster buildAndPutCluster(Scope currentClusterScope, long seed) thr
        TestCluster testCluster = clusters.remove(clazz); // remove this cluster first
        clearClusters(); // all leftovers are gone by now... this is really just a double safety if we miss something somewhere
        switch (currentClusterScope) {
-            case SUITE:
+            case SUITE -> {
                if (testCluster == null) { // only build if it's not there yet
                    testCluster = buildWithPrivateContext(currentClusterScope, seed);
                }
-                break;
-            case TEST:
+            }
+            case TEST -> {
                // close the previous one and create a new one
-                IOUtils.closeWhileHandlingException(testCluster);
+                if (testCluster != null) {
+                    IOUtils.closeWhileHandlingException(testCluster::close);
+                }
                testCluster = buildTestCluster(currentClusterScope, seed);
-                break;
+            }
        }
        clusters.put(clazz, testCluster);
        return testCluster;
@@ -538,7 +540,7 @@ private TestCluster buildAndPutCluster(Scope currentClusterScope, long seed) thr

    private static void clearClusters() throws Exception {
        if (clusters.isEmpty() == false) {
-            IOUtils.close(clusters.values());
+            IOUtils.close(CloseableTestClusterWrapper.wrap(clusters.values()));
            clusters.clear();
        }
        if (restClient != null) {
@@ -568,7 +570,7 @@ private void afterInternal(boolean afterClass) throws Exception {
        try {
            if (cluster() != null) {
                if (currentClusterScope != Scope.TEST) {
-                    Metadata metadata = clusterAdmin().prepareState().execute().actionGet().getState().getMetadata();
+                    Metadata metadata = clusterAdmin().prepareState().get().getState().getMetadata();

                    final Set<String> persistentKeys = new HashSet<>(metadata.persistentSettings().keySet());
                    assertThat("test leaves persistent cluster metadata behind", persistentKeys, empty());
@@ -835,7 +837,7 @@ public CreateIndexRequestBuilder prepareCreate(String index, int numNodes, Setti

    public static void updateIndexSettings(Settings.Builder settingsBuilder, String... index) {
        UpdateSettingsRequestBuilder settingsRequest = indicesAdmin().prepareUpdateSettings(index);
        settingsRequest.setSettings(settingsBuilder);
-        assertAcked(settingsRequest.execute().actionGet());
+        assertAcked(settingsRequest.get());
    }

    public static void setReplicaCount(int replicas, String index) {
@@ -1387,56 +1389,60 @@ protected void ensureFullyConnectedCluster() {
        NetworkDisruption.ensureFullyConnectedCluster(internalCluster());
    }

+    protected static IndexRequestBuilder prepareIndex(String index) {
+        return client().prepareIndex(index);
+    }
+
    /**
     * Syntactic sugar for:
     * <pre>
-     *   client().prepareIndex(index).setSource(source).execute().actionGet();
+     *   client().prepareIndex(index).setSource(source).get();
     * </pre>
     */
    protected final DocWriteResponse index(String index, XContentBuilder source) {
-        return client().prepareIndex(index).setSource(source).execute().actionGet();
+        return prepareIndex(index).setSource(source).get();
    }

    /**
     * Syntactic sugar for:
     * <pre>
-     *   client().prepareIndex(index).setSource(source).execute().actionGet();
+     *   client().prepareIndex(index).setSource(source).get();
     * </pre>
     */
    protected final DocWriteResponse index(String index, String id, Map<String, Object> source) {
-        return client().prepareIndex(index).setId(id).setSource(source).execute().actionGet();
+        return prepareIndex(index).setId(id).setSource(source).get();
    }

    /**
     * Syntactic sugar for:
     * <pre>
-     *   return client().prepareIndex(index).setId(id).setSource(source).execute().actionGet();
+     *   return client().prepareIndex(index).setId(id).setSource(source).get();
     * </pre>
     */
    protected final DocWriteResponse index(String index, String id, XContentBuilder source) {
-        return client().prepareIndex(index).setId(id).setSource(source).execute().actionGet();
+        return prepareIndex(index).setId(id).setSource(source).get();
    }

    /**
     * Syntactic sugar for:
     * <pre>
-     *   return client().prepareIndex(index).setId(id).setSource(source).execute().actionGet();
+     *   return client().prepareIndex(index).setId(id).setSource(source).get();
     * </pre>
     */
    protected final DocWriteResponse indexDoc(String index, String id, Object... source) {
-        return client().prepareIndex(index).setId(id).setSource(source).execute().actionGet();
+        return prepareIndex(index).setId(id).setSource(source).get();
    }

    /**
     * Syntactic sugar for:
     * <pre>
-     *   return client().prepareIndex(index).setId(id).setSource(source).execute().actionGet();
+     *   return client().prepareIndex(index).setId(id).setSource(source).get();
     * </pre>
     *
     * where source is a JSON String.
     */
    protected final DocWriteResponse index(String index, String id, String source) {
-        return client().prepareIndex(index).setId(id).setSource(source, XContentType.JSON).execute().actionGet();
+        return prepareIndex(index).setId(id).setSource(source, XContentType.JSON).get();
    }
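[Note] With the static prepareIndex helper, test code gets both the one-line sugar and the builder form without reaching for client() directly. A small sketch (index name, id, and fields are illustrative):

    // One-liner sugar for simple documents.
    indexDoc("articles", "1", "title", "hello", "views", 7);

    // Or the builder form when refresh policy or routing matter.
    prepareIndex("articles").setId("2")
        .setSource("title", "world")
        .setRefreshPolicy(IMMEDIATE)
        .get();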

    /**
@@ -1448,8 +1454,7 @@ protected final RefreshResponse refresh(String... indices) {
        waitForRelocation();
        RefreshResponse actionGet = indicesAdmin().prepareRefresh(indices)
            .setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_HIDDEN_FORBID_CLOSED)
-            .execute()
-            .actionGet();
+            .get();
        assertNoFailures(actionGet);
        return actionGet;
    }
@@ -1467,7 +1472,7 @@ protected final void flushAndRefresh(String... indices) {
     */
    protected final FlushResponse flush(String... indices) {
        waitForRelocation();
-        FlushResponse actionGet = indicesAdmin().prepareFlush(indices).execute().actionGet();
+        FlushResponse actionGet = indicesAdmin().prepareFlush(indices).get();
        for (DefaultShardOperationFailedException failure : actionGet.getShardFailures()) {
            assertThat("unexpected flush failure " + failure.reason(), failure.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
        }
@@ -1479,7 +1484,7 @@ protected final FlushResponse flush(String... indices) {
     */
    protected ForceMergeResponse forceMerge() {
        waitForRelocation();
-        ForceMergeResponse actionGet = indicesAdmin().prepareForceMerge().setMaxNumSegments(1).execute().actionGet();
+        ForceMergeResponse actionGet = indicesAdmin().prepareForceMerge().setMaxNumSegments(1).get();
        assertNoFailures(actionGet);
        return actionGet;
    }
@@ -1500,8 +1505,7 @@ public static boolean indexExists(String index, Client client) {
            .prepareGetIndex()
            .setIndices(index)
            .setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED)
-            .execute()
-            .actionGet();
+            .get();
        return getIndexResponse.getIndices().length > 0;
    }
@@ -1549,7 +1553,7 @@ protected static IndicesAdminClient indicesAdmin() {
    public void indexRandom(boolean forceRefresh, String index, int numDocs) throws InterruptedException {
        IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
        for (int i = 0; i < builders.length; i++) {
-            builders[i] = client().prepareIndex(index).setSource("field", "value");
+            builders[i] = prepareIndex(index).setSource("field", "value");
        }
        indexRandom(forceRefresh, Arrays.asList(builders));
    }
@@ -1632,7 +1636,7 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma
                String index = RandomPicks.randomFrom(random, indices);
                bogusIds.add(Arrays.asList(index, id));
                // We configure a routing key in case the mapping requires it
-                builders.add(client().prepareIndex().setIndex(index).setId(id).setSource("{}", XContentType.JSON).setRouting(id));
+                builders.add(prepareIndex(index).setId(id).setSource("{}", XContentType.JSON).setRouting(id));
            }
        }
        Collections.shuffle(builders, random());
@@ -1652,7 +1656,7 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma
            } else {
                logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), false, false);
                for (IndexRequestBuilder indexRequestBuilder : builders) {
-                    indexRequestBuilder.execute().actionGet();
+                    indexRequestBuilder.get();
                    postIndexAsyncActions(indicesArray, inFlightAsyncOperations, maybeFlush);
                }
            }
@@ -1667,7 +1671,7 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma
                for (IndexRequestBuilder indexRequestBuilder : segmented) {
                    bulkBuilder.add(indexRequestBuilder);
                }
-                BulkResponse actionGet = bulkBuilder.execute().actionGet();
+                BulkResponse actionGet = bulkBuilder.get();
                assertThat(actionGet.hasFailures() ? actionGet.buildFailureMessage() : "", actionGet.hasFailures(), equalTo(false));
            }
        }
@@ -1679,7 +1683,7 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma
            Throwable t = ExceptionsHelper.unwrapCause(tuple.v2());
            if (t instanceof EsRejectedExecutionException) {
                logger.debug("Error indexing doc: " + t.getMessage() + ", reindexing.");
-                tuple.v1().execute().actionGet(); // re-index if rejected
+                tuple.v1().get(); // re-index if rejected
            } else {
                actualErrors.add(tuple.v2());
            }
@@ -2213,7 +2217,7 @@ protected NumShards getNumShards(String index) {
     */
    public Set<String> assertAllShardsOnNodes(String index, String... pattern) {
        Set<String> nodes = new HashSet<>();
-        ClusterState clusterState = clusterAdmin().prepareState().execute().actionGet().getState();
+        ClusterState clusterState = clusterAdmin().prepareState().get().getState();
        for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
            for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) {
                final IndexShardRoutingTable indexShard = indexRoutingTable.shard(shardId);
@@ -2234,7 +2238,7 @@ public Set<String> assertAllShardsOnNodes(String index, String... pattern) {
     * Asserts that all segments are sorted with the provided {@link Sort}.
     */
    public void assertSortedSegments(String indexName, Sort expectedIndexSort) {
-        IndicesSegmentResponse segmentResponse = indicesAdmin().prepareSegments(indexName).execute().actionGet();
+        IndicesSegmentResponse segmentResponse = indicesAdmin().prepareSegments(indexName).get();
        IndexSegments indexSegments = segmentResponse.getIndices().get(indexName);
        for (IndexShardSegments indexShardSegments : indexSegments.getShards().values()) {
            for (ShardSegments shardSegments : indexShardSegments.shards()) {
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
index b5ac94b53d3ca..1517571878fa2 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
@@ -16,6 +16,7 @@
 import org.elasticsearch.action.admin.indices.template.delete.DeleteComponentTemplateAction;
 import org.elasticsearch.action.admin.indices.template.delete.DeleteComposableIndexTemplateAction;
 import org.elasticsearch.action.datastreams.DeleteDataStreamAction;
+import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.support.DestructiveOperations;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.client.internal.AdminClient;
@@ -382,6 +383,10 @@ public Index resolveIndex(String index) {
        return new Index(index, uuid);
    }

+    protected IndexRequestBuilder prepareIndex(String index) {
+        return client().prepareIndex(index);
+    }
+
    /**
     * Create a new search context.
     */
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
index 5589e1b94281d..a597142ae1ed0 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
@@ -144,6 +144,7 @@
 import java.security.NoSuchProviderException;
 import java.security.Provider;
 import java.security.SecureRandom;
+import java.time.Instant;
 import java.time.ZoneId;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -161,8 +162,10 @@
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.BooleanSupplier;
 import java.util.function.Consumer;
@@ -248,6 +251,7 @@ public static void resetPortCounter() {
        LogConfigurator.loadLog4jPlugins();
        LogConfigurator.configureESLogging();

+        final List<Appender> testAppenders = new ArrayList<>(3);
        for (String leakLoggerName : Arrays.asList("io.netty.util.ResourceLeakDetector", LeakTracker.class.getName())) {
            Logger leakLogger = LogManager.getLogger(leakLoggerName);
            Appender leakAppender = new AbstractAppender(leakLoggerName, null, PatternLayout.newBuilder().withPattern("%m").build()) {
@@ -263,13 +267,34 @@ public void append(LogEvent event) {
            };
            leakAppender.start();
            Loggers.addAppender(leakLogger, leakAppender);
-            // shutdown hook so that when the test JVM exits, logging is shutdown too
-            Runtime.getRuntime().addShutdownHook(new Thread(() -> {
-                leakAppender.stop();
-                LoggerContext context = (LoggerContext) LogManager.getContext(false);
-                Configurator.shutdown(context);
-            }));
+            testAppenders.add(leakAppender);
        }
+        Logger promiseUncaughtLogger = LogManager.getLogger("io.netty.util.concurrent.DefaultPromise");
+        final Appender uncaughtAppender = new AbstractAppender(
+            promiseUncaughtLogger.getName(),
+            null,
+            PatternLayout.newBuilder().withPattern("%m").build()
+        ) {
+            @Override
+            public void append(LogEvent event) {
+                if (Level.WARN.equals(event.getLevel())) {
+                    synchronized (loggedLeaks) {
+                        loggedLeaks.add(event.getMessage().getFormattedMessage());
+                    }
+                }
+            }
+        };
+        uncaughtAppender.start();
+        Loggers.addAppender(promiseUncaughtLogger, uncaughtAppender);
+        testAppenders.add(uncaughtAppender);
+        // shutdown hook so that when the test JVM exits, logging is shutdown too
+        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
+            for (Appender testAppender : testAppenders) {
+                testAppender.stop();
+            }
+            LoggerContext context = (LoggerContext) LogManager.getContext(false);
+            Configurator.shutdown(context);
+        }));

        BootstrapForTesting.ensureInitialized();
@@ -761,6 +786,16 @@ public static long randomLongBetween(long min, long max) {
        return RandomNumbers.randomLongBetween(random(), min, max);
    }

+    /**
+     * @return a random instant between a min and a max value with a random nanosecond precision
+     */
+    public static Instant randomInstantBetween(Instant minInstant, Instant maxInstant) {
+        return Instant.ofEpochSecond(
+            randomLongBetween(minInstant.getEpochSecond(), maxInstant.getEpochSecond()),
+            randomLongBetween(0, 999999999)
+        );
+    }
+
    /**
     * The maximum value that can be represented as an unsigned long.
     */
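[Note] A quick usage sketch for the new helper (the bounds are arbitrary):

    // Pick a random Instant in 2023, with random sub-second precision.
    Instant t = randomInstantBetween(
        Instant.parse("2023-01-01T00:00:00Z"),
        Instant.parse("2023-12-31T23:59:59Z")
    );

Worth noting: the nanosecond component is drawn independently of the seconds, so when the max epoch second is selected the result can land up to a second past maxInstant.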
@@ -2046,7 +2081,18 @@ public static void safeAwait(CountDownLatch countDownLatch) {
    }

    public static <T> T safeAwait(SubscribableListener<T> listener) {
-        return PlainActionFuture.get(listener::addListener, 10, TimeUnit.SECONDS);
+        final var future = new PlainActionFuture<T>();
+        listener.addListener(future);
+        try {
+            return future.get(10, TimeUnit.SECONDS);
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            throw new AssertionError("safeAwait: interrupted", e);
+        } catch (ExecutionException e) {
+            throw new AssertionError("safeAwait: listener was completed exceptionally", e);
+        } catch (TimeoutException e) {
+            throw new AssertionError("safeAwait: listener was not completed within the timeout", e);
+        }
    }

    public static void safeSleep(long millis) {
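[Note] The rewritten helper turns each failure mode into a distinct assertion message. A hedged usage sketch; startAsyncWork is a hypothetical method that eventually completes the listener:

    // Await an async computation and get a useful error on interrupt,
    // exceptional completion, or timeout.
    SubscribableListener<String> listener = new SubscribableListener<>();
    startAsyncWork(listener); // hypothetical helper
    String result = safeAwait(listener);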
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java
index 3e3759601a1c9..ce1c5dec7757d 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java
@@ -188,14 +188,7 @@ public void close() throws IOException {

    @Override
    public void ensureEstimatedStats() {
        if (size() > 0) {
-            NodesStatsResponse nodeStats = client().admin()
-                .cluster()
-                .prepareNodesStats()
-                .clear()
-                .setBreaker(true)
-                .setIndices(true)
-                .execute()
-                .actionGet();
+            NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().clear().setBreaker(true).setIndices(true).get();
            for (NodeStats stats : nodeStats.getNodes()) {
                assertThat(
                    "Fielddata breaker not reset to 0 on node: " + stats.getNode(),
diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalMultiBucketAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/InternalMultiBucketAggregationTestCase.java
index aa14fe24ac845..92a6bab9d427c 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/InternalMultiBucketAggregationTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/InternalMultiBucketAggregationTestCase.java
@@ -12,6 +12,7 @@
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.indices.breaker.CircuitBreakerMetrics;
 import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
 import org.elasticsearch.search.aggregations.Aggregation;
 import org.elasticsearch.search.aggregations.AggregationBuilder;
@@ -271,6 +272,7 @@ public void accept(int value) {
     */
    protected static void expectReduceThrowsRealMemoryBreaker(InternalAggregation agg) {
        HierarchyCircuitBreakerService breaker = new HierarchyCircuitBreakerService(
+            CircuitBreakerMetrics.NOOP,
            Settings.builder().put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "50%").build(),
            List.of(),
            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java
index f24b1824df01d..e13773443d4a6 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java
@@ -30,7 +30,6 @@
 import org.elasticsearch.indices.IndexTemplateMissingException;
 import org.elasticsearch.repositories.RepositoryMissingException;

-import java.io.Closeable;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
@@ -44,7 +43,7 @@
 * Base test cluster that exposes the basis to run tests against any elasticsearch cluster, whose layout
 * (e.g. number of nodes) is predefined and cannot be changed during the tests execution
 */
-public abstract class TestCluster implements Closeable {
+public abstract class TestCluster {

    protected final Logger logger = LogManager.getLogger(getClass());
    private final long seed;
@@ -126,7 +125,10 @@ public void assertAfterTest() throws Exception {
    /**
     * Closes the current cluster
     */
-    @Override
+    // NB this is deliberately not implementing AutoCloseable or Closeable, because if we do that then IDEs tell us that we should be using
+    // a try-with-resources block and that is almost never correct. The lifecycle of these clusters is managed by the test framework itself
+    // and should not be touched by most test code. CloseableTestClusterWrapper provides adapters for the few cases where you do want to
+    // auto-close these things.
    public abstract void close() throws IOException;

    /**
@@ -150,7 +152,7 @@ public void wipeIndices(String... indices) {
                // Happens if `action.destructive_requires_name` is set to true
                // which is the case in the CloseIndexDisableCloseAllTests
                if ("_all".equals(indices[0])) {
-                    ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().execute().actionGet();
+                    ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
                    ArrayList<String> concreteIndices = new ArrayList<>();
                    for (IndexMetadata indexMetadata : clusterStateResponse.getState().metadata()) {
                        concreteIndices.add(indexMetadata.getIndex().getName());
@@ -174,7 +176,7 @@ public void wipeAllTemplates(Set<String> exclude) {
                continue;
            }
            try {
-                client().admin().indices().prepareDeleteTemplate(indexTemplate.getName()).execute().actionGet();
+                client().admin().indices().prepareDeleteTemplate(indexTemplate.getName()).get();
            } catch (IndexTemplateMissingException e) {
                // ignore
            }
@@ -194,7 +196,7 @@ public void wipeTemplates(String... templates) {
        }
        for (String template : templates) {
            try {
-                client().admin().indices().prepareDeleteTemplate(template).execute().actionGet();
+                client().admin().indices().prepareDeleteTemplate(template).get();
            } catch (IndexTemplateMissingException e) {
                // ignore
            }
diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java
index 3ec327f7f3332..80d1b82fbfcfe 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java
@@ -452,16 +452,6 @@ public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) {

    }

-    @Override
-    public int[] docIdsToLoad() {
-        return new int[0];
-    }
-
-    @Override
-    public SearchContext docIdsToLoad(int[] docIdsToLoad) {
-        return null;
-    }
-
    @Override
    public DfsSearchResult dfsResult() {
        return null;
diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
index 10eaf322f9504..bba7a6e19deea 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
@@ -13,7 +13,9 @@
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
@@ -330,11 +332,14 @@ public static void assertSearchHit(SearchResponse searchResponse, int number, Ma
        assertThat(searchResponse.getHits().getAt(number - 1), matcher);
    }

-    public static void assertNoFailures(SearchRequestBuilder searchRequestBuilder) {
+    public static void assertNoFailures(ActionRequestBuilder<? extends ActionRequest, SearchResponse> searchRequestBuilder) {
        assertNoFailuresAndResponse(searchRequestBuilder, r -> {});
    }

-    public static void assertNoFailuresAndResponse(SearchRequestBuilder searchRequestBuilder, Consumer<SearchResponse> consumer) {
+    public static void assertNoFailuresAndResponse(
+        ActionRequestBuilder<? extends ActionRequest, SearchResponse> searchRequestBuilder,
+        Consumer<SearchResponse> consumer
+    ) {
        assertResponse(searchRequestBuilder, res -> {
            assertNoFailures(res);
            consumer.accept(res);
@@ -352,7 +357,10 @@ public static void assertNoFailuresAndResponse(ActionFuture<SearchResponse> resp
        }
    }

-    public static void assertResponse(SearchRequestBuilder searchRequestBuilder, Consumer<SearchResponse> consumer) {
+    public static <Q extends ActionRequest, R extends ActionResponse> void assertResponse(
+        ActionRequestBuilder<Q, R> searchRequestBuilder,
+        Consumer<R> consumer
+    ) {
        var res = searchRequestBuilder.get();
        try {
            consumer.accept(res);
@@ -361,7 +369,7 @@ public static void assertResponse(SearchRequestBuilder searchRequestBuilder, Con
        }
    }

-    public static void assertResponse(ActionFuture<SearchResponse> responseFuture, Consumer<SearchResponse> consumer)
+    public static <R extends ActionResponse> void assertResponse(ActionFuture<R> responseFuture, Consumer<R> consumer)
        throws ExecutionException, InterruptedException {
        var res = responseFuture.get();
        try {
@@ -372,7 +380,7 @@ public static void assertResponse(ActionFuture<SearchResponse> responseFuture, C
    }

    public static void assertCheckedResponse(
-        SearchRequestBuilder searchRequestBuilder,
+        ActionRequestBuilder<? extends ActionRequest, SearchResponse> searchRequestBuilder,
        CheckedConsumer<SearchResponse, IOException> consumer
    ) throws IOException {
        var res = searchRequestBuilder.get();
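[Note] Because the helpers now accept any ActionRequestBuilder whose response type lines up, they are no longer search-specific. A hedged sketch (the health request is illustrative of a non-search builder):

    // Works for non-search builders too, e.g. cluster health.
    assertResponse(clusterAdmin().prepareHealth(), response -> {
        assertThat(response.getStatus(), not(equalTo(ClusterHealthStatus.RED)));
    });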
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
index 9566456a041bc..2d0abaa5cf4ca 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
@@ -36,6 +36,7 @@
 import org.elasticsearch.client.RestClient;
 import org.elasticsearch.client.RestClientBuilder;
 import org.elasticsearch.client.WarningsHandler;
+import org.elasticsearch.cluster.ClusterFeatures;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -52,9 +53,13 @@
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.PathUtils;
 import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.core.UpdateForV9;
+import org.elasticsearch.features.FeatureSpecification;
+import org.elasticsearch.features.NodeFeature;
 import org.elasticsearch.health.node.selection.HealthNode;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.IndexVersion;
+import org.elasticsearch.index.IndexVersions;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.seqno.ReplicationTracker;
 import org.elasticsearch.rest.RestStatus;
@@ -75,6 +80,7 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
+import java.io.UncheckedIOException;
 import java.nio.CharBuffer;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
@@ -90,12 +96,14 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Base64;
+import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
+import java.util.Optional;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.concurrent.TimeUnit;
@@ -108,6 +116,7 @@

 import static java.util.Collections.sort;
 import static java.util.Collections.unmodifiableList;
+import static org.elasticsearch.client.RestClient.IGNORE_RESPONSE_CODES_PARAM;
 import static org.elasticsearch.core.Strings.format;
 import static org.hamcrest.Matchers.anyOf;
 import static org.hamcrest.Matchers.containsString;
@@ -132,6 +141,8 @@ public abstract class ESRestTestCase extends ESTestCase {
    public static final String CLIENT_SOCKET_TIMEOUT = "client.socket.timeout";
    public static final String CLIENT_PATH_PREFIX = "client.path.prefix";

+    private static final Pattern SEMANTIC_VERSION_PATTERN = Pattern.compile("^(\\d+\\.\\d+\\.\\d+)\\D?.*");
+
    /**
     * Convert the entity from a {@link Response} into a map of maps.
     */
@@ -201,7 +212,16 @@ public enum ProductFeature {
    }

    private static EnumSet<ProductFeature> availableFeatures;
-    private static TreeSet<Version> nodeVersions;
+    private static Set<String> nodeVersions;
+    private static TestFeatureService testFeatureService;
+
+    protected static boolean clusterHasFeature(String featureId) {
+        return testFeatureService.clusterHasFeature(featureId);
+    }
+
+    protected static boolean clusterHasFeature(NodeFeature feature) {
+        return testFeatureService.clusterHasFeature(feature.id());
+    }

    @Before
    public void initClient() throws IOException {
@@ -210,6 +230,7 @@ public void initClient() throws IOException {
            assert clusterHosts == null;
            assert availableFeatures == null;
            assert nodeVersions == null;
+            assert testFeatureService == null;
            clusterHosts = parseClusterHosts(getTestRestCluster());
            logger.info("initializing REST clients against {}", clusterHosts);
            client = buildClient(restClientSettings(), clusterHosts.toArray(new HttpHost[clusterHosts.size()]));
@@ -217,12 +238,15 @@ public void initClient() throws IOException {

            availableFeatures = EnumSet.of(ProductFeature.LEGACY_TEMPLATES);
            nodeVersions = new TreeSet<>();
+            var semanticNodeVersions = new HashSet<Version>();
            boolean serverless = false;
            Map<String, Object> response = entityAsMap(adminClient.performRequest(new Request("GET", "_nodes/plugins")));
            Map<String, Object> nodes = (Map<String, Object>) response.get("nodes");
            for (Map.Entry<String, Object> node : nodes.entrySet()) {
                Map<String, Object> nodeInfo = (Map<String, Object>) node.getValue();
-                nodeVersions.add(Version.fromString(nodeInfo.get("version").toString()));
+                var nodeVersion = nodeInfo.get("version").toString();
+                nodeVersions.add(nodeVersion);
+                parseLegacyVersion(nodeVersion).map(semanticNodeVersions::add);
                for (Object module : (List<?>) nodeInfo.get("modules")) {
                    Map<String, Object> moduleInfo = (Map<String, Object>) module;
                    final String moduleName = moduleInfo.get("name").toString();
@@ -261,7 +285,24 @@ public void initClient() throws IOException {
                    );
                }
            }
+
+            assert semanticNodeVersions.isEmpty() == false || serverless;
+
+            // Historical features information is unavailable when using legacy test plugins
+            boolean hasHistoricalFeaturesInformation = System.getProperty("tests.features.metadata.path") != null;
+            var providers = hasHistoricalFeaturesInformation
+                ? List.of(new RestTestLegacyFeatures(), new ESRestTestCaseHistoricalFeatures())
+                : List.of(new RestTestLegacyFeatures());
+
+            testFeatureService = new TestFeatureService(
+                hasHistoricalFeaturesInformation,
+                providers,
+                semanticNodeVersions,
+                ClusterFeatures.calculateAllNodeFeatures(getClusterStateFeatures().values())
+            );
        }
+
+        assert testFeatureService != null;
        assert client != null;
        assert adminClient != null;
        assert clusterHosts != null;
@@ -370,9 +411,7 @@ private boolean isExclusivelyTargetingCurrentVersionCluster() {

    public static RequestOptions expectVersionSpecificWarnings(Consumer<VersionSensitiveWarningsHandler> expectationsSetter) {
        Builder builder = RequestOptions.DEFAULT.toBuilder();
-        VersionSensitiveWarningsHandler warningsHandler = new VersionSensitiveWarningsHandler(
-            nodeVersions.stream().map(Version::toString).collect(Collectors.toSet())
-        );
+        VersionSensitiveWarningsHandler warningsHandler = new VersionSensitiveWarningsHandler(new HashSet<>(nodeVersions));
        expectationsSetter.accept(warningsHandler);
        builder.setWarningsHandler(warningsHandler);
        return builder.build();
@@ -381,7 +420,7 @@ public static RequestOptions expectVersionSpecificWarnings(Consumer
-                if (nodeVersions.stream().allMatch(version -> version.onOrAfter(Version.V_7_7_0))) {
+                // In case of bwc testing, we need to delete component and composable
+                // index templates only for clusters that support this historical feature
+                if (clusterHasFeature(RestTestLegacyFeatures.COMPONENT_TEMPLATE_SUPPORTED)) {
                    try {
                        Request getTemplatesRequest = new Request("GET", "_index_template");
                        Map<String, Object> composableIndexTemplates = XContentHelper.convertToMap(
@@ -770,9 +803,9 @@ private void wipeCluster() throws Exception {
                    .filter(name -> isXPackTemplate(name) == false)
                    .collect(Collectors.toList());
                if (names.isEmpty() == false) {
-                    // Ideally we would want to check the version of the elected master node and
-                    // send the delete request directly to that node.
-                    if (nodeVersions.stream().allMatch(version -> version.onOrAfter(Version.V_7_13_0))) {
+                    // Ideally we would want to check if the elected master node supports this feature and send the delete request
+                    // directly to that node, but node-specific feature checks is something we want to avoid if possible.
+                    if (clusterHasFeature(RestTestLegacyFeatures.DELETE_TEMPLATE_MULTIPLE_NAMES_SUPPORTED)) {
                        try {
                            adminClient().performRequest(new Request("DELETE", "_index_template/" + String.join(",", names)));
                        } catch (ResponseException e) {
@@ -801,9 +834,9 @@ private void wipeCluster() throws Exception {
                    .filter(name -> isXPackTemplate(name) == false)
                    .collect(Collectors.toList());
                if (names.isEmpty() == false) {
-                    // Ideally we would want to check the version of the elected master node and
-                    // send the delete request directly to that node.
-                    if (nodeVersions.stream().allMatch(version -> version.onOrAfter(Version.V_7_13_0))) {
+                    // Ideally we would want to check if the elected master node supports this feature and send the delete request
+                    // directly to that node, but node-specific feature checks is something we want to avoid if possible.
+                    if (clusterHasFeature(RestTestLegacyFeatures.DELETE_TEMPLATE_MULTIPLE_NAMES_SUPPORTED)) {
                        try {
                            adminClient().performRequest(new Request("DELETE", "_component_template/" + String.join(",", names)));
                        } catch (ResponseException e) {
@@ -920,9 +953,9 @@ private Set<String> getAllUnexpectedTemplates() throws IOException {
        Set<String> unexpectedTemplates = new HashSet<>();
        if (preserveDataStreamsUponCompletion() == false && preserveTemplatesUponCompletion() == false) {
            if (has(ProductFeature.XPACK)) {
-                // In case of bwc testing, if all nodes are before 7.8.0 then no need to attempt to delete component and composable
-                // index templates, because these were introduced in 7.8.0:
-                if (nodeVersions.stream().allMatch(version -> version.onOrAfter(Version.V_7_8_0))) {
+                // In case of bwc testing, we need to delete component and composable
+                // index templates only for clusters that support this historical feature
+                if (clusterHasFeature(RestTestLegacyFeatures.COMPONENT_TEMPLATE_SUPPORTED)) {
                    Request getTemplatesRequest = new Request("GET", "_index_template");
                    Map<String, Object> composableIndexTemplates = XContentHelper.convertToMap(
                        JsonXContent.jsonXContent,
@@ -977,7 +1010,6 @@ protected void deleteAllNodeShutdownMetadata() throws IOException {
        Object nodesResponse = statusResponse.get("nodes");
        final List<String> nodeIds;
        if (nodesResponse instanceof List) { // `nodes` is parsed as a List<> only if it's populated (not empty)
-            assert minimumNodeVersion().onOrAfter(Version.V_7_15_0);
            List<Map<String, Object>> nodesArray = (List<Map<String, Object>>) nodesResponse;
            nodeIds = nodesArray.stream().map(nodeShutdownMetadata -> (String) nodeShutdownMetadata.get("node_id")).toList();
        } else {
@@ -995,7 +1027,7 @@ protected static void wipeAllIndices() throws IOException {
    }

    protected static void wipeAllIndices(boolean preserveSecurityIndices) throws IOException {
-        boolean includeHidden = minimumNodeVersion().onOrAfter(Version.V_7_7_0);
+        boolean includeHidden = clusterHasFeature(RestTestLegacyFeatures.HIDDEN_INDICES_SUPPORTED);
        try {
            // remove all indices except ilm and slm history which can pop up after deleting all data streams but shouldn't interfere
            final List<String> indexPatterns = new ArrayList<>(List.of("*", "-.ds-ilm-history-*", "-.ds-.slm-history-*"));
@@ -1151,7 +1183,7 @@ private void wipeRollupJobs() throws IOException {
            @SuppressWarnings("unchecked")
            String jobId = (String) ((Map<String, Object>) jobConfig.get("config")).get("id");
            Request request = new Request("POST", "/_rollup/job/" + jobId + "/_stop");
-            request.addParameter("ignore", "404");
+            setIgnoredErrorResponseCodes(request, RestStatus.NOT_FOUND);
            request.addParameter("wait_for_completion", "true");
            request.addParameter("timeout", "10s");
            logger.debug("stopping rollup job [{}]", jobId);
@@ -1162,14 +1194,14 @@ private void wipeRollupJobs() throws IOException {
            @SuppressWarnings("unchecked")
            String jobId = (String) ((Map<String, Object>) jobConfig.get("config")).get("id");
            Request request = new Request("DELETE", "/_rollup/job/" + jobId);
-            request.addParameter("ignore", "404"); // Ignore 404s because they imply someone was racing us to delete this
+            setIgnoredErrorResponseCodes(request, RestStatus.NOT_FOUND); // 404s imply someone was racing us to delete this
            logger.debug("deleting rollup job [{}]", jobId);
            adminClient().performRequest(request);
        }
    }

    protected void refreshAllIndices() throws IOException {
-        boolean includeHidden = minimumNodeVersion().onOrAfter(Version.V_7_7_0);
+        boolean includeHidden = clusterHasFeature(RestTestLegacyFeatures.HIDDEN_INDICES_SUPPORTED);
        Request refreshRequest = new Request("POST", "/_refresh");
        refreshRequest.addParameter("expand_wildcards", "open" + (includeHidden ? ",hidden" : ""));
        // Allow system index deprecation warnings
@@ -1479,8 +1511,9 @@ private static Set<String> runningTasks(Response response) throws IOException {
        return runningTasks;
    }

-    public static void assertOK(Response response) {
+    public static Response assertOK(Response response) {
        assertThat(response.getStatusLine().getStatusCode(), anyOf(equalTo(200), equalTo(201)));
+        return response;
    }

    public static ObjectPath assertOKAndCreateObjectPath(Response response) throws IOException {
@@ -1671,23 +1704,20 @@ private static void updateIndexSettings(String index, Settings settings) throws
        client().performRequest(request);
    }

-    protected static void expectSoftDeletesWarning(Request request, String indexName) {
-        final List<String> expectedWarnings = List.of(
+    protected static void expectSoftDeletesWarning(Request request, String indexName) throws IOException {
+        final String expectedWarning =
            "Creating indices with soft-deletes disabled is deprecated and will be removed in future Elasticsearch versions. "
                + "Please do not specify value for setting [index.soft_deletes.enabled] of index ["
                + indexName
-                + "]."
-        );
-        if (nodeVersions.stream().allMatch(version -> version.onOrAfter(Version.V_7_6_0))) {
-            request.setOptions(
-                RequestOptions.DEFAULT.toBuilder().setWarningsHandler(warnings -> warnings.equals(expectedWarnings) == false)
-            );
-        } else if (nodeVersions.stream().anyMatch(version -> version.onOrAfter(Version.V_7_6_0))) {
-            request.setOptions(
-                RequestOptions.DEFAULT.toBuilder()
-                    .setWarningsHandler(warnings -> warnings.isEmpty() == false && warnings.equals(expectedWarnings) == false)
-            );
-        }
+                + "].";
+
+        final var softDeleteDisabledDeprecated = minimumIndexVersion().onOrAfter(IndexVersions.V_7_6_0);
+        request.setOptions(expectVersionSpecificWarnings(v -> {
+            if (softDeleteDisabledDeprecated) {
+                v.current(expectedWarning);
+            }
+            v.compatible(expectedWarning);
+        }));
    }

    protected static Map<String, Object> getIndexSettings(String index) throws IOException {
@@ -1721,10 +1751,11 @@ protected static boolean indexExists(String index) throws IOException {
    }

    /**
-     * Deprecation message emitted since {@link Version#V_7_12_0} for the rest of the 7.x series. Can be removed in v9 since it is not
+     * Deprecation message emitted since 7.12.0 for the rest of the 7.x series. Can be removed in v9 since it is not
     * emitted in v8. Note that this message is also permitted in certain YAML test cases, it can be removed there too.
     * See https://github.com/elastic/elasticsearch/issues/66419 for more details.
     */
+    @UpdateForV9
    private static final String WAIT_FOR_ACTIVE_SHARDS_DEFAULT_DEPRECATION_MESSAGE = "the default value for the ?wait_for_active_shards "
        + "parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' "
        + "to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour";
@@ -1842,7 +1873,7 @@ protected static void deleteSnapshot(RestClient restClient, String repository, S
        throws IOException {
        final Request request = new Request(HttpDelete.METHOD_NAME, "_snapshot/" + repository + '/' + snapshot);
        if (ignoreMissing) {
-            request.addParameter("ignore", "404");
+            setIgnoredErrorResponseCodes(request, RestStatus.NOT_FOUND);
        }
        final Response response = restClient.performRequest(request);
        assertThat(response.getStatusLine().getStatusCode(), ignoreMissing ? anyOf(equalTo(200), equalTo(404)) : equalTo(200));
@@ -1895,7 +1926,7 @@ protected static boolean isXPackTemplate(String name) {
        if (name.startsWith("elastic-connectors")) {
            return true;
        }
-        if (name.contains("@")) {
+        if (name.contains("@") && name.endsWith("@custom") == false) {
            // We have a naming convention that internal component templates contain `@`. See also index-templates.asciidoc.
            return true;
        }
@@ -1987,7 +2018,7 @@ public void assertEmptyTranslog(String index) throws Exception {
     * that we have renewed every PRRL to the global checkpoint of the corresponding copy and properly synced to all copies.
     */
    public void ensurePeerRecoveryRetentionLeasesRenewedAndSynced(String index) throws Exception {
-        boolean mustHavePRRLs = minimumNodeVersion().onOrAfter(Version.V_7_6_0);
+        boolean mustHavePRRLs = minimumIndexVersion().onOrAfter(IndexVersions.V_7_6_0);
        assertBusy(() -> {
            Map<String, Object> stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards")));
            @SuppressWarnings("unchecked")
@@ -2029,26 +2060,23 @@ public void ensurePeerRecoveryRetentionLeasesRenewedAndSynced(String index) thro
        }, 60, TimeUnit.SECONDS);
    }

-    /**
-     * Returns the minimum node version among all nodes of the cluster
-     */
-    protected static Version minimumNodeVersion() throws IOException {
-        final Request request = new Request("GET", "_nodes");
-        request.addParameter("filter_path", "nodes.*.version");
+    private static Map<String, Set<String>> getClusterStateFeatures() throws IOException {
+        final Request request = new Request("GET", "_cluster/state");
+        request.addParameter("filter_path", "nodes_features");
        final Response response = adminClient().performRequest(request);
-        final Map<String, Object> nodes = ObjectPath.createFromResponse(response).evaluate("nodes");
-        Version minVersion = null;
-        for (Map.Entry<String, Object> node : nodes.entrySet()) {
-            @SuppressWarnings("unchecked")
-            Version nodeVersion = Version.fromString((String) ((Map<String, Object>) node.getValue()).get("version"));
-            if (minVersion == null || minVersion.after(nodeVersion)) {
-                minVersion = nodeVersion;
-            }
+        var responseData = responseAsMap(response);
+        if (responseData.get("nodes_features") instanceof List<?> nodesFeatures) {
+            return nodesFeatures.stream()
+                .map(Map.class::cast)
+                .collect(Collectors.toUnmodifiableMap(nodeFeatureMap -> nodeFeatureMap.get("node_id").toString(), nodeFeatureMap -> {
+                    @SuppressWarnings("unchecked")
+                    var nodeFeatures = (List<String>) nodeFeatureMap.get("features");
+                    return new HashSet<>(nodeFeatures);
+                }));
        }
-        assertNotNull(minVersion);
-        return minVersion;
+        return Map.of();
    }

    /**
@@ -2068,7 +2096,9 @@ protected static IndexVersion minimumIndexVersion() throws IOException {
            // fallback on version if index version is not there
            IndexVersion indexVersion = versionStr != null
                ? IndexVersion.fromId(Integer.parseInt(versionStr))
-                : IndexVersion.fromId(Version.fromString((String) nodeData.get("version")).id);
+                : IndexVersion.fromId(
+                    parseLegacyVersion((String) nodeData.get("version")).map(Version::id).orElse(IndexVersions.MINIMUM_COMPATIBLE.id())
+                );
            if (minVersion == null || minVersion.after(indexVersion)) {
                minVersion = indexVersion;
            }
@@ -2077,22 +2107,12 @@ protected static IndexVersion minimumIndexVersion() throws IOException {
        return minVersion;
    }

-    @SuppressWarnings("unchecked")
-    private static void ensureGlobalCheckpointSynced(String index) throws Exception {
-        assertBusy(() -> {
-            Map<String, Object> stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards")));
-            List<Map<String, ?>> shardStats = (List<Map<String, ?>>) XContentMapValues.extractValue("indices." + index + ".shards.0", stats);
-            shardStats.stream()
-                .map(shard -> (Map<String, ?>) XContentMapValues.extractValue("seq_no", shard))
-                .filter(Objects::nonNull)
-                .forEach(seqNoStat -> {
-                    long globalCheckpoint = ((Number) XContentMapValues.extractValue("global_checkpoint", seqNoStat)).longValue();
-                    long localCheckpoint = ((Number) XContentMapValues.extractValue("local_checkpoint", seqNoStat)).longValue();
-                    long maxSeqNo = ((Number) XContentMapValues.extractValue("max_seq_no", seqNoStat)).longValue();
-                    assertThat(shardStats.toString(), localCheckpoint, equalTo(maxSeqNo));
-                    assertThat(shardStats.toString(), globalCheckpoint, equalTo(maxSeqNo));
-                });
-        }, 60, TimeUnit.SECONDS);
+    private static Optional<Version> parseLegacyVersion(String version) {
+        var semanticVersionMatcher = SEMANTIC_VERSION_PATTERN.matcher(version);
+        if (semanticVersionMatcher.matches()) {
+            return Optional.of(Version.fromString(semanticVersionMatcher.group(1)));
+        }
+        return Optional.empty();
    }

    /**
@@ -2213,4 +2233,45 @@ private static boolean isMlEnabled() {
        }
    }

+    private static class ESRestTestCaseHistoricalFeatures implements FeatureSpecification {
+        private static Map<NodeFeature, Version> historicalFeatures;
+
+        @Override
+        public Map<NodeFeature, Version> getHistoricalFeatures() {
+            if (historicalFeatures == null) {
+                Map<NodeFeature, Version> historicalFeaturesMap = new HashMap<>();
+                String metadataPath = System.getProperty("tests.features.metadata.path");
+                if (metadataPath == null) {
+                    throw new UnsupportedOperationException(
+                        "Historical features information is unavailable when using legacy test plugins."
+                    );
+                }
+
+                String[] metadataFiles = metadataPath.split(System.getProperty("path.separator"));
+                for (String metadataFile : metadataFiles) {
+                    try (
+                        InputStream in = Files.newInputStream(PathUtils.get(metadataFile));
+                        XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, in)
+                    ) {
+                        for (Map.Entry<String, String> entry : parser.mapStrings().entrySet()) {
+                            historicalFeaturesMap.put(new NodeFeature(entry.getKey()), Version.fromString(entry.getValue()));
+                        }
+                    } catch (IOException e) {
+                        throw new UncheckedIOException(e);
+                    }
+                }
+
+                historicalFeatures = Collections.unmodifiableMap(historicalFeaturesMap);
+            }
+
+            return historicalFeatures;
+        }
+    }
+
+    public static void setIgnoredErrorResponseCodes(Request request, RestStatus... restStatuses) {
+        request.addParameter(
+            IGNORE_RESPONSE_CODES_PARAM,
+            Arrays.stream(restStatuses).map(restStatus -> Integer.toString(restStatus.getStatus())).collect(Collectors.joining(","))
+        );
+    }
+}
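[Note] The old low-level `ignore` request parameter is replaced by the client's IGNORE_RESPONSE_CODES_PARAM, wrapped by the new helper. A hedged usage sketch (the endpoint is illustrative):

    // Tolerate a 404 when deleting something that may already be gone.
    Request request = new Request("DELETE", "/_ingest/pipeline/my-pipeline");
    setIgnoredErrorResponseCodes(request, RestStatus.NOT_FOUND);
    adminClient().performRequest(request);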
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java new file mode 100644 index 0000000000000..60653d32e1e38 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.rest; + +import org.elasticsearch.Version; +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; + +import java.util.Map; + +import static java.util.Map.entry; + +/** + * This class groups historical features that have been removed from the production codebase, but are still used by the test + * framework to support BwC tests. Rather than leaving them in the main src we group them here, so it's clear they are not used in + * production code anymore. + */ +public class RestTestLegacyFeatures implements FeatureSpecification { + public static final NodeFeature ML_STATE_RESET_FALLBACK_ON_DISABLED = new NodeFeature("ml.state_reset_fallback_on_disabled"); + public static final NodeFeature FEATURE_STATE_RESET_SUPPORTED = new NodeFeature("system_indices.feature_state_reset_supported"); + public static final NodeFeature SYSTEM_INDICES_REST_ACCESS_ENFORCED = new NodeFeature("system_indices.rest_access_enforced"); + public static final NodeFeature HIDDEN_INDICES_SUPPORTED = new NodeFeature("indices.hidden_supported"); + public static final NodeFeature COMPONENT_TEMPLATE_SUPPORTED = new NodeFeature("indices.component_template_supported"); + public static final NodeFeature DELETE_TEMPLATE_MULTIPLE_NAMES_SUPPORTED = new NodeFeature( + "indices.delete_template_multiple_names_supported" + ); + + @Override + public Map<NodeFeature, Version> getHistoricalFeatures() { + return Map.ofEntries( + entry(FEATURE_STATE_RESET_SUPPORTED, Version.V_7_13_0), + entry(SYSTEM_INDICES_REST_ACCESS_ENFORCED, Version.V_8_0_0), + entry(HIDDEN_INDICES_SUPPORTED, Version.V_7_7_0), + entry(COMPONENT_TEMPLATE_SUPPORTED, Version.V_7_8_0), + entry(DELETE_TEMPLATE_MULTIPLE_NAMES_SUPPORTED, Version.V_7_13_0), + entry(ML_STATE_RESET_FALLBACK_ON_DISABLED, Version.V_8_7_0) + ); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/TestFeatureService.java b/test/framework/src/main/java/org/elasticsearch/test/rest/TestFeatureService.java new file mode 100644 index 0000000000000..1f7a48add1f1c --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/TestFeatureService.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.rest; + +import org.elasticsearch.Version; +import org.elasticsearch.core.Strings; +import org.elasticsearch.features.FeatureData; +import org.elasticsearch.features.FeatureSpecification; + +import java.util.Collection; +import java.util.List; +import java.util.NavigableMap; +import java.util.Set; +import java.util.function.Predicate; + +class TestFeatureService { + private final Predicate<String> historicalFeaturesPredicate; + private final Set<String> clusterStateFeatures; + + TestFeatureService( + boolean hasHistoricalFeaturesInformation, + List<? extends FeatureSpecification> specs, + Collection<Version> nodeVersions, + Set<String> clusterStateFeatures + ) { + var minNodeVersion = nodeVersions.stream().min(Version::compareTo); + var featureData = FeatureData.createFromSpecifications(specs); + var historicalFeatures = featureData.getHistoricalFeatures(); + var allHistoricalFeatures = historicalFeatures.lastEntry() == null ? Set.of() : historicalFeatures.lastEntry().getValue(); + + var errorMessage = hasHistoricalFeaturesInformation + ? "Check the feature has been added to the correct FeatureSpecification in the relevant module or, if this is a " + + "legacy feature used only in tests, to a test-only FeatureSpecification" + : "This test is running on the legacy test framework; historical features from production code will not be available." + + " You need to port the test to the new test plugins in order to use historical features from production code." + + " If this is a legacy feature used only in tests, you can add it to a test-only FeatureSpecification"; + this.historicalFeaturesPredicate = minNodeVersion.<Predicate<String>>map(v -> featureId -> { + assert allHistoricalFeatures.contains(featureId) : Strings.format("Unknown historical feature %s: %s", featureId, errorMessage); + return hasHistoricalFeature(historicalFeatures, v, featureId); + }).orElse(featureId -> { + // We can safely assume that new non-semantic versions (serverless) support all historical features + assert allHistoricalFeatures.contains(featureId) : Strings.format("Unknown historical feature %s: %s", featureId, errorMessage); + return true; + }); + this.clusterStateFeatures = clusterStateFeatures; + } + + private static boolean hasHistoricalFeature(NavigableMap<Version, Set<String>> historicalFeatures, Version version, String featureId) { + var features = historicalFeatures.floorEntry(version); + return features != null && features.getValue().contains(featureId); + } + + boolean clusterHasFeature(String featureId) { + if (clusterStateFeatures.contains(featureId)) { + return true; + } + return historicalFeaturesPredicate.test(featureId); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index e0cd47c48515b..fc048bbe0758f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -32,7 +31,9 @@ import org.elasticsearch.common.util.concurrent.RunOnce; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; @@ -541,12 +542,7 @@ public void sendRequest( request.writeTo(bStream); final TransportRequest clonedRequest; if (request instanceof BytesTransportRequest) { - // Some request handlers read back a BytesTransportRequest - // into a different class that cannot be re-serialized (i.e. JOIN_VALIDATE_ACTION_NAME), - // in those cases we just copy the raw bytes back to a BytesTransportRequest.
- // This is only needed for the BwC for JOIN_VALIDATE_ACTION_NAME and can be removed in the next major - assert Version.CURRENT.major == Version.V_7_17_0.major + 1; - clonedRequest = new BytesTransportRequest(bStream.bytes().streamInput()); + clonedRequest = copyRawBytesForBwC(bStream); } else { RequestHandlerRegistry reg = MockTransportService.this.getRequestHandler(action); clonedRequest = reg.newRequest(bStream.bytes().streamInput()); @@ -556,11 +552,23 @@ public void sendRequest( final RunOnce runnable = new RunOnce(new AbstractRunnable() { @Override public void onFailure(Exception e) { - logger.debug("failed to send delayed request", e); + logger.debug( + () -> Strings.format( + "[%d][%s] failed to send delayed request to node [%s]", + requestId, + action, + connection.getNode() + ), + e + ); + handleInternalSendException(action, connection.getNode(), requestId, null, e); } @Override protected void doRun() throws IOException { + logger.debug( + () -> Strings.format("[%d][%s] sending delayed request to node [%s]", requestId, action, connection.getNode()) + ); connection.sendRequest(requestId, action, clonedRequest, options); } }); @@ -571,11 +579,29 @@ protected void doRun() throws IOException { runnable.run(); } else { requestsToSendWhenCleared.add(runnable); + logger.debug( + () -> Strings.format( + "[%d][%s] delaying sending request to node [%s] by [%s]", + requestId, + action, + connection.getNode(), + delay + ) + ); threadPool.schedule(runnable, delay, threadPool.generic()); } } } + // Some request handlers read back a BytesTransportRequest + // into a different class that cannot be re-serialized (i.e. JOIN_VALIDATE_ACTION_NAME), + // in those cases we just copy the raw bytes back to a BytesTransportRequest. + // This is only needed for the BwC for JOIN_VALIDATE_ACTION_NAME and can be removed in the next major + @UpdateForV9 + private static TransportRequest copyRawBytesForBwC(BytesStreamOutput bStream) throws IOException { + return new BytesTransportRequest(bStream.bytes().streamInput()); + } + @Override public void clearCallback() { synchronized (this) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java index 139890fe43fb0..6149a59306d93 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java @@ -9,7 +9,6 @@ package org.elasticsearch.test.transport; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.Lifecycle; @@ -267,11 +266,6 @@ public boolean isClosed() { return connection.isClosed(); } - @Override - public Version getVersion() { - return connection.getVersion(); - } - @Override public TransportVersion getTransportVersion() { return connection.getTransportVersion(); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index ea9dd001e5ce8..b835a56a6384c 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -2425,7 +2425,7 @@ public void 
testKeepAlivePings() throws Exception { ConnectionProfile connectionProfile = new ConnectionProfile.Builder(defaultProfile).setPingInterval(TimeValue.timeValueMillis(50)) .build(); try (TransportService service = buildService("TS_TPC", VersionInformation.CURRENT, TransportVersion.current(), Settings.EMPTY)) { - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); DiscoveryNode node = DiscoveryNodeUtils.builder("TS_TPC") .name("TS_TPC") .address(service.boundAddress().publishAddress()) @@ -2451,7 +2451,7 @@ public void testTcpHandshake() { .roles(emptySet()) .version(version0) .build(); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); serviceA.getOriginalTransport().openConnection(node, connectionProfile, future); try (Transport.Connection connection = future.actionGet()) { assertEquals(TransportVersion.current(), connection.getTransportVersion()); @@ -3553,7 +3553,7 @@ public static Future submitRequest( handler.executor(transportService.threadPool) ); responseListener.addListener(ActionListener.wrap(handler::handleResponse, e -> handler.handleException((TransportException) e))); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); responseListener.addListener(future); transportService.sendRequest(node, action, request, options, futureHandler); return future; diff --git a/test/framework/src/main/java/org/elasticsearch/transport/DisruptableMockTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/DisruptableMockTransport.java index 05d6eca0d021d..eb85323caf5a1 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/DisruptableMockTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/DisruptableMockTransport.java @@ -150,7 +150,7 @@ protected void onSendRequest( assert destinationTransport.getLocalNode().equals(getLocalNode()) == false : "non-local message from " + getLocalNode() + " to itself"; - request.incRef(); + request.mustIncRef(); destinationTransport.execute(new RebootSensitiveRunnable() { @Override diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index 17436ce8d8b21..cbec53b41dac1 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.CloseableTestClusterWrapper; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.MockHttpTransport; @@ -237,7 +238,7 @@ public Path nodeConfigPath(int nodeOrdinal) { cluster0.afterTest(); cluster1.afterTest(); } finally { - IOUtils.close(cluster0, cluster1); + IOUtils.close(CloseableTestClusterWrapper.wrap(List.of(cluster0, cluster1))); } } diff --git a/test/metadata-extractor/build.gradle b/test/metadata-extractor/build.gradle new file mode 100644 index 0000000000000..8d720dab2dbc2 --- /dev/null +++ b/test/metadata-extractor/build.gradle @@ -0,0 +1,8 @@ +plugins { + id 'elasticsearch.java' +} + +dependencies { + implementation project(':server') + testImplementation 
project(':test:framework') +} diff --git a/test/metadata-extractor/src/main/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractor.java b/test/metadata-extractor/src/main/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractor.java new file mode 100644 index 0000000000000..33162bcfa1eca --- /dev/null +++ b/test/metadata-extractor/src/main/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractor.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.extractor.features; + +import org.elasticsearch.Version; +import org.elasticsearch.common.logging.LogConfigurator; +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.xcontent.XContentGenerator; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.io.OutputStream; +import java.io.UncheckedIOException; +import java.nio.file.Files; +import java.nio.file.InvalidPathException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; +import java.util.HashMap; +import java.util.Map; +import java.util.ServiceLoader; + +public class HistoricalFeaturesMetadataExtractor { + private final ClassLoader classLoader; + + static { + // Make sure we initialize logging since this is normally done by Elasticsearch startup + LogConfigurator.configureESLogging(); + } + + public HistoricalFeaturesMetadataExtractor(ClassLoader classLoader) { + this.classLoader = classLoader; + } + + public static void main(String[] args) { + if (args.length != 1) { + printUsageAndExit(); + } + + Path outputFile = null; + try { + outputFile = Paths.get(args[0]); + } catch (InvalidPathException e) { + printUsageAndExit(); + } + + new HistoricalFeaturesMetadataExtractor(HistoricalFeaturesMetadataExtractor.class.getClassLoader()).generateMetadataFile( + outputFile + ); + } + + public void generateMetadataFile(Path outputFile) { + try ( + OutputStream os = Files.newOutputStream(outputFile, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE); + XContentGenerator generator = JsonXContent.jsonXContent.createGenerator(os) + ) { + generator.writeStartObject(); + for (Map.Entry<NodeFeature, Version> entry : extractHistoricalFeatureMetadata().entrySet()) { + generator.writeStringField(entry.getKey().id(), entry.getValue().toString()); + } + generator.writeEndObject(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + public Map<NodeFeature, Version> extractHistoricalFeatureMetadata() { + Map<NodeFeature, Version> historicalFeatures = new HashMap<>(); + ServiceLoader<FeatureSpecification> featureSpecLoader = ServiceLoader.load(FeatureSpecification.class, classLoader); + for (FeatureSpecification featureSpecification : featureSpecLoader) { + historicalFeatures.putAll(featureSpecification.getHistoricalFeatures()); + } + + return historicalFeatures; + } + + private static void printUsageAndExit() { + System.err.println("Usage: HistoricalFeaturesMetadataExtractor "); + System.exit(1); + } +} diff --git a/test/metadata-extractor/src/test/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractorTests.java b/test/metadata-extractor/src/test/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractorTests.java new file mode 100644 index 0000000000000..ba80decd046e6 --- /dev/null +++ b/test/metadata-extractor/src/test/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractorTests.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.extractor.features; + +import org.elasticsearch.Version; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.junit.Rule; +import org.junit.rules.TemporaryFolder; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Map; + +import static org.elasticsearch.xcontent.XContentParserConfiguration.EMPTY; +import static org.hamcrest.Matchers.anEmptyMap; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.not; + +public class HistoricalFeaturesMetadataExtractorTests extends ESTestCase { + + @Rule + public TemporaryFolder temporaryFolder = new TemporaryFolder(); + + public void testExtractHistoricalMetadata() throws IOException { + HistoricalFeaturesMetadataExtractor extractor = new HistoricalFeaturesMetadataExtractor(this.getClass().getClassLoader()); + Map<NodeFeature, Version> nodeFeatureVersionMap = extractor.extractHistoricalFeatureMetadata(); + assertThat(nodeFeatureVersionMap, not(anEmptyMap())); + + Path outputFile = temporaryFolder.newFile().toPath(); + extractor.generateMetadataFile(outputFile); + try (XContentParser parser = JsonXContent.jsonXContent.createParser(EMPTY, Files.newInputStream(outputFile))) { + Map<String, String> parsedMap = parser.mapStrings(); + for (Map.Entry<NodeFeature, Version> entry : nodeFeatureVersionMap.entrySet()) { + assertThat(parsedMap, hasEntry(entry.getKey().id(), entry.getValue().toString())); + } + } + } +} diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java index 122989eaec65a..b83cc7bba06e5 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java @@ -16,7 +16,8 @@ */ public enum FeatureFlag { TIME_SERIES_MODE("es.index_mode_feature_flag_registered=true", Version.fromString("8.0.0"), null), - INFERENCE_RESCORER("es.inference_rescorer_feature_flag_enabled=true", Version.fromString("8.10.0"), null); + INFERENCE_RESCORER("es.inference_rescorer_feature_flag_enabled=true", Version.fromString("8.10.0"), null), + FAILURE_STORE_ENABLED("es.failure_store_feature_flag_enabled=true", Version.fromString("8.12.0"), null); public final String systemProperty; public final Version from;
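Entries in this enum are switched on for a test cluster through the test-clusters builder; a sketch of how the new flag might be enabled (the `feature(...)` builder call and the base class are assumed from the standard test-clusters API, not shown in this change):

    import org.elasticsearch.test.cluster.ElasticsearchCluster;
    import org.elasticsearch.test.cluster.FeatureFlag;
    import org.junit.ClassRule;

    public class FailureStoreSmokeIT extends SomeRestTestBase { // hypothetical test class
        // On distributions older than the flag's `from` version (8.12.0 here),
        // the corresponding system property is simply not applied.
        @ClassRule
        public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
            .feature(FeatureFlag.FAILURE_STORE_ENABLED)
            .build();
    }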
diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/SystemPropertyProvider.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/SystemPropertyProvider.java new file mode 100644 index 0000000000000..3c2d9f65357f3 --- /dev/null +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/SystemPropertyProvider.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.cluster; + +import org.elasticsearch.test.cluster.local.LocalClusterSpec; + +import java.util.Map; + +/** + * Functional interface for supplying system properties to an Elasticsearch node. This interface is designed to be implemented by tests + * and fixtures wanting to provide system properties to an {@link ElasticsearchCluster} in a dynamic fashion. + * Instances are evaluated lazily at cluster start time. + */ +public interface SystemPropertyProvider { + + /** + * Returns a collection of system properties to apply to an Elasticsearch cluster node. This method is called when the cluster is + * started so implementors can return dynamic system property values that may or may not be based on the given node spec. + * + * @param nodeSpec the specification for the given node to apply settings to + * @return system properties to add to the node + */ + Map<String, String> get(LocalClusterSpec.LocalNodeSpec nodeSpec); +} diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java index 72c24d9dcc7ef..78c796ae8dd9c 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java @@ -715,9 +715,9 @@ private Map<String, String> getEnvironmentVariables() { } String systemProperties = ""; - if (spec.getSystemProperties().isEmpty() == false) { - systemProperties = spec.getSystemProperties() - .entrySet() + Map<String, String> resolvedSystemProperties = new HashMap<>(spec.resolveSystemProperties()); + if (resolvedSystemProperties.isEmpty() == false) { + systemProperties = resolvedSystemProperties.entrySet() .stream() .map(entry -> "-D" + entry.getKey() + "=" + entry.getValue()) .map(p -> p.replace("${ES_PATH_CONF}", configDir.toString())) diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterSpecBuilder.java index 78dbb8fb1f591..1b9691842f13a 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterSpecBuilder.java @@ -183,6 +183,7 @@ private LocalNodeSpec build(LocalClusterSpec cluster) { getKeystoreFiles(), getKeystorePassword(), getExtraConfigFiles(), + getSystemPropertyProviders(), getSystemProperties(), getJvmArgs() ); }
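Since `SystemPropertyProvider` is a single-method contract over `LocalNodeSpec`, a fixture can implement it once and compute values per node when the cluster starts. A minimal sketch (the property name and value are invented for illustration; `getName()` is assumed to return the node's configured name):

    import org.elasticsearch.test.cluster.SystemPropertyProvider;
    import org.elasticsearch.test.cluster.local.LocalClusterSpec.LocalNodeSpec;

    import java.util.Map;

    // Supplies a per-node system property; evaluated lazily at cluster start time.
    public class NodeNameSystemPropertyProvider implements SystemPropertyProvider {
        @Override
        public Map<String, String> get(LocalNodeSpec nodeSpec) {
            // "tests.node.name" is a hypothetical property used only for illustration.
            return Map.of("tests.node.name", nodeSpec.getName());
        }
    }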
diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java index 0cc9d4a360fb8..d8b0d6df5515c 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java @@ -11,6 +11,7 @@ import org.elasticsearch.test.cluster.EnvironmentProvider; import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.SettingsProvider; +import org.elasticsearch.test.cluster.SystemPropertyProvider; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.cluster.util.Version; import org.elasticsearch.test.cluster.util.resource.Resource; @@ -38,6 +39,7 @@ public abstract class AbstractLocalSpecBuilder<T extends LocalSpecBuilder<T>> im private final Map<String, Resource> keystoreFiles = new HashMap<>(); private final Map<String, Resource> extraConfigFiles = new HashMap<>(); private final Map<String, String> systemProperties = new HashMap<>(); + private final List<SystemPropertyProvider> systemPropertyProviders = new ArrayList<>(); private final List<String> jvmArgs = new ArrayList<>(); private DistributionType distributionType; private Version version; @@ -204,10 +206,31 @@ public T systemProperty(String property, String value) { return cast(this); } + @Override + public T systemProperty(String key, Supplier<String> supplier) { + this.systemPropertyProviders.add(s -> Map.of(key, supplier.get())); + return cast(this); + } + + public T systemProperty(SystemPropertyProvider systemPropertyProvider) { + this.systemPropertyProviders.add(systemPropertyProvider); + return cast(this); + } + + @Override + public T systemProperty(String key, Supplier<String> value, Predicate<LocalNodeSpec> predicate) { + this.systemPropertyProviders.add(s -> predicate.test(s) ? Map.of(key, value.get()) : Map.of()); + return cast(this); + } + public Map<String, String> getSystemProperties() { return inherit(() -> parent.getSystemProperties(), systemProperties); } + public List<SystemPropertyProvider> getSystemPropertyProviders() { + return inherit(() -> parent.getSystemPropertyProviders(), systemPropertyProviders); + } + @Override public T jvmArg(String arg) { this.jvmArgs.add(arg); diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java index 4b20afcf1e8b4..6cb92a3436aac 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java @@ -34,4 +34,5 @@ public ElasticsearchCluster build() { ) ); } + } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java index e87f370e2b592..de0d541c8535f 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java @@ -12,6 +12,7 @@ import org.elasticsearch.test.cluster.EnvironmentProvider; import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.SettingsProvider; +import org.elasticsearch.test.cluster.SystemPropertyProvider; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.cluster.local.model.User; import org.elasticsearch.test.cluster.util.Version; @@ -88,6 +89,7 @@ public static class LocalNodeSpec { private final Map<String, Resource> keystoreFiles; private final String keystorePassword; private final Map<String, Resource> extraConfigFiles; + private final List<SystemPropertyProvider> systemPropertyProviders; private final Map<String, String> systemProperties; private final List<String> jvmArgs; private Version version; @@ -109,6 +111,7 @@ public LocalNodeSpec( Map<String, Resource> keystoreFiles, String keystorePassword, Map<String, Resource> extraConfigFiles, + List<SystemPropertyProvider> systemPropertyProviders, Map<String, String> systemProperties, List<String> jvmArgs ) { @@ -128,6 +131,7 @@ public LocalNodeSpec( this.keystoreFiles = keystoreFiles; this.keystorePassword = keystorePassword; this.extraConfigFiles = extraConfigFiles; + this.systemPropertyProviders = systemPropertyProviders; this.systemProperties = systemProperties; this.jvmArgs = jvmArgs; } @@ -184,10 +188,6 @@ public Map<String, Resource> getExtraConfigFiles() { return extraConfigFiles; } - public Map<String, String> getSystemProperties() { - return systemProperties; - } - public List<String> getJvmArgs() { return jvmArgs; } @@ -278,6 +278,24 @@ public Map<String, String> resolveEnvironment() { return resolvedEnvironment; } + /** + * Resolve node system properties. Order of precedence is as follows: + * <ol> + * <li>System properties from cluster configured {@link SystemPropertyProvider}</li> + * <li>System properties from node configured {@link SystemPropertyProvider}</li> + * <li>System properties from cluster settings</li> + * <li>System properties from node settings</li> + * </ol> + * + * @return resolved system properties for node + */ + public Map<String, String> resolveSystemProperties() { + Map<String, String> resolvedSystemProperties = new HashMap<>(); + systemPropertyProviders.forEach(p -> resolvedSystemProperties.putAll(p.get(this))); + resolvedSystemProperties.putAll(systemProperties); + return resolvedSystemProperties; + } + /** * Returns a new {@link LocalNodeSpec} without the given {@link SettingsProvider}s. This is needed when resolving settings from a * settings provider to avoid infinite recursion. @@ -308,6 +326,7 @@ private LocalNodeSpec getFilteredSpec(SettingsProvider filteredProvider, Setting n.keystoreFiles, n.keystorePassword, n.extraConfigFiles, + n.systemPropertyProviders, n.systemProperties, n.jvmArgs )
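With the provider plumbing above in place, the supplier- and predicate-based `systemProperty` overloads (declared on `LocalSpecBuilder` below) defer a property's evaluation to cluster start. A usage sketch against the cluster builder (the token fixture, property names, and base class are invented; the version check assumes `LocalNodeSpec#getVersion`):

    import org.elasticsearch.test.cluster.ElasticsearchCluster;
    import org.elasticsearch.test.cluster.util.Version;
    import org.junit.ClassRule;

    public class DeferredPropertiesIT extends SomeRestTestBase { // hypothetical test class
        @ClassRule
        public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
            // Value is computed when the node starts, not when this class is loaded.
            .systemProperty("tests.auth.token", () -> tokenFixture.getToken()) // hypothetical fixture
            // Only applied to nodes whose spec matches the predicate.
            .systemProperty("tests.example.flag", () -> "true", spec -> spec.getVersion().onOrAfter(Version.fromString("8.12.0")))
            .build();
    }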
diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java index e3b6b98d84755..c18129a7c61a5 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java @@ -11,6 +11,7 @@ import org.elasticsearch.test.cluster.EnvironmentProvider; import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.SettingsProvider; +import org.elasticsearch.test.cluster.SystemPropertyProvider; import org.elasticsearch.test.cluster.local.LocalClusterSpec.LocalNodeSpec; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.cluster.util.Version; @@ -121,6 +122,22 @@ interface LocalSpecBuilder<T extends LocalSpecBuilder<T>> { */ T systemProperty(String property, String value); + /** + * Adds a system property to node JVM arguments computed by the given supplier + */ + T systemProperty(String property, Supplier<String> supplier); + + /** + * Adds a system property to node JVM arguments computed by the given supplier + * when the given predicate evaluates to {@code true}. + */ + T systemProperty(String setting, Supplier<String> value, Predicate<LocalNodeSpec> predicate); + + /** + * Register a {@link SystemPropertyProvider}. + */ + T systemProperty(SystemPropertyProvider systemPropertyProvider); + /** * Adds an additional command line argument to node JVM arguments.
*/ diff --git a/test/test-clusters/src/main/resources/fips/fips_java.policy b/test/test-clusters/src/main/resources/fips/fips_java.policy index 4ef62e03c2546..bbfc1caf7593a 100644 --- a/test/test-clusters/src/main/resources/fips/fips_java.policy +++ b/test/test-clusters/src/main/resources/fips/fips_java.policy @@ -1,6 +1,10 @@ grant { permission java.security.SecurityPermission "putProviderProperty.BCFIPS"; permission java.security.SecurityPermission "putProviderProperty.BCJSSE"; + permission java.security.SecurityPermission "getProperty.keystore.type.compat"; + permission java.security.SecurityPermission "getProperty.jdk.tls.disabledAlgorithms"; + permission java.security.SecurityPermission "getProperty.jdk.certpath.disabledAlgorithms"; + permission java.security.SecurityPermission "getProperty.jdk.tls.server.defaultDHEParameters"; permission java.lang.RuntimePermission "getProtectionDomain"; permission java.util.PropertyPermission "java.runtime.name", "read"; permission org.bouncycastle.crypto.CryptoServicesPermission "tlsAlgorithmsEnabled"; diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestSpec.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestSpec.java index be34ee9be0ea1..8662d886cce89 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestSpec.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestSpec.java @@ -24,6 +24,8 @@ import java.util.Set; import java.util.stream.Stream; +import static org.elasticsearch.client.RestClient.IGNORE_RESPONSE_CODES_PARAM; + /** * Holds the specification used to turn {@code do} actions in the YAML suite into REST api calls. 
*/ @@ -69,7 +71,7 @@ public boolean isGlobalParameter(String param) { * that they influence the client behaviour and don't get sent to Elasticsearch */ public boolean isClientParameter(String name) { - return "ignore".equals(name); + return IGNORE_RESPONSE_CODES_PARAM.equals(name); } /** diff --git a/x-pack/plugin/analytics/src/internalClusterTest/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsWithRequestBreakerIT.java b/x-pack/plugin/analytics/src/internalClusterTest/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsWithRequestBreakerIT.java index f8b276dbbf6a5..6f13b3b4bc528 100644 --- a/x-pack/plugin/analytics/src/internalClusterTest/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsWithRequestBreakerIT.java +++ b/x-pack/plugin/analytics/src/internalClusterTest/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsWithRequestBreakerIT.java @@ -43,8 +43,7 @@ public void testRequestBreaker() throws Exception { true, IntStream.range(0, randomIntBetween(10, 1000)) .mapToObj( - i -> client().prepareIndex("test") - .setId("id_" + i) + i -> prepareIndex("test").setId("id_" + i) .setSource(Map.of("field0", randomAlphaOfLength(5), "field1", randomAlphaOfLength(5))) ) .toArray(IndexRequestBuilder[]::new) diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilder.java index 315a72ca1d645..0d3931e9eb8c2 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilder.java @@ -19,6 +19,7 @@ import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.InternalOrder; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; @@ -36,6 +37,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.ToLongFunction; import java.util.stream.Collectors; public class MultiTermsAggregationBuilder extends AbstractAggregationBuilder { @@ -154,8 +156,17 @@ public boolean supportsSampling() { } @Override - public boolean supportsParallelCollection() { - return false; + public boolean supportsParallelCollection(ToLongFunction fieldCardinalityResolver) { + for (MultiValuesSourceFieldConfig sourceFieldConfig : terms) { + if (sourceFieldConfig.getScript() != null) { + return false; + } + long cardinality = fieldCardinalityResolver.applyAsLong(sourceFieldConfig.getFieldName()); + if (TermsAggregationBuilder.supportsParallelCollection(cardinality, order, bucketCountThresholds) == false) { + return false; + } + } + return super.supportsParallelCollection(fieldCardinalityResolver); } /** diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/RateAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/RateAggregationBuilder.java index 0b0becc1ae446..8bda1d59c5b57 100644 --- 
a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/RateAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/RateAggregationBuilder.java @@ -196,7 +196,6 @@ protected ValuesSourceConfig resolveConfig(AggregationContext context) { null, null, 1.0, - null, DocValueFormat.RAW, context::nowInMillis ); diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistogramPercentileAggregationTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistogramPercentileAggregationTests.java index bf289a601ae21..eb72480927931 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistogramPercentileAggregationTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistogramPercentileAggregationTests.java @@ -35,6 +35,9 @@ import java.util.Iterator; import java.util.List; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; + public class HistogramPercentileAggregationTests extends ESSingleNodeTestCase { public void testHDRHistogram() throws Exception { @@ -97,17 +100,15 @@ public void testHDRHistogram() throws Exception { .field("counts", counts.toArray(new Integer[counts.size()])) .endObject() .endObject(); - client().prepareIndex("pre_agg").setSource(preAggDoc).get(); + prepareIndex("pre_agg").setSource(preAggDoc).get(); histogram.reset(); } } client().admin().indices().refresh(new RefreshRequest("raw", "pre_agg")).get(); - SearchResponse response = client().prepareSearch("raw").setTrackTotalHits(true).get(); - assertEquals(numDocs, response.getHits().getTotalHits().value); + assertHitCount(client().prepareSearch("raw").setTrackTotalHits(true), numDocs); - response = client().prepareSearch("pre_agg").get(); - assertEquals(numDocs / frq, response.getHits().getTotalHits().value); + assertHitCount(client().prepareSearch("pre_agg"), numDocs / frq); PercentilesAggregationBuilder builder = AggregationBuilders.percentiles("agg") .field("data") @@ -115,17 +116,21 @@ public void testHDRHistogram() throws Exception { .numberOfSignificantValueDigits(numberOfSignificantValueDigits) .percentiles(10); - SearchResponse responseRaw = client().prepareSearch("raw").addAggregation(builder).get(); - SearchResponse responsePreAgg = client().prepareSearch("pre_agg").addAggregation(builder).get(); - SearchResponse responseBoth = client().prepareSearch("pre_agg", "raw").addAggregation(builder).get(); - - InternalHDRPercentiles percentilesRaw = responseRaw.getAggregations().get("agg"); - InternalHDRPercentiles percentilesPreAgg = responsePreAgg.getAggregations().get("agg"); - InternalHDRPercentiles percentilesBoth = responseBoth.getAggregations().get("agg"); - for (int i = 1; i < 100; i++) { - assertEquals(percentilesRaw.percentile(i), percentilesPreAgg.percentile(i), 0.0); - assertEquals(percentilesRaw.percentile(i), percentilesBoth.percentile(i), 0.0); - } + assertResponse( + client().prepareSearch("raw").addAggregation(builder), + responseRaw -> assertResponse( + client().prepareSearch("pre_agg").addAggregation(builder), + responsePreAgg -> assertResponse(client().prepareSearch("pre_agg", "raw").addAggregation(builder), responseBoth -> { + InternalHDRPercentiles percentilesRaw = responseRaw.getAggregations().get("agg"); + 
InternalHDRPercentiles percentilesPreAgg = responsePreAgg.getAggregations().get("agg"); + InternalHDRPercentiles percentilesBoth = responseBoth.getAggregations().get("agg"); + for (int i = 1; i < 100; i++) { + assertEquals(percentilesRaw.percentile(i), percentilesPreAgg.percentile(i), 0.0); + assertEquals(percentilesRaw.percentile(i), percentilesBoth.percentile(i), 0.0); + } + }) + ) + ); } private void setupTDigestHistogram(int compression) throws Exception { @@ -200,7 +205,7 @@ private void setupTDigestHistogram(int compression) throws Exception { .endObject() .endObject() .endObject(); - client().prepareIndex("pre_agg").setSource(preAggDoc).get(); + prepareIndex("pre_agg").setSource(preAggDoc).get(); histogram = TDigestState.create(compression); } } diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilderTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilderTests.java index 2b150c39c0c9d..ff345b1dac59c 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilderTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilderTests.java @@ -10,10 +10,13 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.Script; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.BaseAggregationBuilder; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.support.MultiValuesSourceFieldConfig; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.test.AbstractXContentSerializingTestCase; @@ -27,6 +30,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.function.ToLongFunction; import static org.elasticsearch.test.InternalAggregationTestCase.randomNumericDocValueFormat; import static org.elasticsearch.xpack.analytics.multiterms.InternalMultiTermsTests.randomBucketOrder; @@ -130,4 +134,94 @@ protected NamedXContentRegistry xContentRegistry() { namedXContent.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents()); return new NamedXContentRegistry(namedXContent); } + + public void testSupportsParallelCollection() { + { + AggregatorFactories.Builder builder = new AggregatorFactories.Builder(); + MultiValuesSourceFieldConfig.Builder sourceBuilder = new MultiValuesSourceFieldConfig.Builder(); + sourceBuilder.setScript(new Script("id")); + MultiTermsAggregationBuilder terms = new MultiTermsAggregationBuilder("terms").terms( + List.of(sourceBuilder.build(), sourceBuilder.build()) + ); + builder.addAggregator(terms); + assertFalse(builder.supportsParallelCollection(field -> randomIntBetween(-1, 100))); + } + { + AggregatorFactories.Builder builder = new AggregatorFactories.Builder(); + MultiValuesSourceFieldConfig.Builder sourceBuilder1 = new MultiValuesSourceFieldConfig.Builder(); + sourceBuilder1.setFieldName("field1"); + MultiValuesSourceFieldConfig.Builder sourceBuilder2 = new 
MultiValuesSourceFieldConfig.Builder(); + sourceBuilder2.setFieldName("field2"); + MultiTermsAggregationBuilder terms = new MultiTermsAggregationBuilder("terms").terms( + List.of(sourceBuilder1.build(), sourceBuilder2.build()) + ); + terms.shardSize(10); + builder.addAggregator(terms); + assertFalse(builder.supportsParallelCollection(field -> -1)); + List fields = new ArrayList<>(); + assertTrue(builder.supportsParallelCollection(field -> { + fields.add(field); + return randomIntBetween(0, 10); + })); + assertEquals(List.of("field1", "field2"), fields); + assertFalse(builder.supportsParallelCollection(field -> randomIntBetween(11, 100))); + terms.terms( + List.of( + sourceBuilder1.build(), + sourceBuilder2.build(), + new MultiValuesSourceFieldConfig.Builder().setScript(new Script("id")).build() + ) + ); + assertFalse(builder.supportsParallelCollection(field -> randomIntBetween(-1, 100))); + } + { + MultiValuesSourceFieldConfig.Builder sourceBuilder1 = new MultiValuesSourceFieldConfig.Builder(); + sourceBuilder1.setFieldName("field1"); + MultiValuesSourceFieldConfig.Builder sourceBuilder2 = new MultiValuesSourceFieldConfig.Builder(); + sourceBuilder2.setFieldName("field2"); + MultiTermsAggregationBuilder terms = new MultiTermsAggregationBuilder("terms").terms( + List.of(sourceBuilder1.build(), sourceBuilder2.build()) + ); + terms.shardSize(10); + assertTrue(terms.supportsParallelCollection(field -> randomIntBetween(0, 10))); + terms.subAggregation(new TermsAggregationBuilder("name") { + @Override + public boolean supportsParallelCollection(ToLongFunction fieldCardinalityResolver) { + return false; + } + }); + assertFalse(terms.supportsParallelCollection(field -> randomIntBetween(0, 10))); + } + { + MultiValuesSourceFieldConfig.Builder sourceBuilder1 = new MultiValuesSourceFieldConfig.Builder(); + sourceBuilder1.setFieldName("field1"); + MultiValuesSourceFieldConfig.Builder sourceBuilder2 = new MultiValuesSourceFieldConfig.Builder(); + sourceBuilder2.setFieldName("field2"); + MultiTermsAggregationBuilder terms = new MultiTermsAggregationBuilder("terms").terms( + List.of(sourceBuilder1.build(), sourceBuilder2.build()) + ); + terms.order(randomBoolean() ? 
BucketOrder.key(randomBoolean()) : BucketOrder.compound(BucketOrder.key(randomBoolean()))); + if (randomBoolean()) { + terms.shardSize(randomIntBetween(1, 100)); + } + assertFalse(terms.supportsParallelCollection(field -> -1)); + { + List fields = new ArrayList<>(); + assertTrue(terms.supportsParallelCollection(field -> { + fields.add(field); + return randomIntBetween(0, 50); + })); + assertEquals(List.of("field1", "field2"), fields); + } + assertFalse(terms.supportsParallelCollection(field -> randomIntBetween(51, 100))); + terms.terms( + List.of( + sourceBuilder1.build(), + sourceBuilder2.build(), + new MultiValuesSourceFieldConfig.Builder().setScript(new Script("id")).build() + ) + ); + assertFalse(terms.supportsParallelCollection(field -> randomIntBetween(-1, 100))); + } + } } diff --git a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/ResourceUtils.java b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/ResourceUtils.java index b9a6edfb958f3..1e6a9a9998a82 100644 --- a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/ResourceUtils.java +++ b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/ResourceUtils.java @@ -13,15 +13,20 @@ import java.io.IOException; import java.io.InputStream; import java.nio.charset.StandardCharsets; +import java.util.Map; public class ResourceUtils { public static final String APM_TEMPLATE_VERSION_VARIABLE = "xpack.apmdata.template.version"; static byte[] loadVersionedResourceUTF8(String name, int version) { + return loadVersionedResourceUTF8(name, version, Map.of()); + } + + static byte[] loadVersionedResourceUTF8(String name, int version, Map variables) { try { String content = loadResource(name); - content = TemplateUtils.replaceVariable(content, APM_TEMPLATE_VERSION_VARIABLE, String.valueOf(version)); + content = TemplateUtils.replaceVariables(content, String.valueOf(version), APM_TEMPLATE_VERSION_VARIABLE, variables); return content.getBytes(StandardCharsets.UTF_8); } catch (IOException e) { throw new RuntimeException(e); diff --git a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/YamlIngestPipelineConfig.java b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/YamlIngestPipelineConfig.java index 938fd69f80abe..de1b715dd138d 100644 --- a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/YamlIngestPipelineConfig.java +++ b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/YamlIngestPipelineConfig.java @@ -31,6 +31,6 @@ public XContentType getXContentType() { @Override public BytesReference loadConfig() { - return new BytesArray(loadVersionedResourceUTF8("/ingest-pipelines/" + id + ".yaml", version)); + return new BytesArray(loadVersionedResourceUTF8("/ingest-pipelines/" + id + ".yaml", version, variables)); } } diff --git a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java index 6b16de4ac6458..eb9d440106dea 100644 --- a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java +++ b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; +import 
org.elasticsearch.features.FeatureService; import org.elasticsearch.ingest.IngestMetadata; import org.elasticsearch.ingest.PipelineConfiguration; import org.elasticsearch.test.ClusterServiceUtils; @@ -76,8 +77,9 @@ public void createRegistryAndClient() { threadPool = new TestThreadPool(this.getClass().getName()); client = new VerifyingClient(threadPool); clusterService = ClusterServiceUtils.createClusterService(threadPool); + FeatureService featureService = new FeatureService(List.of()); stackTemplateRegistryAccessor = new StackTemplateRegistryAccessor( - new StackTemplateRegistry(Settings.EMPTY, clusterService, threadPool, client, NamedXContentRegistry.EMPTY) + new StackTemplateRegistry(Settings.EMPTY, clusterService, threadPool, client, NamedXContentRegistry.EMPTY, featureService) ); apmIndexTemplateRegistry = new APMIndexTemplateRegistry( Settings.builder().put(APM_DATA_ENABLED.getKey(), true).build(), diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/java/org/elasticsearch/xpack/apmdata/APMYamlTestSuiteIT.java b/x-pack/plugin/apm-data/src/yamlRestTest/java/org/elasticsearch/xpack/apmdata/APMYamlTestSuiteIT.java index 77cac16a4e90c..5835a41479a68 100644 --- a/x-pack/plugin/apm-data/src/yamlRestTest/java/org/elasticsearch/xpack/apmdata/APMYamlTestSuiteIT.java +++ b/x-pack/plugin/apm-data/src/yamlRestTest/java/org/elasticsearch/xpack/apmdata/APMYamlTestSuiteIT.java @@ -10,13 +10,11 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; import org.junit.ClassRule; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/101929") public class APMYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @ClassRule diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml index 0c538c345ebaa..0030040b572c9 100644 --- a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml +++ b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml @@ -55,6 +55,10 @@ setup: --- "Test traces-apm-* data stream indexing": + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/102360" + - do: index: index: traces-apm-testing @@ -154,6 +158,11 @@ setup: values: [1.5, 2.5, 3.5] - set: items.0.create._index: index + - do: + # Wait for cluster state changes to be applied before + # querying field mappings. 
+ cluster.health: + wait_for_events: languid - do: indices.get_field_mapping: index: metrics-apm.app.svc1-testing diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchActionIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchActionIT.java index 7e161c3154fa9..dcf47deeebc2b 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchActionIT.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchActionIT.java @@ -89,7 +89,7 @@ public void setupSuiteScopeCluster() throws InterruptedException { v.incrementAndGet(); return v; }); - reqs.add(client().prepareIndex(indexName).setSource("terms", keyword, "metric", metric)); + reqs.add(prepareIndex(indexName).setSource("terms", keyword, "metric", metric)); } indexRandom(true, true, reqs); } diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java index 794763247ebf9..3f888685f33db 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java @@ -12,10 +12,10 @@ import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.search.ClosePointInTimeAction; import org.elasticsearch.action.search.ClosePointInTimeRequest; -import org.elasticsearch.action.search.OpenPointInTimeAction; import org.elasticsearch.action.search.OpenPointInTimeRequest; +import org.elasticsearch.action.search.TransportClosePointInTimeAction; +import org.elasticsearch.action.search.TransportOpenPointInTimeAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; @@ -235,7 +235,7 @@ protected SearchResponseIterator assertBlockingIterator( final SubmitAsyncSearchRequest request; if (randomBoolean()) { OpenPointInTimeRequest openPIT = new OpenPointInTimeRequest(indexName).keepAlive(TimeValue.timeValueMinutes(between(5, 10))); - pitId = client().execute(OpenPointInTimeAction.INSTANCE, openPIT).actionGet().getPointInTimeId(); + pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openPIT).actionGet().getPointInTimeId(); final PointInTimeBuilder pit = new PointInTimeBuilder(pitId); if (randomBoolean()) { pit.setKeepAlive(TimeValue.timeValueMillis(randomIntBetween(1, 3600))); @@ -329,7 +329,7 @@ private AsyncSearchResponse doNext() throws Exception { public void close() { if (closed.compareAndSet(false, true)) { if (pitId != null) { - client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(pitId)).actionGet(); + client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet(); } queryLatch.close(); } diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java 
b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java index 6dc940d191685..218a99a23be0c 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.search.ShardSearchFailure; @@ -1238,7 +1237,6 @@ public void testRemoteClusterOnlyCCSWithFailuresOnAllShards() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100957") public void testCancelViaTasksAPI() throws Exception { Map testClusterInfo = setupTwoClusters(); String localIndex = (String) testClusterInfo.get("local.index"); @@ -1267,7 +1265,7 @@ public void testCancelViaTasksAPI() throws Exception { ListTasksResponse listTasksResponse = client(LOCAL_CLUSTER).admin() .cluster() .prepareListTasks() - .setActions(SearchAction.INSTANCE.name()) + .setActions(TransportSearchAction.TYPE.name()) .get(); List tasks = listTasksResponse.getTasks(); assertThat(tasks.size(), equalTo(1)); @@ -1281,7 +1279,7 @@ public void testCancelViaTasksAPI() throws Exception { .get() .getTasks() .stream() - .filter(t -> t.action().contains(SearchAction.NAME)) + .filter(t -> t.action().contains(TransportSearchAction.TYPE.name())) .collect(Collectors.toList()); assertThat(remoteSearchTasks.size(), greaterThan(0)); remoteClusterSearchTasks.set(remoteSearchTasks); @@ -1299,7 +1297,7 @@ public void testCancelViaTasksAPI() throws Exception { for (TransportService transportService : transportServices) { Collection cancellableTasks = transportService.getTaskManager().getCancellableTasks().values(); for (CancellableTask cancellableTask : cancellableTasks) { - if (cancellableTask.getAction().contains(SearchAction.INSTANCE.name())) { + if (cancellableTask.getAction().contains(TransportSearchAction.TYPE.name())) { assertTrue(cancellableTask.getDescription(), cancellableTask.isCancelled()); } } @@ -1312,7 +1310,7 @@ public void testCancelViaTasksAPI() throws Exception { .get() .getTasks() .stream() - .filter(t -> t.action().contains(SearchAction.INSTANCE.name())) + .filter(t -> t.action().contains(TransportSearchAction.TYPE.name())) .toList(); for (TaskInfo taskInfo : remoteSearchTasksAfterCancellation) { assertTrue(taskInfo.description(), taskInfo.cancelled()); @@ -1324,13 +1322,11 @@ public void testCancelViaTasksAPI() throws Exception { assertTrue(searchResponseAfterCancellation.isRunning()); assertFalse(searchResponseAfterCancellation.getSearchResponse().isTimedOut()); assertThat(searchResponseAfterCancellation.getSearchResponse().getClusters().getTotal(), equalTo(2)); - assertThat(searchResponseAfterCancellation.getSearchResponse().getFailedShards(), equalTo(0)); AsyncStatusResponse statusResponse = getAsyncStatus(response.getId()); assertTrue(statusResponse.isPartial()); assertTrue(statusResponse.isRunning()); assertThat(statusResponse.getClusters().getTotal(), equalTo(2)); - assertThat(statusResponse.getFailedShards(), 
equalTo(0)); assertNull(statusResponse.getCompletionStatus()); } finally { @@ -1345,7 +1341,6 @@ public void testCancelViaTasksAPI() throws Exception { assertTrue(statusResponseAfterCompletion.isPartial()); assertFalse(statusResponseAfterCompletion.isRunning()); assertThat(statusResponseAfterCompletion.getClusters().getTotal(), equalTo(2)); - assertThat(statusResponseAfterCompletion.getFailedShards(), greaterThan(0)); assertThat(statusResponseAfterCompletion.getCompletionStatus(), equalTo(RestStatus.BAD_REQUEST)); AsyncSearchResponse searchResponseAfterCompletion = getAsyncSearch(response.getId()); @@ -1353,11 +1348,8 @@ public void testCancelViaTasksAPI() throws Exception { assertFalse(searchResponseAfterCompletion.isRunning()); assertFalse(searchResponseAfterCompletion.getSearchResponse().isTimedOut()); assertThat(searchResponseAfterCompletion.getSearchResponse().getClusters().getTotal(), equalTo(2)); - assertThat(searchResponseAfterCompletion.getSearchResponse().getFailedShards(), greaterThan(0)); Throwable cause = ExceptionsHelper.unwrap(searchResponseAfterCompletion.getFailure(), TaskCancelledException.class); assertNotNull("TaskCancelledException should be in the causal chain", cause); - ShardSearchFailure[] shardFailures = searchResponseAfterCompletion.getSearchResponse().getShardFailures(); - assertThat(shardFailures.length, greaterThan(0)); String json = Strings.toString( ChunkedToXContent.wrapAsToXContent(searchResponseAfterCompletion) .toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS) @@ -1392,7 +1384,7 @@ public void testCancelViaAsyncSearchDelete() throws Exception { ListTasksResponse listTasksResponse = client(LOCAL_CLUSTER).admin() .cluster() .prepareListTasks() - .setActions(SearchAction.INSTANCE.name()) + .setActions(TransportSearchAction.TYPE.name()) .get(); List<TaskInfo> tasks = listTasksResponse.getTasks(); assertThat(tasks.size(), equalTo(1)); @@ -1405,7 +1397,7 @@ public void testCancelViaAsyncSearchDelete() throws Exception { .get() .getTasks() .stream() - .filter(t -> t.action().contains(SearchAction.NAME)) + .filter(t -> t.action().contains(TransportSearchAction.TYPE.name())) .collect(Collectors.toList()); assertThat(remoteSearchTasks.size(), greaterThan(0)); remoteClusterSearchTasks.set(remoteSearchTasks); @@ -1423,7 +1415,7 @@ public void testCancelViaAsyncSearchDelete() throws Exception { for (TransportService transportService : transportServices) { Collection<CancellableTask> cancellableTasks = transportService.getTaskManager().getCancellableTasks().values(); for (CancellableTask cancellableTask : cancellableTasks) { - if (cancellableTask.getAction().contains(SearchAction.INSTANCE.name())) { + if (cancellableTask.getAction().contains(TransportSearchAction.TYPE.name())) { assertTrue(cancellableTask.getDescription(), cancellableTask.isCancelled()); } } @@ -1436,7 +1428,7 @@ public void testCancelViaAsyncSearchDelete() throws Exception { .get() .getTasks() .stream() - .filter(t -> t.action().contains(SearchAction.INSTANCE.name())) + .filter(t -> t.action().contains(TransportSearchAction.TYPE.name())) .toList(); for (TaskInfo taskInfo : remoteSearchTasksAfterCancellation) { assertTrue(taskInfo.description(), taskInfo.cancelled()); @@ -1452,7 +1444,6 @@ public void testCancelViaAsyncSearchDelete() throws Exception { assertTrue(statusResponse.isPartial()); assertTrue(statusResponse.isRunning()); assertThat(statusResponse.getClusters().getTotal(), equalTo(2)); - assertThat(statusResponse.getFailedShards(), equalTo(0)); assertNull(statusResponse.getCompletionStatus()); }
finally { SearchListenerPlugin.allowQueryPhase(); @@ -1497,7 +1488,7 @@ public void testCancellationViaTimeoutWithAllowPartialResultsSetToFalse() throws ListTasksResponse listTasksResponse = client(LOCAL_CLUSTER).admin() .cluster() .prepareListTasks() - .setActions(SearchAction.INSTANCE.name()) + .setActions(TransportSearchAction.TYPE.name()) .get(); List<TaskInfo> tasks = listTasksResponse.getTasks(); assertThat(tasks.size(), equalTo(1)); @@ -1510,7 +1501,7 @@ public void testCancellationViaTimeoutWithAllowPartialResultsSetToFalse() throws .get() .getTasks() .stream() - .filter(t -> t.action().contains(SearchAction.NAME)) + .filter(t -> t.action().contains(TransportSearchAction.TYPE.name())) .collect(Collectors.toList()); assertThat(remoteSearchTasks.size(), greaterThan(0)); remoteClusterSearchTasks.set(remoteSearchTasks); @@ -1532,7 +1523,7 @@ public void testCancellationViaTimeoutWithAllowPartialResultsSetToFalse() throws ListTasksResponse listTasksResponse = client(LOCAL_CLUSTER).admin() .cluster() .prepareListTasks() - .setActions(SearchAction.INSTANCE.name()) + .setActions(TransportSearchAction.TYPE.name()) .get(); List<TaskInfo> tasks = listTasksResponse.getTasks(); assertThat(tasks.size(), equalTo(0)); @@ -1540,7 +1531,7 @@ public void testCancellationViaTimeoutWithAllowPartialResultsSetToFalse() throws ListTasksResponse remoteTasksResponse = client(REMOTE_CLUSTER).admin() .cluster() .prepareListTasks() - .setActions(SearchAction.INSTANCE.name()) + .setActions(TransportSearchAction.TYPE.name()) .get(); List<TaskInfo> remoteTasks = remoteTasksResponse.getTasks(); assertThat(remoteTasks.size(), equalTo(0)); @@ -1562,7 +1553,7 @@ private void waitForSearchTasksToFinish() throws Exception { ListTasksResponse listTasksResponse = client(LOCAL_CLUSTER).admin() .cluster() .prepareListTasks() - .setActions(SearchAction.INSTANCE.name()) + .setActions(TransportSearchAction.TYPE.name()) .get(); List<TaskInfo> tasks = listTasksResponse.getTasks(); assertThat(tasks.size(), equalTo(0)); @@ -1570,7 +1561,7 @@ private void waitForSearchTasksToFinish() throws Exception { ListTasksResponse remoteTasksResponse = client(REMOTE_CLUSTER).admin() .cluster() .prepareListTasks() - .setActions(SearchAction.INSTANCE.name()) + .setActions(TransportSearchAction.TYPE.name()) .get(); List<TaskInfo> remoteTasks = remoteTasksResponse.getTasks(); assertThat(remoteTasks.size(), equalTo(0)); diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportSubmitAsyncSearchAction.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportSubmitAsyncSearchAction.java index f6cf0bf6583b4..7cbd0662302d9 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportSubmitAsyncSearchAction.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/TransportSubmitAsyncSearchAction.java @@ -8,7 +8,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.ActionFilters; @@ -24,7 +23,6 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.SearchService; -import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import
org.elasticsearch.transport.TransportService; @@ -37,16 +35,13 @@ import org.elasticsearch.xpack.core.search.action.SubmitAsyncSearchRequest; import java.util.Map; -import java.util.function.BiFunction; -import java.util.function.Function; -import java.util.function.Supplier; import static org.elasticsearch.xpack.core.ClientHelper.ASYNC_SEARCH_ORIGIN; public class TransportSubmitAsyncSearchAction extends HandledTransportAction<SubmitAsyncSearchRequest, AsyncSearchResponse> { private final ClusterService clusterService; private final NodeClient nodeClient; - private final BiFunction<Supplier<Boolean>, SearchRequest, AggregationReduceContext> requestToAggReduceContextBuilder; + private final SearchService searchService; private final TransportSearchAction searchAction; private final ThreadContext threadContext; private final AsyncTaskIndexService<AsyncSearchResponse> store; @@ -72,10 +67,7 @@ public TransportSubmitAsyncSearchAction( ); this.clusterService = clusterService; this.nodeClient = nodeClient; - this.requestToAggReduceContextBuilder = (task, request) -> searchService.aggReduceContextBuilder( - task, - request.source().aggregations() - ).forFinalReduction(); + this.searchService = searchService; this.searchAction = searchAction; this.threadContext = transportService.getThreadPool().getThreadContext(); this.store = new AsyncTaskIndexService<>( @@ -94,7 +86,7 @@ public TransportSubmitAsyncSearchAction( protected void doExecute(Task submitTask, SubmitAsyncSearchRequest request, ActionListener<AsyncSearchResponse> submitListener) { final SearchRequest searchRequest = createSearchRequest(request, submitTask, request.getKeepAlive()); try (var ignored = threadContext.newTraceContext()) { - AsyncSearchTask searchTask = (AsyncSearchTask) taskManager.register("transport", SearchAction.INSTANCE.name(), searchRequest); + AsyncSearchTask searchTask = (AsyncSearchTask) taskManager.register( + "transport", + TransportSearchAction.TYPE.name(), + searchRequest + ); searchAction.execute(searchTask, searchRequest, searchTask.getSearchProgressActionListener()); searchTask.addCompletionListener(new ActionListener<>() { @Override @@ -162,12 +158,11 @@ private SearchRequest createSearchRequest(SubmitAsyncSearchRequest request, Task nodeClient.threadPool().getThreadContext(), clusterService.state() ); - SearchRequest searchRequest = new SearchRequest(request.getSearchRequest()) { + var originalSearchRequest = request.getSearchRequest(); + SearchRequest searchRequest = new SearchRequest(originalSearchRequest) { @Override public AsyncSearchTask createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> taskHeaders) { AsyncExecutionId searchId = new AsyncExecutionId(docID, new TaskId(nodeClient.getLocalNodeId(), id)); - Function<Supplier<Boolean>, Supplier<AggregationReduceContext>> aggReduceContextSupplierFactory = - isCancelled -> () -> requestToAggReduceContextBuilder.apply(isCancelled, request.getSearchRequest()); return new AsyncSearchTask( id, type, @@ -180,7 +175,8 @@ public AsyncSearchTask createTask(long id, String type, String action, TaskId pa searchId, store.getClientWithOrigin(), nodeClient.threadPool(), - aggReduceContextSupplierFactory + isCancelled -> () -> searchService.aggReduceContextBuilder(isCancelled, originalSearchRequest.source().aggregations()) + .forFinalReduction() ); } }; diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/AbstractFrozenAutoscalingIntegTestCase.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/AbstractFrozenAutoscalingIntegTestCase.java index b3dca2688d428..656b1ddd4d952 100644 ---
a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/AbstractFrozenAutoscalingIntegTestCase.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/AbstractFrozenAutoscalingIntegTestCase.java @@ -87,7 +87,7 @@ protected void createAndMountIndex() throws InterruptedException, java.util.conc assertAcked(prepareCreate(indexName, Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), true))); indexRandom( randomBoolean(), - IntStream.range(0, 10).mapToObj(i -> client().prepareIndex(indexName).setSource()).collect(Collectors.toList()) + IntStream.range(0, 10).mapToObj(i -> prepareIndex(indexName).setSource()).collect(Collectors.toList()) ); final SnapshotInfo snapshotInfo = createFullSnapshot(fsRepoName, snapshotName); diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java index 60c6732b45000..497734fd5ac28 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java @@ -82,6 +82,7 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { ); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102405") public void testZeroToOne() throws Exception { internalCluster().startMasterOnlyNode(); setupRepoAndPolicy(); diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ProactiveStorageIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ProactiveStorageIT.java index c214cc6006eee..c5e062df5e77c 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ProactiveStorageIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ProactiveStorageIT.java @@ -53,8 +53,7 @@ public void testScaleUp() throws IOException, InterruptedException { false, IntStream.range(1, 100) .mapToObj( - unused -> client().prepareIndex(dsName) - .setCreate(true) + unused -> prepareIndex(dsName).setCreate(true) .setSource("@timestamp", DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(randomMillisUpToYear9999())) ) .toArray(IndexRequestBuilder[]::new) @@ -123,16 +122,11 @@ private static void createDataStreamAndTemplate(String dataStreamName) throws IO client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request(dataStreamName + "_template").indexTemplate( - new ComposableIndexTemplate( - Collections.singletonList(dataStreamName), - new Template(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build(), null, null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList(dataStreamName)) + .template(new Template(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build(), null, null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ) ).actionGet(); client().execute(CreateDataStreamAction.INSTANCE, new
CreateDataStreamAction.Request(dataStreamName)).actionGet(); diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java index 51d0c2c0aef80..5c097cdc24ed1 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java @@ -68,7 +68,7 @@ public void testScaleUp() throws InterruptedException { indexRandom( true, IntStream.range(1, 100) - .mapToObj(i -> client().prepareIndex(indexName).setSource("field", randomAlphaOfLength(50))) + .mapToObj(i -> prepareIndex(indexName).setSource("field", randomAlphaOfLength(50))) .toArray(IndexRequestBuilder[]::new) ); forceMerge(); @@ -267,7 +267,7 @@ public void testScaleWhileShrinking() throws Exception { indexRandom( true, IntStream.range(1, 100) - .mapToObj(i -> client().prepareIndex(indexName).setSource("field", randomAlphaOfLength(50))) + .mapToObj(i -> prepareIndex(indexName).setSource("field", randomAlphaOfLength(50))) .toArray(IndexRequestBuilder[]::new) ); forceMerge(); @@ -420,7 +420,7 @@ public void testScaleDuringSplitOrClone() throws Exception { indexRandom( true, IntStream.range(1, 100) - .mapToObj(i -> client().prepareIndex(indexName).setSource("field", randomAlphaOfLength(50))) + .mapToObj(i -> prepareIndex(indexName).setSource("field", randomAlphaOfLength(50))) .toArray(IndexRequestBuilder[]::new) ); forceMerge(); diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodeInfoService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodeInfoService.java index 2380082fbf66e..a14c49b4e5e21 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodeInfoService.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodeInfoService.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoMetrics; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters; import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -122,12 +123,14 @@ private void sendToMissingNodes(Function<String, DiscoveryNode> nodeLookup, Set< nodeToMemory = Collections.unmodifiableMap(builder); } }; + final NodesStatsRequest nodesStatsRequest = new NodesStatsRequest( + missingNodes.stream().map(DiscoveryNode::getId).toArray(String[]::new) + ).clear().addMetric(NodesStatsRequestParameters.Metric.OS.metricName()).timeout(fetchTimeout); + nodesStatsRequest.setIncludeShardsStats(false); client.admin() .cluster() .nodesStats( - new NodesStatsRequest(missingNodes.stream().map(DiscoveryNode::getId).toArray(String[]::new)).clear() - .addMetric(NodesStatsRequest.Metric.OS.metricName()) - .timeout(fetchTimeout), + nodesStatsRequest, ActionListener.wrap( nodesStatsResponse -> client.admin() .cluster() diff --git
a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java index c6fb0613b3e8e..5c47f5a9dc6a4 100644 --- a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java +++ b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java @@ -9,7 +9,6 @@ import org.elasticsearch.Build; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -449,7 +448,7 @@ private static NodeStats statsForNode(DiscoveryNode node, long memory) { private static org.elasticsearch.action.admin.cluster.node.info.NodeInfo infoForNode(DiscoveryNode node, int processors) { OsInfo osInfo = new OsInfo(randomLong(), processors, Processors.of((double) processors), null, null, null, null); return new org.elasticsearch.action.admin.cluster.node.info.NodeInfo( - Version.CURRENT, + Build.current().version(), TransportVersion.current(), IndexVersion.current(), Map.of(), diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java index b4830ca97938f..1766d8fe47820 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java @@ -14,6 +14,7 @@ public class BlobCacheMetrics { private final LongCounter cacheMissCounter; + private final LongCounter evictedCountNonZeroFrequency; private final LongHistogram cacheMissLoadTimes; public BlobCacheMetrics(MeterRegistry meterRegistry) { @@ -23,6 +24,11 @@ public BlobCacheMetrics(MeterRegistry meterRegistry) { "The number of times there was a cache miss that triggered a read from the blob store", "count" ), + meterRegistry.registerLongCounter( + "elasticsearch.blob_cache.count_of_evicted_used_regions", + "The number of times a cache entry was evicted where the frequency was not zero", + "entries" + ), meterRegistry.registerLongHistogram( "elasticsearch.blob_cache.cache_miss_load_times", "The timing data for populating entries in the blob store resulting from a cache miss.", @@ -31,8 +37,9 @@ public BlobCacheMetrics(MeterRegistry meterRegistry) { ); } - BlobCacheMetrics(LongCounter cacheMissCounter, LongHistogram cacheMissLoadTimes) { + BlobCacheMetrics(LongCounter cacheMissCounter, LongCounter evictedCountNonZeroFrequency, LongHistogram cacheMissLoadTimes) { this.cacheMissCounter = cacheMissCounter; + this.evictedCountNonZeroFrequency = evictedCountNonZeroFrequency; this.cacheMissLoadTimes = cacheMissLoadTimes; } @@ -42,6 +49,10 @@ public LongCounter getCacheMissCounter() { return cacheMissCounter; } + public LongCounter getEvictedCountNonZeroFrequency() { + return evictedCountNonZeroFrequency; + } + public LongHistogram getCacheMissLoadTimes() { return cacheMissLoadTimes; } diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index 
7740e500344f2..847779c9066c4 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -281,8 +281,8 @@ private CacheEntry(T chunk) { private final SharedBytes sharedBytes; private final long cacheSize; private final int regionSize; - private final ByteSizeValue rangeSize; - private final ByteSizeValue recoveryRangeSize; + private final int rangeSize; + private final int recoveryRangeSize; private final int numRegions; private final ConcurrentLinkedQueue<SharedBytes.IO> freeRegions = new ConcurrentLinkedQueue<>(); @@ -355,8 +355,8 @@ public SharedBlobCacheService( freeRegions.add(sharedBytes.getFileChannel(i)); } - this.rangeSize = SHARED_CACHE_RANGE_SIZE_SETTING.get(settings); - this.recoveryRangeSize = SHARED_CACHE_RECOVERY_RANGE_SIZE_SETTING.get(settings); + this.rangeSize = BlobCacheUtils.toIntBytes(SHARED_CACHE_RANGE_SIZE_SETTING.get(settings).getBytes()); + this.recoveryRangeSize = BlobCacheUtils.toIntBytes(SHARED_CACHE_RECOVERY_RANGE_SIZE_SETTING.get(settings).getBytes()); this.blobCacheMetrics = blobCacheMetrics; } @@ -368,11 +368,11 @@ public static long calculateCacheSize(Settings settings, long totalFsSize) { } public int getRangeSize() { - return BlobCacheUtils.toIntBytes(rangeSize.getBytes()); + return rangeSize; } public int getRecoveryRangeSize() { - return BlobCacheUtils.toIntBytes(recoveryRangeSize.getBytes()); + return recoveryRangeSize; } private int getRegion(long position) { @@ -840,7 +840,7 @@ private int readSingleRegion( RangeMissingHandler writer, int region ) throws InterruptedException, ExecutionException { - final PlainActionFuture<Integer> readFuture = PlainActionFuture.newFuture(); + final PlainActionFuture<Integer> readFuture = new PlainActionFuture<>(); final CacheFileRegion fileRegion = get(cacheKey, length, region); final long regionStart = getRegionStart(region); fileRegion.populateAndRead( @@ -1057,18 +1057,24 @@ public int forceEvict(Predicate<KeyType> cacheKeyPredicate) { } }); var evictedCount = 0; + var nonZeroFrequencyEvictedCount = 0; if (matchingEntries.isEmpty() == false) { synchronized (SharedBlobCacheService.this) { for (LFUCacheEntry entry : matchingEntries) { + int frequency = entry.freq; boolean evicted = entry.chunk.forceEvict(); if (evicted && entry.chunk.io != null) { unlink(entry); keyMapping.remove(entry.chunk.regionKey, entry); evictedCount++; + if (frequency > 0) { + nonZeroFrequencyEvictedCount++; + } } } } } + blobCacheMetrics.getEvictedCountNonZeroFrequency().incrementBy(nonZeroFrequencyEvictedCount); return evictedCount; } @@ -1088,8 +1094,12 @@ private LFUCacheEntry initChunk(LFUCacheEntry entry) { assignToSlot(entry, freeSlot); } else { // need to evict something + int frequency; synchronized (SharedBlobCacheService.this) { - maybeEvict(); + frequency = maybeEvict(); + } + if (frequency > 0) { + blobCacheMetrics.getEvictedCountNonZeroFrequency().increment(); } final SharedBytes.IO freeSlotRetry = freeRegions.poll(); if (freeSlotRetry != null) { @@ -1221,18 +1231,25 @@ private void unlink(final LFUCacheEntry entry) { assert invariant(entry, false); } - private void maybeEvict() { + /** + * Cycles through the {@link LFUCacheEntry} from 0 to max frequency and + * tries to evict a chunk if no one is holding onto its resources anymore + * + * @return the frequency of the evicted entry as integer or -1 if no entry was evicted from cache + */ + private int maybeEvict() { assert
Thread.holdsLock(SharedBlobCacheService.this); - for (int i = 0; i < maxFreq; i++) { - for (LFUCacheEntry entry = freqs[i]; entry != null; entry = entry.next) { + for (int currentFreq = 0; currentFreq < maxFreq; currentFreq++) { + for (LFUCacheEntry entry = freqs[currentFreq]; entry != null; entry = entry.next) { boolean evicted = entry.chunk.tryEvict(); if (evicted && entry.chunk.io != null) { unlink(entry); keyMapping.remove(entry.chunk.regionKey, entry); - return; + return currentFreq; } } } + return -1; } private void computeDecay() { diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java index 04347aaf6bff2..530cbbe6c6184 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java @@ -134,7 +134,9 @@ public static Path findCacheSnapshotCacheFilePath(NodeEnvironment environment, l if (usableSpace > fileSize) { return p; } else { - throw new IOException("Not enough free space for cache file of size [" + fileSize + "] in path [" + path + "]"); + throw new IOException( + "Not enough free space [" + usableSpace + "] for cache file of size [" + fileSize + "] in path [" + path + "]" + ); } } diff --git a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/common/ProgressListenableActionFutureTests.java b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/common/ProgressListenableActionFutureTests.java index 5445cb709c1c5..a94a3214fdd9a 100644 --- a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/common/ProgressListenableActionFutureTests.java +++ b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/common/ProgressListenableActionFutureTests.java @@ -216,7 +216,7 @@ public void testListenerCalledImmediatelyWhenProgressReached() { final ProgressListenableActionFuture future = randomFuture(); final long progress = randomLongBetween(future.start, future.end); - final PlainActionFuture listenerResponse = PlainActionFuture.newFuture(); + final PlainActionFuture listenerResponse = new PlainActionFuture<>(); if (randomBoolean()) { future.onProgress(progress); future.addListener(listenerResponse, randomLongBetween(future.start, progress)); diff --git a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java index cd9bb5b5934c8..d861ff193112d 100644 --- a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java +++ b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java @@ -415,7 +415,7 @@ public void execute(Runnable command) { assertEquals(5, cacheService.freeRegionCount()); final long size = size(250); AtomicLong bytesRead = new AtomicLong(size); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); cacheService.maybeFetchFullEntry(cacheKey, size, (channel, channelPos, relativePos, length, progressUpdater) -> { bytesRead.addAndGet(-length); progressUpdater.accept(length); diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/index/shard/CloseFollowerIndexErrorSuppressionHelper.java 
b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/index/shard/CloseFollowerIndexErrorSuppressionHelper.java new file mode 100644 index 0000000000000..89ba41317e0e3 --- /dev/null +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/index/shard/CloseFollowerIndexErrorSuppressionHelper.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.index.shard; + +public class CloseFollowerIndexErrorSuppressionHelper { + public static void setSuppressCreateEngineErrors(boolean value) { + IndexShard.suppressCreateEngineErrors = value; + } +} diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java index 5031a52630033..e1ec9013ef257 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java @@ -137,7 +137,7 @@ public void testAutoFollowDoNotFollowSystemIndices() throws Exception { putAutoFollowPatterns("my-pattern", new String[] { ".*", "logs-*" }); // Trigger system index creation - leaderClient().prepareIndex(FakeSystemIndex.SYSTEM_INDEX_NAME).setSource(Map.of("a", "b")).execute().actionGet(); + leaderClient().prepareIndex(FakeSystemIndex.SYSTEM_INDEX_NAME).setSource(Map.of("a", "b")).get(); Settings leaderIndexSettings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) @@ -638,23 +638,20 @@ public void testAutoFollowDatastreamWithClosingFollowerIndex() throws Exception final String datastream = "logs-1"; PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("template-id"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("logs-*"), - new Template( - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .build(), - null, - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs-*")) + .template( + new Template( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build(), + null, + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); assertAcked(leaderClient().execute(PutComposableIndexTemplateAction.INSTANCE, request).get()); diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java index 8b8af3dae2fef..f7baafa8402d0 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java @@ -548,7 +548,7 @@ public void testCcrRepositoryFetchesSnapshotShardSizeFromIndexShardStoreStats() final Map fetchedSnapshotShardSizes = new ConcurrentHashMap<>(); - final PlainActionFuture waitForRestoreInProgress = PlainActionFuture.newFuture(); + final PlainActionFuture 
waitForRestoreInProgress = new PlainActionFuture<>(); final ClusterStateListener listener = event -> { if (RestoreInProgress.get(event.state()).isEmpty() == false && event.state().routingTable().hasIndex(followerIndex)) { final IndexRoutingTable indexRoutingTable = event.state().routingTable().index(followerIndex); @@ -634,7 +634,7 @@ public void testCcrRepositoryFailsToFetchSnapshotShardSizes() throws Exception { try { final SnapshotsInfoService snapshotsInfoService = getFollowerCluster().getCurrentMasterNodeInstance(SnapshotsInfoService.class); - final PlainActionFuture waitForAllShardSnapshotSizesFailures = PlainActionFuture.newFuture(); + final PlainActionFuture waitForAllShardSnapshotSizesFailures = new PlainActionFuture<>(); final ClusterStateListener listener = event -> { if (RestoreInProgress.get(event.state()).isEmpty() == false && event.state().routingTable().hasIndex(followerIndex)) { try { diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CloseFollowerIndexIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CloseFollowerIndexIT.java index 64ebb20538832..8e597c3992528 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CloseFollowerIndexIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CloseFollowerIndexIT.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.engine.ReadOnlyEngine; +import org.elasticsearch.index.shard.CloseFollowerIndexErrorSuppressionHelper; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.CcrIntegTestCase; import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; @@ -39,13 +40,16 @@ public class CloseFollowerIndexIT extends CcrIntegTestCase { @Before public void wrapUncaughtExceptionHandler() { + CloseFollowerIndexErrorSuppressionHelper.setSuppressCreateEngineErrors(true); uncaughtExceptionHandler = Thread.getDefaultUncaughtExceptionHandler(); AccessController.doPrivileged((PrivilegedAction) () -> { Thread.setDefaultUncaughtExceptionHandler((t, e) -> { - if (t.getThreadGroup().getName().contains(getTestClass().getSimpleName())) { + if (t.getThreadGroup().getName().contains(getTestClass().getSimpleName()) + && t.getName().equals("elasticsearch-error-rethrower")) { for (StackTraceElement element : e.getStackTrace()) { if (element.getClassName().equals(ReadOnlyEngine.class.getName())) { if (element.getMethodName().equals("assertMaxSeqNoEqualsToGlobalCheckpoint")) { + logger.error("HACK: suppressing uncaught exception thrown from assertMaxSeqNoEqualsToGlobalCheckpoint", e); return; } } @@ -59,6 +63,7 @@ public void wrapUncaughtExceptionHandler() { @After public void restoreUncaughtExceptionHandler() { + CloseFollowerIndexErrorSuppressionHelper.setSuppressCreateEngineErrors(false); AccessController.doPrivileged((PrivilegedAction) () -> { Thread.setDefaultUncaughtExceptionHandler(uncaughtExceptionHandler); return null; diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index f21bfc07deba2..88482eabafed5 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -1754,7 +1754,7 @@ 
private String getIndexSettingsWithNestedMapping( private void putFollowerTemplate(String setting, String settingValue) { Template template = new Template(Settings.builder().put(setting, settingValue).build(), null, null); - ComposableIndexTemplate cit = new ComposableIndexTemplate(List.of("follower"), template, null, null, null, null); + ComposableIndexTemplate cit = ComposableIndexTemplate.builder().indexPatterns(List.of("follower")).template(template).build(); assertAcked( followerClient().execute( PutComposableIndexTemplateAction.INSTANCE, diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/LocalIndexFollowingIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/LocalIndexFollowingIT.java index 3afa7722e5318..d609b606238bc 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/LocalIndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/LocalIndexFollowingIT.java @@ -47,7 +47,7 @@ public void testFollowIndex() throws Exception { final long firstBatchNumDocs = randomIntBetween(2, 64); for (int i = 0; i < firstBatchNumDocs; i++) { - client().prepareIndex("leader").setSource("{}", XContentType.JSON).get(); + prepareIndex("leader").setSource("{}", XContentType.JSON).get(); } final PutFollowAction.Request followRequest = getPutFollowRequest("leader", "follower"); @@ -59,7 +59,7 @@ public void testFollowIndex() throws Exception { final long secondBatchNumDocs = randomIntBetween(2, 64); for (int i = 0; i < secondBatchNumDocs; i++) { - client().prepareIndex("leader").setSource("{}", XContentType.JSON).get(); + prepareIndex("leader").setSource("{}", XContentType.JSON).get(); } assertBusy(() -> { @@ -74,7 +74,7 @@ public void testFollowIndex() throws Exception { final long thirdBatchNumDocs = randomIntBetween(2, 64); for (int i = 0; i < thirdBatchNumDocs; i++) { - client().prepareIndex("leader").setSource("{}", XContentType.JSON).get(); + prepareIndex("leader").setSource("{}", XContentType.JSON).get(); } client().execute(ResumeFollowAction.INSTANCE, getResumeFollowRequest("follower")).get(); @@ -99,7 +99,7 @@ public void testIndexingMetricsIncremented() throws Exception { for (int i = 0; i < firstBatchNumDocs; i++) { BytesArray source = new BytesArray("{}"); sourceSize += source.length(); - client().prepareIndex("leader").setSource(source, XContentType.JSON).get(); + prepareIndex("leader").setSource(source, XContentType.JSON).get(); } ThreadPool nodeThreadPool = getInstanceFromNode(ThreadPool.class); @@ -163,7 +163,7 @@ public void testRemoveRemoteConnection() throws Exception { .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) .build(); createIndex("logs-20200101", leaderIndexSettings); - client().prepareIndex("logs-20200101").setSource("{}", XContentType.JSON).get(); + prepareIndex("logs-20200101").setSource("{}", XContentType.JSON).get(); assertBusy(() -> { CcrStatsAction.Response response = client().execute(CcrStatsAction.INSTANCE, new CcrStatsAction.Request()).actionGet(); assertThat( @@ -182,7 +182,7 @@ public void testRemoveRemoteConnection() throws Exception { // This new index should be picked up by auto follow coordinator createIndex("logs-20200102", leaderIndexSettings); // This new document should be replicated to follower index: - client().prepareIndex("logs-20200101").setSource("{}", XContentType.JSON).get(); + prepareIndex("logs-20200101").setSource("{}", XContentType.JSON).get(); assertBusy(() -> { 
CcrStatsAction.Response response = client().execute(CcrStatsAction.INSTANCE, new CcrStatsAction.Request()).actionGet(); assertThat( @@ -207,7 +207,7 @@ public void testChangeLeaderIndex() throws Exception { ensureGreen("index-1"); int numDocs = between(1, 100); for (int i = 0; i < numDocs; i++) { - client().prepareIndex("index-1").setSource("{}", XContentType.JSON).get(); + prepareIndex("index-1").setSource("{}", XContentType.JSON).get(); } client().execute(PutFollowAction.INSTANCE, getPutFollowRequest("index-1", "index-2")).get(); assertBusy(() -> assertThat(client().prepareSearch("index-2").get().getHits().getTotalHits().value, equalTo((long) numDocs))); @@ -221,7 +221,7 @@ public void testChangeLeaderIndex() throws Exception { newDocs = numDocs + randomIntBetween(1, 100); } for (int i = 0; i < newDocs; i++) { - client().prepareIndex("index-0").setSource("{}", XContentType.JSON).get(); + prepareIndex("index-0").setSource("{}", XContentType.JSON).get(); } if (randomBoolean()) { client().admin().indices().prepareFlush("index-0").get(); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrRepositoryManager.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrRepositoryManager.java index 1a6646ed9c1d2..bc644dd4eacc4 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrRepositoryManager.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrRepositoryManager.java @@ -48,14 +48,14 @@ protected void doClose() {} private void putRepository(String repositoryName) { ActionRequest request = new PutInternalCcrRepositoryRequest(repositoryName, CcrRepository.TYPE); - PlainActionFuture f = PlainActionFuture.newFuture(); + PlainActionFuture f = new PlainActionFuture<>(); client.execute(PutInternalCcrRepositoryAction.INSTANCE, request, f); assert f.isDone() : "Should be completed as it is executed synchronously"; } private void deleteRepository(String repositoryName) { DeleteInternalCcrRepositoryRequest request = new DeleteInternalCcrRepositoryRequest(repositoryName); - PlainActionFuture f = PlainActionFuture.newFuture(); + PlainActionFuture f = new PlainActionFuture<>(); client.execute(DeleteInternalCcrRepositoryAction.INSTANCE, request, f); assert f.isDone() : "Should be completed as it is executed synchronously"; } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java index 559cfe0cbe4cc..b06ff73e29960 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java @@ -332,7 +332,9 @@ static DataStream updateLocalDataStream( remoteDataStream.isSystem(), remoteDataStream.isAllowCustomRouting(), remoteDataStream.getIndexMode(), - remoteDataStream.getLifecycle() + remoteDataStream.getLifecycle(), + remoteDataStream.isFailureStore(), + remoteDataStream.getFailureIndices() ); } else { if (localDataStream.isReplicated() == false) { @@ -383,7 +385,9 @@ static DataStream updateLocalDataStream( localDataStream.isSystem(), localDataStream.isAllowCustomRouting(), localDataStream.getIndexMode(), - localDataStream.getLifecycle() + localDataStream.getLifecycle(), + localDataStream.isFailureStore(), + localDataStream.getFailureIndices() ); } } diff --git 
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java index 93cd6bd32dbab..7aab281f4f7ed 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java @@ -85,7 +85,12 @@ protected void doExecute( try (CcrRestoreSourceService.SessionReader sessionReader = restoreSourceService.getSessionReader(sessionUUID)) { long offsetAfterRead = sessionReader.readFileBytes(fileName, reference); long offsetBeforeRead = offsetAfterRead - reference.length(); - listener.onResponse(new GetCcrRestoreFileChunkResponse(offsetBeforeRead, reference)); + var chunk = new GetCcrRestoreFileChunkResponse(offsetBeforeRead, reference); + try { + listener.onResponse(chunk); + } finally { + chunk.decRef(); + } } } catch (IOException e) { listener.onFailure(e); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index 5868bba00229f..b90b203e2d29f 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -229,7 +229,7 @@ public IndexMetadata getSnapshotIndexMetaData(RepositoryData repositoryData, Sna .actionGet(ccrSettings.getRecoveryActionTimeout()); // Validates whether the leader cluster has been configured properly: - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); IndexMetadata leaderIndexMetadata = clusterState.getState().metadata().index(leaderIndex); CcrLicenseChecker.fetchLeaderHistoryUUIDs(remoteClient, leaderIndexMetadata, future::onFailure, future::onResponse); String[] leaderHistoryUUIDs = future.actionGet(ccrSettings.getRecoveryActionTimeout()); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index c977791020ccb..9bf22bd4e0ca3 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -19,7 +19,6 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.master.AcknowledgedRequest; @@ -68,6 +67,7 @@ import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.BackgroundIndexer; +import org.elasticsearch.test.CloseableTestClusterWrapper; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalSettingsPlugin; @@ -122,6 +122,7 @@ import static org.elasticsearch.snapshots.RestoreService.restoreInProgress; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -476,7 +477,7 @@ protected final Index resolveFollowerIndex(String index) { } protected final RefreshResponse refresh(Client client, String... indices) { - RefreshResponse actionGet = client.admin().indices().prepareRefresh(indices).execute().actionGet(); + RefreshResponse actionGet = client.admin().indices().prepareRefresh(indices).get(); assertNoFailures(actionGet); return actionGet; } @@ -706,9 +707,10 @@ protected void atLeastDocsIndexed(Client client, String index, long numDocsRepli refresh(client, index); SearchRequest request = new SearchRequest(index); request.source(new SearchSourceBuilder().size(0)); - SearchResponse response = client.search(request).actionGet(); - assertNotNull(response.getHits().getTotalHits()); - assertThat(response.getHits().getTotalHits().value, greaterThanOrEqualTo(numDocsReplicated)); + assertResponse(client.search(request), response -> { + assertNotNull(response.getHits().getTotalHits()); + assertThat(response.getHits().getTotalHits().value, greaterThanOrEqualTo(numDocsReplicated)); + }); }, 60, TimeUnit.SECONDS); } @@ -904,7 +906,7 @@ static class ClusterGroup implements Closeable { @Override public void close() throws IOException { - IOUtils.close(leaderCluster, followerCluster); + IOUtils.close(CloseableTestClusterWrapper.wrap(leaderCluster, followerCluster)); } } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java index 06a3a39b08502..6f6131c8ea4e9 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java @@ -51,7 +51,7 @@ public void testGetOperations() throws Exception { final int numWrites = randomIntBetween(10, 4096); for (int i = 0; i < numWrites; i++) { - client().prepareIndex("index").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + prepareIndex("index").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); } // A number of times, get operations within a range that exists: @@ -169,7 +169,7 @@ public void testGetOperationsExceedByteLimit() throws Exception { final long numWrites = 32; for (int i = 0; i < numWrites; i++) { - client().prepareIndex("index").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + prepareIndex("index").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); } final IndexShard indexShard = indexService.getShard(0); @@ -195,7 +195,7 @@ public void testGetOperationsExceedByteLimit() throws Exception { public void testGetOperationsAlwaysReturnAtLeastOneOp() throws Exception { final IndexService indexService = createIndex("index", indexSettings(1, 0).build()); - client().prepareIndex("index").setId("0").setSource("{}", XContentType.JSON).get(); + prepareIndex("index").setId("0").setSource("{}", XContentType.JSON).get(); final IndexShard indexShard = indexService.getShard(0); final Translog.Operation[] operations = ShardChangesAction.getOperations( diff --git 
a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesTests.java index a8f3febfd1946..2f0643ef16c1b 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesTests.java @@ -49,9 +49,9 @@ protected Collection> getPlugins() { public void testGetOperationsBasedOnGlobalSequenceId() throws Exception { client().admin().indices().prepareCreate("index").setSettings(Settings.builder().put("index.number_of_shards", 1)).get(); - client().prepareIndex("index").setId("1").setSource("{}", XContentType.JSON).get(); - client().prepareIndex("index").setId("2").setSource("{}", XContentType.JSON).get(); - client().prepareIndex("index").setId("3").setSource("{}", XContentType.JSON).get(); + prepareIndex("index").setId("1").setSource("{}", XContentType.JSON).get(); + prepareIndex("index").setId("2").setSource("{}", XContentType.JSON).get(); + prepareIndex("index").setId("3").setSource("{}", XContentType.JSON).get(); ShardStats shardStats = client().admin().indices().prepareStats("index").get().getIndex("index").getShards()[0]; long globalCheckPoint = shardStats.getSeqNoStats().getGlobalCheckpoint(); @@ -75,9 +75,9 @@ public void testGetOperationsBasedOnGlobalSequenceId() throws Exception { assertThat(operation.seqNo(), equalTo(2L)); assertThat(operation.id(), equalTo("3")); - client().prepareIndex("index").setId("3").setSource("{}", XContentType.JSON).get(); - client().prepareIndex("index").setId("4").setSource("{}", XContentType.JSON).get(); - client().prepareIndex("index").setId("5").setSource("{}", XContentType.JSON).get(); + prepareIndex("index").setId("3").setSource("{}", XContentType.JSON).get(); + prepareIndex("index").setId("4").setSource("{}", XContentType.JSON).get(); + prepareIndex("index").setId("5").setSource("{}", XContentType.JSON).get(); shardStats = client().admin().indices().prepareStats("index").get().getIndex("index").getShards()[0]; globalCheckPoint = shardStats.getSeqNoStats().getGlobalCheckpoint(); @@ -110,7 +110,7 @@ public void testMissingOperations() throws Exception { .get(); for (int i = 0; i < 32; i++) { - client().prepareIndex("index").setId("1").setSource("{}", XContentType.JSON).get(); + prepareIndex("index").setId("1").setSource("{}", XContentType.JSON).get(); client().prepareDelete("index", "1").get(); client().admin().indices().flush(new FlushRequest("index").force(true)).actionGet(); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java index 0d28f99a64237..980710d83d52a 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -527,7 +527,7 @@ protected synchronized void recoverPrimary(IndexShard primaryShard) { ) ); primaryShard.markAsRecovering("remote recovery from leader", new RecoveryState(routing, localNode, null)); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); primaryShard.restoreFromRepository(new RestoreOnlyRepository(index.getName()) { @Override public void restoreShard( diff --git 
a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkActionTests.java index 629b98a25ba4b..61866dbf2029f 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkActionTests.java @@ -126,7 +126,7 @@ public void testRequestedShardIdMustBeConsistentWithSessionShardId() { final PlainActionFuture future1 = new PlainActionFuture<>(); action.doExecute(mock(Task.class), request1, future1); // The actual response content does not matter as long as the future executes without any error - future1.actionGet().decRef(); + future1.actionGet(); // 2. Inconsistent requested ShardId final var request2 = new GetCcrRestoreFileChunkRequest( diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java index c51b7c346272c..362f757204aa1 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java @@ -143,7 +143,7 @@ public void testRestoreShard() throws IOException { DiscoveryNode localNode = DiscoveryNodeUtils.builder("foo").roles(emptySet()).build(); target.markAsRecovering("store", new RecoveryState(routing, localNode, null)); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); target.restoreFromRepository(new RestoreOnlyRepository("test") { @Override public void restoreShard( diff --git a/x-pack/plugin/core/build.gradle b/x-pack/plugin/core/build.gradle index 8368f6e43dfcf..7e7e04510e2a9 100644 --- a/x-pack/plugin/core/build.gradle +++ b/x-pack/plugin/core/build.gradle @@ -48,6 +48,7 @@ dependencies { // security deps api 'com.unboundid:unboundid-ldapsdk:6.0.3' + api "com.nimbusds:nimbus-jose-jwt:9.23" implementation project(":x-pack:plugin:core:template-resources") @@ -62,7 +63,7 @@ dependencies { testImplementation project(path: ':modules:health-shards-availability') testImplementation project(":client:rest-high-level") // Needed for Fips140ProviderVerificationTests - testCompileOnly('org.bouncycastle:bc-fips:1.0.2') + testCompileOnly('org.bouncycastle:bc-fips:1.0.2.4') testImplementation(project(':x-pack:license-tools')) { transitive = false @@ -131,7 +132,27 @@ tasks.named("thirdPartyAudit").configure { //commons-logging provided dependencies 'javax.servlet.ServletContextEvent', 'javax.servlet.ServletContextListener', - 'javax.jms.Message' + 'javax.jms.Message', + // Optional dependency of nimbus-jose-jwt for handling Ed25519 signatures and ECDH with X25519 (RFC 8037) + 'com.google.crypto.tink.subtle.Ed25519Sign', + 'com.google.crypto.tink.subtle.Ed25519Sign$KeyPair', + 'com.google.crypto.tink.subtle.Ed25519Verify', + 'com.google.crypto.tink.subtle.X25519', + 'com.google.crypto.tink.subtle.XChaCha20Poly1305', + // optional dependencies for nimbus-jose-jwt + 'org.bouncycastle.asn1.pkcs.PrivateKeyInfo', + 'org.bouncycastle.asn1.x509.AlgorithmIdentifier', + 'org.bouncycastle.asn1.x509.SubjectPublicKeyInfo', + 
'org.bouncycastle.cert.X509CertificateHolder', + 'org.bouncycastle.cert.jcajce.JcaX509CertificateHolder', + 'org.bouncycastle.crypto.InvalidCipherTextException', + 'org.bouncycastle.crypto.engines.AESEngine', + 'org.bouncycastle.crypto.modes.GCMBlockCipher', + 'org.bouncycastle.jcajce.provider.BouncyCastleFipsProvider', + 'org.bouncycastle.jce.provider.BouncyCastleProvider', + 'org.bouncycastle.openssl.PEMKeyPair', + 'org.bouncycastle.openssl.PEMParser', + 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter' ) } diff --git a/x-pack/plugin/security/licenses/nimbus-jose-jwt-LICENSE.txt b/x-pack/plugin/core/licenses/nimbus-jose-jwt-LICENSE.txt similarity index 100% rename from x-pack/plugin/security/licenses/nimbus-jose-jwt-LICENSE.txt rename to x-pack/plugin/core/licenses/nimbus-jose-jwt-LICENSE.txt diff --git a/x-pack/plugin/security/licenses/nimbus-jose-jwt-NOTICE.txt b/x-pack/plugin/core/licenses/nimbus-jose-jwt-NOTICE.txt similarity index 100% rename from x-pack/plugin/security/licenses/nimbus-jose-jwt-NOTICE.txt rename to x-pack/plugin/core/licenses/nimbus-jose-jwt-NOTICE.txt diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/nodesinfo/ComponentVersionsNodesInfoIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/nodesinfo/ComponentVersionsNodesInfoIT.java index e5131fa482b7c..7f4fca7063cd5 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/nodesinfo/ComponentVersionsNodesInfoIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/nodesinfo/ComponentVersionsNodesInfoIT.java @@ -29,7 +29,7 @@ public void testNodesInfoComponentVersions() { String server1NodeId = internalCluster().getInstance(ClusterService.class, node_1).state().nodes().getLocalNodeId(); logger.info("--> started nodes: {}", server1NodeId); - NodesInfoResponse response = clusterAdmin().prepareNodesInfo().execute().actionGet(); + NodesInfoResponse response = clusterAdmin().prepareNodesInfo().get(); assertThat(response.getNodesMap().get(server1NodeId), notNullValue()); assertThat( response.getNodesMap().get(server1NodeId).getComponentVersions().keySet(), diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java index 02f61498fa93a..eca3bdea374b4 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java @@ -169,10 +169,10 @@ public void testSnapshotWithDanglingLocalSegment() throws Exception { final String indexName = "test-idx"; createIndex(indexName); - client().prepareIndex(indexName).setSource("foo", "bar").get(); + prepareIndex(indexName).setSource("foo", "bar").get(); assertSuccessful(startFullSnapshot(repo, "snapshot-1")); - client().prepareIndex(indexName).setSource("foo", "baz").get(); + prepareIndex(indexName).setSource("foo", "baz").get(); assertSuccessful(startFullSnapshot(repo, "snapshot-2")); logger.info("--> randomly deleting files from the local _snapshot path to simulate corruption"); @@ -332,7 +332,7 @@ private IndexRequestBuilder[] snapshotAndRestore(final String sourceIdx, final b source.endArray(); } source.endObject(); - builders[i] = client().prepareIndex(sourceIdx).setId(Integer.toString(i)).setSource(source).setRouting("r" + i); + 
builders[i] = prepareIndex(sourceIdx).setId(Integer.toString(i)).setSource(source).setRouting("r" + i); } indexRandom(true, builders); flushAndRefresh(); diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java index 6421b70f9e453..f46c97e0ffda6 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java @@ -26,9 +26,9 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xpack.core.DataTiersFeatureSetUsage; import org.elasticsearch.xpack.core.action.XPackUsageRequestBuilder; import org.elasticsearch.xpack.core.action.XPackUsageResponse; +import org.elasticsearch.xpack.core.datatiers.DataTiersFeatureSetUsage; import org.junit.Before; import java.util.ArrayList; @@ -369,9 +369,9 @@ public void testDataTierTelemetry() { indicesAdmin().prepareCreate(index + "2").setSettings(indexSettings(1, 1)).setWaitForActiveShards(0).get(); ensureGreen(); - client().prepareIndex(index).setSource("foo", "bar").get(); - client().prepareIndex(index + "2").setSource("foo", "bar").get(); - client().prepareIndex(index + "2").setSource("foo", "bar").get(); + prepareIndex(index).setSource("foo", "bar").get(); + prepareIndex(index + "2").setSource("foo", "bar").get(); + prepareIndex(index + "2").setSource("foo", "bar").get(); refresh(index, index + "2"); DataTiersFeatureSetUsage usage = getUsage(); @@ -417,7 +417,7 @@ private void updatePreference(String tier) { } private DataTiersFeatureSetUsage getUsage() { - XPackUsageResponse usages = new XPackUsageRequestBuilder(client()).execute().actionGet(); + XPackUsageResponse usages = new XPackUsageRequestBuilder(client()).get(); return usages.getUsages() .stream() .filter(u -> u instanceof DataTiersFeatureSetUsage) diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierShardAvailabilityHealthIndicatorIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierShardAvailabilityHealthIndicatorIT.java index 82784d9112a1b..628e2c18de2f9 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierShardAvailabilityHealthIndicatorIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierShardAvailabilityHealthIndicatorIT.java @@ -171,7 +171,7 @@ private void indexRandomData(String indexName) throws Exception { int numDocs = scaledRandomIntBetween(100, 1000); IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex(indexName).setSource("field", "value"); + builders[i] = prepareIndex(indexName).setSource("field", "value"); } // we want to test both full divergent copies of the shard in terms of segments, and // a case where they are the same (using sync flush), index Random does all this goodness diff --git 
a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportActionIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportActionIT.java index 8c16a3c764bbc..c102470628a00 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportActionIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportActionIT.java @@ -71,7 +71,8 @@ private void cleanup() throws Exception { @SuppressWarnings("unchecked") public void testAction() throws Exception { assertUsageResults(0, 0, 0, 0.0, true); - AtomicLong count = new AtomicLong(0); + AtomicLong totalCount = new AtomicLong(0); + AtomicLong countLifecycleWithRetention = new AtomicLong(0); AtomicLong totalRetentionTimes = new AtomicLong(0); AtomicLong minRetention = new AtomicLong(Long.MAX_VALUE); AtomicLong maxRetention = new AtomicLong(Long.MIN_VALUE); @@ -94,11 +95,13 @@ public void testAction() throws Exception { if (hasLifecycle) { if (randomBoolean()) { lifecycle = new DataStreamLifecycle(null, null, null); + totalCount.incrementAndGet(); } else { long retentionMillis = randomLongBetween(1000, 100000); boolean isEnabled = randomBoolean(); if (isEnabled) { - count.incrementAndGet(); + totalCount.incrementAndGet(); + countLifecycleWithRetention.incrementAndGet(); totalRetentionTimes.addAndGet(retentionMillis); if (retentionMillis < minRetention.get()) { @@ -129,7 +132,9 @@ public void testAction() throws Exception { systemDataStream, randomBoolean(), IndexMode.STANDARD, - lifecycle + lifecycle, + false, + List.of() ); dataStreamMap.put(dataStream.getName(), dataStream); } @@ -141,9 +146,11 @@ public void testAction() throws Exception { }); int expectedMinimumRetention = minRetention.get() == Long.MAX_VALUE ? 0 : minRetention.intValue(); int expectedMaximumRetention = maxRetention.get() == Long.MIN_VALUE ? 0 : maxRetention.intValue(); - double expectedAverageRetention = count.get() == 0 ? 0.0 : totalRetentionTimes.doubleValue() / count.get(); + double expectedAverageRetention = countLifecycleWithRetention.get() == 0 + ? 
0.0 + : totalRetentionTimes.doubleValue() / countLifecycleWithRetention.get(); assertUsageResults( - count.intValue(), + totalCount.intValue(), expectedMinimumRetention, expectedMaximumRetention, expectedAverageRetention, @@ -184,7 +191,7 @@ private void assertUsageResults( * Updates the cluster state in the internal cluster using the provided function */ protected static void updateClusterState(final Function updater) throws Exception { - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); final ClusterService clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); clusterService.submitUnbatchedStateUpdateTask("test", new ClusterStateUpdateTask() { @Override diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/rest/action/DataTiersUsageRestCancellationIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/rest/action/DataTiersUsageRestCancellationIT.java index f669bb8589fd7..faeb760b3c181 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/rest/action/DataTiersUsageRestCancellationIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/rest/action/DataTiersUsageRestCancellationIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.core.rest.action; import org.apache.http.client.methods.HttpGet; -import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.PlainActionFuture; @@ -35,6 +34,7 @@ import org.elasticsearch.xpack.core.action.XPackUsageAction; import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; import org.elasticsearch.xpack.core.action.XPackUsageResponse; +import org.elasticsearch.xpack.core.datatiers.NodesDataTiersUsageTransportAction; import java.nio.file.Path; import java.util.Arrays; @@ -76,7 +76,7 @@ public void testCancellation() throws Exception { final SubscribableListener nodeStatsRequestsReleaseListener = new SubscribableListener<>(); for (TransportService transportService : internalCluster().getInstances(TransportService.class)) { ((MockTransportService) transportService).addRequestHandlingBehavior( - TransportNodesStatsAction.TYPE.name() + "[n]", + NodesDataTiersUsageTransportAction.TYPE.name() + "[n]", (handler, request, channel, task) -> { tasksBlockedLatch.countDown(); nodeStatsRequestsReleaseListener.addListener( @@ -94,14 +94,13 @@ public void testCancellation() throws Exception { safeAwait(tasksBlockedLatch); // must wait for the node-level tasks to start to avoid cancelling being handled earlier cancellable.cancel(); - // NB this test works by blocking node-level stats requests; when #100230 is addressed this will need to target a different action. 
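// Editor's note: a minimal, illustrative sketch (not part of the patch) of the request-blocking
// pattern this cancellation test relies on, now aimed at the dedicated data-tiers action instead of
// the generic node-stats action. The names mirror the surrounding test; the listener body is an
// assumption for illustration only.
((MockTransportService) transportService).addRequestHandlingBehavior(
    NodesDataTiersUsageTransportAction.TYPE.name() + "[n]", // the per-node leg of the nodes action
    (handler, request, channel, task) -> {
        tasksBlockedLatch.countDown(); // node-level work has started, so cancelling is now meaningful
        // park the request until the test releases it, then hand it to the real handler
        nodeStatsRequestsReleaseListener.addListener(
            ActionTestUtils.assertNoFailureListener(ignored -> handler.messageReceived(request, channel, task))
        );
    }
);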
- assertAllCancellableTasksAreCancelled(TransportNodesStatsAction.TYPE.name()); + assertAllCancellableTasksAreCancelled(NodesDataTiersUsageTransportAction.TYPE.name()); assertAllCancellableTasksAreCancelled(XPackUsageAction.NAME); nodeStatsRequestsReleaseListener.onResponse(null); expectThrows(CancellationException.class, future::actionGet); - assertAllTasksHaveFinished(TransportNodesStatsAction.TYPE.name()); + assertAllTasksHaveFinished(NodesDataTiersUsageTransportAction.TYPE.name()); assertAllTasksHaveFinished(XPackUsageAction.NAME); } diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/template/RolloverEnabledTestTemplateRegistry.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/template/RolloverEnabledTestTemplateRegistry.java index 3f2856472a182..819b0e01ac4de 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/template/RolloverEnabledTestTemplateRegistry.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/template/RolloverEnabledTestTemplateRegistry.java @@ -43,15 +43,12 @@ protected String getOrigin() { protected Map getComposableTemplateConfigs() { return Map.of( TEST_INDEX_TEMPLATE_ID, - new ComposableIndexTemplate( - List.of(TEST_INDEX_PATTERN), - null, - null, - 100L, - version, - null, - new ComposableIndexTemplate.DataStreamTemplate() - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of(TEST_INDEX_PATTERN)) + .priority(100L) + .version(version) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); } diff --git a/x-pack/plugin/core/src/main/java/module-info.java b/x-pack/plugin/core/src/main/java/module-info.java index deb3c4384a04b..eb1271edd3b06 100644 --- a/x-pack/plugin/core/src/main/java/module-info.java +++ b/x-pack/plugin/core/src/main/java/module-info.java @@ -22,6 +22,7 @@ requires unboundid.ldapsdk; requires org.elasticsearch.tdigest; requires org.elasticsearch.xcore.templates; + requires com.nimbusds.jose.jwt; exports org.elasticsearch.index.engine.frozen; exports org.elasticsearch.license; @@ -57,6 +58,7 @@ exports org.elasticsearch.xpack.core.common.validation; exports org.elasticsearch.xpack.core.common; exports org.elasticsearch.xpack.core.datastreams; + exports org.elasticsearch.xpack.core.datatiers; exports org.elasticsearch.xpack.core.deprecation; exports org.elasticsearch.xpack.core.downsample; exports org.elasticsearch.xpack.core.enrich.action; @@ -221,9 +223,12 @@ exports org.elasticsearch.xpack.core.watcher.trigger; exports org.elasticsearch.xpack.core.watcher.watch; exports org.elasticsearch.xpack.core.watcher; + exports org.elasticsearch.xpack.core.ml.ltr; provides org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber with org.elasticsearch.xpack.core.ml.MlConfigVersionComponent, org.elasticsearch.xpack.core.transform.TransformConfigVersionComponent; + + provides org.elasticsearch.features.FeatureSpecification with org.elasticsearch.xpack.core.XPackFeatures; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetadata.java index 769c5b2847de6..e06d14341b61e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetadata.java @@ -72,7 +72,7 @@ boolean isEligibleForTrial() { if (trialLicenseVersion == null) { return true; } - 
return trialLicenseVersion.ableToStartNewTrialSince(TrialLicenseVersion.CURRENT); + return trialLicenseVersion.ableToStartNewTrial(); } TrialLicenseVersion getMostRecentTrialVersion() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/internal/TrialLicenseVersion.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/internal/TrialLicenseVersion.java index 6de9fec098a78..4fa8332df9fb5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/internal/TrialLicenseVersion.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/internal/TrialLicenseVersion.java @@ -81,12 +81,14 @@ int asInt() { return trialVersion; } - public boolean ableToStartNewTrialSince(TrialLicenseVersion since) { - if (since.asInt() < TRIAL_VERSION_CUTOVER) { - int sinceMajorVersion = since.asInt() / 1_000_000; // integer division is intentional + public boolean ableToStartNewTrial() { + assert trialVersion <= CURRENT.trialVersion + : "trial version [" + trialVersion + "] cannot be greater than CURRENT [" + CURRENT.trialVersion + "]"; + if (trialVersion < TRIAL_VERSION_CUTOVER) { + int sinceMajorVersion = trialVersion / 1_000_000; // integer division is intentional return sinceMajorVersion < TRIAL_VERSION_CUTOVER_MAJOR; } - return since.asInt() < trialVersion; + return trialVersion != CURRENT.trialVersion; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java index 4a2f6fbd8289f..6b32e377deed4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java @@ -192,6 +192,7 @@ private static String maybeRewriteSingleAuthenticationHeaderForVersion( public static final String LOGSTASH_MANAGEMENT_ORIGIN = "logstash_management"; public static final String FLEET_ORIGIN = "fleet"; public static final String ENT_SEARCH_ORIGIN = "enterprise_search"; + public static final String CONNECTORS_ORIGIN = "connectors"; public static final String INFERENCE_ORIGIN = "inference"; public static final String APM_ORIGIN = "apm"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersUsageTransportAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersUsageTransportAction.java deleted file mode 100644 index 295df1ea51b6b..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersUsageTransportAction.java +++ /dev/null @@ -1,259 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.core; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; -import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; -import org.elasticsearch.action.admin.indices.stats.IndexShardStats; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.client.internal.Client; -import org.elasticsearch.client.internal.ParentTaskAssigningClient; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.node.DiscoveryNodeRole; -import org.elasticsearch.cluster.routing.RoutingNode; -import org.elasticsearch.cluster.routing.RoutingNodes; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.cluster.routing.allocation.DataTier; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.store.StoreStats; -import org.elasticsearch.protocol.xpack.XPackUsageRequest; -import org.elasticsearch.search.aggregations.metrics.TDigestState; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; -import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; -import org.elasticsearch.xpack.core.action.XPackUsageFeatureTransportAction; - -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.stream.StreamSupport; - -public class DataTiersUsageTransportAction extends XPackUsageFeatureTransportAction { - - private final Client client; - - @Inject - public DataTiersUsageTransportAction( - TransportService transportService, - ClusterService clusterService, - ThreadPool threadPool, - ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, - Client client - ) { - super( - XPackUsageFeatureAction.DATA_TIERS.name(), - transportService, - clusterService, - threadPool, - actionFilters, - indexNameExpressionResolver - ); - this.client = client; - } - - @Override - protected void masterOperation( - Task task, - XPackUsageRequest request, - ClusterState state, - ActionListener listener - ) { - new ParentTaskAssigningClient(client, clusterService.localNode(), task).admin() - .cluster() - .prepareNodesStats() - .all() - .setIndices(CommonStatsFlags.ALL) - .execute(listener.delegateFailureAndWrap((delegate, nodesStatsResponse) -> { - final RoutingNodes routingNodes = state.getRoutingNodes(); - final Map indices = state.getMetadata().getIndices(); - - // Determine which tiers each index would prefer to be within - Map indicesToTiers = tierIndices(indices); - - // Generate tier specific stats for the nodes and indices - Map tierSpecificStats = calculateStats( - nodesStatsResponse.getNodes(), - indicesToTiers, - routingNodes - ); - - delegate.onResponse(new XPackUsageFeatureResponse(new DataTiersFeatureSetUsage(tierSpecificStats))); - })); - } - - // Visible for testing - // Takes a registry of indices and returns a mapping of index name to which tier it most prefers. Always 1 to 1, some may filter out. 
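// Editor's note: the removed helper below survives, reworked, as getIndicesGroupedByTier(...) in the
// relocated org.elasticsearch.xpack.core.datatiers.DataTiersUsageTransportAction added later in this
// patch, which reads the parsed IndexMetadata#getTierPreference() instead of re-splitting the raw
// setting string. A minimal sketch of the new lookup, assuming indexMetadata, indexName and the
// indicesByTierPreference map are in scope:
List<String> tierPreference = indexMetadata.getTierPreference(); // parsed form of the tier preference setting
if (tierPreference.isEmpty() == false) {
    // only the most preferred tier (the first entry) is credited with the index
    indicesByTierPreference.computeIfAbsent(tierPreference.get(0), ignored -> new HashSet<>()).add(indexName);
}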
- static Map tierIndices(Map indices) { - Map indexByTier = new HashMap<>(); - indices.entrySet().forEach(entry -> { - String tierPref = entry.getValue().getSettings().get(DataTier.TIER_PREFERENCE); - if (Strings.hasText(tierPref)) { - String[] tiers = tierPref.split(","); - if (tiers.length > 0) { - indexByTier.put(entry.getKey(), tiers[0]); - } - } - }); - return indexByTier; - } - - /** - * Accumulator to hold intermediate data tier stats before final calculation. - */ - private static class TierStatsAccumulator { - int nodeCount = 0; - Set indexNames = new HashSet<>(); - int totalShardCount = 0; - long totalByteCount = 0; - long docCount = 0; - int primaryShardCount = 0; - long primaryByteCount = 0L; - final TDigestState valueSketch = TDigestState.create(1000); - } - - // Visible for testing - static Map calculateStats( - List nodesStats, - Map indexByTier, - RoutingNodes routingNodes - ) { - Map statsAccumulators = new HashMap<>(); - for (NodeStats nodeStats : nodesStats) { - aggregateDataTierNodeCounts(nodeStats, statsAccumulators); - aggregateDataTierIndexStats(nodeStats, routingNodes, indexByTier, statsAccumulators); - } - Map results = new HashMap<>(); - for (Map.Entry entry : statsAccumulators.entrySet()) { - results.put(entry.getKey(), calculateFinalTierStats(entry.getValue())); - } - return results; - } - - /** - * Determine which data tiers this node belongs to (if any), and increment the node counts for those tiers. - */ - private static void aggregateDataTierNodeCounts(NodeStats nodeStats, Map tiersStats) { - nodeStats.getNode() - .getRoles() - .stream() - .map(DiscoveryNodeRole::roleName) - .filter(DataTier::validTierName) - .forEach(tier -> tiersStats.computeIfAbsent(tier, k -> new TierStatsAccumulator()).nodeCount++); - } - - /** - * Locate which indices are hosted on the node specified by the NodeStats, then group and aggregate the available index stats by tier. - */ - private static void aggregateDataTierIndexStats( - NodeStats nodeStats, - RoutingNodes routingNodes, - Map indexByTier, - Map accumulators - ) { - final RoutingNode node = routingNodes.node(nodeStats.getNode().getId()); - if (node != null) { - StreamSupport.stream(node.spliterator(), false) - .map(ShardRouting::index) - .distinct() - .forEach(index -> classifyIndexAndCollectStats(index, nodeStats, indexByTier, node, accumulators)); - } - } - - /** - * Determine which tier an index belongs in, then accumulate its stats into that tier's stats. - */ - private static void classifyIndexAndCollectStats( - Index index, - NodeStats nodeStats, - Map indexByTier, - RoutingNode node, - Map accumulators - ) { - // Look up which tier this index belongs to (its most preferred) - String indexTier = indexByTier.get(index.getName()); - if (indexTier != null) { - final TierStatsAccumulator accumulator = accumulators.computeIfAbsent(indexTier, k -> new TierStatsAccumulator()); - accumulator.indexNames.add(index.getName()); - aggregateDataTierShardStats(nodeStats, index, node, accumulator); - } - } - - /** - * Collect shard-level data tier stats from shard stats contained in the node stats response. 
- */ - private static void aggregateDataTierShardStats(NodeStats nodeStats, Index index, RoutingNode node, TierStatsAccumulator accumulator) { - // Shard based stats - final List allShardStats = nodeStats.getIndices().getShardStats(index); - if (allShardStats != null) { - for (IndexShardStats shardStat : allShardStats) { - accumulator.totalByteCount += shardStat.getTotal().getStore().totalDataSetSizeInBytes(); - accumulator.docCount += shardStat.getTotal().getDocs().getCount(); - - // Accumulate stats about started shards - ShardRouting shardRouting = node.getByShardId(shardStat.getShardId()); - if (shardRouting != null && shardRouting.state() == ShardRoutingState.STARTED) { - accumulator.totalShardCount += 1; - - // Accumulate stats about started primary shards - StoreStats primaryStoreStats = shardStat.getPrimary().getStore(); - if (primaryStoreStats != null) { - // if primaryStoreStats is null, it means there is no primary on the node in question - accumulator.primaryShardCount++; - long primarySize = primaryStoreStats.totalDataSetSizeInBytes(); - accumulator.primaryByteCount += primarySize; - accumulator.valueSketch.add(primarySize); - } - } - } - } - } - - private static DataTiersFeatureSetUsage.TierSpecificStats calculateFinalTierStats(TierStatsAccumulator accumulator) { - long primaryShardSizeMedian = (long) accumulator.valueSketch.quantile(0.5); - long primaryShardSizeMAD = computeMedianAbsoluteDeviation(accumulator.valueSketch); - return new DataTiersFeatureSetUsage.TierSpecificStats( - accumulator.nodeCount, - accumulator.indexNames.size(), - accumulator.totalShardCount, - accumulator.primaryShardCount, - accumulator.docCount, - accumulator.totalByteCount, - accumulator.primaryByteCount, - primaryShardSizeMedian, - primaryShardSizeMAD - ); - } - - // Visible for testing - static long computeMedianAbsoluteDeviation(TDigestState valuesSketch) { - if (valuesSketch.size() == 0) { - return 0; - } else { - final double approximateMedian = valuesSketch.quantile(0.5); - final TDigestState approximatedDeviationsSketch = TDigestState.createUsingParamsFrom(valuesSketch); - valuesSketch.centroids().forEach(centroid -> { - final double deviation = Math.abs(approximateMedian - centroid.mean()); - approximatedDeviationsSketch.add(deviation, centroid.count()); - }); - - return (long) approximatedDeviationsSketch.quantile(0.5); - } - } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiUsageTransportAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiUsageTransportAction.java index a90b8bf65fa25..bd641d1eb8c17 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiUsageTransportAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiUsageTransportAction.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.core; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ContextPreservingActionListener; @@ -15,6 +14,8 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.health.stats.HealthApiStatsAction; import org.elasticsearch.protocol.xpack.XPackUsageRequest; import org.elasticsearch.tasks.Task; @@ 
-28,7 +29,11 @@ * This action provides telemetry of the cluster's health api usage. */ public class HealthApiUsageTransportAction extends XPackUsageFeatureTransportAction { + + static final NodeFeature SUPPORTS_HEALTH_STATS = new NodeFeature("health.supports_health_stats"); + private final Client client; + private final FeatureService featureService; @Inject public HealthApiUsageTransportAction( @@ -37,7 +42,8 @@ public HealthApiUsageTransportAction( ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - Client client + Client client, + FeatureService featureService ) { super( XPackUsageFeatureAction.HEALTH.name(), @@ -48,6 +54,7 @@ public HealthApiUsageTransportAction( indexNameExpressionResolver ); this.client = client; + this.featureService = featureService; } @Override @@ -63,7 +70,7 @@ protected void masterOperation( client.threadPool().getThreadContext() ); - if (state.nodesIfRecovered().getMinNodeVersion().onOrAfter(Version.V_8_7_0)) { + if (state.clusterRecovered() && featureService.clusterHasFeature(state, SUPPORTS_HEALTH_STATS)) { HealthApiStatsAction.Request statsRequest = new HealthApiStatsAction.Request(); statsRequest.setParentTask(clusterService.localNode().getId(), task.getId()); client.execute(HealthApiStatsAction.INSTANCE, statsRequest, preservingListener.delegateFailureAndWrap((l, r) -> { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index 6d019e50f9d5f..ac16631bacb73 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -28,6 +28,7 @@ import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.datastreams.DataStreamFeatureSetUsage; import org.elasticsearch.xpack.core.datastreams.DataStreamLifecycleFeatureSetUsage; +import org.elasticsearch.xpack.core.datatiers.DataTiersFeatureSetUsage; import org.elasticsearch.xpack.core.downsample.DownsampleShardStatus; import org.elasticsearch.xpack.core.enrich.EnrichFeatureSetUsage; import org.elasticsearch.xpack.core.enrich.action.ExecuteEnrichPolicyStatus; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatures.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatures.java new file mode 100644 index 0000000000000..887b40b5ab64b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatures.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core; + +import org.elasticsearch.Version; +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.xpack.core.datatiers.NodesDataTiersUsageTransportAction; + +import java.util.Map; +import java.util.Set; + +/** + * Provides the XPack features that this version of the code supports + */ +public class XPackFeatures implements FeatureSpecification { + + @Override + public Set getFeatures() { + return Set.of( + NodesDataTiersUsageTransportAction.LOCALLY_PRECALCULATED_STATS_FEATURE // Added in 8.12 + ); + } + + @Override + public Map getHistoricalFeatures() { + return Map.of(HealthApiUsageTransportAction.SUPPORTS_HEALTH_STATS, Version.V_8_7_0); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java index d02e3f43d80cb..66534cccff064 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -98,6 +98,9 @@ import org.elasticsearch.xpack.core.action.XPackUsageResponse; import org.elasticsearch.xpack.core.async.DeleteAsyncResultAction; import org.elasticsearch.xpack.core.async.TransportDeleteAsyncResultAction; +import org.elasticsearch.xpack.core.datatiers.DataTiersInfoTransportAction; +import org.elasticsearch.xpack.core.datatiers.DataTiersUsageTransportAction; +import org.elasticsearch.xpack.core.datatiers.NodesDataTiersUsageTransportAction; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.rest.action.RestXPackInfoAction; import org.elasticsearch.xpack.core.rest.action.RestXPackUsageAction; @@ -362,6 +365,7 @@ public Collection createComponents(PluginServices services) { actions.add(new ActionHandler<>(XPackUsageFeatureAction.DATA_STREAM_LIFECYCLE, DataStreamLifecycleUsageTransportAction.class)); actions.add(new ActionHandler<>(XPackUsageFeatureAction.HEALTH, HealthApiUsageTransportAction.class)); actions.add(new ActionHandler<>(XPackUsageFeatureAction.REMOTE_CLUSTERS, RemoteClusterUsageTransportAction.class)); + actions.add(new ActionHandler<>(NodesDataTiersUsageTransportAction.TYPE, NodesDataTiersUsageTransportAction.class)); return actions; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java index 9a53653c454e3..133819cd601d7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.core; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -30,7 +29,6 @@ import java.util.function.Function; import javax.crypto.SecretKeyFactory; -import javax.net.ssl.SSLContext; import static org.elasticsearch.xpack.core.security.SecurityField.USER_SETTING; import static org.elasticsearch.xpack.core.security.authc.RealmSettings.DOMAIN_TO_REALM_ASSOC_SETTING; @@ -255,19 +253,7 @@ public static Setting defaultStoredHashAlgorithmSetting(String key, Func }, Property.NodeScope); } - public static final List DEFAULT_SUPPORTED_PROTOCOLS; - - static { - boolean supportsTLSv13 = 
false; - try { - SSLContext.getInstance("TLSv1.3"); - supportsTLSv13 = true; - } catch (NoSuchAlgorithmException e) { - // BCJSSE in FIPS mode doesn't support TLSv1.3 yet. - LogManager.getLogger(XPackSettings.class).debug("TLSv1.3 is not supported", e); - } - DEFAULT_SUPPORTED_PROTOCOLS = supportsTLSv13 ? Arrays.asList("TLSv1.3", "TLSv1.2", "TLSv1.1") : Arrays.asList("TLSv1.2", "TLSv1.1"); - } + public static final List DEFAULT_SUPPORTED_PROTOCOLS = Arrays.asList("TLSv1.3", "TLSv1.2", "TLSv1.1"); public static final SslClientAuthenticationMode CLIENT_AUTH_DEFAULT = SslClientAuthenticationMode.REQUIRED; public static final SslClientAuthenticationMode HTTP_CLIENT_AUTH_DEFAULT = SslClientAuthenticationMode.NONE; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportAction.java index 4344b59483651..fb49ba6c7e7a7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.core.Tuple; import org.elasticsearch.protocol.xpack.XPackUsageRequest; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -24,7 +25,6 @@ import java.util.Collection; import java.util.LongSummaryStatistics; -import java.util.stream.Collectors; public class DataStreamLifecycleUsageTransportAction extends XPackUsageFeatureTransportAction { @@ -54,26 +54,42 @@ protected void masterOperation( ActionListener listener ) { final Collection dataStreams = state.metadata().dataStreams().values(); - LongSummaryStatistics retentionStats = dataStreams.stream() - .filter(ds -> ds.getLifecycle() != null && ds.getLifecycle().isEnabled()) - .filter(ds -> ds.getLifecycle().getEffectiveDataRetention() != null) - .collect(Collectors.summarizingLong(ds -> ds.getLifecycle().getEffectiveDataRetention().getMillis())); - long dataStreamsWithLifecycles = retentionStats.getCount(); - long minRetention = dataStreamsWithLifecycles == 0 ? 0 : retentionStats.getMin(); - long maxRetention = dataStreamsWithLifecycles == 0 ? 0 : retentionStats.getMax(); - double averageRetention = retentionStats.getAverage(); + Tuple stats = calculateStats(dataStreams); + + long minRetention = stats.v2().getCount() == 0 ? 0 : stats.v2().getMin(); + long maxRetention = stats.v2().getCount() == 0 ? 
0 : stats.v2().getMax(); + double averageRetention = stats.v2().getAverage(); RolloverConfiguration rolloverConfiguration = clusterService.getClusterSettings() .get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING); String rolloverConfigString = rolloverConfiguration.toString(); - final DataStreamLifecycleFeatureSetUsage.LifecycleStats stats = new DataStreamLifecycleFeatureSetUsage.LifecycleStats( - dataStreamsWithLifecycles, + final DataStreamLifecycleFeatureSetUsage.LifecycleStats lifecycleStats = new DataStreamLifecycleFeatureSetUsage.LifecycleStats( + stats.v1(), minRetention, maxRetention, averageRetention, DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING.getDefault(null).toString().equals(rolloverConfigString) ); - final DataStreamLifecycleFeatureSetUsage usage = new DataStreamLifecycleFeatureSetUsage(stats); + final DataStreamLifecycleFeatureSetUsage usage = new DataStreamLifecycleFeatureSetUsage(lifecycleStats); listener.onResponse(new XPackUsageFeatureResponse(usage)); } + + /** + * Counts the number of data streams that have a lifecycle configured (and enabled) and, for + * the data streams that have a lifecycle, computes the min/max/average summary of the effective + * configured retention. + */ + public static Tuple<Long, LongSummaryStatistics> calculateStats(Collection<DataStream> dataStreams) { + long dataStreamsWithLifecycles = 0; + LongSummaryStatistics retentionStats = new LongSummaryStatistics(); + for (DataStream dataStream : dataStreams) { + if (dataStream.getLifecycle() != null && dataStream.getLifecycle().isEnabled()) { + dataStreamsWithLifecycles++; + if (dataStream.getLifecycle().getEffectiveDataRetention() != null) { + retentionStats.accept(dataStream.getLifecycle().getEffectiveDataRetention().getMillis()); + } + } + } + return new Tuple<>(dataStreamsWithLifecycles, retentionStats); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsage.java similarity index 98% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersFeatureSetUsage.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsage.java index 0bf21f66b4888..f990118763bad 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsage.java @@ -5,7 +5,7 @@ * 2.0.
*/ -package org.elasticsearch.xpack.core; +package org.elasticsearch.xpack.core.datatiers; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; @@ -16,6 +16,8 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Collections; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersInfoTransportAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersInfoTransportAction.java similarity index 91% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersInfoTransportAction.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersInfoTransportAction.java index 6134813dc4651..3af1945c53d3f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersInfoTransportAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersInfoTransportAction.java @@ -5,11 +5,12 @@ * 2.0. */ -package org.elasticsearch.xpack.core; +package org.elasticsearch.xpack.core.datatiers; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; import org.elasticsearch.xpack.core.action.XPackInfoFeatureTransportAction; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersUsageTransportAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersUsageTransportAction.java new file mode 100644 index 0000000000000..728309926302a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersUsageTransportAction.java @@ -0,0 +1,255 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.datatiers; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; +import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.ParentTaskAssigningClient; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.DataTier; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.indices.NodeIndicesStats; +import org.elasticsearch.protocol.xpack.XPackUsageRequest; +import org.elasticsearch.search.aggregations.metrics.TDigestState; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureTransportAction; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; + +public class DataTiersUsageTransportAction extends XPackUsageFeatureTransportAction { + + private final Client client; + private final FeatureService featureService; + + @Inject + public DataTiersUsageTransportAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + Client client, + FeatureService featureService + ) { + super( + XPackUsageFeatureAction.DATA_TIERS.name(), + transportService, + clusterService, + threadPool, + actionFilters, + indexNameExpressionResolver + ); + this.client = client; + this.featureService = featureService; + } + + @Override + protected void masterOperation( + Task task, + XPackUsageRequest request, + ClusterState state, + ActionListener listener + ) { + if (featureService.clusterHasFeature(state, NodesDataTiersUsageTransportAction.LOCALLY_PRECALCULATED_STATS_FEATURE)) { + new ParentTaskAssigningClient(client, clusterService.localNode(), task).admin() + .cluster() + .execute( + NodesDataTiersUsageTransportAction.TYPE, + new NodesDataTiersUsageTransportAction.NodesRequest(), + listener.delegateFailureAndWrap((delegate, response) -> { + // Generate tier specific stats for the nodes and indices + delegate.onResponse( + new XPackUsageFeatureResponse( + new DataTiersFeatureSetUsage( + aggregateStats(response.getNodes(), getIndicesGroupedByTier(state, response.getNodes())) + ) + ) + ); + }) + ); + } else { + new ParentTaskAssigningClient(client, clusterService.localNode(), task).admin() + .cluster() + .prepareNodesStats() + .setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Docs, CommonStatsFlags.Flag.Store)) + 
.execute(listener.delegateFailureAndWrap((delegate, nodesStatsResponse) -> { + List response = nodesStatsResponse.getNodes() + .stream() + .map( + nodeStats -> new NodeDataTiersUsage(nodeStats.getNode(), precalculateLocalStatsFromNodeStats(nodeStats, state)) + ) + .toList(); + delegate.onResponse( + new XPackUsageFeatureResponse( + new DataTiersFeatureSetUsage(aggregateStats(response, getIndicesGroupedByTier(state, response))) + ) + ); + })); + } + } + + // Visible for testing + static Map> getIndicesGroupedByTier(ClusterState state, List nodes) { + Set indices = nodes.stream() + .map(nodeResponse -> state.getRoutingNodes().node(nodeResponse.getNode().getId())) + .filter(Objects::nonNull) + .flatMap(node -> StreamSupport.stream(node.spliterator(), false)) + .map(ShardRouting::getIndexName) + .collect(Collectors.toSet()); + Map> indicesByTierPreference = new HashMap<>(); + for (String indexName : indices) { + IndexMetadata indexMetadata = state.metadata().index(indexName); + // If the index was deleted in the meantime, skip + if (indexMetadata == null) { + continue; + } + List tierPreference = indexMetadata.getTierPreference(); + if (tierPreference.isEmpty() == false) { + indicesByTierPreference.computeIfAbsent(tierPreference.get(0), ignored -> new HashSet<>()).add(indexName); + } + } + return indicesByTierPreference; + } + + /** + * Accumulator to hold intermediate data tier stats before final calculation. + */ + private static class TierStatsAccumulator { + int nodeCount = 0; + Set indexNames = new HashSet<>(); + int totalShardCount = 0; + long totalByteCount = 0; + long docCount = 0; + int primaryShardCount = 0; + long primaryByteCount = 0L; + final TDigestState valueSketch = TDigestState.create(1000); + } + + // Visible for testing + static Map aggregateStats( + List nodeDataTiersUsages, + Map> tierPreference + ) { + Map statsAccumulators = new HashMap<>(); + for (String tier : tierPreference.keySet()) { + statsAccumulators.put(tier, new TierStatsAccumulator()); + statsAccumulators.get(tier).indexNames.addAll(tierPreference.get(tier)); + } + for (NodeDataTiersUsage nodeDataTiersUsage : nodeDataTiersUsages) { + aggregateDataTierNodeCounts(nodeDataTiersUsage, statsAccumulators); + aggregateDataTierIndexStats(nodeDataTiersUsage, statsAccumulators); + } + Map results = new HashMap<>(); + for (Map.Entry entry : statsAccumulators.entrySet()) { + results.put(entry.getKey(), aggregateFinalTierStats(entry.getValue())); + } + return results; + } + + /** + * Determine which data tiers each node belongs to (if any), and increment the node counts for those tiers. + */ + private static void aggregateDataTierNodeCounts(NodeDataTiersUsage nodeStats, Map tiersStats) { + nodeStats.getNode() + .getRoles() + .stream() + .map(DiscoveryNodeRole::roleName) + .filter(DataTier::validTierName) + .forEach(tier -> tiersStats.computeIfAbsent(tier, k -> new TierStatsAccumulator()).nodeCount++); + } + + /** + * Iterate the preferred tiers of the indices for a node and aggregate their stats. 
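+ * For example (illustrative numbers only): a node reporting usage for "data_hot" with two started
+ * primary shards of 100 and 200 bytes increments the hot accumulator's primary shard count twice,
+ * adds both sizes to its primary byte count, and feeds each size into the tier's TDigest sketch,
+ * from which the median and median absolute deviation are later derived.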
+ */ + private static void aggregateDataTierIndexStats(NodeDataTiersUsage nodeDataTiersUsage, Map<String, TierStatsAccumulator> accumulators) { + for (Map.Entry<String, NodeDataTiersUsage.UsageStats> entry : nodeDataTiersUsage.getUsageStatsByTier().entrySet()) { + String tier = entry.getKey(); + NodeDataTiersUsage.UsageStats usage = entry.getValue(); + if (DataTier.validTierName(tier)) { + TierStatsAccumulator accumulator = accumulators.computeIfAbsent(tier, k -> new TierStatsAccumulator()); + accumulator.docCount += usage.getDocCount(); + accumulator.totalByteCount += usage.getTotalSize(); + accumulator.totalShardCount += usage.getTotalShardCount(); + for (Long primaryShardSize : usage.getPrimaryShardSizes()) { + accumulator.primaryShardCount += 1; + accumulator.primaryByteCount += primaryShardSize; + accumulator.valueSketch.add(primaryShardSize); + } + } + } + } + + private static DataTiersFeatureSetUsage.TierSpecificStats aggregateFinalTierStats(TierStatsAccumulator accumulator) { + long primaryShardSizeMedian = (long) accumulator.valueSketch.quantile(0.5); + long primaryShardSizeMAD = computeMedianAbsoluteDeviation(accumulator.valueSketch); + return new DataTiersFeatureSetUsage.TierSpecificStats( + accumulator.nodeCount, + accumulator.indexNames.size(), + accumulator.totalShardCount, + accumulator.primaryShardCount, + accumulator.docCount, + accumulator.totalByteCount, + accumulator.primaryByteCount, + primaryShardSizeMedian, + primaryShardSizeMAD + ); + } + + // Visible for testing + static long computeMedianAbsoluteDeviation(TDigestState valuesSketch) { + if (valuesSketch.size() == 0) { + return 0; + } else { + final double approximateMedian = valuesSketch.quantile(0.5); + final TDigestState approximatedDeviationsSketch = TDigestState.createUsingParamsFrom(valuesSketch); + valuesSketch.centroids().forEach(centroid -> { + final double deviation = Math.abs(approximateMedian - centroid.mean()); + approximatedDeviationsSketch.add(deviation, centroid.count()); + }); + + return (long) approximatedDeviationsSketch.quantile(0.5); + } + } + + /** + * In this method we use {@link NodesDataTiersUsageTransportAction#aggregateStats(RoutingNode, Metadata, NodeIndicesStats)} + * to precalculate the stats we need from {@link NodeStats} just like we do in NodesDataTiersUsageTransportAction. + * This way we can be backwards compatible without duplicating the calculation. This is only meant to be used for + * backwards compatibility and should be removed afterwards. + */ + private static Map<String, NodeDataTiersUsage.UsageStats> precalculateLocalStatsFromNodeStats(NodeStats nodeStats, ClusterState state) { + RoutingNode routingNode = state.getRoutingNodes().node(nodeStats.getNode().getId()); + if (routingNode == null) { + return Map.of(); + } + + return NodesDataTiersUsageTransportAction.aggregateStats(routingNode, state.metadata(), nodeStats.getIndices()); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodeDataTiersUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodeDataTiersUsage.java new file mode 100644 index 0000000000000..c1903a2910629 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodeDataTiersUsage.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.core.datatiers; + +import org.elasticsearch.action.support.nodes.BaseNodeResponse; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +/** + * Data tier usage statistics on a specific node. The statistics group the indices, shard sizes, and shard counts + * by their tier preference. + */ +public class NodeDataTiersUsage extends BaseNodeResponse { + + private final Map<String, UsageStats> usageStatsByTier; + + public static class UsageStats implements Writeable { + private final List<Long> primaryShardSizes; + private int totalShardCount; + private long docCount; + private long totalSize; + + public UsageStats() { + this.primaryShardSizes = new ArrayList<>(); + this.totalShardCount = 0; + this.docCount = 0; + this.totalSize = 0; + } + + public UsageStats(List<Long> primaryShardSizes, int totalShardCount, long docCount, long totalSize) { + this.primaryShardSizes = primaryShardSizes; + this.totalShardCount = totalShardCount; + this.docCount = docCount; + this.totalSize = totalSize; + } + + static UsageStats read(StreamInput in) throws IOException { + return new UsageStats(in.readCollectionAsList(StreamInput::readVLong), in.readVInt(), in.readVLong(), in.readVLong()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(primaryShardSizes, StreamOutput::writeVLong); + out.writeVInt(totalShardCount); + out.writeVLong(docCount); + out.writeVLong(totalSize); + } + + public void addPrimaryShardSize(long primaryShardSize) { + primaryShardSizes.add(primaryShardSize); + } + + public void incrementTotalSize(long totalSize) { + this.totalSize += totalSize; + } + + public void incrementDocCount(long docCount) { + this.docCount += docCount; + } + + public void incrementTotalShardCount(int totalShardCount) { + this.totalShardCount += totalShardCount; + } + + public List<Long> getPrimaryShardSizes() { + return primaryShardSizes; + } + + public int getTotalShardCount() { + return totalShardCount; + } + + public long getDocCount() { + return docCount; + } + + public long getTotalSize() { + return totalSize; + } + } + + public NodeDataTiersUsage(StreamInput in) throws IOException { + super(in); + usageStatsByTier = in.readMap(UsageStats::read); + } + + public NodeDataTiersUsage(DiscoveryNode node, Map<String, UsageStats> usageStatsByTier) { + super(node); + this.usageStatsByTier = usageStatsByTier; + } + + public Map<String, UsageStats> getUsageStatsByTier() { + return Map.copyOf(usageStatsByTier); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(usageStatsByTier, (o, v) -> v.writeTo(o)); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodesDataTiersUsageTransportAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodesDataTiersUsageTransportAction.java new file mode 100644 index 0000000000000..06a3b47d47a65 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodesDataTiersUsageTransportAction.java @@ -0,0 +1,216 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements.
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.datatiers; + +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; +import org.elasticsearch.action.admin.indices.stats.IndexShardStats; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.nodes.BaseNodesRequest; +import org.elasticsearch.action.support.nodes.BaseNodesResponse; +import org.elasticsearch.action.support.nodes.TransportNodesAction; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.store.StoreStats; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.NodeIndicesStats; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; + +/** + * Locally sources data tier usage stats (mainly indices and shard sizes) grouped by preferred data tier.
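+ * Each node computes its per-tier usage (doc counts, store sizes, started-shard counts, and primary
+ * shard sizes) from its local {@link IndicesService} stats and its own routing entries, so the
+ * master-side usage action no longer needs full node-stats responses; the
+ * LOCALLY_PRECALCULATED_STATS_FEATURE node feature gates this path during rolling upgrades.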
+ */ +public class NodesDataTiersUsageTransportAction extends TransportNodesAction< + NodesDataTiersUsageTransportAction.NodesRequest, + NodesDataTiersUsageTransportAction.NodesResponse, + NodesDataTiersUsageTransportAction.NodeRequest, + NodeDataTiersUsage> { + + public static final ActionType<NodesResponse> TYPE = ActionType.localOnly("cluster:monitor/nodes/data_tier_usage"); + public static final NodeFeature LOCALLY_PRECALCULATED_STATS_FEATURE = new NodeFeature("usage.data_tiers.precalculate_stats"); + + private static final CommonStatsFlags STATS_FLAGS = new CommonStatsFlags().clear() + .set(CommonStatsFlags.Flag.Docs, true) + .set(CommonStatsFlags.Flag.Store, true); + + private final IndicesService indicesService; + + @Inject + public NodesDataTiersUsageTransportAction( + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + IndicesService indicesService, + ActionFilters actionFilters + ) { + super( + TYPE.name(), + clusterService, + transportService, + actionFilters, + NodeRequest::new, + threadPool.executor(ThreadPool.Names.MANAGEMENT) + ); + this.indicesService = indicesService; + } + + @Override + protected NodesResponse newResponse(NodesRequest request, List<NodeDataTiersUsage> responses, List<FailedNodeException> failures) { + return new NodesResponse(clusterService.getClusterName(), responses, failures); + } + + @Override + protected NodeRequest newNodeRequest(NodesRequest request) { + return NodeRequest.INSTANCE; + } + + @Override + protected NodeDataTiersUsage newNodeResponse(StreamInput in, DiscoveryNode node) throws IOException { + return new NodeDataTiersUsage(in); + } + + @Override + protected NodeDataTiersUsage nodeOperation(NodeRequest nodeRequest, Task task) { + assert task instanceof CancellableTask; + + DiscoveryNode localNode = clusterService.localNode(); + NodeIndicesStats nodeIndicesStats = indicesService.stats(STATS_FLAGS, true); + ClusterState state = clusterService.state(); + RoutingNode routingNode = state.getRoutingNodes().node(localNode.getId()); + Map<String, NodeDataTiersUsage.UsageStats> usageStatsByTier = aggregateStats(routingNode, state.metadata(), nodeIndicesStats); + return new NodeDataTiersUsage(clusterService.localNode(), usageStatsByTier); + } + + // For bwc & testing purposes + static Map<String, NodeDataTiersUsage.UsageStats> aggregateStats( + RoutingNode routingNode, + Metadata metadata, + NodeIndicesStats nodeIndicesStats + ) { + if (routingNode == null) { + return Map.of(); + } + Map<String, NodeDataTiersUsage.UsageStats> usageStatsByTier = new HashMap<>(); + Set<String> localIndices = StreamSupport.stream(routingNode.spliterator(), false) + .map(routing -> routing.index().getName()) + .collect(Collectors.toSet()); + for (String indexName : localIndices) { + IndexMetadata indexMetadata = metadata.index(indexName); + if (indexMetadata == null) { + continue; + } + String tier = indexMetadata.getTierPreference().isEmpty() ?
null : indexMetadata.getTierPreference().get(0); + if (tier != null) { + NodeDataTiersUsage.UsageStats usageStats = usageStatsByTier.computeIfAbsent( + tier, + ignored -> new NodeDataTiersUsage.UsageStats() + ); + List<IndexShardStats> allShardStats = nodeIndicesStats.getShardStats(indexMetadata.getIndex()); + if (allShardStats != null) { + for (IndexShardStats indexShardStats : allShardStats) { + usageStats.incrementTotalSize(indexShardStats.getTotal().getStore().totalDataSetSizeInBytes()); + usageStats.incrementDocCount(indexShardStats.getTotal().getDocs().getCount()); + + ShardRouting shardRouting = routingNode.getByShardId(indexShardStats.getShardId()); + if (shardRouting != null && shardRouting.state() == ShardRoutingState.STARTED) { + usageStats.incrementTotalShardCount(1); + + // Accumulate stats about started primary shards + StoreStats primaryStoreStats = indexShardStats.getPrimary().getStore(); + if (shardRouting.primary() && primaryStoreStats != null) { + usageStats.addPrimaryShardSize(primaryStoreStats.totalDataSetSizeInBytes()); + } + } + } + } + } + } + return usageStatsByTier; + } + + public static class NodesRequest extends BaseNodesRequest<NodesRequest> { + + public NodesRequest() { + super((String[]) null); + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) { + return new CancellableTask(id, type, action, "", parentTaskId, headers); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } + } + + public static class NodeRequest extends TransportRequest { + + static final NodeRequest INSTANCE = new NodeRequest(); + + public NodeRequest(StreamInput in) throws IOException { + super(in); + } + + public NodeRequest() { + + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) { + return new CancellableTask(id, type, action, "", parentTaskId, headers); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } + } + + public static class NodesResponse extends BaseNodesResponse<NodeDataTiersUsage> { + + public NodesResponse(ClusterName clusterName, List<NodeDataTiersUsage> nodes, List<FailedNodeException> failures) { + super(clusterName, nodes, failures); + } + + @Override + protected List<NodeDataTiersUsage> readNodesFrom(StreamInput in) throws IOException { + return in.readCollectionAsList(NodeDataTiersUsage::new); + } + + @Override + protected void writeNodesTo(StreamOutput out, List<NodeDataTiersUsage> nodes) throws IOException { + out.writeCollection(nodes); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java index a72cbad790a68..22a2c3a880ce5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.core.ilm; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.client.internal.Client; @@ -32,6 +34,7 @@ import java.util.Objects; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.action.downsample.DownsampleConfig.generateDownsampleIndexName; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; import static
org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; @@ -40,6 +43,8 @@ */ public class DownsampleAction implements LifecycleAction { + private static final Logger logger = LogManager.getLogger(DownsampleAction.class); + public static final String NAME = "downsample"; public static final String DOWNSAMPLED_INDEX_PREFIX = "downsample-"; public static final String CONDITIONAL_TIME_SERIES_CHECK_KEY = BranchingStep.NAME + "-on-timeseries-check"; @@ -155,7 +160,30 @@ public List toSteps(Client client, String phase, StepKey nextStepKey) { (index, clusterState) -> { IndexMetadata indexMetadata = clusterState.metadata().index(index); assert indexMetadata != null : "invalid cluster metadata. index [" + index.getName() + "] metadata not found"; - return IndexSettings.MODE.get(indexMetadata.getSettings()) == IndexMode.TIME_SERIES; + if (IndexSettings.MODE.get(indexMetadata.getSettings()) != IndexMode.TIME_SERIES) { + return false; + } + + if (index.getName().equals(generateDownsampleIndexName(DOWNSAMPLED_INDEX_PREFIX, indexMetadata, fixedInterval))) { + var downsampleStatus = IndexMetadata.INDEX_DOWNSAMPLE_STATUS.get(indexMetadata.getSettings()); + if (downsampleStatus == IndexMetadata.DownsampleTaskStatus.UNKNOWN) { + // This isn't a downsample index, but it has the name of our target downsample index - very bad, we'll skip the + // downsample action to avoid blocking the lifecycle of this index - if there + // is another downsample action configured in the next phase, it'll be able to proceed successfully + logger.warn( + "index [{}] as part of policy [{}] cannot be downsampled at interval [{}] in phase [{}] because it has" + + " the name of the target downsample index and is itself not a downsampled index. Skipping the downsample " + + "action.", + index.getName(), + indexMetadata.getLifecyclePolicyName(), + fixedInterval, + phase + ); + } + return false; + } + + return true; } ); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/Phase.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/Phase.java index 76c2499e6847f..abb509805b60b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/Phase.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/Phase.java @@ -8,13 +8,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ContextParser; import org.elasticsearch.xcontent.ObjectParser.ValueType; @@ -56,7 +56,7 @@ public class Phase implements ToXContentObject, Writeable { // when the phase is read from the cluster state during startup (even before negative timevalues were strictly // disallowed) so this is a hack to treat negative `min_age`s as 0 to prevent those errors. // They will be saved as `0` so this hack can be removed once we no longer have to read cluster states from 7.x. 
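A minimal sketch of the lenient min_age handling described in the comment above — an illustration only, not the exact ILM parser code; the helper name is an assumption:

import org.elasticsearch.core.TimeValue;

// Sketch: negative min_age strings (possible in cluster states written by old
// 7.x nodes) are coerced to zero rather than rejected; the real parser also
// logs a warning before treating the value as 0.
static TimeValue parseMinAgeLeniently(String timeValueString) {
    if (timeValueString.startsWith("-")) {
        return TimeValue.ZERO;
    }
    return TimeValue.parseTimeValue(timeValueString, "min_age");
}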
- assert Version.CURRENT.major < 9 : "remove this hack now that we don't have to read 7.x cluster states"; + @UpdateForV9 // remove this hack now that we don't have to read 7.x cluster states final String timeValueString = p.text(); if (timeValueString.startsWith("-")) { logger.warn("phase has negative min_age value of [{}] - this will be treated as a min_age of 0", timeValueString); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlConfigVersion.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlConfigVersion.java index 7ab8e41cd2453..ffaa8489929ff 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlConfigVersion.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlConfigVersion.java @@ -150,13 +150,14 @@ private static void checkUniqueness(int id, String uniqueId) { public static final MlConfigVersion V_10 = registerMlConfigVersion(10_00_00_99, "4B940FD9-BEDD-4589-8E08-02D9B480B22D"); // V_11 is used in ELSER v2 package configs - public static final MlConfigVersion V_11 = registerMlConfigVersion(11_00_00_99, "79CB2950-57C7-11EE-AE5D-0800200C9A66"); + public static final MlConfigVersion V_11 = registerMlConfigVersion(11_00_0_0_99, "79CB2950-57C7-11EE-AE5D-0800200C9A66"); + public static final MlConfigVersion V_12 = registerMlConfigVersion(12_00_0_0_99, "Trained model config prefix strings added"); /** * Reference to the most recent Ml config version. * This should be the Ml config version with the highest id. */ - public static final MlConfigVersion CURRENT = V_11; + public static final MlConfigVersion CURRENT = V_12; /** * Reference to the first MlConfigVersion that is detached from the diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java index 70ab69ae94e19..7cef2bed04ce3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeTaskState; import org.elasticsearch.xpack.core.ml.utils.MemoryTrackedTaskState; +import java.time.Instant; import java.util.Collection; import java.util.Collections; import java.util.Set; @@ -194,6 +195,17 @@ public static JobState getJobStateModifiedForReassignments(@Nullable PersistentT return jobState; } + public static Instant getLastJobTaskStateChangeTime(String jobId, @Nullable PersistentTasksCustomMetadata tasks) { + PersistentTasksCustomMetadata.PersistentTask<?> task = getJobTask(jobId, tasks); + if (task != null) { + JobTaskState jobTaskState = (JobTaskState) task.getState(); + if (jobTaskState != null) { + return jobTaskState.getLastStateChangeTime(); + } + } + return null; + } + + public static SnapshotUpgradeState getSnapshotUpgradeState( String jobId, String snapshotId, @@ -260,6 +272,17 @@ public static DataFrameAnalyticsState getDataFrameAnalyticsState(@Nullable Persi return state; } + public static Instant getLastDataFrameAnalyticsTaskStateChangeTime(String analyticsId, @Nullable PersistentTasksCustomMetadata tasks) { + PersistentTasksCustomMetadata.PersistentTask<?> task = getDataFrameAnalyticsTask(analyticsId, tasks); + if (task != null) { + DataFrameAnalyticsTaskState taskState = (DataFrameAnalyticsTaskState) task.getState(); + if (taskState != null) { + return taskState.getLastStateChangeTime(); + } + } + return null; + } +
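A hedged usage sketch for the two last-state-change helpers added above; the job id and the cluster-state lookup are illustrative assumptions, not part of this change:

import java.time.Instant;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.persistent.PersistentTasksCustomMetadata;
import org.elasticsearch.xpack.core.ml.MlTasks;

// Both helpers are null-safe: they return null when the task, its state, or
// the timestamp (e.g. task states written before this change) is missing.
static Instant lastJobStateChange(ClusterState state, String jobId) {
    PersistentTasksCustomMetadata tasks = state.metadata().custom(PersistentTasksCustomMetadata.TYPE);
    return MlTasks.getLastJobTaskStateChangeTime(jobId, tasks);
}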
/** * The job Ids of anomaly detector job tasks. * All anomaly detector jobs are returned regardless of the status of the diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferModelAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferModelAction.java index 61e52935f46e9..296aec12b1a63 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferModelAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferModelAction.java @@ -23,6 +23,7 @@ import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelPrefixStrings; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.EmptyConfigUpdate; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfigUpdate; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -88,6 +89,7 @@ public static Builder parseRequest(String id, XContentParser parser) { // input and so cannot construct a document. private final List textInput; private boolean highPriority; + private TrainedModelPrefixStrings.PrefixType prefixType = TrainedModelPrefixStrings.PrefixType.NONE; /** * Build a request from a list of documents as maps. @@ -190,6 +192,11 @@ public Request(StreamInput in) throws IOException { if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { highPriority = in.readBoolean(); } + if (in.getTransportVersion().onOrAfter(TransportVersions.ML_TRAINED_MODEL_PREFIX_STRINGS_ADDED)) { + prefixType = in.readEnum(TrainedModelPrefixStrings.PrefixType.class); + } else { + prefixType = TrainedModelPrefixStrings.PrefixType.NONE; + } } public int numberOfDocuments() { @@ -232,6 +239,14 @@ public void setHighPriority(boolean highPriority) { this.highPriority = highPriority; } + public void setPrefixType(TrainedModelPrefixStrings.PrefixType prefixType) { + this.prefixType = prefixType; + } + + public TrainedModelPrefixStrings.PrefixType getPrefixType() { + return prefixType; + } + @Override public ActionRequestValidationException validate() { return null; @@ -253,6 +268,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeBoolean(highPriority); } + if (out.getTransportVersion().onOrAfter(TransportVersions.ML_TRAINED_MODEL_PREFIX_STRINGS_ADDED)) { + out.writeEnum(prefixType); + } } @Override @@ -266,7 +284,8 @@ public boolean equals(Object o) { && Objects.equals(inferenceTimeout, that.inferenceTimeout) && Objects.equals(objectsToInfer, that.objectsToInfer) && Objects.equals(textInput, that.textInput) - && (highPriority == that.highPriority); + && (highPriority == that.highPriority) + && (prefixType == that.prefixType); } @Override @@ -276,7 +295,7 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, @Override public int hashCode() { - return Objects.hash(id, objectsToInfer, update, previouslyLicensed, inferenceTimeout, textInput, highPriority); + return Objects.hash(id, objectsToInfer, update, previouslyLicensed, inferenceTimeout, textInput, highPriority, prefixType); } public static class Builder { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferTrainedModelDeploymentAction.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferTrainedModelDeploymentAction.java index 524d5f84a177b..806f935d5f394 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferTrainedModelDeploymentAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferTrainedModelDeploymentAction.java @@ -26,6 +26,7 @@ import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelPrefixStrings; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.EmptyConfigUpdate; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfigUpdate; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -102,6 +103,7 @@ public static Request.Builder parseRequest(String id, XContentParser parser) { // and do know which field the model expects to find its // input and so cannot construct a document. private final List textInput; + private TrainedModelPrefixStrings.PrefixType prefixType = TrainedModelPrefixStrings.PrefixType.NONE; public static Request forDocs(String id, InferenceConfigUpdate update, List> docs, TimeValue inferenceTimeout) { return new Request( @@ -156,6 +158,11 @@ public Request(StreamInput in) throws IOException { } else { textInput = null; } + if (in.getTransportVersion().onOrAfter(TransportVersions.ML_TRAINED_MODEL_PREFIX_STRINGS_ADDED)) { + prefixType = in.readEnum(TrainedModelPrefixStrings.PrefixType.class); + } else { + prefixType = TrainedModelPrefixStrings.PrefixType.NONE; + } } public String getId() { @@ -200,6 +207,14 @@ public boolean isHighPriority() { return highPriority; } + public void setPrefixType(TrainedModelPrefixStrings.PrefixType prefixType) { + this.prefixType = prefixType; + } + + public TrainedModelPrefixStrings.PrefixType getPrefixType() { + return prefixType; + } + @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = super.validate(); @@ -226,6 +241,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { out.writeOptionalStringCollection(textInput); } + if (out.getTransportVersion().onOrAfter(TransportVersions.ML_TRAINED_MODEL_PREFIX_STRINGS_ADDED)) { + out.writeEnum(prefixType); + } } @Override @@ -243,12 +261,13 @@ public boolean equals(Object o) { && Objects.equals(update, that.update) && Objects.equals(inferenceTimeout, that.inferenceTimeout) && Objects.equals(highPriority, that.highPriority) - && Objects.equals(textInput, that.textInput); + && Objects.equals(textInput, that.textInput) + && (prefixType == that.prefixType); } @Override public int hashCode() { - return Objects.hash(id, update, docs, inferenceTimeout, highPriority, textInput); + return Objects.hash(id, update, docs, inferenceTimeout, highPriority, textInput, prefixType); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/DataExtractor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/DataExtractor.java index e432c8d8a9e14..ba2043a17767f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/DataExtractor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/DataExtractor.java @@ -41,6 +41,11 @@ record 
Result(SearchInterval searchInterval, Optional data) {} */ void cancel(); + /** + * Cancels and immediately destroys the data extractor, releasing all its resources. + */ + void destroy(); + /** * @return the end time to which this extractor will search */ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsTaskState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsTaskState.java index e6fdc7886ce53..8d4b601a38aad 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsTaskState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsTaskState.java @@ -6,42 +6,57 @@ */ package org.elasticsearch.xpack.core.ml.dataframe; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.utils.MlTaskState; import java.io.IOException; +import java.time.Instant; import java.util.Objects; -public class DataFrameAnalyticsTaskState implements PersistentTaskState { +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public class DataFrameAnalyticsTaskState implements PersistentTaskState, MlTaskState { public static final String NAME = MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME; - private static ParseField STATE = new ParseField("state"); - private static ParseField ALLOCATION_ID = new ParseField("allocation_id"); - private static ParseField REASON = new ParseField("reason"); + private static final ParseField STATE = new ParseField("state"); + private static final ParseField ALLOCATION_ID = new ParseField("allocation_id"); + private static final ParseField REASON = new ParseField("reason"); + private static final ParseField LAST_STATE_CHANGE_TIME = new ParseField("last_state_change_time"); private final DataFrameAnalyticsState state; private final long allocationId; private final String reason; + private final Instant lastStateChangeTime; private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( NAME, true, - a -> new DataFrameAnalyticsTaskState((DataFrameAnalyticsState) a[0], (long) a[1], (String) a[2]) + a -> new DataFrameAnalyticsTaskState((DataFrameAnalyticsState) a[0], (long) a[1], (String) a[2], (Instant) a[3]) ); static { PARSER.declareString(ConstructingObjectParser.constructorArg(), DataFrameAnalyticsState::fromString, STATE); PARSER.declareLong(ConstructingObjectParser.constructorArg(), ALLOCATION_ID); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), REASON); + PARSER.declareField( + optionalConstructorArg(), + p -> TimeUtils.parseTimeFieldToInstant(p, LAST_STATE_CHANGE_TIME.getPreferredName()), + LAST_STATE_CHANGE_TIME, + ObjectParser.ValueType.VALUE + ); } public static DataFrameAnalyticsTaskState fromXContent(XContentParser parser) { @@ -52,27 +67,49 @@ 
public static DataFrameAnalyticsTaskState fromXContent(XContentParser parser) { } } - public DataFrameAnalyticsTaskState(DataFrameAnalyticsState state, long allocationId, @Nullable String reason) { + public DataFrameAnalyticsTaskState( + DataFrameAnalyticsState state, + long allocationId, + @Nullable String reason, + @Nullable Instant lastStateChangeTime + ) { this.state = Objects.requireNonNull(state); this.allocationId = allocationId; this.reason = reason; + // Round to millisecond to avoid serialization round trip differences + this.lastStateChangeTime = (lastStateChangeTime != null) ? Instant.ofEpochMilli(lastStateChangeTime.toEpochMilli()) : null; } public DataFrameAnalyticsTaskState(StreamInput in) throws IOException { this.state = DataFrameAnalyticsState.fromStream(in); this.allocationId = in.readLong(); this.reason = in.readOptionalString(); + if (in.getTransportVersion().onOrAfter(TransportVersions.ML_STATE_CHANGE_TIMESTAMPS)) { + lastStateChangeTime = in.readOptionalInstant(); + } else { + lastStateChangeTime = null; + } } public DataFrameAnalyticsState getState() { return state; } + public long getAllocationId() { + return allocationId; + } + @Nullable public String getReason() { return reason; } + @Override + @Nullable + public Instant getLastStateChangeTime() { + return lastStateChangeTime; + } + public boolean isStatusStale(PersistentTasksCustomMetadata.PersistentTask task) { return allocationId != task.getAllocationId(); } @@ -87,6 +124,9 @@ public void writeTo(StreamOutput out) throws IOException { state.writeTo(out); out.writeLong(allocationId); out.writeOptionalString(reason); + if (out.getTransportVersion().onOrAfter(TransportVersions.ML_STATE_CHANGE_TIMESTAMPS)) { + out.writeOptionalInstant(lastStateChangeTime); + } } @Override @@ -97,6 +137,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (reason != null) { builder.field(REASON.getPreferredName(), reason); } + if (lastStateChangeTime != null) { + builder.timeField( + LAST_STATE_CHANGE_TIME.getPreferredName(), + LAST_STATE_CHANGE_TIME.getPreferredName() + "_string", + lastStateChangeTime.toEpochMilli() + ); + } builder.endObject(); return builder; } @@ -106,11 +153,14 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; DataFrameAnalyticsTaskState that = (DataFrameAnalyticsTaskState) o; - return allocationId == that.allocationId && state == that.state && Objects.equals(reason, that.reason); + return allocationId == that.allocationId + && state == that.state + && Objects.equals(reason, that.reason) + && Objects.equals(lastStateChangeTime, that.lastStateChangeTime); } @Override public int hashCode() { - return Objects.hash(state, allocationId, reason); + return Objects.hash(state, allocationId, reason, lastStateChangeTime); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfig.java index 9dfa2d51f0fc0..b469d35b90383 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfig.java @@ -100,6 +100,7 @@ public class TrainedModelConfig implements ToXContentObject, Writeable { public static final ParseField INFERENCE_CONFIG = new ParseField("inference_config"); public static final ParseField LOCATION = new 
ParseField("location"); public static final ParseField MODEL_PACKAGE = new ParseField("model_package"); + public static final ParseField PREFIX_STRINGS = new ParseField("prefix_strings"); public static final ParseField PER_DEPLOYMENT_MEMORY_BYTES = new ParseField("per_deployment_memory_bytes"); public static final ParseField PER_ALLOCATION_MEMORY_BYTES = new ParseField("per_allocation_memory_bytes"); @@ -170,6 +171,11 @@ private static ObjectParser createParser(boole MODEL_PACKAGE ); parser.declareString(TrainedModelConfig.Builder::setPlatformArchitecture, PLATFORM_ARCHITECTURE); + parser.declareObject( + TrainedModelConfig.Builder::setPrefixStrings, + (p, c) -> TrainedModelPrefixStrings.fromXContent(p, ignoreUnknownFields), + PREFIX_STRINGS + ); return parser; } @@ -198,6 +204,7 @@ public static TrainedModelConfig.Builder fromXContent(XContentParser parser, boo private final ModelPackageConfig modelPackageConfig; private Boolean fullDefinition; private String platformArchitecture; + private TrainedModelPrefixStrings prefixStrings; TrainedModelConfig( String modelId, @@ -217,7 +224,8 @@ public static TrainedModelConfig.Builder fromXContent(XContentParser parser, boo InferenceConfig inferenceConfig, TrainedModelLocation location, ModelPackageConfig modelPackageConfig, - String platformArchitecture + String platformArchitecture, + TrainedModelPrefixStrings prefixStrings ) { this.modelId = ExceptionsHelper.requireNonNull(modelId, MODEL_ID); this.modelType = modelType; @@ -245,6 +253,7 @@ public static TrainedModelConfig.Builder fromXContent(XContentParser parser, boo this.location = location; this.modelPackageConfig = modelPackageConfig; this.platformArchitecture = platformArchitecture; + this.prefixStrings = prefixStrings; } private static TrainedModelInput handleDefaultInput(TrainedModelInput input, TrainedModelType modelType) { @@ -289,6 +298,9 @@ public TrainedModelConfig(StreamInput in) throws IOException { } else { platformArchitecture = null; } + if (in.getTransportVersion().onOrAfter(TransportVersions.ML_TRAINED_MODEL_PREFIX_STRINGS_ADDED)) { + prefixStrings = in.readOptionalWriteable(TrainedModelPrefixStrings::new); + } } public boolean isPackagedModel() { @@ -435,6 +447,10 @@ public String getPlatformArchitecture() { return platformArchitecture; } + public TrainedModelPrefixStrings getPrefixStrings() { + return prefixStrings; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(modelId); @@ -469,6 +485,10 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.ML_TRAINED_MODEL_CONFIG_PLATFORM_ADDED)) { out.writeOptionalString(platformArchitecture); } + + if (out.getTransportVersion().onOrAfter(TransportVersions.ML_TRAINED_MODEL_PREFIX_STRINGS_ADDED)) { + out.writeOptionalWriteable(prefixStrings); + } } @Override @@ -531,6 +551,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (location != null) { writeNamedObject(builder, params, LOCATION.getPreferredName(), location); } + if (prefixStrings != null) { + builder.field(PREFIX_STRINGS.getPreferredName(), prefixStrings); + } if (params.paramAsBoolean(DEFINITION_STATUS, false) && fullDefinition != null) { builder.field("fully_defined", fullDefinition); } @@ -565,7 +588,8 @@ public boolean equals(Object o) { && Objects.equals(inferenceConfig, that.inferenceConfig) && Objects.equals(metadata, that.metadata) && Objects.equals(location, that.location) - && Objects.equals(platformArchitecture, 
that.platformArchitecture); + && Objects.equals(platformArchitecture, that.platformArchitecture) + && Objects.equals(prefixStrings, that.prefixStrings); } @Override @@ -588,7 +612,8 @@ public int hashCode() { inferenceConfig, defaultFieldMap, location, - platformArchitecture + platformArchitecture, + prefixStrings ); } @@ -614,6 +639,7 @@ public static class Builder { private Long perDeploymentMemoryBytes; private Long perAllocationMemoryBytes; private String platformArchitecture; + private TrainedModelPrefixStrings prefixStrings; public Builder() {} @@ -636,6 +662,7 @@ public Builder(TrainedModelConfig config) { this.location = config.location; this.modelPackageConfig = config.modelPackageConfig; this.platformArchitecture = config.platformArchitecture; + this.prefixStrings = config.prefixStrings; } public Builder setModelId(String modelId) { @@ -733,6 +760,11 @@ public Builder setPlatformArchitecture(String platformArchitecture) { return this; } + public Builder setPrefixStrings(TrainedModelPrefixStrings prefixStrings) { + this.prefixStrings = prefixStrings; + return this; + } + public Builder setModelAliases(Set modelAliases) { if (modelAliases == null || modelAliases.isEmpty()) { return this; @@ -1053,7 +1085,8 @@ public TrainedModelConfig build() { inferenceConfig, location, modelPackageConfig, - platformArchitecture + platformArchitecture, + prefixStrings ); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelPrefixStrings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelPrefixStrings.java new file mode 100644 index 0000000000000..749cbb4a7c1ea --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelPrefixStrings.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.ml.inference; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; + +public record TrainedModelPrefixStrings(String ingestPrefix, String searchPrefix) implements ToXContentObject, Writeable { + + public enum PrefixType { + INGEST, + SEARCH, + NONE + } + + public static final ParseField INGEST_PREFIX = new ParseField("ingest"); + public static final ParseField SEARCH_PREFIX = new ParseField("search"); + public static final String NAME = "trained_model_config_prefix_strings"; + + private static final ConstructingObjectParser<TrainedModelPrefixStrings, Void> LENIENT_PARSER = createParser(true); + private static final ConstructingObjectParser<TrainedModelPrefixStrings, Void> STRICT_PARSER = createParser(false); + + @SuppressWarnings("unchecked") + private static ConstructingObjectParser<TrainedModelPrefixStrings, Void> createParser(boolean ignoreUnknownFields) { + ConstructingObjectParser<TrainedModelPrefixStrings, Void> parser = new ConstructingObjectParser<>( + NAME, + ignoreUnknownFields, + a -> new TrainedModelPrefixStrings((String) a[0], (String) a[1]) + ); + parser.declareString(ConstructingObjectParser.optionalConstructorArg(), INGEST_PREFIX); + parser.declareString(ConstructingObjectParser.optionalConstructorArg(), SEARCH_PREFIX); + return parser; + } + + public static TrainedModelPrefixStrings fromXContent(XContentParser parser, boolean lenient) throws IOException { + return lenient ? LENIENT_PARSER.parse(parser, null) : STRICT_PARSER.parse(parser, null); + } + + public TrainedModelPrefixStrings(StreamInput in) throws IOException { + this(in.readOptionalString(), in.readOptionalString()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (ingestPrefix != null) { + builder.field(INGEST_PREFIX.getPreferredName(), ingestPrefix); + } + if (searchPrefix != null) { + builder.field(SEARCH_PREFIX.getPreferredName(), searchPrefix); + } + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalString(ingestPrefix); + out.writeOptionalString(searchPrefix); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/persistence/InferenceIndexConstants.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/persistence/InferenceIndexConstants.java index 6c8fc6fec4e0e..49ffca8f32d26 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/persistence/InferenceIndexConstants.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/persistence/InferenceIndexConstants.java @@ -50,7 +50,8 @@ public final class InferenceIndexConstants { private static final String MAPPINGS_VERSION_VARIABLE = "xpack.ml.version"; // 2 added support for platform specific models - public static final int INFERENCE_INDEX_MAPPINGS_VERSION = 2; + // 3 added prefix strings configuration + public static final int INFERENCE_INDEX_MAPPINGS_VERSION = 3; public static String mapping() { return TemplateUtils.loadTemplate( diff --git
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfig.java index 25a2055e00f68..5ce5b0188771b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfig.java @@ -61,6 +61,10 @@ public static LearnToRankConfig fromXContentLenient(XContentParser parser) { return LENIENT_PARSER.apply(parser, null).build(); } + public static Builder builder(LearnToRankConfig config) { + return new Builder(config); + } + private final List featureExtractorBuilders; public LearnToRankConfig(Integer numTopFeatureImportanceValues, List featureExtractorBuilders) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigUpdate.java deleted file mode 100644 index b4241f1704520..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigUpdate.java +++ /dev/null @@ -1,248 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.core.ml.inference.trainedmodel; - -import org.elasticsearch.TransportVersion; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.query.QueryRewriteContext; -import org.elasticsearch.index.query.Rewriteable; -import org.elasticsearch.xcontent.ObjectParser; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.LearnToRankFeatureExtractorBuilder; -import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.core.ml.utils.NamedXContentObject; -import org.elasticsearch.xpack.core.ml.utils.NamedXContentObjectHelper; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.stream.Collectors; - -import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig.DEFAULT_RESULTS_FIELD; -import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfig.FEATURE_EXTRACTORS; -import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfig.NUM_TOP_FEATURE_IMPORTANCE_VALUES; - -public class LearnToRankConfigUpdate implements InferenceConfigUpdate, NamedXContentObject, Rewriteable { - - public static final ParseField NAME = LearnToRankConfig.NAME; - - public static LearnToRankConfigUpdate EMPTY_PARAMS = new LearnToRankConfigUpdate(null, null); - - public static LearnToRankConfigUpdate fromConfig(LearnToRankConfig config) { - return new LearnToRankConfigUpdate(config.getNumTopFeatureImportanceValues(), config.getFeatureExtractorBuilders()); - } - - private static final ObjectParser STRICT_PARSER = createParser(false); - - private 
static ObjectParser createParser(boolean lenient) { - ObjectParser parser = new ObjectParser<>( - NAME.getPreferredName(), - lenient, - LearnToRankConfigUpdate.Builder::new - ); - parser.declareInt(LearnToRankConfigUpdate.Builder::setNumTopFeatureImportanceValues, NUM_TOP_FEATURE_IMPORTANCE_VALUES); - parser.declareNamedObjects( - LearnToRankConfigUpdate.Builder::setFeatureExtractorBuilders, - (p, c, n) -> p.namedObject(LearnToRankFeatureExtractorBuilder.class, n, false), - b -> {}, - FEATURE_EXTRACTORS - ); - return parser; - } - - public static LearnToRankConfigUpdate fromXContentStrict(XContentParser parser) { - return STRICT_PARSER.apply(parser, null).build(); - } - - private final Integer numTopFeatureImportanceValues; - private final List featureExtractorBuilderList; - - public LearnToRankConfigUpdate( - Integer numTopFeatureImportanceValues, - List featureExtractorBuilders - ) { - if (numTopFeatureImportanceValues != null && numTopFeatureImportanceValues < 0) { - throw new IllegalArgumentException( - "[" + NUM_TOP_FEATURE_IMPORTANCE_VALUES.getPreferredName() + "] must be greater than or equal to 0" - ); - } - if (featureExtractorBuilders != null) { - Set featureNames = featureExtractorBuilders.stream() - .map(LearnToRankFeatureExtractorBuilder::featureName) - .collect(Collectors.toSet()); - if (featureNames.size() < featureExtractorBuilders.size()) { - throw new IllegalArgumentException( - "[" + FEATURE_EXTRACTORS.getPreferredName() + "] contains duplicate [feature_name] values" - ); - } - } - this.numTopFeatureImportanceValues = numTopFeatureImportanceValues; - this.featureExtractorBuilderList = featureExtractorBuilders == null ? List.of() : featureExtractorBuilders; - } - - public LearnToRankConfigUpdate(StreamInput in) throws IOException { - this.numTopFeatureImportanceValues = in.readOptionalVInt(); - this.featureExtractorBuilderList = in.readNamedWriteableCollectionAsList(LearnToRankFeatureExtractorBuilder.class); - } - - public Integer getNumTopFeatureImportanceValues() { - return numTopFeatureImportanceValues; - } - - @Override - public String getResultsField() { - return DEFAULT_RESULTS_FIELD; - } - - @Override - public InferenceConfigUpdate.Builder, ? 
extends InferenceConfigUpdate> newBuilder() { - return new Builder().setNumTopFeatureImportanceValues(numTopFeatureImportanceValues); - } - - @Override - public String getWriteableName() { - return NAME.getPreferredName(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalVInt(numTopFeatureImportanceValues); - out.writeNamedWriteableCollection(featureExtractorBuilderList); - } - - @Override - public String getName() { - return NAME.getPreferredName(); - } - - @Override - public TransportVersion getMinimalSupportedVersion() { - return LearnToRankConfig.MIN_SUPPORTED_TRANSPORT_VERSION; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - if (numTopFeatureImportanceValues != null) { - builder.field(NUM_TOP_FEATURE_IMPORTANCE_VALUES.getPreferredName(), numTopFeatureImportanceValues); - } - if (featureExtractorBuilderList.isEmpty() == false) { - NamedXContentObjectHelper.writeNamedObjects( - builder, - params, - true, - FEATURE_EXTRACTORS.getPreferredName(), - featureExtractorBuilderList - ); - } - builder.endObject(); - return builder; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - LearnToRankConfigUpdate that = (LearnToRankConfigUpdate) o; - return Objects.equals(this.numTopFeatureImportanceValues, that.numTopFeatureImportanceValues) - && Objects.equals(this.featureExtractorBuilderList, that.featureExtractorBuilderList); - } - - @Override - public int hashCode() { - return Objects.hash(numTopFeatureImportanceValues, featureExtractorBuilderList); - } - - @Override - public LearnToRankConfig apply(InferenceConfig originalConfig) { - if (originalConfig instanceof LearnToRankConfig == false) { - throw ExceptionsHelper.badRequestException( - "Inference config of type [{}] can not be updated with a inference request of type [{}]", - originalConfig.getName(), - getName() - ); - } - - LearnToRankConfig ltrConfig = (LearnToRankConfig) originalConfig; - if (isNoop(ltrConfig)) { - return ltrConfig; - } - LearnToRankConfig.Builder builder = new LearnToRankConfig.Builder(ltrConfig); - if (numTopFeatureImportanceValues != null) { - builder.setNumTopFeatureImportanceValues(numTopFeatureImportanceValues); - } - if (featureExtractorBuilderList.isEmpty() == false) { - Map existingExtractors = ltrConfig.getFeatureExtractorBuilders() - .stream() - .collect(Collectors.toMap(LearnToRankFeatureExtractorBuilder::featureName, f -> f)); - featureExtractorBuilderList.forEach(f -> existingExtractors.put(f.featureName(), f)); - builder.setLearnToRankFeatureExtractorBuilders(new ArrayList<>(existingExtractors.values())); - } - return builder.build(); - } - - @Override - public boolean isSupported(InferenceConfig inferenceConfig) { - return inferenceConfig instanceof LearnToRankConfig; - } - - boolean isNoop(LearnToRankConfig originalConfig) { - return (numTopFeatureImportanceValues == null || originalConfig.getNumTopFeatureImportanceValues() == numTopFeatureImportanceValues) - && (featureExtractorBuilderList.isEmpty() - || Objects.equals(originalConfig.getFeatureExtractorBuilders(), featureExtractorBuilderList)); - } - - @Override - public LearnToRankConfigUpdate rewrite(QueryRewriteContext ctx) throws IOException { - if (featureExtractorBuilderList.isEmpty()) { - return this; - } - List rewrittenBuilders = new ArrayList<>(featureExtractorBuilderList.size()); - boolean rewritten = false; - 
for (LearnToRankFeatureExtractorBuilder extractorBuilder : featureExtractorBuilderList) { - LearnToRankFeatureExtractorBuilder rewrittenExtractor = Rewriteable.rewrite(extractorBuilder, ctx); - rewritten |= (rewrittenExtractor != extractorBuilder); - rewrittenBuilders.add(rewrittenExtractor); - } - if (rewritten) { - return new LearnToRankConfigUpdate(getNumTopFeatureImportanceValues(), rewrittenBuilders); - } - return this; - } - - public static class Builder implements InferenceConfigUpdate.Builder { - private Integer numTopFeatureImportanceValues; - private List featureExtractorBuilderList; - - @Override - public Builder setResultsField(String resultsField) { - assert false : "results field should never be set in ltr config"; - return this; - } - - public Builder setNumTopFeatureImportanceValues(Integer numTopFeatureImportanceValues) { - this.numTopFeatureImportanceValues = numTopFeatureImportanceValues; - return this; - } - - public Builder setFeatureExtractorBuilders(List featureExtractorBuilderList) { - this.featureExtractorBuilderList = featureExtractorBuilderList; - return this; - } - - @Override - public LearnToRankConfigUpdate build() { - return new LearnToRankConfigUpdate(numTopFeatureImportanceValues, featureExtractorBuilderList); - } - } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ModelPackageConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ModelPackageConfig.java index 19095ee52fe08..536cce95df527 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ModelPackageConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ModelPackageConfig.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersions; -import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -21,6 +20,7 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.common.time.TimeUtils; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelPrefixStrings; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import java.io.IOException; @@ -69,7 +69,8 @@ private static ConstructingObjectParser createParser(b (String) a[9], // model_type tags, (String) a[11], // vocabulary file - (String) a[12] // platform architecture + (String) a[12], // platform architecture + (TrainedModelPrefixStrings) a[13] ); } ); @@ -95,6 +96,11 @@ private static ConstructingObjectParser createParser(b parser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), TrainedModelConfig.TAGS); parser.declareString(ConstructingObjectParser.optionalConstructorArg(), VOCABULARY_FILE); parser.declareString(ConstructingObjectParser.optionalConstructorArg(), PLATFORM_ARCHITECTURE); + parser.declareObject( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TrainedModelPrefixStrings.fromXContent(p, lenient), + TrainedModelConfig.PREFIX_STRINGS + ); return parser; } @@ -122,6 +128,7 @@ public static ModelPackageConfig fromXContentLenient(XContentParser parser) thro private final List tags; private final String vocabularyFile; private final String platformArchitecture; + private final 
TrainedModelPrefixStrings prefixStrings; public ModelPackageConfig( String packagedModelId, @@ -136,7 +143,8 @@ public ModelPackageConfig( String modelType, List tags, String vocabularyFile, - String platformArchitecture + String platformArchitecture, + TrainedModelPrefixStrings prefixStrings ) { this.packagedModelId = ExceptionsHelper.requireNonNull(packagedModelId, PACKAGED_MODEL_ID); this.modelRepository = modelRepository; @@ -154,6 +162,7 @@ public ModelPackageConfig( this.tags = tags == null ? Collections.emptyList() : Collections.unmodifiableList(tags); this.vocabularyFile = vocabularyFile; this.platformArchitecture = platformArchitecture; + this.prefixStrings = prefixStrings; } public ModelPackageConfig(StreamInput in) throws IOException { @@ -174,6 +183,11 @@ public ModelPackageConfig(StreamInput in) throws IOException { } else { platformArchitecture = null; } + if (in.getTransportVersion().onOrAfter(TransportVersions.ML_TRAINED_MODEL_PREFIX_STRINGS_ADDED)) { + prefixStrings = in.readOptionalWriteable(TrainedModelPrefixStrings::new); + } else { + prefixStrings = null; + } } public String getPackagedModelId() { @@ -228,6 +242,10 @@ public String getPlatformArchitecture() { return platformArchitecture; } + public TrainedModelPrefixStrings getPrefixStrings() { + return prefixStrings; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -268,6 +286,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (Strings.isNullOrEmpty(platformArchitecture) == false) { builder.field(PLATFORM_ARCHITECTURE.getPreferredName(), platformArchitecture); } + if (prefixStrings != null) { + builder.field(TrainedModelConfig.PREFIX_STRINGS.getPreferredName(), prefixStrings); + } builder.endObject(); return builder; @@ -290,6 +311,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.ML_PACKAGE_LOADER_PLATFORM_ADDED)) { out.writeOptionalString(platformArchitecture); } + if (out.getTransportVersion().onOrAfter(TransportVersions.ML_TRAINED_MODEL_PREFIX_STRINGS_ADDED)) { + out.writeOptionalWriteable(prefixStrings); + } } @Override @@ -313,7 +337,8 @@ public boolean equals(Object o) { && Objects.equals(modelType, that.modelType) && Objects.equals(tags, that.tags) && Objects.equals(vocabularyFile, that.vocabularyFile) - && Objects.equals(platformArchitecture, that.platformArchitecture); + && Objects.equals(platformArchitecture, that.platformArchitecture) + && Objects.equals(prefixStrings, that.prefixStrings); } @Override @@ -331,7 +356,8 @@ public int hashCode() { modelType, tags, vocabularyFile, - platformArchitecture + platformArchitecture, + prefixStrings ); } @@ -355,6 +381,7 @@ public static class Builder { private List tags; private String vocabularyFile; private String platformArchitecture; + private TrainedModelPrefixStrings prefixStrings; public Builder(ModelPackageConfig modelPackageConfig) { this.packagedModelId = modelPackageConfig.packagedModelId; @@ -370,6 +397,7 @@ public Builder(ModelPackageConfig modelPackageConfig) { this.tags = modelPackageConfig.tags; this.vocabularyFile = modelPackageConfig.vocabularyFile; this.platformArchitecture = modelPackageConfig.platformArchitecture; + this.prefixStrings = modelPackageConfig.prefixStrings; } public Builder setPackedModelId(String packagedModelId) { @@ -437,9 +465,13 @@ public Builder setPlatformArchitecture(String platformArchitecture) { return this; } + public Builder 
setPrefixStrings(TrainedModelPrefixStrings prefixStrings) { + this.prefixStrings = prefixStrings; + return this; + } + /** - * Reset all fields which are only part of the package metadata, but not be part - * of the config. + * Reset (clear) all fields which are part of the model configuration */ public Builder resetPackageOnlyFields() { this.description = null; @@ -447,15 +479,7 @@ public Builder resetPackageOnlyFields() { this.metadata = null; this.modelType = null; this.tags = null; - return this; - } - - public Builder validate(boolean forCreation) { - ActionRequestValidationException validationException = null; - - if (validationException != null) { - throw validationException; - } + this.prefixStrings = null; return this; } @@ -473,7 +497,8 @@ public ModelPackageConfig build() { modelType, tags, vocabularyFile, - platformArchitecture + platformArchitecture, + prefixStrings ); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskState.java index 3db1165026193..c07cb0cf9c91a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskState.java @@ -6,41 +6,53 @@ */ package org.elasticsearch.xpack.core.ml.job.config; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.common.time.TimeUtils; import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.utils.MlTaskState; import java.io.IOException; +import java.time.Instant; import java.util.Objects; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; -public class JobTaskState implements PersistentTaskState { +public class JobTaskState implements PersistentTaskState, MlTaskState { public static final String NAME = MlTasks.JOB_TASK_NAME; - private static ParseField STATE = new ParseField("state"); - private static ParseField ALLOCATION_ID = new ParseField("allocation_id"); - private static ParseField REASON = new ParseField("reason"); + private static final ParseField STATE = new ParseField("state"); + private static final ParseField ALLOCATION_ID = new ParseField("allocation_id"); + private static final ParseField REASON = new ParseField("reason"); + private static final ParseField LAST_STATE_CHANGE_TIME = new ParseField("last_state_change_time"); private static final ConstructingObjectParser<JobTaskState, Void> PARSER = new ConstructingObjectParser<>( NAME, true, - args -> new JobTaskState((JobState) args[0], (Long) args[1], (String) args[2]) + args -> new JobTaskState((JobState) args[0], (Long) args[1], (String) args[2], (Instant) args[3]) ); static { PARSER.declareString(constructorArg(), JobState::fromString, STATE); PARSER.declareLong(constructorArg(), ALLOCATION_ID);
PARSER.declareString(optionalConstructorArg(), REASON); + PARSER.declareField( + optionalConstructorArg(), + p -> TimeUtils.parseTimeFieldToInstant(p, LAST_STATE_CHANGE_TIME.getPreferredName()), + LAST_STATE_CHANGE_TIME, + ObjectParser.ValueType.VALUE + ); } public static JobTaskState fromXContent(XContentParser parser) { @@ -54,28 +66,46 @@ public static JobTaskState fromXContent(XContentParser parser) { private final JobState state; private final long allocationId; private final String reason; + private final Instant lastStateChangeTime; - public JobTaskState(JobState state, long allocationId, @Nullable String reason) { + public JobTaskState(JobState state, long allocationId, @Nullable String reason, @Nullable Instant lastStateChangeTime) { this.state = Objects.requireNonNull(state); this.allocationId = allocationId; this.reason = reason; + // Round to millisecond to avoid serialization round trip differences + this.lastStateChangeTime = (lastStateChangeTime != null) ? Instant.ofEpochMilli(lastStateChangeTime.toEpochMilli()) : null; } public JobTaskState(StreamInput in) throws IOException { state = JobState.fromStream(in); allocationId = in.readLong(); reason = in.readOptionalString(); + if (in.getTransportVersion().onOrAfter(TransportVersions.ML_STATE_CHANGE_TIMESTAMPS)) { + lastStateChangeTime = in.readOptionalInstant(); + } else { + lastStateChangeTime = null; + } } public JobState getState() { return state; } + public long getAllocationId() { + return allocationId; + } + @Nullable public String getReason() { return reason; } + @Override + @Nullable + public Instant getLastStateChangeTime() { + return lastStateChangeTime; + } + /** * The job state stores the allocation ID at the time it was last set. * This method compares the allocation ID in the state with the allocation @@ -101,6 +131,9 @@ public void writeTo(StreamOutput out) throws IOException { state.writeTo(out); out.writeLong(allocationId); out.writeOptionalString(reason); + if (out.getTransportVersion().onOrAfter(TransportVersions.ML_STATE_CHANGE_TIMESTAMPS)) { + out.writeOptionalInstant(lastStateChangeTime); + } } @Override @@ -111,6 +144,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (reason != null) { builder.field(REASON.getPreferredName(), reason); } + if (lastStateChangeTime != null) { + builder.timeField( + LAST_STATE_CHANGE_TIME.getPreferredName(), + LAST_STATE_CHANGE_TIME.getPreferredName() + "_string", + lastStateChangeTime.toEpochMilli() + ); + } builder.endObject(); return builder; } @@ -120,11 +160,14 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; JobTaskState that = (JobTaskState) o; - return state == that.state && Objects.equals(allocationId, that.allocationId) && Objects.equals(reason, that.reason); + return state == that.state + && Objects.equals(allocationId, that.allocationId) + && Objects.equals(reason, that.reason) + && Objects.equals(lastStateChangeTime, that.lastStateChangeTime); } @Override public int hashCode() { - return Objects.hash(state, allocationId, reason); + return Objects.hash(state, allocationId, reason, lastStateChangeTime); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/MlLTRNamedXContentProvider.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/ltr/MlLTRNamedXContentProvider.java similarity index 78% rename from 
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/MlLTRNamedXContentProvider.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/ltr/MlLTRNamedXContentProvider.java index dbfae12413632..c7a8db0ebf011 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/MlLTRNamedXContentProvider.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/ltr/MlLTRNamedXContentProvider.java @@ -4,15 +4,13 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. */ -package org.elasticsearch.xpack.core.ml.inference; +package org.elasticsearch.xpack.core.ml.ltr; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.plugins.spi.NamedXContentProvider; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfigUpdate; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfig; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfigUpdate; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LenientlyParsedInferenceConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.StrictlyParsedInferenceConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.LearnToRankFeatureExtractorBuilder; @@ -46,14 +44,6 @@ public List getNamedXContentParsers() { LearnToRankConfig::fromXContentStrict ) ); - // Inference Config Update - namedXContent.add( - new NamedXContentRegistry.Entry( - InferenceConfigUpdate.class, - LearnToRankConfigUpdate.NAME, - LearnToRankConfigUpdate::fromXContentStrict - ) - ); // LTR extractors namedXContent.add( new NamedXContentRegistry.Entry( @@ -71,14 +61,6 @@ public List getNamedWriteables() { namedWriteables.add( new NamedWriteableRegistry.Entry(InferenceConfig.class, LearnToRankConfig.NAME.getPreferredName(), LearnToRankConfig::new) ); - // Inference config update - namedWriteables.add( - new NamedWriteableRegistry.Entry( - InferenceConfigUpdate.class, - LearnToRankConfigUpdate.NAME.getPreferredName(), - LearnToRankConfigUpdate::new - ) - ); // LTR Extractors namedWriteables.add( new NamedWriteableRegistry.Entry( diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlTaskState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlTaskState.java new file mode 100644 index 0000000000000..09a7d3827caf2 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlTaskState.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.ml.utils; + +import org.elasticsearch.core.Nullable; + +import java.time.Instant; + +public interface MlTaskState { + + /** + * The time of the last state change. 
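+ * May be {@code null} when the time is unknown, for example when the state was written by a node from before this field existed.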
+ */ + @Nullable + Instant getLastStateChangeTime(); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/QueryProvider.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/QueryProvider.java index da50b1eb64b50..5b22165b57443 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/QueryProvider.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/QueryProvider.java @@ -30,10 +30,9 @@ public class QueryProvider implements Writeable, ToXContentObject, Rewriteable<QueryProvider> { private static final Logger logger = LogManager.getLogger(QueryProvider.class); - - private Exception parsingException; - private QueryBuilder parsedQuery; - private Map<String, Object> query; + private final Exception parsingException; + private final QueryBuilder parsedQuery; + private final Map<String, Object> query; public static QueryProvider defaultQuery() { return new QueryProvider( diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/DeleteRollupJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/DeleteRollupJobAction.java index da13d10f55a6b..5286503fd01d5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/DeleteRollupJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/DeleteRollupJobAction.java @@ -6,14 +6,12 @@ */ package org.elasticsearch.xpack.core.rollup.action; -import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; -import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -96,12 +94,6 @@ public boolean equals(Object obj) { } } - public static class RequestBuilder extends ActionRequestBuilder { - protected RequestBuilder(ElasticsearchClient client, DeleteRollupJobAction action) { - super(client, action, new DeleteRollupJobAction.Request()); - } - } - public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { private final boolean acknowledged; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java index 2b9fa95f2bfd1..200c984317d79 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java @@ -7,11 +7,9 @@ package org.elasticsearch.xpack.core.rollup.action; import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; -import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.io.stream.StreamInput; @@ -96,13 +94,6 @@ public boolean equals(Object obj) { } } - public static class RequestBuilder extends ActionRequestBuilder { - - protected RequestBuilder(ElasticsearchClient client, GetRollupCapsAction action) { - super(client, action, new Request()); - } - } - public static class Response extends ActionResponse implements Writeable, ToXContentObject { private Map jobs = Collections.emptyMap(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java index 8cb0a91f7c3f0..7975e36646792 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java @@ -7,13 +7,11 @@ package org.elasticsearch.xpack.core.rollup.action; import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -125,13 +123,6 @@ public boolean equals(Object obj) { } } - public static class RequestBuilder extends ActionRequestBuilder { - - protected RequestBuilder(ElasticsearchClient client, GetRollupIndexCapsAction action) { - super(client, action, new Request()); - } - } - public static class Response extends ActionResponse implements Writeable, ToXContentObject { private Map jobs = Collections.emptyMap(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java index a06ef484544d7..c28ed47c38602 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java @@ -6,14 +6,12 @@ */ package org.elasticsearch.xpack.core.rollup.action; -import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; -import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -120,13 +118,6 @@ public boolean equals(Object obj) { } } - public static class RequestBuilder extends ActionRequestBuilder { - - protected RequestBuilder(ElasticsearchClient client, GetRollupJobsAction action) { - super(client, action, new Request()); - } - } - public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { private final List jobs; diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java index 65bb9aac5471a..76b3b7c077924 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java @@ -13,8 +13,6 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; -import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.ToXContentObject; @@ -122,10 +120,4 @@ public boolean equals(Object obj) { } } - public static class RequestBuilder extends MasterNodeOperationRequestBuilder { - - protected RequestBuilder(ElasticsearchClient client, PutRollupJobAction action) { - super(client, action, new Request()); - } - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StartRollupJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StartRollupJobAction.java index 547026bc66b3d..00a4daaa061c9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StartRollupJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StartRollupJobAction.java @@ -6,12 +6,10 @@ */ package org.elasticsearch.xpack.core.rollup.action; -import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; -import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -88,13 +86,6 @@ public boolean equals(Object obj) { } } - public static class RequestBuilder extends ActionRequestBuilder { - - protected RequestBuilder(ElasticsearchClient client, StartRollupJobAction action) { - super(client, action, new Request()); - } - } - public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { private final boolean started; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java index 1139be86f2346..f1527b4c2cafb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java @@ -6,12 +6,10 @@ */ package org.elasticsearch.xpack.core.rollup.action; -import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import 
org.elasticsearch.action.support.tasks.BaseTasksResponse; -import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -121,13 +119,6 @@ public boolean equals(Object obj) { } } - public static class RequestBuilder extends ActionRequestBuilder { - - protected RequestBuilder(ElasticsearchClient client, StopRollupJobAction action) { - super(client, action, new Request()); - } - } - public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { private final boolean stopped; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/Grant.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/Grant.java index f24ddbd86c937..41f1f50b6f7f0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/Grant.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/Grant.java @@ -7,13 +7,17 @@ package org.elasticsearch.xpack.core.security.action; +import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtAuthenticationToken; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.BearerToken; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; @@ -33,6 +37,24 @@ public class Grant implements Writeable { private SecureString password; private SecureString accessToken; private String runAsUsername; + private ClientAuthentication clientAuthentication; + + public record ClientAuthentication(String scheme, SecureString value) implements Writeable { + + public ClientAuthentication(SecureString value) { + this(JwtRealmSettings.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME, value); + } + + ClientAuthentication(StreamInput in) throws IOException { + this(in.readString(), in.readSecureString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(scheme); + out.writeSecureString(value); + } + } public Grant() {} @@ -46,6 +68,11 @@ public Grant(StreamInput in) throws IOException { } else { this.runAsUsername = null; } + if (in.getTransportVersion().onOrAfter(TransportVersions.GRANT_API_KEY_CLIENT_AUTHENTICATION_ADDED)) { + this.clientAuthentication = in.readOptionalWriteable(ClientAuthentication::new); + } else { + this.clientAuthentication = null; + } } public void writeTo(StreamOutput out) throws IOException { @@ -56,6 +83,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { out.writeOptionalString(runAsUsername); } + if (out.getTransportVersion().onOrAfter(TransportVersions.GRANT_API_KEY_CLIENT_AUTHENTICATION_ADDED)) { + out.writeOptionalWriteable(clientAuthentication); + } } public String getType() { @@ -78,6 +108,10 @@ public String getRunAsUsername() { return 
runAsUsername; } + public ClientAuthentication getClientAuthentication() { + return clientAuthentication; + } + public void setType(String type) { this.type = type; } @@ -98,12 +132,31 @@ public void setRunAsUsername(String runAsUsername) { this.runAsUsername = runAsUsername; } + public void setClientAuthentication(ClientAuthentication clientAuthentication) { + this.clientAuthentication = clientAuthentication; + } + public AuthenticationToken getAuthenticationToken() { assert validate(null) == null : "grant is invalid"; return switch (type) { case PASSWORD_GRANT_TYPE -> new UsernamePasswordToken(username, password); - case ACCESS_TOKEN_GRANT_TYPE -> new BearerToken(accessToken); - default -> null; + case ACCESS_TOKEN_GRANT_TYPE -> { + SecureString clientAuthentication = this.clientAuthentication != null ? this.clientAuthentication.value() : null; + AuthenticationToken token = JwtAuthenticationToken.tryParseJwt(accessToken, clientAuthentication); + if (token != null) { + yield token; + } + if (clientAuthentication != null) { + clientAuthentication.close(); + throw new ElasticsearchSecurityException( + "[client_authentication] not supported with the supplied access_token type", + RestStatus.BAD_REQUEST + ); + } + // here we effectively assume it's an ES access token (from the {@code TokenService}) + yield new BearerToken(accessToken); + } + default -> throw new ElasticsearchSecurityException("the grant type [{}] is not supported", type); }; } @@ -114,10 +167,20 @@ public ActionRequestValidationException validate(ActionRequestValidationExceptio validationException = validateRequiredField("username", username, validationException); validationException = validateRequiredField("password", password, validationException); validationException = validateUnsupportedField("access_token", accessToken, validationException); + if (clientAuthentication != null) { + return addValidationError("[client_authentication] is not supported for grant_type [" + type + "]", validationException); + } } else if (type.equals(ACCESS_TOKEN_GRANT_TYPE)) { validationException = validateRequiredField("access_token", accessToken, validationException); validationException = validateUnsupportedField("username", username, validationException); validationException = validateUnsupportedField("password", password, validationException); + if (clientAuthentication != null + && JwtRealmSettings.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME.equals(clientAuthentication.scheme.trim()) == false) { + return addValidationError( + "[client_authentication.scheme] must be set to [" + JwtRealmSettings.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME + "]", + validationException + ); + } } else { validationException = addValidationError("grant_type [" + type + "] is not supported", validationException); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java index 79c8ef10100e9..b06b7728f541f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java @@ -92,6 +92,7 @@ public String value() { private final Instant creation; private final Instant expiration; private final boolean invalidated; + private final Instant invalidation; private final String username; private final String realm; private final Map metadata; @@ -107,6 +108,7 @@ public ApiKey( 
Instant creation, Instant expiration, boolean invalidated, + @Nullable Instant invalidation, String username, String realm, @Nullable Map metadata, @@ -120,6 +122,7 @@ public ApiKey( creation, expiration, invalidated, + invalidation, username, realm, metadata, @@ -135,6 +138,7 @@ private ApiKey( Instant creation, Instant expiration, boolean invalidated, + Instant invalidation, String username, String realm, @Nullable Map metadata, @@ -150,6 +154,7 @@ private ApiKey( this.creation = Instant.ofEpochMilli(creation.toEpochMilli()); this.expiration = (expiration != null) ? Instant.ofEpochMilli(expiration.toEpochMilli()) : null; this.invalidated = invalidated; + this.invalidation = (invalidation != null) ? Instant.ofEpochMilli(invalidation.toEpochMilli()) : null; this.username = username; this.realm = realm; this.metadata = metadata == null ? Map.of() : metadata; @@ -177,6 +182,12 @@ public ApiKey(StreamInput in) throws IOException { this.creation = in.readInstant(); this.expiration = in.readOptionalInstant(); this.invalidated = in.readBoolean(); + if (in.getTransportVersion().onOrAfter(TransportVersions.GET_API_KEY_INVALIDATION_TIME_ADDED)) { + this.invalidation = in.readOptionalInstant(); + } else { + this.invalidation = null; + } + this.username = in.readString(); this.realm = in.readString(); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { @@ -218,6 +229,10 @@ public boolean isInvalidated() { return invalidated; } + public Instant getInvalidation() { + return invalidation; + } + public String getUsername() { return username; } @@ -252,10 +267,11 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t if (expiration != null) { builder.field("expiration", expiration.toEpochMilli()); } - builder.field("invalidated", invalidated) - .field("username", username) - .field("realm", realm) - .field("metadata", (metadata == null ? Map.of() : metadata)); + builder.field("invalidated", invalidated); + if (invalidation != null) { + builder.field("invalidation", invalidation.toEpochMilli()); + } + builder.field("username", username).field("realm", realm).field("metadata", (metadata == null ? 
Map.of() : metadata)); if (roleDescriptors != null) { builder.startObject("role_descriptors"); for (var roleDescriptor : roleDescriptors) { @@ -321,6 +337,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeInstant(creation); out.writeOptionalInstant(expiration); out.writeBoolean(invalidated); + if (out.getTransportVersion().onOrAfter(TransportVersions.GET_API_KEY_INVALIDATION_TIME_ADDED)) { + out.writeOptionalInstant(invalidation); + } out.writeString(username); out.writeString(realm); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { @@ -334,7 +353,20 @@ public void writeTo(StreamOutput out) throws IOException { @Override public int hashCode() { - return Objects.hash(name, id, type, creation, expiration, invalidated, username, realm, metadata, roleDescriptors, limitedBy); + return Objects.hash( + name, + id, + type, + creation, + expiration, + invalidated, + invalidation, + username, + realm, + metadata, + roleDescriptors, + limitedBy + ); } @Override @@ -355,6 +387,7 @@ public boolean equals(Object obj) { && Objects.equals(creation, other.creation) && Objects.equals(expiration, other.expiration) && Objects.equals(invalidated, other.invalidated) + && Objects.equals(invalidation, other.invalidation) && Objects.equals(username, other.username) && Objects.equals(realm, other.realm) && Objects.equals(metadata, other.metadata) @@ -371,11 +404,12 @@ public boolean equals(Object obj) { Instant.ofEpochMilli((Long) args[3]), (args[4] == null) ? null : Instant.ofEpochMilli((Long) args[4]), (Boolean) args[5], - (String) args[6], + (args[6] == null) ? null : Instant.ofEpochMilli((Long) args[6]), (String) args[7], - (args[8] == null) ? null : (Map) args[8], - (List) args[9], - (RoleDescriptorsIntersection) args[10] + (String) args[8], + (args[9] == null) ? null : (Map) args[9], + (List) args[10], + (RoleDescriptorsIntersection) args[11] ); }); static { @@ -385,6 +419,7 @@ public boolean equals(Object obj) { PARSER.declareLong(constructorArg(), new ParseField("creation")); PARSER.declareLong(optionalConstructorArg(), new ParseField("expiration")); PARSER.declareBoolean(constructorArg(), new ParseField("invalidated")); + PARSER.declareLong(optionalConstructorArg(), new ParseField("invalidation")); PARSER.declareString(constructorArg(), new ParseField("username")); PARSER.declareString(constructorArg(), new ParseField("realm")); PARSER.declareObject(optionalConstructorArg(), (p, c) -> p.map(), new ParseField("metadata")); @@ -418,6 +453,8 @@ public String toString() { + expiration + ", invalidated=" + invalidated + + ", invalidation=" + + invalidation + ", username=" + username + ", realm=" diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequestBuilder.java deleted file mode 100644 index 98a1f01388753..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequestBuilder.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ -package org.elasticsearch.xpack.core.security.action.oidc; - -import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.client.internal.ElasticsearchClient; - -/** - * Request builder for populating a {@link OpenIdConnectAuthenticateRequest} - */ -public class OpenIdConnectAuthenticateRequestBuilder extends ActionRequestBuilder< - OpenIdConnectAuthenticateRequest, - OpenIdConnectAuthenticateResponse> { - - public OpenIdConnectAuthenticateRequestBuilder(ElasticsearchClient client) { - super(client, OpenIdConnectAuthenticateAction.INSTANCE, new OpenIdConnectAuthenticateRequest()); - } - - public OpenIdConnectAuthenticateRequestBuilder redirectUri(String redirectUri) { - request.setRedirectUri(redirectUri); - return this; - } - - public OpenIdConnectAuthenticateRequestBuilder state(String state) { - request.setState(state); - return this; - } - - public OpenIdConnectAuthenticateRequestBuilder nonce(String nonce) { - request.setNonce(nonce); - return this; - } - -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationRequestBuilder.java deleted file mode 100644 index 2dd267a70dac1..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationRequestBuilder.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ -package org.elasticsearch.xpack.core.security.action.oidc; - -import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.client.internal.ElasticsearchClient; - -/** - * Request builder for populating a {@link OpenIdConnectPrepareAuthenticationRequest} - */ -public class OpenIdConnectPrepareAuthenticationRequestBuilder extends ActionRequestBuilder< - OpenIdConnectPrepareAuthenticationRequest, - OpenIdConnectPrepareAuthenticationResponse> { - - public OpenIdConnectPrepareAuthenticationRequestBuilder(ElasticsearchClient client) { - super(client, OpenIdConnectPrepareAuthenticationAction.INSTANCE, new OpenIdConnectPrepareAuthenticationRequest()); - } - - public OpenIdConnectPrepareAuthenticationRequestBuilder realmName(String name) { - request.setRealmName(name); - return this; - } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java index c011bd8af6c51..ec0ecfc909980 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java @@ -50,7 +50,7 @@ static ApplicationPrivilegeDescriptor parsePrivilege(XContentParser parser, Stri */ public PutPrivilegesRequestBuilder source(BytesReference source, XContentType xContentType) throws IOException { Objects.requireNonNull(xContentType); - // EMPTY is ok here because we never call namedObject + // NamedXContentRegistry.EMPTY is ok here because we never call namedObject try ( InputStream stream = source.streamInput(); XContentParser parser = xContentType.xContent() diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheRequestBuilder.java index c76fa00e9cde4..5b7f563f8d75e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheRequestBuilder.java @@ -15,11 +15,8 @@ public class ClearRealmCacheRequestBuilder extends NodesOperationRequestBuilder< ClearRealmCacheRequestBuilder> { public ClearRealmCacheRequestBuilder(ElasticsearchClient client) { - this(client, ClearRealmCacheAction.INSTANCE); - } + super(client, ClearRealmCacheAction.INSTANCE, new ClearRealmCacheRequest()); - public ClearRealmCacheRequestBuilder(ElasticsearchClient client, ClearRealmCacheAction action) { - super(client, action, new ClearRealmCacheRequest()); } /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheRequestBuilder.java index a28a48e401d6c..13541df7bed93 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheRequestBuilder.java @@ -18,11 +18,7 @@ public class ClearRolesCacheRequestBuilder extends NodesOperationRequestBuilder< 
ClearRolesCacheRequestBuilder> { public ClearRolesCacheRequestBuilder(ElasticsearchClient client) { - this(client, ClearRolesCacheAction.INSTANCE, new ClearRolesCacheRequest()); - } - - public ClearRolesCacheRequestBuilder(ElasticsearchClient client, ClearRolesCacheAction action, ClearRolesCacheRequest request) { - super(client, action, request); + super(client, ClearRolesCacheAction.INSTANCE, new ClearRolesCacheRequest()); } /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleRequestBuilder.java index e2e634df75d43..93dfed1ee2906 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleRequestBuilder.java @@ -18,11 +18,7 @@ public class DeleteRoleRequestBuilder extends ActionRequestBuilder { public DeleteRoleRequestBuilder(ElasticsearchClient client) { - this(client, DeleteRoleAction.INSTANCE); - } - - public DeleteRoleRequestBuilder(ElasticsearchClient client, DeleteRoleAction action) { - super(client, action, new DeleteRoleRequest()); + super(client, DeleteRoleAction.INSTANCE, new DeleteRoleRequest()); } public DeleteRoleRequestBuilder name(String name) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesRequestBuilder.java index ead52e245b83c..693a497d05087 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesRequestBuilder.java @@ -15,11 +15,7 @@ public class GetRolesRequestBuilder extends ActionRequestBuilder { public GetRolesRequestBuilder(ElasticsearchClient client) { - this(client, GetRolesAction.INSTANCE); - } - - public GetRolesRequestBuilder(ElasticsearchClient client, GetRolesAction action) { - super(client, action, new GetRolesRequest()); + super(client, GetRolesAction.INSTANCE, new GetRolesRequest()); } public GetRolesRequestBuilder names(String... names) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenRequestBuilder.java deleted file mode 100644 index 8971ad7beefa5..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenRequestBuilder.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ -package org.elasticsearch.xpack.core.security.action.token; - -import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.client.internal.ElasticsearchClient; - -/** - * Request builder that is used to populate a {@link InvalidateTokenRequest} - */ -public final class InvalidateTokenRequestBuilder extends ActionRequestBuilder { - - public InvalidateTokenRequestBuilder(ElasticsearchClient client) { - super(client, InvalidateTokenAction.INSTANCE, new InvalidateTokenRequest()); - } - - /** - * The string representation of the token that is being invalidated. This is the value returned - * from a create token request. - */ - public InvalidateTokenRequestBuilder setTokenString(String token) { - request.setTokenString(token); - return this; - } - - /** - * Sets the type of the token that should be invalidated - */ - public InvalidateTokenRequestBuilder setType(InvalidateTokenRequest.Type type) { - request.setTokenType(type); - return this; - } - - /** - * Sets the name of the realm for which all tokens should be invalidated - */ - public InvalidateTokenRequestBuilder setRealmName(String realmName) { - request.setRealmName(realmName); - return this; - } - - /** - * Sets the username for which all tokens should be invalidated - */ - public InvalidateTokenRequestBuilder setUserName(String username) { - request.setUserName(username); - return this; - } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java index 6efb1cfd1fe03..4d285cf3b144d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java @@ -1130,10 +1130,6 @@ private boolean isApiKeyRealm() { return API_KEY_REALM_NAME.equals(name) && API_KEY_REALM_TYPE.equals(type); } - private boolean isServiceAccountRealm() { - return ServiceAccountSettings.REALM_NAME.equals(name) && ServiceAccountSettings.REALM_TYPE.equals(type); - } - private boolean isCrossClusterAccessRealm() { return CROSS_CLUSTER_ACCESS_REALM_NAME.equals(name) && CROSS_CLUSTER_ACCESS_REALM_TYPE.equals(type); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticationToken.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtAuthenticationToken.java similarity index 90% rename from x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticationToken.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtAuthenticationToken.java index 9ca0ddb42e663..ebfaae72b9df2 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticationToken.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtAuthenticationToken.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.xpack.security.authc.jwt; +package org.elasticsearch.xpack.core.security.authc.jwt; import com.nimbusds.jwt.JWTClaimsSet; import com.nimbusds.jwt.SignedJWT; @@ -29,13 +29,22 @@ public class JwtAuthenticationToken implements AuthenticationToken { @Nullable private final SecureString clientAuthenticationSharedSecret; + public static JwtAuthenticationToken tryParseJwt(SecureString userCredentials, @Nullable SecureString clientCredentials) { + SignedJWT signedJWT = JwtUtil.parseSignedJWT(userCredentials); + if (signedJWT == null) { + return null; + } + return new JwtAuthenticationToken(signedJWT, JwtUtil.sha256(userCredentials), clientCredentials); + } + /** * Store a mandatory JWT and optional Shared Secret. * @param signedJWT The JWT parsed from the end-user credentials * @param userCredentialsHash The hash of the end-user credentials is used to compute the key for user cache at the realm level. - * See also {@link JwtRealm#authenticate}. + * See also {@code JwtRealm#authenticate}. * @param clientAuthenticationSharedSecret URL-safe Shared Secret for Client authentication. Required by some JWT realms. */ + @SuppressWarnings("this-escape") public JwtAuthenticationToken( SignedJWT signedJWT, byte[] userCredentialsHash, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtRealmSettings.java index 9a4fdae51e81b..1903dd5146f69 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtRealmSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtRealmSettings.java @@ -33,6 +33,8 @@ */ public class JwtRealmSettings { + public static final String HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME = "SharedSecret"; + private JwtRealmSettings() {} public static final String TYPE = "jwt"; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtil.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtUtil.java similarity index 99% rename from x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtil.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtUtil.java index 928ecd7fa265d..d70b76f8bc574 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtil.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtUtil.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.security.authc.jwt; +package org.elasticsearch.xpack.core.security.authc.jwt; import com.nimbusds.jose.JWSObject; import com.nimbusds.jose.jwk.JWK; @@ -47,7 +47,6 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import org.elasticsearch.xpack.core.ssl.SSLService; import java.io.InputStream; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java index b091f3bc9f894..35c32780d2e4c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java @@ -29,7 +29,7 @@ import org.elasticsearch.action.datastreams.GetDataStreamAction; import org.elasticsearch.action.datastreams.PromoteDataStreamAction; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction; -import org.elasticsearch.action.search.SearchShardsAction; +import org.elasticsearch.action.search.TransportSearchShardsAction; import org.elasticsearch.common.Strings; import org.elasticsearch.index.seqno.RetentionLeaseActions; import org.elasticsearch.xpack.core.ccr.action.ForgetFollowerAction; @@ -78,19 +78,25 @@ public final class IndexPrivilege extends Privilege { private static final Automaton READ_CROSS_CLUSTER_AUTOMATON = patterns( "internal:transport/proxy/indices:data/read/*", ClusterSearchShardsAction.NAME, - SearchShardsAction.NAME + TransportSearchShardsAction.TYPE.name() + ); + private static final Automaton CREATE_AUTOMATON = patterns( + "indices:data/write/index*", + "indices:data/write/bulk*", + "indices:data/write/simulate/bulk*" ); - private static final Automaton CREATE_AUTOMATON = patterns("indices:data/write/index*", "indices:data/write/bulk*"); private static final Automaton CREATE_DOC_AUTOMATON = patterns( "indices:data/write/index", "indices:data/write/index[*", "indices:data/write/index:op_type/create", - "indices:data/write/bulk*" + "indices:data/write/bulk*", + "indices:data/write/simulate/bulk*" ); private static final Automaton INDEX_AUTOMATON = patterns( "indices:data/write/index*", "indices:data/write/bulk*", - "indices:data/write/update*" + "indices:data/write/update*", + "indices:data/write/simulate/bulk*" ); private static final Automaton DELETE_AUTOMATON = patterns("indices:data/write/delete*", "indices:data/write/bulk*"); private static final Automaton WRITE_AUTOMATON = patterns("indices:data/write/*", AutoPutMappingAction.NAME); @@ -113,7 +119,7 @@ public final class IndexPrivilege extends Privilege { GetFieldMappingsAction.NAME + "*", GetMappingsAction.NAME, ClusterSearchShardsAction.NAME, - SearchShardsAction.NAME, + TransportSearchShardsAction.TYPE.name(), ValidateQueryAction.NAME + "*", GetSettingsAction.NAME, ExplainLifecycleAction.NAME, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java index b15212d979d09..bc42632507256 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.core.security.authz.privilege; -import org.elasticsearch.action.search.SearchShardsAction; +import org.elasticsearch.action.search.TransportSearchShardsAction; import org.elasticsearch.index.seqno.RetentionLeaseActions; import org.elasticsearch.index.seqno.RetentionLeaseBackgroundSyncAction; import org.elasticsearch.index.seqno.RetentionLeaseSyncAction; @@ -43,7 +43,7 @@ public final class SystemPrivilege extends Privilege { "indices:data/read/*", // needed for SystemIndexMigrator "indices:admin/refresh", // needed for SystemIndexMigrator "indices:admin/aliases", // needed for SystemIndexMigrator - SearchShardsAction.NAME // added so this API can be called with the system user by other APIs + TransportSearchShardsAction.TYPE.name() // added so this API can be called with the system user by other APIs ); private static final Predicate PREDICATE = (action) -> { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java index 33c8dbdf27bf9..6a1da2e0ddfa0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @@ -197,6 +197,8 @@ static RoleDescriptor kibanaSystem(String name) { RoleDescriptor.IndicesPrivileges.builder().indices("logs-elastic_agent*").privileges("read").build(), // Fleet publishes Agent metrics in kibana task runner RoleDescriptor.IndicesPrivileges.builder().indices("metrics-fleet_server*").privileges("all").build(), + // Fleet reads output health from this index pattern + RoleDescriptor.IndicesPrivileges.builder().indices("logs-fleet_server*").privileges("read").build(), // Legacy "Alerts as data" used in Security Solution. // Kibana user creates these indices; reads / writes to them. 
RoleDescriptor.IndicesPrivileges.builder().indices(ReservedRolesStore.ALERTS_LEGACY_INDEX).privileges("all").build(), @@ -350,6 +352,10 @@ static RoleDescriptor kibanaSystem(String name) { .privileges("create_index", "read", "index", "delete", IndicesAliasesAction.NAME, UpdateSettingsAction.NAME) .build(), RoleDescriptor.IndicesPrivileges.builder().indices("risk-score.risk-*").privileges("all").build(), + RoleDescriptor.IndicesPrivileges.builder() + .indices(".asset-criticality.asset-criticality-*") + .privileges("create_index", "manage", "read") + .build(), // For cloud_defend usageCollection RoleDescriptor.IndicesPrivileges.builder() .indices("logs-cloud_defend.*", "metrics-cloud_defend.*") diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java index 18a2be0e9b358..b0f1c78b0c99d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java @@ -64,6 +64,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; +import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.isDataStreamsLifecycleOnlyMode; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; @@ -71,18 +72,6 @@ * Abstracts the logic of managing versioned index templates, ingest pipelines and lifecycle policies for plugins that require such things. */ public abstract class IndexTemplateRegistry implements ClusterStateListener { - public static final String DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME = "data_streams.lifecycle_only.mode"; - - /** - * Check if {@link #DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME} is present and set to {@code true}, indicating that - * we're running in a cluster configuration that is only expecting to use data streams lifecycles. - * - * @param settings the node settings - * @return true if {@link #DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME} is present and set - */ - public static boolean isDataStreamsLifecycleOnlyMode(final Settings settings) { - return settings.getAsBoolean(DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME, false); - } private static final Logger logger = LogManager.getLogger(IndexTemplateRegistry.class); @@ -121,7 +110,7 @@ public IndexTemplateRegistry( /** * Returns the configured configurations for the lifecycle policies. Subclasses should provide * the ILM configurations and they will be loaded if we're not running data stream only mode (controlled via - * {@link #DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME}). + * {@link org.elasticsearch.cluster.metadata.DataStreamLifecycle#DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME}). * * The loaded lifecycle configurations will be installed if returned by {@link #getLifecyclePolicies()}. 
Child classes * have a chance to override {@link #getLifecyclePolicies()} in case they want additional control over if these diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IngestPipelineConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IngestPipelineConfig.java index a216030f1c2e0..2768355183687 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IngestPipelineConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IngestPipelineConfig.java @@ -12,6 +12,7 @@ import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Objects; /** @@ -22,6 +23,7 @@ public abstract class IngestPipelineConfig { protected final String resource; protected final int version; protected final String versionProperty; + protected final Map variables; /** * A list of this pipeline's dependencies, for example - such referred to through a pipeline processor. @@ -35,11 +37,23 @@ public IngestPipelineConfig(String id, String resource, int version, String vers } public IngestPipelineConfig(String id, String resource, int version, String versionProperty, List dependencies) { + this(id, resource, version, versionProperty, dependencies, Map.of()); + } + + public IngestPipelineConfig( + String id, + String resource, + int version, + String versionProperty, + List dependencies, + Map variables + ) { this.id = Objects.requireNonNull(id); this.resource = Objects.requireNonNull(resource); this.version = version; this.versionProperty = Objects.requireNonNull(versionProperty); this.dependencies = dependencies; + this.variables = Objects.requireNonNull(variables); } public String getId() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/JsonIngestPipelineConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/JsonIngestPipelineConfig.java index fc2ca7cbce186..05a27de40aadc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/JsonIngestPipelineConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/JsonIngestPipelineConfig.java @@ -12,6 +12,7 @@ import org.elasticsearch.xcontent.XContentType; import java.util.List; +import java.util.Map; public class JsonIngestPipelineConfig extends IngestPipelineConfig { public JsonIngestPipelineConfig(String id, String resource, int version, String versionProperty) { @@ -22,6 +23,17 @@ public JsonIngestPipelineConfig(String id, String resource, int version, String super(id, resource, version, versionProperty, dependencies); } + public JsonIngestPipelineConfig( + String id, + String resource, + int version, + String versionProperty, + List dependencies, + Map variables + ) { + super(id, resource, version, versionProperty, dependencies, variables); + } + @Override public XContentType getXContentType() { return XContentType.JSON; @@ -29,6 +41,6 @@ public XContentType getXContentType() { @Override public BytesReference loadConfig() { - return new BytesArray(TemplateUtils.loadTemplate(resource, String.valueOf(version), versionProperty)); + return new BytesArray(TemplateUtils.loadTemplate(resource, String.valueOf(version), versionProperty, variables)); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java index ad27607e47c5e..d1fda4ab1bd13 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java @@ -98,7 +98,7 @@ public static void validate(String source) { } } - private static String replaceVariables(String input, String version, String versionProperty, Map variables) { + public static String replaceVariables(String input, String version, String versionProperty, Map variables) { String template = replaceVariable(input, versionProperty, version); for (Map.Entry variable : variables.entrySet()) { template = replaceVariable(template, variable.getKey(), variable.getValue()); @@ -134,25 +134,10 @@ public static boolean checkTemplateExistsAndVersionIsGTECurrentVersion(String te * @param templateName Name of the index template * @param state Cluster state * @param logger Logger - * @param versionComposableTemplateExpected In which version of Elasticsearch did this template switch to being a composable template? - * null means the template hasn't been switched yet. */ - public static boolean checkTemplateExistsAndIsUpToDate( - String templateName, - String versionKey, - ClusterState state, - Logger logger, - Version versionComposableTemplateExpected - ) { + public static boolean checkTemplateExistsAndIsUpToDate(String templateName, String versionKey, ClusterState state, Logger logger) { - return checkTemplateExistsAndVersionMatches( - templateName, - versionKey, - state, - logger, - Version.CURRENT::equals, - versionComposableTemplateExpected - ); + return checkTemplateExistsAndVersionMatches(templateName, versionKey, state, logger, Version.CURRENT::equals); } /** @@ -162,32 +147,20 @@ public static boolean checkTemplateExistsAndIsUpToDate( * @param state Cluster state * @param logger Logger * @param predicate Predicate to execute on version check - * @param versionComposableTemplateExpected In which version of Elasticsearch did this template switch to being a composable template? - * null means the template hasn't been switched yet. 
*/ public static boolean checkTemplateExistsAndVersionMatches( String templateName, String versionKey, ClusterState state, Logger logger, - Predicate predicate, - Version versionComposableTemplateExpected + Predicate predicate ) { - CompressedXContent mappings; - if (versionComposableTemplateExpected != null && state.nodes().getMinNodeVersion().onOrAfter(versionComposableTemplateExpected)) { - ComposableIndexTemplate templateMeta = state.metadata().templatesV2().get(templateName); - if (templateMeta == null) { - return false; - } - mappings = templateMeta.template().mappings(); - } else { - IndexTemplateMetadata templateMeta = state.metadata().templates().get(templateName); - if (templateMeta == null) { - return false; - } - mappings = templateMeta.getMappings(); + IndexTemplateMetadata templateMeta = state.metadata().templates().get(templateName); + if (templateMeta == null) { + return false; } + CompressedXContent mappings = templateMeta.getMappings(); // check all mappings contain correct version in _meta // we have to parse the source here which is annoying diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TermsEnumRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TermsEnumRequestBuilder.java deleted file mode 100644 index 091101ffc95aa..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TermsEnumRequestBuilder.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.core.termsenum.action; - -import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; -import org.elasticsearch.client.internal.ElasticsearchClient; - -public class TermsEnumRequestBuilder extends BroadcastOperationRequestBuilder< - TermsEnumRequest, - TermsEnumResponse, - TermsEnumRequestBuilder> { - - public TermsEnumRequestBuilder(ElasticsearchClient client, TermsEnumAction action) { - super(client, action, new TermsEnumRequest()); - } - - public TermsEnumRequestBuilder setField(String field) { - request.field(field); - return this; - } - - public TermsEnumRequestBuilder setString(String string) { - request.string(string); - return this; - } - - public TermsEnumRequestBuilder setSize(int size) { - request.size(size); - return this; - } - -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java index 78406929c2cf7..a6e9ac228fe9f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java @@ -10,9 +10,11 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.util.PriorityQueue; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.search.SearchTransportService; 
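+// ExceptionsHelper and ShardOperationFailedException (imported above) support the shard-failure deduplication and selective 5xx logging added to doExecute below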
import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DefaultShardOperationFailedException; @@ -147,7 +149,23 @@ protected void doExecute(Task task, TermsEnumRequest request, ActionListener loggingListener = listener.delegateFailureAndWrap((l, termsEnumResponse) -> { + // Deduplicate failures by exception message and index + ShardOperationFailedException[] deduplicated = ExceptionsHelper.groupBy(termsEnumResponse.getShardFailures()); + for (ShardOperationFailedException e : deduplicated) { + boolean causeHas500Status = false; + if (e.getCause() != null) { + causeHas500Status = ExceptionsHelper.status(e.getCause()).getStatus() >= 500; + } + if ((e.status().getStatus() >= 500 || causeHas500Status) + && ExceptionsHelper.isNodeOrShardUnavailableTypeException(e.getCause()) == false) { + logger.warn("TransportTermsEnumAction shard failure (partial results response)", e); + } + } + l.onResponse(termsEnumResponse); + }); + new AsyncBroadcastAction(task, request, loggingListener).start(); } protected static NodeTermsEnumRequest newNodeRequest( diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetCheckpointAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetCheckpointAction.java index 7ac27d79d3cb8..e492a98748af2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetCheckpointAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetCheckpointAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.TaskId; @@ -48,12 +49,21 @@ public static class Request extends ActionRequest implements IndicesRequest.Repl private String[] indices; private final IndicesOptions indicesOptions; + private final QueryBuilder query; + private final String cluster; private final TimeValue timeout; public Request(StreamInput in) throws IOException { super(in); indices = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); + if (in.getTransportVersion().onOrAfter(TransportVersions.TRANSFORM_GET_CHECKPOINT_QUERY_AND_CLUSTER_ADDED)) { + query = in.readOptionalNamedWriteable(QueryBuilder.class); + cluster = in.readOptionalString(); + } else { + query = null; + cluster = null; + } if (in.getTransportVersion().onOrAfter(TransportVersions.TRANSFORM_GET_CHECKPOINT_TIMEOUT_ADDED)) { timeout = in.readOptionalTimeValue(); } else { @@ -61,9 +71,11 @@ public Request(StreamInput in) throws IOException { } } - public Request(String[] indices, IndicesOptions indicesOptions, TimeValue timeout) { + public Request(String[] indices, IndicesOptions indicesOptions, QueryBuilder query, String cluster, TimeValue timeout) { this.indices = indices != null ? 
indices : Strings.EMPTY_ARRAY; this.indicesOptions = indicesOptions; + this.query = query; + this.cluster = cluster; this.timeout = timeout; } @@ -82,6 +94,14 @@ public IndicesOptions indicesOptions() { return indicesOptions; } + public QueryBuilder getQuery() { + return query; + } + + public String getCluster() { + return cluster; + } + public TimeValue getTimeout() { return timeout; } @@ -98,12 +118,14 @@ public boolean equals(Object obj) { return Arrays.equals(indices, that.indices) && Objects.equals(indicesOptions, that.indicesOptions) + && Objects.equals(query, that.query) + && Objects.equals(cluster, that.cluster) && Objects.equals(timeout, that.timeout); } @Override public int hashCode() { - return Objects.hash(Arrays.hashCode(indices), indicesOptions, timeout); + return Objects.hash(Arrays.hashCode(indices), indicesOptions, query, cluster, timeout); } @Override @@ -111,6 +133,10 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArray(indices); indicesOptions.writeIndicesOptions(out); + if (out.getTransportVersion().onOrAfter(TransportVersions.TRANSFORM_GET_CHECKPOINT_QUERY_AND_CLUSTER_ADDED)) { + out.writeOptionalNamedWriteable(query); + out.writeOptionalString(cluster); + } if (out.getTransportVersion().onOrAfter(TransportVersions.TRANSFORM_GET_CHECKPOINT_TIMEOUT_ADDED)) { out.writeOptionalTimeValue(timeout); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchRequestBuilder.java index 5237f6b796336..81c059a5fca00 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchRequestBuilder.java @@ -14,10 +14,6 @@ */ public class AckWatchRequestBuilder extends ActionRequestBuilder { - public AckWatchRequestBuilder(ElasticsearchClient client) { - super(client, AckWatchAction.INSTANCE, new AckWatchRequest()); - } - public AckWatchRequestBuilder(ElasticsearchClient client, String id) { super(client, AckWatchAction.INSTANCE, new AckWatchRequest(id)); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchRequestBuilder.java index a5c682293fe79..04adaf993b43d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchRequestBuilder.java @@ -14,10 +14,6 @@ */ public class ActivateWatchRequestBuilder extends ActionRequestBuilder { - public ActivateWatchRequestBuilder(ElasticsearchClient client) { - super(client, ActivateWatchAction.INSTANCE, new ActivateWatchRequest()); - } - public ActivateWatchRequestBuilder(ElasticsearchClient client, String id, boolean activate) { super(client, ActivateWatchAction.INSTANCE, new ActivateWatchRequest(id, activate)); } diff --git a/x-pack/plugin/ml/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/x-pack/plugin/core/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification similarity index 85% 
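Note on the GetCheckpointAction serialization above: the new query and cluster fields are written only when the stream's negotiated transport version is new enough, and readers on an older protocol fall back to null, which keeps mixed-version clusters safe during rolling upgrades. A self-contained sketch of that version-gated wire pattern using plain java.io streams (the version constant and field names below are illustrative, not the Elasticsearch StreamInput/StreamOutput API):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

class VersionGatedWire {
    static final int QUERY_AND_CLUSTER_ADDED = 2; // hypothetical wire version constant

    static void write(DataOutputStream out, int wireVersion, String cluster) throws IOException {
        out.writeUTF("indices-and-options");          // fields every version understands
        if (wireVersion >= QUERY_AND_CLUSTER_ADDED) { // newer receivers also get the new field
            out.writeBoolean(cluster != null);        // "optional" = presence flag plus value
            if (cluster != null) {
                out.writeUTF(cluster);
            }
        }
    }

    static String read(DataInputStream in, int wireVersion) throws IOException {
        in.readUTF(); // the always-present fields
        if (wireVersion >= QUERY_AND_CLUSTER_ADDED) {
            return in.readBoolean() ? in.readUTF() : null;
        }
        return null; // peer is older: the field simply does not exist on the wire
    }

    public static void main(String[] args) throws IOException {
        for (int version : new int[] { 1, QUERY_AND_CLUSTER_ADDED }) {
            ByteArrayOutputStream buf = new ByteArrayOutputStream();
            write(new DataOutputStream(buf), version, "remote-cluster");
            String cluster = read(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())), version);
            System.out.println("wire v" + version + " -> cluster=" + cluster); // v1 -> null, v2 -> remote-cluster
        }
    }
}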
rename from x-pack/plugin/ml/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification rename to x-pack/plugin/core/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification index 7dbef291bdd46..545918cbab502 100644 --- a/x-pack/plugin/ml/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification +++ b/x-pack/plugin/core/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification @@ -5,4 +5,4 @@ # 2.0. # -org.elasticsearch.xpack.ml.MlFeatures +org.elasticsearch.xpack.core.XPackFeatures diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/ClusterStateLicenseServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/ClusterStateLicenseServiceTests.java index 99f2f66c71674..c1a8c0a10a9f7 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/ClusterStateLicenseServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/ClusterStateLicenseServiceTests.java @@ -240,11 +240,6 @@ private ClusterService mockDefaultClusterService() { return clusterService; } - @SuppressWarnings("unchecked") - private static MasterServiceTaskQueue newMockTaskQueue() { - return mock(MasterServiceTaskQueue.class); - } - private void assertRegisterValidLicense(Settings baseSettings, License.LicenseType licenseType) throws IOException { tryRegisterLicense(baseSettings, licenseType, future -> assertThat(future.actionGet().status(), equalTo(LicensesStatus.VALID))); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/internal/TrialLicenseVersionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/internal/TrialLicenseVersionTests.java index ff62fbc4d4877..cc3a3e41af63d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/internal/TrialLicenseVersionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/internal/TrialLicenseVersionTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.Version; import org.elasticsearch.test.ESTestCase; +import static org.elasticsearch.license.internal.TrialLicenseVersion.CURRENT; import static org.elasticsearch.license.internal.TrialLicenseVersion.TRIAL_VERSION_CUTOVER; import static org.elasticsearch.license.internal.TrialLicenseVersion.TRIAL_VERSION_CUTOVER_MAJOR; import static org.hamcrest.Matchers.equalTo; @@ -18,11 +19,14 @@ public class TrialLicenseVersionTests extends ESTestCase { public void testCanParseAllVersions() { for (var version : Version.getDeclaredVersions(Version.class)) { - TrialLicenseVersion parsedVersion = TrialLicenseVersion.fromXContent(version.toString()); - if (version.major < TRIAL_VERSION_CUTOVER_MAJOR) { - assertTrue(new TrialLicenseVersion(TRIAL_VERSION_CUTOVER).ableToStartNewTrialSince(parsedVersion)); - } else { - assertFalse(new TrialLicenseVersion(TRIAL_VERSION_CUTOVER).ableToStartNewTrialSince(parsedVersion)); + // Only consider versions before the cut-over; the comparison becomes meaningless after the cut-over point + if (version.onOrBefore(Version.fromId(TRIAL_VERSION_CUTOVER))) { + TrialLicenseVersion parsedVersion = TrialLicenseVersion.fromXContent(version.toString()); + if (version.major < TRIAL_VERSION_CUTOVER_MAJOR) { + assertTrue(parsedVersion.ableToStartNewTrial()); + } else { + assertFalse(parsedVersion.ableToStartNewTrial()); + } } } } @@ -33,11 +37,14 @@ public void testRoundTripParsing() { } public void testNewTrialAllowed() { - var randomVersion = new 
TrialLicenseVersion(randomNonNegativeInt()); - var subsequentVersion = new TrialLicenseVersion( - randomVersion.asInt() + randomIntBetween(0, Integer.MAX_VALUE - randomVersion.asInt()) - ); - assertFalse(randomVersion.ableToStartNewTrialSince(randomVersion)); - assertTrue(subsequentVersion.ableToStartNewTrialSince(randomVersion)); + assertTrue(new TrialLicenseVersion(randomIntBetween(7_00_00_00, 7_99_99_99)).ableToStartNewTrial()); + assertFalse(new TrialLicenseVersion(CURRENT.asInt()).ableToStartNewTrial()); + assertFalse(new TrialLicenseVersion(randomIntBetween(8_00_00_00, TRIAL_VERSION_CUTOVER)).ableToStartNewTrial()); + final int trialVersion = randomIntBetween(TRIAL_VERSION_CUTOVER, CURRENT.asInt()); + if (trialVersion < CURRENT.asInt()) { + assertTrue(new TrialLicenseVersion(trialVersion).ableToStartNewTrial()); + } else { + assertFalse(new TrialLicenseVersion(trialVersion).ableToStartNewTrial()); + } } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java index 462e1942018ee..d7abbe7dbefb4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java @@ -123,7 +123,7 @@ public void testSourceIncomplete() throws IOException { repository.start(); try (Engine.IndexCommitRef snapshotRef = shard.acquireLastIndexCommit(true)) { IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(new ShardGeneration(-1L)); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); runAsSnapshot( shard.getThreadPool(), () -> repository.snapshotShard( @@ -165,7 +165,7 @@ public void testIncrementalSnapshot() throws IOException { try (Engine.IndexCommitRef snapshotRef = shard.acquireLastIndexCommit(true)) { IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(null); SnapshotId snapshotId = new SnapshotId("test", "test"); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); runAsSnapshot( shard.getThreadPool(), () -> repository.snapshotShard( @@ -196,7 +196,7 @@ public void testIncrementalSnapshot() throws IOException { SnapshotId snapshotId = new SnapshotId("test_1", "test_1"); IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(shardGeneration); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); runAsSnapshot( shard.getThreadPool(), () -> repository.snapshotShard( @@ -227,7 +227,7 @@ public void testIncrementalSnapshot() throws IOException { SnapshotId snapshotId = new SnapshotId("test_2", "test_2"); IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(shardGeneration); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); runAsSnapshot( shard.getThreadPool(), () -> repository.snapshotShard( @@ -289,7 +289,7 @@ public void testRestoreMinimal() throws IOException { repository.start(); try (Engine.IndexCommitRef snapshotRef = shard.acquireLastIndexCommit(true)) { IndexShardSnapshotStatus indexShardSnapshotStatus = 
IndexShardSnapshotStatus.newInitializing(null); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); runAsSnapshot(shard.getThreadPool(), () -> { repository.snapshotShard( new SnapshotShardContext( @@ -306,7 +306,7 @@ public void testRestoreMinimal() throws IOException { ) ); future.actionGet(); - final PlainActionFuture finFuture = PlainActionFuture.newFuture(); + final PlainActionFuture finFuture = new PlainActionFuture<>(); final ShardGenerations shardGenerations = ShardGenerations.builder() .put(indexId, 0, indexShardSnapshotStatus.generation()) .build(); @@ -382,7 +382,7 @@ public void onFailure(Exception e) { DiscoveryNode discoveryNode = DiscoveryNodeUtils.create("node_g"); restoredShard.markAsRecovering("test from snap", new RecoveryState(restoredShard.routingEntry(), discoveryNode, null)); runAsSnapshot(shard.getThreadPool(), () -> { - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); restoredShard.restoreFromRepository(repository, future); assertTrue(future.actionGet()); }); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java index c64cb7e546861..0f3a58350c36a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java @@ -12,10 +12,10 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; @@ -170,7 +170,7 @@ public void testExecuteWithHeadersAsyncNoHeaders() throws InterruptedException { SearchRequest request = new SearchRequest("foo"); String originName = randomFrom(ClientHelper.ML_ORIGIN, ClientHelper.WATCHER_ORIGIN, ClientHelper.ROLLUP_ORIGIN); - ClientHelper.executeWithHeadersAsync(Collections.emptyMap(), originName, client, SearchAction.INSTANCE, request, listener); + ClientHelper.executeWithHeadersAsync(Collections.emptyMap(), originName, client, TransportSearchAction.TYPE, request, listener); latch.await(); } @@ -201,7 +201,7 @@ public void testExecuteWithHeadersAsyncWrongHeaders() throws InterruptedExceptio headers.put("bar", "bar"); String originName = randomFrom(ClientHelper.ML_ORIGIN, ClientHelper.WATCHER_ORIGIN, ClientHelper.ROLLUP_ORIGIN); - ClientHelper.executeWithHeadersAsync(headers, originName, client, SearchAction.INSTANCE, request, listener); + ClientHelper.executeWithHeadersAsync(headers, originName, client, TransportSearchAction.TYPE, request, listener); latch.await(); } @@ -234,7 +234,7 @@ public void testExecuteWithHeadersAsyncWithHeaders() throws Exception { headers.put("_xpack_security_authentication", "bar"); String originName = randomFrom(ClientHelper.ML_ORIGIN, ClientHelper.WATCHER_ORIGIN, ClientHelper.ROLLUP_ORIGIN); - 
ClientHelper.executeWithHeadersAsync(headers, originName, client, SearchAction.INSTANCE, request, listener); + ClientHelper.executeWithHeadersAsync(headers, originName, client, TransportSearchAction.TYPE, request, listener); latch.await(); } @@ -246,7 +246,7 @@ public void testExecuteWithHeadersNoHeaders() { when(threadPool.getThreadContext()).thenReturn(threadContext); when(client.threadPool()).thenReturn(threadPool); - PlainActionFuture searchFuture = PlainActionFuture.newFuture(); + PlainActionFuture searchFuture = new PlainActionFuture<>(); searchFuture.onResponse( new SearchResponse( InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, @@ -270,7 +270,7 @@ public void testExecuteWithHeaders() { when(threadPool.getThreadContext()).thenReturn(threadContext); when(client.threadPool()).thenReturn(threadPool); - PlainActionFuture searchFuture = PlainActionFuture.newFuture(); + PlainActionFuture searchFuture = new PlainActionFuture<>(); searchFuture.onResponse( new SearchResponse( InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, @@ -305,7 +305,7 @@ public void testExecuteWithHeadersNoSecurityHeaders() { when(threadPool.getThreadContext()).thenReturn(threadContext); when(client.threadPool()).thenReturn(threadPool); - PlainActionFuture searchFuture = PlainActionFuture.newFuture(); + PlainActionFuture searchFuture = new PlainActionFuture<>(); searchFuture.onResponse( new SearchResponse( InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/DataTiersUsageTransportActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/DataTiersUsageTransportActionTests.java deleted file mode 100644 index 93e991b0fa5ae..0000000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/DataTiersUsageTransportActionTests.java +++ /dev/null @@ -1,786 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.core; - -import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; -import org.elasticsearch.action.admin.indices.stats.CommonStats; -import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; -import org.elasticsearch.action.admin.indices.stats.IndexShardStats; -import org.elasticsearch.action.admin.indices.stats.ShardStats; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodeRole; -import org.elasticsearch.cluster.node.DiscoveryNodeUtils; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.IndexRoutingTable; -import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.cluster.routing.RoutingNode; -import org.elasticsearch.cluster.routing.RoutingNodes; -import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.cluster.routing.TestShardRouting; -import org.elasticsearch.cluster.routing.allocation.DataTier; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.PathUtils; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.shard.DocsStats; -import org.elasticsearch.index.shard.IndexLongFieldRange; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.StoreStats; -import org.elasticsearch.indices.NodeIndicesStats; -import org.elasticsearch.search.aggregations.metrics.TDigestState; -import org.elasticsearch.test.ESTestCase; - -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_CREATION_DATE; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class DataTiersUsageTransportActionTests extends ESTestCase { - - public void testCalculateMAD() { - assertThat(DataTiersUsageTransportAction.computeMedianAbsoluteDeviation(TDigestState.create(10)), equalTo(0L)); - - TDigestState sketch = TDigestState.create(randomDoubleBetween(1, 1000, false)); - sketch.add(1); - sketch.add(1); - sketch.add(2); - sketch.add(2); - sketch.add(4); - sketch.add(6); - sketch.add(9); - assertThat(DataTiersUsageTransportAction.computeMedianAbsoluteDeviation(sketch), equalTo(1L)); - } - - public void testTierIndices() { - IndexMetadata hotIndex1 = indexMetadata("hot-1", 1, 0, DataTier.DATA_HOT); - IndexMetadata hotIndex2 = indexMetadata("hot-2", 1, 0, DataTier.DATA_HOT); - IndexMetadata warmIndex1 = indexMetadata("warm-1", 1, 0, DataTier.DATA_WARM); - IndexMetadata coldIndex1 = indexMetadata("cold-1", 1, 0, DataTier.DATA_COLD); - IndexMetadata coldIndex2 = indexMetadata("cold-2", 1, 0, DataTier.DATA_COLD, DataTier.DATA_WARM); // Prefers cold over warm - IndexMetadata nonTiered = indexMetadata("non-tier", 1, 
0); // No tier - - Map indices = new HashMap<>(); - indices.put("hot-1", hotIndex1); - indices.put("hot-2", hotIndex2); - indices.put("warm-1", warmIndex1); - indices.put("cold-1", coldIndex1); - indices.put("cold-2", coldIndex2); - indices.put("non-tier", nonTiered); - - Map tiers = DataTiersUsageTransportAction.tierIndices(indices); - assertThat(tiers.size(), equalTo(5)); - assertThat(tiers.get("hot-1"), equalTo(DataTier.DATA_HOT)); - assertThat(tiers.get("hot-2"), equalTo(DataTier.DATA_HOT)); - assertThat(tiers.get("warm-1"), equalTo(DataTier.DATA_WARM)); - assertThat(tiers.get("cold-1"), equalTo(DataTier.DATA_COLD)); - assertThat(tiers.get("cold-2"), equalTo(DataTier.DATA_COLD)); - assertThat(tiers.get("non-tier"), nullValue()); - } - - public void testCalculateStatsNoTiers() { - // Nodes: 0 Tiered Nodes, 1 Data Node - DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); - DiscoveryNode leader = newNode(0, DiscoveryNodeRole.MASTER_ROLE); - discoBuilder.add(leader); - discoBuilder.masterNodeId(leader.getId()); - - DiscoveryNode dataNode1 = newNode(1, DiscoveryNodeRole.DATA_ROLE); - discoBuilder.add(dataNode1); - - discoBuilder.localNodeId(dataNode1.getId()); - - // Indices: 1 Regular index - Metadata.Builder metadataBuilder = Metadata.builder(); - RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); - - IndexMetadata index1 = indexMetadata("index_1", 3, 1); - metadataBuilder.put(index1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index1.getIndex()); - routeTestShardToNodes(index1, 0, indexRoutingTableBuilder, dataNode1); - routeTestShardToNodes(index1, 1, indexRoutingTableBuilder, dataNode1); - routeTestShardToNodes(index1, 2, indexRoutingTableBuilder, dataNode1); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - // Cluster State and create stats responses - ClusterState clusterState = ClusterState.builder(new ClusterName("test")) - .nodes(discoBuilder) - .metadata(metadataBuilder) - .routingTable(routingTableBuilder.build()) - .build(); - - long byteSize = randomLongBetween(1024L, 1024L * 1024L * 1024L * 30L); // 1 KB to 30 GB - long docCount = randomLongBetween(100L, 100000000L); // one hundred to one hundred million - List nodeStatsList = buildNodeStats(clusterState, byteSize, docCount); - - // Calculate usage - Map indexByTier = DataTiersUsageTransportAction.tierIndices(clusterState.metadata().indices()); - Map tierSpecificStats = DataTiersUsageTransportAction.calculateStats( - nodeStatsList, - indexByTier, - clusterState.getRoutingNodes() - ); - - // Verify - No results when no tiers present - assertThat(tierSpecificStats.size(), is(0)); - } - - public void testCalculateStatsTieredNodesOnly() { - // Nodes: 1 Data, 1 Hot, 1 Warm, 1 Cold, 1 Frozen - DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); - DiscoveryNode leader = newNode(0, DiscoveryNodeRole.MASTER_ROLE); - discoBuilder.add(leader); - discoBuilder.masterNodeId(leader.getId()); - - DiscoveryNode dataNode1 = newNode(1, DiscoveryNodeRole.DATA_ROLE); - discoBuilder.add(dataNode1); - DiscoveryNode hotNode1 = newNode(2, DiscoveryNodeRole.DATA_HOT_NODE_ROLE); - discoBuilder.add(hotNode1); - DiscoveryNode warmNode1 = newNode(3, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); - discoBuilder.add(warmNode1); - DiscoveryNode coldNode1 = newNode(4, DiscoveryNodeRole.DATA_COLD_NODE_ROLE); - discoBuilder.add(coldNode1); - DiscoveryNode frozenNode1 = newNode(5, DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE); - 
discoBuilder.add(frozenNode1); - - discoBuilder.localNodeId(dataNode1.getId()); - - // Indices: 1 Regular index, not hosted on any tiers - Metadata.Builder metadataBuilder = Metadata.builder(); - RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); - - IndexMetadata index1 = indexMetadata("index_1", 3, 1); - metadataBuilder.put(index1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index1.getIndex()); - routeTestShardToNodes(index1, 0, indexRoutingTableBuilder, dataNode1); - routeTestShardToNodes(index1, 1, indexRoutingTableBuilder, dataNode1); - routeTestShardToNodes(index1, 2, indexRoutingTableBuilder, dataNode1); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - // Cluster State and create stats responses - ClusterState clusterState = ClusterState.builder(new ClusterName("test")) - .nodes(discoBuilder) - .metadata(metadataBuilder) - .routingTable(routingTableBuilder.build()) - .build(); - - long byteSize = randomLongBetween(1024L, 1024L * 1024L * 1024L * 30L); // 1 KB to 30 GB - long docCount = randomLongBetween(100L, 100000000L); // one hundred to one hundred million - List nodeStatsList = buildNodeStats(clusterState, byteSize, docCount); - - // Calculate usage - Map indexByTier = DataTiersUsageTransportAction.tierIndices(clusterState.metadata().indices()); - Map tierSpecificStats = DataTiersUsageTransportAction.calculateStats( - nodeStatsList, - indexByTier, - clusterState.getRoutingNodes() - ); - - // Verify - Results are present but they lack index numbers because none are tiered - assertThat(tierSpecificStats.size(), is(4)); - - DataTiersFeatureSetUsage.TierSpecificStats hotStats = tierSpecificStats.get(DataTier.DATA_HOT); - assertThat(hotStats, is(notNullValue())); - assertThat(hotStats.nodeCount, is(1)); - assertThat(hotStats.indexCount, is(0)); - assertThat(hotStats.totalShardCount, is(0)); - assertThat(hotStats.docCount, is(0L)); - assertThat(hotStats.totalByteCount, is(0L)); - assertThat(hotStats.primaryShardCount, is(0)); - assertThat(hotStats.primaryByteCount, is(0L)); - assertThat(hotStats.primaryByteCountMedian, is(0L)); // All same size - assertThat(hotStats.primaryShardBytesMAD, is(0L)); // All same size - - DataTiersFeatureSetUsage.TierSpecificStats warmStats = tierSpecificStats.get(DataTier.DATA_WARM); - assertThat(warmStats, is(notNullValue())); - assertThat(warmStats.nodeCount, is(1)); - assertThat(warmStats.indexCount, is(0)); - assertThat(warmStats.totalShardCount, is(0)); - assertThat(warmStats.docCount, is(0L)); - assertThat(warmStats.totalByteCount, is(0L)); - assertThat(warmStats.primaryShardCount, is(0)); - assertThat(warmStats.primaryByteCount, is(0L)); - assertThat(warmStats.primaryByteCountMedian, is(0L)); // All same size - assertThat(warmStats.primaryShardBytesMAD, is(0L)); // All same size - - DataTiersFeatureSetUsage.TierSpecificStats coldStats = tierSpecificStats.get(DataTier.DATA_COLD); - assertThat(coldStats, is(notNullValue())); - assertThat(coldStats.nodeCount, is(1)); - assertThat(coldStats.indexCount, is(0)); - assertThat(coldStats.totalShardCount, is(0)); - assertThat(coldStats.docCount, is(0L)); - assertThat(coldStats.totalByteCount, is(0L)); - assertThat(coldStats.primaryShardCount, is(0)); - assertThat(coldStats.primaryByteCount, is(0L)); - assertThat(coldStats.primaryByteCountMedian, is(0L)); // All same size - assertThat(coldStats.primaryShardBytesMAD, is(0L)); // All same size - - DataTiersFeatureSetUsage.TierSpecificStats 
frozenStats = tierSpecificStats.get(DataTier.DATA_FROZEN); - assertThat(frozenStats, is(notNullValue())); - assertThat(frozenStats.nodeCount, is(1)); - assertThat(frozenStats.indexCount, is(0)); - assertThat(frozenStats.totalShardCount, is(0)); - assertThat(frozenStats.docCount, is(0L)); - assertThat(frozenStats.totalByteCount, is(0L)); - assertThat(frozenStats.primaryShardCount, is(0)); - assertThat(frozenStats.primaryByteCount, is(0L)); - assertThat(frozenStats.primaryByteCountMedian, is(0L)); // All same size - assertThat(frozenStats.primaryShardBytesMAD, is(0L)); // All same size - } - - public void testCalculateStatsTieredIndicesOnly() { - // Nodes: 3 Data, 0 Tiered - Only hosting indices on generic data nodes - int nodeId = 0; - DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); - DiscoveryNode leader = newNode(nodeId++, DiscoveryNodeRole.MASTER_ROLE); - discoBuilder.add(leader); - discoBuilder.masterNodeId(leader.getId()); - - DiscoveryNode dataNode1 = newNode(nodeId++, DiscoveryNodeRole.DATA_ROLE); - discoBuilder.add(dataNode1); - DiscoveryNode dataNode2 = newNode(nodeId++, DiscoveryNodeRole.DATA_ROLE); - discoBuilder.add(dataNode2); - DiscoveryNode dataNode3 = newNode(nodeId, DiscoveryNodeRole.DATA_ROLE); - discoBuilder.add(dataNode3); - - discoBuilder.localNodeId(dataNode1.getId()); - - // Indices: 1 Hot index, 2 Warm indices, 3 Cold indices - Metadata.Builder metadataBuilder = Metadata.builder(); - RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); - - IndexMetadata hotIndex1 = indexMetadata("hot_index_1", 3, 1, DataTier.DATA_HOT); - metadataBuilder.put(hotIndex1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(hotIndex1.getIndex()); - routeTestShardToNodes(hotIndex1, 0, indexRoutingTableBuilder, dataNode1, dataNode2); - routeTestShardToNodes(hotIndex1, 1, indexRoutingTableBuilder, dataNode2, dataNode3); - routeTestShardToNodes(hotIndex1, 2, indexRoutingTableBuilder, dataNode3, dataNode1); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - IndexMetadata warmIndex1 = indexMetadata("warm_index_1", 1, 1, DataTier.DATA_WARM); - metadataBuilder.put(warmIndex1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(warmIndex1.getIndex()); - routeTestShardToNodes(warmIndex1, 0, indexRoutingTableBuilder, dataNode1, dataNode2); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - IndexMetadata warmIndex2 = indexMetadata("warm_index_2", 1, 1, DataTier.DATA_WARM); - metadataBuilder.put(warmIndex2, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(warmIndex2.getIndex()); - routeTestShardToNodes(warmIndex2, 0, indexRoutingTableBuilder, dataNode3, dataNode1); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - IndexMetadata coldIndex1 = indexMetadata("cold_index_1", 1, 0, DataTier.DATA_COLD); - metadataBuilder.put(coldIndex1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(coldIndex1.getIndex()); - routeTestShardToNodes(coldIndex1, 0, indexRoutingTableBuilder, dataNode1); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - IndexMetadata coldIndex2 = indexMetadata("cold_index_2", 1, 0, DataTier.DATA_COLD); - metadataBuilder.put(coldIndex2, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder 
indexRoutingTableBuilder = IndexRoutingTable.builder(coldIndex2.getIndex()); - routeTestShardToNodes(coldIndex2, 0, indexRoutingTableBuilder, dataNode2); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - IndexMetadata coldIndex3 = indexMetadata("cold_index_3", 1, 0, DataTier.DATA_COLD); - metadataBuilder.put(coldIndex3, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(coldIndex3.getIndex()); - routeTestShardToNodes(coldIndex3, 0, indexRoutingTableBuilder, dataNode3); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - // Cluster State and create stats responses - ClusterState clusterState = ClusterState.builder(new ClusterName("test")) - .nodes(discoBuilder) - .metadata(metadataBuilder) - .routingTable(routingTableBuilder.build()) - .build(); - - long byteSize = randomLongBetween(1024L, 1024L * 1024L * 1024L * 30L); // 1 KB to 30 GB - long docCount = randomLongBetween(100L, 100000000L); // one hundred to one hundred million - List nodeStatsList = buildNodeStats(clusterState, byteSize, docCount); - - // Calculate usage - Map indexByTier = DataTiersUsageTransportAction.tierIndices(clusterState.metadata().indices()); - Map tierSpecificStats = DataTiersUsageTransportAction.calculateStats( - nodeStatsList, - indexByTier, - clusterState.getRoutingNodes() - ); - - // Verify - Index stats exist for the tiers, but no tiered nodes are found - assertThat(tierSpecificStats.size(), is(3)); - - DataTiersFeatureSetUsage.TierSpecificStats hotStats = tierSpecificStats.get(DataTier.DATA_HOT); - assertThat(hotStats, is(notNullValue())); - assertThat(hotStats.nodeCount, is(0)); - assertThat(hotStats.indexCount, is(1)); - assertThat(hotStats.totalShardCount, is(6)); - assertThat(hotStats.docCount, is(6 * docCount)); - assertThat(hotStats.totalByteCount, is(6 * byteSize)); - assertThat(hotStats.primaryShardCount, is(3)); - assertThat(hotStats.primaryByteCount, is(3 * byteSize)); - assertThat(hotStats.primaryByteCountMedian, is(byteSize)); // All same size - assertThat(hotStats.primaryShardBytesMAD, is(0L)); // All same size - - DataTiersFeatureSetUsage.TierSpecificStats warmStats = tierSpecificStats.get(DataTier.DATA_WARM); - assertThat(warmStats, is(notNullValue())); - assertThat(warmStats.nodeCount, is(0)); - assertThat(warmStats.indexCount, is(2)); - assertThat(warmStats.totalShardCount, is(4)); - assertThat(warmStats.docCount, is(4 * docCount)); - assertThat(warmStats.totalByteCount, is(4 * byteSize)); - assertThat(warmStats.primaryShardCount, is(2)); - assertThat(warmStats.primaryByteCount, is(2 * byteSize)); - assertThat(warmStats.primaryByteCountMedian, is(byteSize)); // All same size - assertThat(warmStats.primaryShardBytesMAD, is(0L)); // All same size - - DataTiersFeatureSetUsage.TierSpecificStats coldStats = tierSpecificStats.get(DataTier.DATA_COLD); - assertThat(coldStats, is(notNullValue())); - assertThat(coldStats.nodeCount, is(0)); - assertThat(coldStats.indexCount, is(3)); - assertThat(coldStats.totalShardCount, is(3)); - assertThat(coldStats.docCount, is(3 * docCount)); - assertThat(coldStats.totalByteCount, is(3 * byteSize)); - assertThat(coldStats.primaryShardCount, is(3)); - assertThat(coldStats.primaryByteCount, is(3 * byteSize)); - assertThat(coldStats.primaryByteCountMedian, is(byteSize)); // All same size - assertThat(coldStats.primaryShardBytesMAD, is(0L)); // All same size - } - - public void testCalculateStatsReasonableCase() { - // Nodes: 3 Hot, 5 Warm, 1 Cold - int nodeId = 0; 
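Note on the shard arithmetic asserted in the removed tests above: an index with p primaries and r replicas per primary contributes p * (1 + r) shard copies, which is where factors such as 6 * docCount (three primaries with one replica each) and 3 * byteSize (primaries only) come from. A tiny self-contained check of those expectations:

class TierShardMath {
    public static void main(String[] args) {
        // hot_index_1 above: 3 primaries, 1 replica per primary, every shard the same size.
        int primaries = 3, replicasPerPrimary = 1;
        long byteSize = 1024L, docCount = 100L;
        int totalShardCopies = primaries * (1 + replicasPerPrimary);
        System.out.println("total shards  = " + totalShardCopies);            // 6
        System.out.println("total bytes   = " + totalShardCopies * byteSize); // 6 * byteSize
        System.out.println("total docs    = " + totalShardCopies * docCount); // 6 * docCount
        System.out.println("primary bytes = " + primaries * byteSize);        // 3 * byteSize
    }
}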
- DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); - DiscoveryNode leader = newNode(nodeId++, DiscoveryNodeRole.MASTER_ROLE); - discoBuilder.add(leader); - discoBuilder.masterNodeId(leader.getId()); - - DiscoveryNode hotNode1 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE); - discoBuilder.add(hotNode1); - DiscoveryNode hotNode2 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE); - discoBuilder.add(hotNode2); - DiscoveryNode hotNode3 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE); - discoBuilder.add(hotNode3); - DiscoveryNode warmNode1 = newNode(nodeId++, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); - discoBuilder.add(warmNode1); - DiscoveryNode warmNode2 = newNode(nodeId++, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); - discoBuilder.add(warmNode2); - DiscoveryNode warmNode3 = newNode(nodeId++, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); - discoBuilder.add(warmNode3); - DiscoveryNode warmNode4 = newNode(nodeId++, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); - discoBuilder.add(warmNode4); - DiscoveryNode warmNode5 = newNode(nodeId++, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); - discoBuilder.add(warmNode5); - DiscoveryNode coldNode1 = newNode(nodeId, DiscoveryNodeRole.DATA_COLD_NODE_ROLE); - discoBuilder.add(coldNode1); - - discoBuilder.localNodeId(hotNode1.getId()); - - // Indices: 1 Hot index, 2 Warm indices, 3 Cold indices - Metadata.Builder metadataBuilder = Metadata.builder(); - RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); - - IndexMetadata hotIndex1 = indexMetadata("hot_index_1", 3, 1, DataTier.DATA_HOT); - metadataBuilder.put(hotIndex1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(hotIndex1.getIndex()); - routeTestShardToNodes(hotIndex1, 0, indexRoutingTableBuilder, hotNode1, hotNode2); - routeTestShardToNodes(hotIndex1, 1, indexRoutingTableBuilder, hotNode2, hotNode3); - routeTestShardToNodes(hotIndex1, 2, indexRoutingTableBuilder, hotNode3, hotNode1); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - IndexMetadata warmIndex1 = indexMetadata("warm_index_1", 1, 1, DataTier.DATA_WARM); - metadataBuilder.put(warmIndex1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(warmIndex1.getIndex()); - routeTestShardToNodes(warmIndex1, 0, indexRoutingTableBuilder, warmNode1, warmNode2); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - IndexMetadata warmIndex2 = indexMetadata("warm_index_2", 1, 1, DataTier.DATA_WARM); - metadataBuilder.put(warmIndex2, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(warmIndex2.getIndex()); - routeTestShardToNodes(warmIndex2, 0, indexRoutingTableBuilder, warmNode3, warmNode4); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - IndexMetadata coldIndex1 = indexMetadata("cold_index_1", 1, 0, DataTier.DATA_COLD); - metadataBuilder.put(coldIndex1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(coldIndex1.getIndex()); - routeTestShardToNodes(coldIndex1, 0, indexRoutingTableBuilder, coldNode1); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - IndexMetadata coldIndex2 = indexMetadata("cold_index_2", 1, 0, DataTier.DATA_COLD); - metadataBuilder.put(coldIndex2, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = 
IndexRoutingTable.builder(coldIndex2.getIndex()); - routeTestShardToNodes(coldIndex2, 0, indexRoutingTableBuilder, coldNode1); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - IndexMetadata coldIndex3 = indexMetadata("cold_index_3", 1, 0, DataTier.DATA_COLD); - metadataBuilder.put(coldIndex3, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(coldIndex3.getIndex()); - routeTestShardToNodes(coldIndex3, 0, indexRoutingTableBuilder, coldNode1); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - // Cluster State and create stats responses - ClusterState clusterState = ClusterState.builder(new ClusterName("test")) - .nodes(discoBuilder) - .metadata(metadataBuilder) - .routingTable(routingTableBuilder.build()) - .build(); - - long byteSize = randomLongBetween(1024L, 1024L * 1024L * 1024L * 30L); // 1 KB to 30 GB - long docCount = randomLongBetween(100L, 100000000L); // one hundred to one hundred million - List nodeStatsList = buildNodeStats(clusterState, byteSize, docCount); - - // Calculate usage - Map indexByTier = DataTiersUsageTransportAction.tierIndices(clusterState.metadata().indices()); - Map tierSpecificStats = DataTiersUsageTransportAction.calculateStats( - nodeStatsList, - indexByTier, - clusterState.getRoutingNodes() - ); - - // Verify - Node and Index stats are both collected - assertThat(tierSpecificStats.size(), is(3)); - - DataTiersFeatureSetUsage.TierSpecificStats hotStats = tierSpecificStats.get(DataTier.DATA_HOT); - assertThat(hotStats, is(notNullValue())); - assertThat(hotStats.nodeCount, is(3)); - assertThat(hotStats.indexCount, is(1)); - assertThat(hotStats.totalShardCount, is(6)); - assertThat(hotStats.docCount, is(6 * docCount)); - assertThat(hotStats.totalByteCount, is(6 * byteSize)); - assertThat(hotStats.primaryShardCount, is(3)); - assertThat(hotStats.primaryByteCount, is(3 * byteSize)); - assertThat(hotStats.primaryByteCountMedian, is(byteSize)); // All same size - assertThat(hotStats.primaryShardBytesMAD, is(0L)); // All same size - - DataTiersFeatureSetUsage.TierSpecificStats warmStats = tierSpecificStats.get(DataTier.DATA_WARM); - assertThat(warmStats, is(notNullValue())); - assertThat(warmStats.nodeCount, is(5)); - assertThat(warmStats.indexCount, is(2)); - assertThat(warmStats.totalShardCount, is(4)); - assertThat(warmStats.docCount, is(4 * docCount)); - assertThat(warmStats.totalByteCount, is(4 * byteSize)); - assertThat(warmStats.primaryShardCount, is(2)); - assertThat(warmStats.primaryByteCount, is(2 * byteSize)); - assertThat(warmStats.primaryByteCountMedian, is(byteSize)); // All same size - assertThat(warmStats.primaryShardBytesMAD, is(0L)); // All same size - - DataTiersFeatureSetUsage.TierSpecificStats coldStats = tierSpecificStats.get(DataTier.DATA_COLD); - assertThat(coldStats, is(notNullValue())); - assertThat(coldStats.nodeCount, is(1)); - assertThat(coldStats.indexCount, is(3)); - assertThat(coldStats.totalShardCount, is(3)); - assertThat(coldStats.docCount, is(3 * docCount)); - assertThat(coldStats.totalByteCount, is(3 * byteSize)); - assertThat(coldStats.primaryShardCount, is(3)); - assertThat(coldStats.primaryByteCount, is(3 * byteSize)); - assertThat(coldStats.primaryByteCountMedian, is(byteSize)); // All same size - assertThat(coldStats.primaryShardBytesMAD, is(0L)); // All same size - } - - public void testCalculateStatsMixedTiers() { - // Nodes: 3 Hot+Warm - Nodes that are marked as part of multiple tiers - int nodeId = 0; - 
DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); - DiscoveryNode leader = newNode(nodeId++, DiscoveryNodeRole.MASTER_ROLE); - discoBuilder.add(leader); - discoBuilder.masterNodeId(leader.getId()); - - DiscoveryNode mixedNode1 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); - discoBuilder.add(mixedNode1); - DiscoveryNode mixedNode2 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); - discoBuilder.add(mixedNode2); - DiscoveryNode mixedNode3 = newNode(nodeId, DiscoveryNodeRole.DATA_HOT_NODE_ROLE, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); - discoBuilder.add(mixedNode3); - - discoBuilder.localNodeId(mixedNode1.getId()); - - // Indices: 1 Hot index, 2 Warm indices - Metadata.Builder metadataBuilder = Metadata.builder(); - RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); - - IndexMetadata hotIndex1 = indexMetadata("hot_index_1", 3, 1, DataTier.DATA_HOT); - metadataBuilder.put(hotIndex1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(hotIndex1.getIndex()); - routeTestShardToNodes(hotIndex1, 0, indexRoutingTableBuilder, mixedNode1, mixedNode2); - routeTestShardToNodes(hotIndex1, 1, indexRoutingTableBuilder, mixedNode3, mixedNode1); - routeTestShardToNodes(hotIndex1, 2, indexRoutingTableBuilder, mixedNode2, mixedNode3); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - IndexMetadata warmIndex1 = indexMetadata("warm_index_1", 1, 1, DataTier.DATA_WARM); - metadataBuilder.put(warmIndex1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(warmIndex1.getIndex()); - routeTestShardToNodes(warmIndex1, 0, indexRoutingTableBuilder, mixedNode1, mixedNode2); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - IndexMetadata warmIndex2 = indexMetadata("warm_index_2", 1, 1, DataTier.DATA_WARM); - metadataBuilder.put(warmIndex2, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(warmIndex2.getIndex()); - routeTestShardToNodes(warmIndex2, 0, indexRoutingTableBuilder, mixedNode3, mixedNode1); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - // Cluster State and create stats responses - ClusterState clusterState = ClusterState.builder(new ClusterName("test")) - .nodes(discoBuilder) - .metadata(metadataBuilder) - .routingTable(routingTableBuilder.build()) - .build(); - - long byteSize = randomLongBetween(1024L, 1024L * 1024L * 1024L * 30L); // 1 KB to 30 GB - long docCount = randomLongBetween(100L, 100000000L); // one hundred to one hundred million - List nodeStatsList = buildNodeStats(clusterState, byteSize, docCount); - - // Calculate usage - Map indexByTier = DataTiersUsageTransportAction.tierIndices(clusterState.metadata().indices()); - Map tierSpecificStats = DataTiersUsageTransportAction.calculateStats( - nodeStatsList, - indexByTier, - clusterState.getRoutingNodes() - ); - - // Verify - Index stats are separated by their preferred tier, instead of counted - // toward multiple tiers based on their current routing. Nodes are counted for each tier they are in. 
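Note on the attribution rule this removed mixed-tier test pinned down: an index is counted once, under the first tier of its tier preference, while a node is counted under every data-tier role it carries. A minimal sketch of that bookkeeping with plain collections (tier and node names are illustrative only):

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

class TierAttributionSketch {
    public static void main(String[] args) {
        Map<String, List<String>> indexTierPreference = Map.of(
            "hot_index_1", List.of("data_hot"),
            "warm_index_1", List.of("data_warm", "data_hot") // counts as warm even if routed to a hot node
        );
        Map<String, Set<String>> nodeRoles = Map.of(
            "node_1", Set.of("data_hot", "data_warm"),
            "node_2", Set.of("data_hot", "data_warm")
        );

        // Indices: attributed to the first (preferred) tier only.
        Map<String, Integer> indexCountPerTier = new HashMap<>();
        indexTierPreference.values()
            .forEach(prefs -> indexCountPerTier.merge(prefs.get(0), 1, Integer::sum));

        // Nodes: attributed to every tier role they carry.
        Map<String, Integer> nodeCountPerTier = new HashMap<>();
        nodeRoles.values()
            .forEach(roles -> roles.forEach(r -> nodeCountPerTier.merge(r, 1, Integer::sum)));

        System.out.println(indexCountPerTier); // {data_hot=1, data_warm=1}
        System.out.println(nodeCountPerTier);  // {data_hot=2, data_warm=2}
    }
}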
- assertThat(tierSpecificStats.size(), is(2)); - - DataTiersFeatureSetUsage.TierSpecificStats hotStats = tierSpecificStats.get(DataTier.DATA_HOT); - assertThat(hotStats, is(notNullValue())); - assertThat(hotStats.nodeCount, is(3)); - assertThat(hotStats.indexCount, is(1)); - assertThat(hotStats.totalShardCount, is(6)); - assertThat(hotStats.docCount, is(6 * docCount)); - assertThat(hotStats.totalByteCount, is(6 * byteSize)); - assertThat(hotStats.primaryShardCount, is(3)); - assertThat(hotStats.primaryByteCount, is(3 * byteSize)); - assertThat(hotStats.primaryByteCountMedian, is(byteSize)); // All same size - assertThat(hotStats.primaryShardBytesMAD, is(0L)); // All same size - - DataTiersFeatureSetUsage.TierSpecificStats warmStats = tierSpecificStats.get(DataTier.DATA_WARM); - assertThat(warmStats, is(notNullValue())); - assertThat(warmStats.nodeCount, is(3)); - assertThat(warmStats.indexCount, is(2)); - assertThat(warmStats.totalShardCount, is(4)); - assertThat(warmStats.docCount, is(4 * docCount)); - assertThat(warmStats.totalByteCount, is(4 * byteSize)); - assertThat(warmStats.primaryShardCount, is(2)); - assertThat(warmStats.primaryByteCount, is(2 * byteSize)); - assertThat(warmStats.primaryByteCountMedian, is(byteSize)); // All same size - assertThat(warmStats.primaryShardBytesMAD, is(0L)); // All same size - } - - public void testCalculateStatsStuckInWrongTier() { - // Nodes: 3 Hot, 0 Warm - Emulating indices stuck on non-preferred tiers - int nodeId = 0; - DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); - DiscoveryNode leader = newNode(nodeId++, DiscoveryNodeRole.MASTER_ROLE); - discoBuilder.add(leader); - discoBuilder.masterNodeId(leader.getId()); - - DiscoveryNode hotNode1 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE); - discoBuilder.add(hotNode1); - DiscoveryNode hotNode2 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE); - discoBuilder.add(hotNode2); - DiscoveryNode hotNode3 = newNode(nodeId, DiscoveryNodeRole.DATA_HOT_NODE_ROLE); - discoBuilder.add(hotNode3); - - discoBuilder.localNodeId(hotNode1.getId()); - - // Indices: 1 Hot index, 1 Warm index (Warm index is allocated to less preferred hot node because warm nodes are missing) - Metadata.Builder metadataBuilder = Metadata.builder(); - RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); - - IndexMetadata hotIndex1 = indexMetadata("hot_index_1", 3, 1, DataTier.DATA_HOT); - metadataBuilder.put(hotIndex1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(hotIndex1.getIndex()); - routeTestShardToNodes(hotIndex1, 0, indexRoutingTableBuilder, hotNode1, hotNode2); - routeTestShardToNodes(hotIndex1, 1, indexRoutingTableBuilder, hotNode3, hotNode1); - routeTestShardToNodes(hotIndex1, 2, indexRoutingTableBuilder, hotNode2, hotNode3); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - IndexMetadata warmIndex1 = indexMetadata("warm_index_1", 1, 1, DataTier.DATA_WARM, DataTier.DATA_HOT); - metadataBuilder.put(warmIndex1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(warmIndex1.getIndex()); - routeTestShardToNodes(warmIndex1, 0, indexRoutingTableBuilder, hotNode1, hotNode2); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - // Cluster State and create stats responses - ClusterState clusterState = ClusterState.builder(new ClusterName("test")) - .nodes(discoBuilder) - .metadata(metadataBuilder) - 
.routingTable(routingTableBuilder.build()) - .build(); - - long byteSize = randomLongBetween(1024L, 1024L * 1024L * 1024L * 30L); // 1 KB to 30 GB - long docCount = randomLongBetween(100L, 100000000L); // one hundred to one hundred million - List nodeStatsList = buildNodeStats(clusterState, byteSize, docCount); - - // Calculate usage - Map indexByTier = DataTiersUsageTransportAction.tierIndices(clusterState.metadata().indices()); - Map tierSpecificStats = DataTiersUsageTransportAction.calculateStats( - nodeStatsList, - indexByTier, - clusterState.getRoutingNodes() - ); - - // Verify - Warm indices are still calculated separately from Hot ones, despite Warm nodes missing - assertThat(tierSpecificStats.size(), is(2)); - - DataTiersFeatureSetUsage.TierSpecificStats hotStats = tierSpecificStats.get(DataTier.DATA_HOT); - assertThat(hotStats, is(notNullValue())); - assertThat(hotStats.nodeCount, is(3)); - assertThat(hotStats.indexCount, is(1)); - assertThat(hotStats.totalShardCount, is(6)); - assertThat(hotStats.docCount, is(6 * docCount)); - assertThat(hotStats.totalByteCount, is(6 * byteSize)); - assertThat(hotStats.primaryShardCount, is(3)); - assertThat(hotStats.primaryByteCount, is(3 * byteSize)); - assertThat(hotStats.primaryByteCountMedian, is(byteSize)); // All same size - assertThat(hotStats.primaryShardBytesMAD, is(0L)); // All same size - - DataTiersFeatureSetUsage.TierSpecificStats warmStats = tierSpecificStats.get(DataTier.DATA_WARM); - assertThat(warmStats, is(notNullValue())); - assertThat(warmStats.nodeCount, is(0)); - assertThat(warmStats.indexCount, is(1)); - assertThat(warmStats.totalShardCount, is(2)); - assertThat(warmStats.docCount, is(2 * docCount)); - assertThat(warmStats.totalByteCount, is(2 * byteSize)); - assertThat(warmStats.primaryShardCount, is(1)); - assertThat(warmStats.primaryByteCount, is(byteSize)); - assertThat(warmStats.primaryByteCountMedian, is(byteSize)); // All same size - assertThat(warmStats.primaryShardBytesMAD, is(0L)); // All same size - } - - private static DiscoveryNode newNode(int nodeId, DiscoveryNodeRole... roles) { - return DiscoveryNodeUtils.builder("node_" + nodeId).roles(Set.of(roles)).build(); - } - - private static IndexMetadata indexMetadata(String indexName, int numberOfShards, int numberOfReplicas, String... dataTierPrefs) { - Settings.Builder settingsBuilder = indexSettings(IndexVersion.current(), numberOfShards, numberOfReplicas).put( - SETTING_CREATION_DATE, - System.currentTimeMillis() - ); - - if (dataTierPrefs.length > 1) { - StringBuilder tierBuilder = new StringBuilder(dataTierPrefs[0]); - for (int idx = 1; idx < dataTierPrefs.length; idx++) { - tierBuilder.append(',').append(dataTierPrefs[idx]); - } - settingsBuilder.put(DataTier.TIER_PREFERENCE, tierBuilder.toString()); - } else if (dataTierPrefs.length == 1) { - settingsBuilder.put(DataTier.TIER_PREFERENCE, dataTierPrefs[0]); - } - - return IndexMetadata.builder(indexName).settings(settingsBuilder.build()).timestampRange(IndexLongFieldRange.UNKNOWN).build(); - } - - private static void routeTestShardToNodes( - IndexMetadata index, - int shard, - IndexRoutingTable.Builder indexRoutingTableBuilder, - DiscoveryNode... 
nodes - ) { - ShardId shardId = new ShardId(index.getIndex(), shard); - IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); - boolean primary = true; - for (DiscoveryNode node : nodes) { - indexShardRoutingBuilder.addShard( - TestShardRouting.newShardRouting(shardId, node.getId(), null, primary, ShardRoutingState.STARTED) - ); - primary = false; - } - indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder); - } - - private List buildNodeStats(ClusterState clusterState, long bytesPerShard, long docsPerShard) { - DiscoveryNodes nodes = clusterState.getNodes(); - RoutingNodes routingNodes = clusterState.getRoutingNodes(); - List nodeStatsList = new ArrayList<>(); - for (DiscoveryNode node : nodes) { - RoutingNode routingNode = routingNodes.node(node.getId()); - if (routingNode == null) { - continue; - } - Map> indexStats = new HashMap<>(); - for (ShardRouting shardRouting : routingNode) { - ShardId shardId = shardRouting.shardId(); - ShardStats shardStat = shardStat(bytesPerShard, docsPerShard, shardRouting); - IndexShardStats shardStats = new IndexShardStats(shardId, new ShardStats[] { shardStat }); - indexStats.computeIfAbsent(shardId.getIndex(), k -> new ArrayList<>()).add(shardStats); - } - NodeIndicesStats nodeIndexStats = new NodeIndicesStats(new CommonStats(), Collections.emptyMap(), indexStats, true); - nodeStatsList.add(mockNodeStats(node, nodeIndexStats)); - } - return nodeStatsList; - } - - private static ShardStats shardStat(long byteCount, long docCount, ShardRouting routing) { - StoreStats storeStats = new StoreStats(randomNonNegativeLong(), byteCount, 0L); - DocsStats docsStats = new DocsStats(docCount, 0L, byteCount); - - CommonStats commonStats = new CommonStats(CommonStatsFlags.ALL); - commonStats.getStore().add(storeStats); - commonStats.getDocs().add(docsStats); - - Path fakePath = PathUtils.get("test/dir/" + routing.shardId().getIndex().getUUID() + "/" + routing.shardId().id()); - ShardPath fakeShardPath = new ShardPath(false, fakePath, fakePath, routing.shardId()); - - return new ShardStats(routing, fakeShardPath, commonStats, null, null, null, false, 0); - } - - private static NodeStats mockNodeStats(DiscoveryNode node, NodeIndicesStats indexStats) { - NodeStats stats = mock(NodeStats.class); - when(stats.getNode()).thenReturn(node); - when(stats.getIndices()).thenReturn(indexStats); - return stats; - } -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java index c6a2e6ca18813..92feb4de81aa3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java @@ -12,11 +12,15 @@ import org.elasticsearch.transport.RemoteClusterPortSettings; import java.security.NoSuchAlgorithmException; +import java.security.Provider; +import java.security.Security; import java.util.List; +import java.util.Locale; import javax.crypto.SecretKeyFactory; +import javax.net.ssl.SSLContext; -import static org.hamcrest.Matchers.contains; +import static org.elasticsearch.xpack.core.XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; @@ -69,13 +73,29 @@ public void testDefaultPasswordHashingAlgorithmInFips() { } } - public void 
testDefaultSupportedProtocols() { - if (inFipsJvm()) { - assertThat(XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS, contains("TLSv1.2", "TLSv1.1")); - } else { - assertThat(XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS, contains("TLSv1.3", "TLSv1.2", "TLSv1.1")); - + public void testDefaultSupportedProtocols() throws NoSuchAlgorithmException { + // TLSv1.3 is recommended but is not required for FIPS-140-3 compliance; government-only applications must use TLS 1.2 or higher + // https://www.gsa.gov/system/files?file=SSL-TLS-Implementation-%5BCIO-IT-Security-14-69-Rev-7%5D-06-12-2023.pdf + List<String> defaultSupportedProtocols = DEFAULT_SUPPORTED_PROTOCOLS.stream().map(s -> s.toLowerCase(Locale.ROOT)).toList(); + int i = 0; + Provider[] providers = Security.getProviders(); + for (Provider provider : providers) { + for (Provider.Service service : provider.getServices()) { + if ("SSLContext".equalsIgnoreCase(service.getType())) { + if (defaultSupportedProtocols.contains(service.getAlgorithm().toLowerCase(Locale.ROOT))) { + i++; + if (inFipsJvm()) { + // ensure bouncy castle is the provider + assertEquals("BCJSSE", provider.getName()); + } + SSLContext.getInstance(service.getAlgorithm()); // ensure no exceptions + } + + } + + } } + assertEquals("did not find all supported TLS protocols", defaultSupportedProtocols.size(), i); } public void testServiceTokenHashingAlgorithmSettingValidation() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncTaskServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncTaskServiceTests.java index fc35a4b4761bd..bc191349ea601 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncTaskServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncTaskServiceTests.java @@ -217,7 +217,7 @@ public void testAutoCreateIndex() throws Exception { AsyncExecutionId id = new AsyncExecutionId("0", new TaskId("N/A", 0)); AsyncSearchResponse resp = new AsyncSearchResponse(id.getEncoded(), true, true, 0L, 0L); { - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); indexService.createResponse(id.getDocId(), Collections.emptyMap(), resp, future); future.get(); assertSettings(); @@ -228,14 +228,14 @@ public void testAutoCreateIndex() throws Exception { // Subsequent response deletes throw a (wrapped) index not found exception { - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); indexService.deleteResponse(id, future); expectThrows(Exception.class, future::get); } // So do updates { - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); indexService.updateResponse(id.getDocId(), Collections.emptyMap(), resp, future); expectThrows(Exception.class, future::get); assertSettings(); @@ -243,7 +243,7 @@ public void testAutoCreateIndex() throws Exception { // And so does updating the expiration time { - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); indexService.updateExpirationTime("0", 10L, future); expectThrows(Exception.class, future::get); assertSettings(); @@ -251,7 +251,7 @@ public void testAutoCreateIndex() throws Exception { // But the index is still auto-created { - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>();
indexService.createResponse(id.getDocId(), Collections.emptyMap(), resp, future); future.get(); assertSettings(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsageTests.java index 3cdaa7e6015d2..66ab5755f9392 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsageTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsageTests.java @@ -7,10 +7,23 @@ package org.elasticsearch.xpack.core.datastreams; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; +import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.Index; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.test.ESTestCase; +import java.util.List; +import java.util.LongSummaryStatistics; +import java.util.UUID; + +import static org.elasticsearch.xpack.core.action.DataStreamLifecycleUsageTransportAction.calculateStats; +import static org.hamcrest.Matchers.is; + public class DataStreamLifecycleFeatureSetUsageTests extends AbstractWireSerializingTestCase { @Override @@ -83,6 +96,61 @@ protected DataStreamLifecycleFeatureSetUsage mutateInstance(DataStreamLifecycleF }; } + public void testLifecycleStats() { + List dataStreams = List.of( + DataStreamTestHelper.newInstance( + randomAlphaOfLength(10), + List.of(new Index(randomAlphaOfLength(10), UUID.randomUUID().toString())), + 1L, + null, + false, + new DataStreamLifecycle() + ), + DataStreamTestHelper.newInstance( + randomAlphaOfLength(10), + List.of(new Index(randomAlphaOfLength(10), UUID.randomUUID().toString())), + 1L, + null, + false, + new DataStreamLifecycle(new DataStreamLifecycle.Retention(TimeValue.timeValueMillis(1000)), null, true) + ), + DataStreamTestHelper.newInstance( + randomAlphaOfLength(10), + List.of(new Index(randomAlphaOfLength(10), UUID.randomUUID().toString())), + 1L, + null, + false, + new DataStreamLifecycle(new DataStreamLifecycle.Retention(TimeValue.timeValueMillis(100)), null, true) + ), + DataStreamTestHelper.newInstance( + randomAlphaOfLength(10), + List.of(new Index(randomAlphaOfLength(10), UUID.randomUUID().toString())), + 1L, + null, + false, + new DataStreamLifecycle(new DataStreamLifecycle.Retention(TimeValue.timeValueMillis(5000)), null, false) + ), + DataStreamTestHelper.newInstance( + randomAlphaOfLength(10), + List.of(new Index(randomAlphaOfLength(10), UUID.randomUUID().toString())), + 1L, + null, + false, + null + ) + ); + + Tuple stats = calculateStats(dataStreams); + // 3 data streams with an enabled lifecycle + assertThat(stats.v1(), is(3L)); + LongSummaryStatistics longSummaryStatistics = stats.v2(); + assertThat(longSummaryStatistics.getMax(), is(1000L)); + assertThat(longSummaryStatistics.getMin(), is(100L)); + // only counting the ones with an effective retention in the summary statistics + assertThat(longSummaryStatistics.getCount(), is(2L)); + assertThat(longSummaryStatistics.getAverage(), is(550.0)); + } + @Override protected Writeable.Reader instanceReader() { return DataStreamLifecycleFeatureSetUsage::new; diff --git 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/DataTierUsageFixtures.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/DataTierUsageFixtures.java new file mode 100644 index 0000000000000..63cc6e4d7914e --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/DataTierUsageFixtures.java @@ -0,0 +1,114 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.datatiers; + +import org.elasticsearch.action.admin.indices.stats.CommonStats; +import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; +import org.elasticsearch.action.admin.indices.stats.IndexShardStats; +import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.cluster.routing.allocation.DataTier; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.PathUtils; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.shard.DocsStats; +import org.elasticsearch.index.shard.IndexLongFieldRange; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.index.store.StoreStats; +import org.elasticsearch.indices.NodeIndicesStats; +import org.elasticsearch.test.ESTestCase; + +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_CREATION_DATE; + +class DataTierUsageFixtures extends ESTestCase { + + private static final CommonStats COMMON_STATS = new CommonStats( + CommonStatsFlags.NONE.set(CommonStatsFlags.Flag.Docs, true).set(CommonStatsFlags.Flag.Store, true) + ); + + static DiscoveryNode newNode(int nodeId, DiscoveryNodeRole... roles) { + return DiscoveryNodeUtils.builder("node_" + nodeId).roles(Set.of(roles)).build(); + } + + static void routeTestShardToNodes( + IndexMetadata index, + int shard, + IndexRoutingTable.Builder indexRoutingTableBuilder, + DiscoveryNode... 
nodes + ) { + ShardId shardId = new ShardId(index.getIndex(), shard); + IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); + boolean primary = true; + for (DiscoveryNode node : nodes) { + indexShardRoutingBuilder.addShard( + TestShardRouting.newShardRouting(shardId, node.getId(), null, primary, ShardRoutingState.STARTED) + ); + primary = false; + } + indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder); + } + + static NodeIndicesStats buildNodeIndicesStats(RoutingNode routingNode, long bytesPerShard, long docsPerShard) { + Map<Index, List<IndexShardStats>> indexStats = new HashMap<>(); + for (ShardRouting shardRouting : routingNode) { + ShardId shardId = shardRouting.shardId(); + ShardStats shardStat = shardStat(bytesPerShard, docsPerShard, shardRouting); + IndexShardStats shardStats = new IndexShardStats(shardId, new ShardStats[] { shardStat }); + indexStats.computeIfAbsent(shardId.getIndex(), k -> new ArrayList<>()).add(shardStats); + } + return new NodeIndicesStats(COMMON_STATS, Map.of(), indexStats, true); + } + + private static ShardStats shardStat(long byteCount, long docCount, ShardRouting routing) { + StoreStats storeStats = new StoreStats(randomNonNegativeLong(), byteCount, 0L); + DocsStats docsStats = new DocsStats(docCount, 0L, byteCount); + Path fakePath = PathUtils.get("test/dir/" + routing.shardId().getIndex().getUUID() + "/" + routing.shardId().id()); + ShardPath fakeShardPath = new ShardPath(false, fakePath, fakePath, routing.shardId()); + CommonStats commonStats = new CommonStats(CommonStatsFlags.ALL); + commonStats.getStore().add(storeStats); + commonStats.getDocs().add(docsStats); + return new ShardStats(routing, fakeShardPath, commonStats, null, null, null, false, 0); + } + + static IndexMetadata indexMetadata(String indexName, int numberOfShards, int numberOfReplicas, String... dataTierPrefs) { + Settings.Builder settingsBuilder = indexSettings(IndexVersion.current(), numberOfShards, numberOfReplicas).put( + SETTING_CREATION_DATE, + System.currentTimeMillis() + ); + + if (dataTierPrefs.length > 1) { + StringBuilder tierBuilder = new StringBuilder(dataTierPrefs[0]); + for (int idx = 1; idx < dataTierPrefs.length; idx++) { + tierBuilder.append(',').append(dataTierPrefs[idx]); + } + settingsBuilder.put(DataTier.TIER_PREFERENCE, tierBuilder.toString()); + } else if (dataTierPrefs.length == 1) { + settingsBuilder.put(DataTier.TIER_PREFERENCE, dataTierPrefs[0]); + } + + return IndexMetadata.builder(indexName).settings(settingsBuilder.build()).timestampRange(IndexLongFieldRange.UNKNOWN).build(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/DataTiersFeatureSetUsageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsageTests.java similarity index 97% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/DataTiersFeatureSetUsageTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsageTests.java index e5f37dfb5764c..0951408441b3f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/DataTiersFeatureSetUsageTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsageTests.java @@ -5,7 +5,7 @@ * 2.0.
*/ -package org.elasticsearch.xpack.core; +package org.elasticsearch.xpack.core.datatiers; import org.elasticsearch.cluster.routing.allocation.DataTier; import org.elasticsearch.common.io.stream.Writeable; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/DataTiersUsageTransportActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/DataTiersUsageTransportActionTests.java new file mode 100644 index 0000000000000..bb8dce7db0e23 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/DataTiersUsageTransportActionTests.java @@ -0,0 +1,535 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.datatiers; + +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.allocation.DataTier; +import org.elasticsearch.search.aggregations.metrics.TDigestState; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.IntStream; + +import static org.elasticsearch.xpack.core.datatiers.DataTierUsageFixtures.indexMetadata; +import static org.elasticsearch.xpack.core.datatiers.DataTierUsageFixtures.newNode; +import static org.elasticsearch.xpack.core.datatiers.DataTierUsageFixtures.routeTestShardToNodes; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class DataTiersUsageTransportActionTests extends ESTestCase { + + private long byteSize; + private long docCount; + + @Before + public void setup() { + byteSize = randomLongBetween(1024L, 1024L * 1024L * 1024L * 30L); // 1 KB to 30 GB + docCount = randomLongBetween(100L, 100000000L); // one hundred to one hundred million + } + + public void testTierIndices() { + DiscoveryNode dataNode = newNode(0, DiscoveryNodeRole.DATA_ROLE); + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + discoBuilder.add(dataNode); + + IndexMetadata hotIndex1 = indexMetadata("hot-1", 1, 0, DataTier.DATA_HOT); + IndexMetadata hotIndex2 = indexMetadata("hot-2", 1, 0, DataTier.DATA_HOT); + IndexMetadata warmIndex1 = indexMetadata("warm-1", 1, 0, DataTier.DATA_WARM); + IndexMetadata coldIndex1 = indexMetadata("cold-1", 1, 0, DataTier.DATA_COLD); + IndexMetadata coldIndex2 = indexMetadata("cold-2", 1, 0, DataTier.DATA_COLD, DataTier.DATA_WARM); // Prefers cold over warm + IndexMetadata nonTiered = indexMetadata("non-tier", 1, 0); // No tier + IndexMetadata hotIndex3 = indexMetadata("hot-3", 1, 0, DataTier.DATA_HOT); + + Metadata.Builder metadataBuilder = Metadata.builder() + .put(hotIndex1, false) + .put(hotIndex2, false) + .put(warmIndex1, false) + .put(coldIndex1, false) + .put(coldIndex2, false) + .put(nonTiered, false) + .put(hotIndex3, false) + 
.generateClusterUuidIfNeeded(); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + routingTableBuilder.add(getIndexRoutingTable(hotIndex1, dataNode)); + routingTableBuilder.add(getIndexRoutingTable(hotIndex2, dataNode)); + routingTableBuilder.add(getIndexRoutingTable(hotIndex2, dataNode)); + routingTableBuilder.add(getIndexRoutingTable(warmIndex1, dataNode)); + routingTableBuilder.add(getIndexRoutingTable(coldIndex1, dataNode)); + routingTableBuilder.add(getIndexRoutingTable(coldIndex2, dataNode)); + routingTableBuilder.add(getIndexRoutingTable(nonTiered, dataNode)); + ClusterState clusterState = ClusterState.builder(new ClusterName("test")) + .nodes(discoBuilder) + .metadata(metadataBuilder) + .routingTable(routingTableBuilder.build()) + .build(); + Map<String, Set<String>> result = DataTiersUsageTransportAction.getIndicesGroupedByTier( + clusterState, + List.of(new NodeDataTiersUsage(dataNode, Map.of(DataTier.DATA_WARM, createStats(5, 5, 0, 10)))) + ); + assertThat(result.keySet(), equalTo(Set.of(DataTier.DATA_HOT, DataTier.DATA_WARM, DataTier.DATA_COLD))); + assertThat(result.get(DataTier.DATA_HOT), equalTo(Set.of(hotIndex1.getIndex().getName(), hotIndex2.getIndex().getName()))); + assertThat(result.get(DataTier.DATA_WARM), equalTo(Set.of(warmIndex1.getIndex().getName()))); + assertThat(result.get(DataTier.DATA_COLD), equalTo(Set.of(coldIndex1.getIndex().getName(), coldIndex2.getIndex().getName()))); + } + + public void testCalculateMAD() { + assertThat(DataTiersUsageTransportAction.computeMedianAbsoluteDeviation(TDigestState.create(10)), equalTo(0L)); + + TDigestState sketch = TDigestState.create(randomDoubleBetween(1, 1000, false)); + sketch.add(1); + sketch.add(1); + sketch.add(2); + sketch.add(2); + sketch.add(4); + sketch.add(6); + sketch.add(9); + assertThat(DataTiersUsageTransportAction.computeMedianAbsoluteDeviation(sketch), equalTo(1L)); + } + + public void testCalculateStatsNoTiers() { + // Nodes: 0 Tiered Nodes, 1 Data Node, no indices on tiered nodes + DiscoveryNode leader = newNode(0, DiscoveryNodeRole.MASTER_ROLE); + DiscoveryNode dataNode1 = newNode(1, DiscoveryNodeRole.DATA_ROLE); + + List<NodeDataTiersUsage> nodeDataTiersUsages = List.of( + new NodeDataTiersUsage(leader, Map.of()), + new NodeDataTiersUsage(dataNode1, Map.of()) + ); + Map<String, DataTiersFeatureSetUsage.TierSpecificStats> tierSpecificStats = DataTiersUsageTransportAction.aggregateStats( + nodeDataTiersUsages, + Map.of() + ); + + // Verify - No results when no tiers present + assertThat(tierSpecificStats.size(), is(0)); + } + + public void testCalculateStatsTieredNodesOnly() { + // Nodes: 1 Data, 1 Hot, 1 Warm, 1 Cold, 1 Frozen + DiscoveryNode leader = newNode(0, DiscoveryNodeRole.MASTER_ROLE); + DiscoveryNode dataNode1 = newNode(1, DiscoveryNodeRole.DATA_ROLE); + DiscoveryNode hotNode1 = newNode(2, DiscoveryNodeRole.DATA_HOT_NODE_ROLE); + DiscoveryNode warmNode1 = newNode(3, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); + DiscoveryNode coldNode1 = newNode(4, DiscoveryNodeRole.DATA_COLD_NODE_ROLE); + DiscoveryNode frozenNode1 = newNode(5, DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE); + + List<NodeDataTiersUsage> nodeDataTiersUsages = List.of( + new NodeDataTiersUsage(leader, Map.of()), + new NodeDataTiersUsage(dataNode1, Map.of()), + new NodeDataTiersUsage(hotNode1, Map.of()), + new NodeDataTiersUsage(warmNode1, Map.of()), + new NodeDataTiersUsage(coldNode1, Map.of()), + new NodeDataTiersUsage(frozenNode1, Map.of()) + ); + + Map<String, DataTiersFeatureSetUsage.TierSpecificStats> tierSpecificStats = DataTiersUsageTransportAction.aggregateStats( + nodeDataTiersUsages, + Map.of() + ); + + // Verify - Results are present, but they lack index numbers because
none are tiered + assertThat(tierSpecificStats.size(), is(4)); + + DataTiersFeatureSetUsage.TierSpecificStats hotStats = tierSpecificStats.get(DataTier.DATA_HOT); + assertThat(hotStats, is(notNullValue())); + assertThat(hotStats.nodeCount, is(1)); + assertThat(hotStats.indexCount, is(0)); + assertThat(hotStats.totalShardCount, is(0)); + assertThat(hotStats.docCount, is(0L)); + assertThat(hotStats.totalByteCount, is(0L)); + assertThat(hotStats.primaryShardCount, is(0)); + assertThat(hotStats.primaryByteCount, is(0L)); + assertThat(hotStats.primaryByteCountMedian, is(0L)); // All same size + assertThat(hotStats.primaryShardBytesMAD, is(0L)); // All same size + + DataTiersFeatureSetUsage.TierSpecificStats warmStats = tierSpecificStats.get(DataTier.DATA_WARM); + assertThat(warmStats, is(notNullValue())); + assertThat(warmStats.nodeCount, is(1)); + assertThat(warmStats.indexCount, is(0)); + assertThat(warmStats.totalShardCount, is(0)); + assertThat(warmStats.docCount, is(0L)); + assertThat(warmStats.totalByteCount, is(0L)); + assertThat(warmStats.primaryShardCount, is(0)); + assertThat(warmStats.primaryByteCount, is(0L)); + assertThat(warmStats.primaryByteCountMedian, is(0L)); // All same size + assertThat(warmStats.primaryShardBytesMAD, is(0L)); // All same size + + DataTiersFeatureSetUsage.TierSpecificStats coldStats = tierSpecificStats.get(DataTier.DATA_COLD); + assertThat(coldStats, is(notNullValue())); + assertThat(coldStats.nodeCount, is(1)); + assertThat(coldStats.indexCount, is(0)); + assertThat(coldStats.totalShardCount, is(0)); + assertThat(coldStats.docCount, is(0L)); + assertThat(coldStats.totalByteCount, is(0L)); + assertThat(coldStats.primaryShardCount, is(0)); + assertThat(coldStats.primaryByteCount, is(0L)); + assertThat(coldStats.primaryByteCountMedian, is(0L)); // All same size + assertThat(coldStats.primaryShardBytesMAD, is(0L)); // All same size + + DataTiersFeatureSetUsage.TierSpecificStats frozenStats = tierSpecificStats.get(DataTier.DATA_FROZEN); + assertThat(frozenStats, is(notNullValue())); + assertThat(frozenStats.nodeCount, is(1)); + assertThat(frozenStats.indexCount, is(0)); + assertThat(frozenStats.totalShardCount, is(0)); + assertThat(frozenStats.docCount, is(0L)); + assertThat(frozenStats.totalByteCount, is(0L)); + assertThat(frozenStats.primaryShardCount, is(0)); + assertThat(frozenStats.primaryByteCount, is(0L)); + assertThat(frozenStats.primaryByteCountMedian, is(0L)); // All same size + assertThat(frozenStats.primaryShardBytesMAD, is(0L)); // All same size + } + + public void testCalculateStatsTieredIndicesOnly() { + // Nodes: 3 Data, 0 Tiered - Only hosting indices on generic data nodes + int nodeId = 0; + DiscoveryNode leader = newNode(nodeId++, DiscoveryNodeRole.MASTER_ROLE); + DiscoveryNode dataNode1 = newNode(nodeId++, DiscoveryNodeRole.DATA_ROLE); + DiscoveryNode dataNode2 = newNode(nodeId++, DiscoveryNodeRole.DATA_ROLE); + DiscoveryNode dataNode3 = newNode(nodeId, DiscoveryNodeRole.DATA_ROLE); + + // Indices: + // 1 Hot index: 3 primaries, 3 replicas one on each node + // 2 Warm indices, each index 1 primary one replica + // 3 Cold indices, each index 1 primary on a different node + String hotIndex = "hot_index_1"; + String warmIndex1 = "warm_index_1"; + String warmIndex2 = "warm_index_2"; + String coldIndex1 = "cold_index_1"; + String coldIndex2 = "cold_index_2"; + String coldIndex3 = "cold_index_3"; + + List nodeDataTiersUsages = List.of( + new NodeDataTiersUsage(leader, Map.of()), + new NodeDataTiersUsage( + dataNode1, + Map.of( + DataTier.DATA_HOT, 
+ createStats(1, 2, docCount, byteSize), + DataTier.DATA_WARM, + createStats(0, 2, docCount, byteSize), + DataTier.DATA_COLD, + createStats(1, 1, docCount, byteSize) + ) + ), + new NodeDataTiersUsage( + dataNode2, + Map.of( + DataTier.DATA_HOT, + createStats(1, 2, docCount, byteSize), + DataTier.DATA_WARM, + createStats(1, 1, docCount, byteSize), + DataTier.DATA_COLD, + createStats(1, 1, docCount, byteSize) + ) + ), + new NodeDataTiersUsage( + dataNode3, + Map.of( + DataTier.DATA_HOT, + createStats(1, 2, docCount, byteSize), + DataTier.DATA_WARM, + createStats(1, 1, docCount, byteSize), + DataTier.DATA_COLD, + createStats(1, 1, docCount, byteSize) + ) + ) + ); + // Calculate usage + Map tierSpecificStats = DataTiersUsageTransportAction.aggregateStats( + nodeDataTiersUsages, + Map.of( + DataTier.DATA_HOT, + Set.of(hotIndex), + DataTier.DATA_WARM, + Set.of(warmIndex1, warmIndex2), + DataTier.DATA_COLD, + Set.of(coldIndex1, coldIndex2, coldIndex3) + ) + ); + + // Verify - Index stats exist for the tiers, but no tiered nodes are found + assertThat(tierSpecificStats.size(), is(3)); + + DataTiersFeatureSetUsage.TierSpecificStats hotStats = tierSpecificStats.get(DataTier.DATA_HOT); + assertThat(hotStats, is(notNullValue())); + assertThat(hotStats.nodeCount, is(0)); + assertThat(hotStats.indexCount, is(1)); + assertThat(hotStats.totalShardCount, is(6)); + assertThat(hotStats.docCount, is(6 * docCount)); + assertThat(hotStats.totalByteCount, is(6 * byteSize)); + assertThat(hotStats.primaryShardCount, is(3)); + assertThat(hotStats.primaryByteCount, is(3 * byteSize)); + assertThat(hotStats.primaryByteCountMedian, is(byteSize)); // All same size + assertThat(hotStats.primaryShardBytesMAD, is(0L)); // All same size + + DataTiersFeatureSetUsage.TierSpecificStats warmStats = tierSpecificStats.get(DataTier.DATA_WARM); + assertThat(warmStats, is(notNullValue())); + assertThat(warmStats.nodeCount, is(0)); + assertThat(warmStats.indexCount, is(2)); + assertThat(warmStats.totalShardCount, is(4)); + assertThat(warmStats.docCount, is(4 * docCount)); + assertThat(warmStats.totalByteCount, is(4 * byteSize)); + assertThat(warmStats.primaryShardCount, is(2)); + assertThat(warmStats.primaryByteCount, is(2 * byteSize)); + assertThat(warmStats.primaryByteCountMedian, is(byteSize)); // All same size + assertThat(warmStats.primaryShardBytesMAD, is(0L)); // All same size + + DataTiersFeatureSetUsage.TierSpecificStats coldStats = tierSpecificStats.get(DataTier.DATA_COLD); + assertThat(coldStats, is(notNullValue())); + assertThat(coldStats.nodeCount, is(0)); + assertThat(coldStats.indexCount, is(3)); + assertThat(coldStats.totalShardCount, is(3)); + assertThat(coldStats.docCount, is(3 * docCount)); + assertThat(coldStats.totalByteCount, is(3 * byteSize)); + assertThat(coldStats.primaryShardCount, is(3)); + assertThat(coldStats.primaryByteCount, is(3 * byteSize)); + assertThat(coldStats.primaryByteCountMedian, is(byteSize)); // All same size + assertThat(coldStats.primaryShardBytesMAD, is(0L)); // All same size + } + + public void testCalculateStatsReasonableCase() { + // Nodes: 3 Hot, 5 Warm, 1 Cold + int nodeId = 0; + DiscoveryNode leader = newNode(nodeId++, DiscoveryNodeRole.MASTER_ROLE); + DiscoveryNode hotNode1 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE); + DiscoveryNode hotNode2 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE); + DiscoveryNode hotNode3 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE); + DiscoveryNode warmNode1 = newNode(nodeId++, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); 
+ DiscoveryNode warmNode2 = newNode(nodeId++, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); + DiscoveryNode warmNode3 = newNode(nodeId++, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); + DiscoveryNode warmNode4 = newNode(nodeId++, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); + DiscoveryNode warmNode5 = newNode(nodeId++, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); + DiscoveryNode coldNode1 = newNode(nodeId, DiscoveryNodeRole.DATA_COLD_NODE_ROLE); + + // Indices: + // 1 Hot index: 3 primaries, 3 replicas one on each node + // 2 Warm indices: each index has 1 primary and 1 replica residing in 4 nodes + // 3 Cold indices: 1 primary each on the cold node + String hotIndex1 = "hot_index_1"; + String warmIndex1 = "warm_index_1"; + String warmIndex2 = "warm_index_2"; + String coldIndex1 = "cold_index_1"; + String coldIndex2 = "cold_index_2"; + String coldIndex3 = "cold_index_3"; + + List nodeDataTiersUsages = List.of( + new NodeDataTiersUsage(leader, Map.of()), + new NodeDataTiersUsage(hotNode1, Map.of(DataTier.DATA_HOT, createStats(1, 2, docCount, byteSize))), + new NodeDataTiersUsage(hotNode2, Map.of(DataTier.DATA_HOT, createStats(1, 2, docCount, byteSize))), + new NodeDataTiersUsage(hotNode3, Map.of(DataTier.DATA_HOT, createStats(1, 2, docCount, byteSize))), + new NodeDataTiersUsage(warmNode1, Map.of(DataTier.DATA_WARM, createStats(1, 1, docCount, byteSize))), + new NodeDataTiersUsage(warmNode2, Map.of(DataTier.DATA_WARM, createStats(0, 1, docCount, byteSize))), + new NodeDataTiersUsage(warmNode3, Map.of(DataTier.DATA_WARM, createStats(1, 1, docCount, byteSize))), + new NodeDataTiersUsage(warmNode4, Map.of(DataTier.DATA_WARM, createStats(0, 1, docCount, byteSize))), + new NodeDataTiersUsage(warmNode5, Map.of()), + new NodeDataTiersUsage(coldNode1, Map.of(DataTier.DATA_COLD, createStats(3, 3, docCount, byteSize))) + + ); + // Calculate usage + Map tierSpecificStats = DataTiersUsageTransportAction.aggregateStats( + nodeDataTiersUsages, + Map.of( + DataTier.DATA_HOT, + Set.of(hotIndex1), + DataTier.DATA_WARM, + Set.of(warmIndex1, warmIndex2), + DataTier.DATA_COLD, + Set.of(coldIndex1, coldIndex2, coldIndex3) + ) + ); + + // Verify - Node and Index stats are both collected + assertThat(tierSpecificStats.size(), is(3)); + + DataTiersFeatureSetUsage.TierSpecificStats hotStats = tierSpecificStats.get(DataTier.DATA_HOT); + assertThat(hotStats, is(notNullValue())); + assertThat(hotStats.nodeCount, is(3)); + assertThat(hotStats.indexCount, is(1)); + assertThat(hotStats.totalShardCount, is(6)); + assertThat(hotStats.docCount, is(6 * docCount)); + assertThat(hotStats.totalByteCount, is(6 * byteSize)); + assertThat(hotStats.primaryShardCount, is(3)); + assertThat(hotStats.primaryByteCount, is(3 * byteSize)); + assertThat(hotStats.primaryByteCountMedian, is(byteSize)); // All same size + assertThat(hotStats.primaryShardBytesMAD, is(0L)); // All same size + + DataTiersFeatureSetUsage.TierSpecificStats warmStats = tierSpecificStats.get(DataTier.DATA_WARM); + assertThat(warmStats, is(notNullValue())); + assertThat(warmStats.nodeCount, is(5)); + assertThat(warmStats.indexCount, is(2)); + assertThat(warmStats.totalShardCount, is(4)); + assertThat(warmStats.docCount, is(4 * docCount)); + assertThat(warmStats.totalByteCount, is(4 * byteSize)); + assertThat(warmStats.primaryShardCount, is(2)); + assertThat(warmStats.primaryByteCount, is(2 * byteSize)); + assertThat(warmStats.primaryByteCountMedian, is(byteSize)); // All same size + assertThat(warmStats.primaryShardBytesMAD, is(0L)); // All same size + + 
DataTiersFeatureSetUsage.TierSpecificStats coldStats = tierSpecificStats.get(DataTier.DATA_COLD); + assertThat(coldStats, is(notNullValue())); + assertThat(coldStats.nodeCount, is(1)); + assertThat(coldStats.indexCount, is(3)); + assertThat(coldStats.totalShardCount, is(3)); + assertThat(coldStats.docCount, is(3 * docCount)); + assertThat(coldStats.totalByteCount, is(3 * byteSize)); + assertThat(coldStats.primaryShardCount, is(3)); + assertThat(coldStats.primaryByteCount, is(3 * byteSize)); + assertThat(coldStats.primaryByteCountMedian, is(byteSize)); // All same size + assertThat(coldStats.primaryShardBytesMAD, is(0L)); // All same size + } + + public void testCalculateStatsMixedTiers() { + // Nodes: 3 Hot+Warm - Nodes that are marked as part of multiple tiers + int nodeId = 0; + DiscoveryNode leader = newNode(nodeId++, DiscoveryNodeRole.MASTER_ROLE); + + DiscoveryNode mixedNode1 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); + DiscoveryNode mixedNode2 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); + DiscoveryNode mixedNode3 = newNode(nodeId, DiscoveryNodeRole.DATA_HOT_NODE_ROLE, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); + + String hotIndex1 = "hot_index_1"; + String warmIndex1 = "warm_index_1"; + String warmIndex2 = "warm_index_2"; + + // Indices: 1 Hot index, 2 Warm indices + List nodeDataTiersUsages = List.of( + new NodeDataTiersUsage(leader, Map.of()), + new NodeDataTiersUsage( + mixedNode1, + Map.of(DataTier.DATA_HOT, createStats(1, 2, docCount, byteSize), DataTier.DATA_WARM, createStats(1, 2, docCount, byteSize)) + ), + new NodeDataTiersUsage( + mixedNode2, + Map.of(DataTier.DATA_HOT, createStats(1, 2, docCount, byteSize), DataTier.DATA_WARM, createStats(0, 1, docCount, byteSize)) + ), + new NodeDataTiersUsage( + mixedNode3, + Map.of(DataTier.DATA_HOT, createStats(1, 2, docCount, byteSize), DataTier.DATA_WARM, createStats(1, 1, docCount, byteSize)) + ) + ); + + // Calculate usage + Map tierSpecificStats = DataTiersUsageTransportAction.aggregateStats( + nodeDataTiersUsages, + Map.of(DataTier.DATA_HOT, Set.of(hotIndex1), DataTier.DATA_WARM, Set.of(warmIndex1, warmIndex2)) + ); + + // Verify - Index stats are separated by their preferred tier, instead of counted + // toward multiple tiers based on their current routing. Nodes are counted for each tier they are in. 
+ assertThat(tierSpecificStats.size(), is(2)); + + DataTiersFeatureSetUsage.TierSpecificStats hotStats = tierSpecificStats.get(DataTier.DATA_HOT); + assertThat(hotStats, is(notNullValue())); + assertThat(hotStats.nodeCount, is(3)); + assertThat(hotStats.indexCount, is(1)); + assertThat(hotStats.totalShardCount, is(6)); + assertThat(hotStats.docCount, is(6 * docCount)); + assertThat(hotStats.totalByteCount, is(6 * byteSize)); + assertThat(hotStats.primaryShardCount, is(3)); + assertThat(hotStats.primaryByteCount, is(3 * byteSize)); + assertThat(hotStats.primaryByteCountMedian, is(byteSize)); // All same size + assertThat(hotStats.primaryShardBytesMAD, is(0L)); // All same size + + DataTiersFeatureSetUsage.TierSpecificStats warmStats = tierSpecificStats.get(DataTier.DATA_WARM); + assertThat(warmStats, is(notNullValue())); + assertThat(warmStats.nodeCount, is(3)); + assertThat(warmStats.indexCount, is(2)); + assertThat(warmStats.totalShardCount, is(4)); + assertThat(warmStats.docCount, is(4 * docCount)); + assertThat(warmStats.totalByteCount, is(4 * byteSize)); + assertThat(warmStats.primaryShardCount, is(2)); + assertThat(warmStats.primaryByteCount, is(2 * byteSize)); + assertThat(warmStats.primaryByteCountMedian, is(byteSize)); // All same size + assertThat(warmStats.primaryShardBytesMAD, is(0L)); // All same size + } + + public void testCalculateStatsStuckInWrongTier() { + // Nodes: 3 Hot, 0 Warm - Emulating indices stuck on non-preferred tiers + int nodeId = 0; + DiscoveryNode leader = newNode(nodeId++, DiscoveryNodeRole.MASTER_ROLE); + DiscoveryNode hotNode1 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE); + DiscoveryNode hotNode2 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE); + DiscoveryNode hotNode3 = newNode(nodeId, DiscoveryNodeRole.DATA_HOT_NODE_ROLE); + + String hotIndex1 = "hot_index_1"; + String warmIndex1 = "warm_index_1"; + + List nodeDataTiersUsages = List.of( + new NodeDataTiersUsage(leader, Map.of()), + new NodeDataTiersUsage( + hotNode1, + Map.of(DataTier.DATA_HOT, createStats(1, 2, docCount, byteSize), DataTier.DATA_WARM, createStats(1, 1, docCount, byteSize)) + ), + new NodeDataTiersUsage( + hotNode2, + Map.of(DataTier.DATA_HOT, createStats(1, 2, docCount, byteSize), DataTier.DATA_WARM, createStats(0, 1, docCount, byteSize)) + ), + new NodeDataTiersUsage(hotNode3, Map.of(DataTier.DATA_HOT, createStats(1, 2, docCount, byteSize))) + ); + + // Calculate usage + Map tierSpecificStats = DataTiersUsageTransportAction.aggregateStats( + nodeDataTiersUsages, + Map.of(DataTier.DATA_HOT, Set.of(hotIndex1), DataTier.DATA_WARM, Set.of(warmIndex1)) + ); + + // Verify - Warm indices are still calculated separately from Hot ones, despite Warm nodes missing + assertThat(tierSpecificStats.size(), is(2)); + + DataTiersFeatureSetUsage.TierSpecificStats hotStats = tierSpecificStats.get(DataTier.DATA_HOT); + assertThat(hotStats, is(notNullValue())); + assertThat(hotStats.nodeCount, is(3)); + assertThat(hotStats.indexCount, is(1)); + assertThat(hotStats.totalShardCount, is(6)); + assertThat(hotStats.docCount, is(6 * docCount)); + assertThat(hotStats.totalByteCount, is(6 * byteSize)); + assertThat(hotStats.primaryShardCount, is(3)); + assertThat(hotStats.primaryByteCount, is(3 * byteSize)); + assertThat(hotStats.primaryByteCountMedian, is(byteSize)); // All same size + assertThat(hotStats.primaryShardBytesMAD, is(0L)); // All same size + + DataTiersFeatureSetUsage.TierSpecificStats warmStats = tierSpecificStats.get(DataTier.DATA_WARM); + assertThat(warmStats, 
is(notNullValue())); + assertThat(warmStats.nodeCount, is(0)); + assertThat(warmStats.indexCount, is(1)); + assertThat(warmStats.totalShardCount, is(2)); + assertThat(warmStats.docCount, is(2 * docCount)); + assertThat(warmStats.totalByteCount, is(2 * byteSize)); + assertThat(warmStats.primaryShardCount, is(1)); + assertThat(warmStats.primaryByteCount, is(byteSize)); + assertThat(warmStats.primaryByteCountMedian, is(byteSize)); // All same size + assertThat(warmStats.primaryShardBytesMAD, is(0L)); // All same size + } + + private NodeDataTiersUsage.UsageStats createStats(int primaryShardCount, int totalNumberOfShards, long docCount, long byteSize) { + return new NodeDataTiersUsage.UsageStats( + primaryShardCount > 0 ? IntStream.range(0, primaryShardCount).mapToObj(i -> byteSize).toList() : List.of(), + totalNumberOfShards, + totalNumberOfShards * docCount, + totalNumberOfShards * byteSize + ); + } + + private IndexRoutingTable.Builder getIndexRoutingTable(IndexMetadata indexMetadata, DiscoveryNode node) { + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(indexMetadata.getIndex()); + routeTestShardToNodes(indexMetadata, 0, indexRoutingTableBuilder, node); + return indexRoutingTableBuilder; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/NodesDataTiersUsageTransportActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/NodesDataTiersUsageTransportActionTests.java new file mode 100644 index 0000000000000..fb4291530d037 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/NodesDataTiersUsageTransportActionTests.java @@ -0,0 +1,214 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.datatiers; + +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.allocation.DataTier; +import org.elasticsearch.indices.NodeIndicesStats; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.core.datatiers.DataTierUsageFixtures.buildNodeIndicesStats; +import static org.elasticsearch.xpack.core.datatiers.DataTierUsageFixtures.indexMetadata; +import static org.elasticsearch.xpack.core.datatiers.DataTierUsageFixtures.newNode; +import static org.elasticsearch.xpack.core.datatiers.DataTierUsageFixtures.routeTestShardToNodes; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class NodesDataTiersUsageTransportActionTests extends ESTestCase { + + private long byteSize; + private long docCount; + + @Before + public void setup() { + byteSize = randomLongBetween(1024L, 1024L * 1024L * 1024L * 30L); // 1 KB to 30 GB + docCount = randomLongBetween(100L, 100000000L); // one hundred to one hundred million + } + + public void testCalculateStatsNoTiers() { + // Nodes: 0 Tiered Nodes, 1 Data Node + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + DiscoveryNode dataNode1 = newNode(1, DiscoveryNodeRole.DATA_ROLE); + discoBuilder.add(dataNode1); + discoBuilder.localNodeId(dataNode1.getId()); + + // Indices: 1 Regular index + Metadata.Builder metadataBuilder = Metadata.builder(); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + + IndexMetadata index1 = indexMetadata("index_1", 3, 1); + metadataBuilder.put(index1, false).generateClusterUuidIfNeeded(); + + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index1.getIndex()); + routeTestShardToNodes(index1, 0, indexRoutingTableBuilder, dataNode1); + routeTestShardToNodes(index1, 1, indexRoutingTableBuilder, dataNode1); + routeTestShardToNodes(index1, 2, indexRoutingTableBuilder, dataNode1); + routingTableBuilder.add(indexRoutingTableBuilder.build()); + + // Cluster State and create stats responses + ClusterState clusterState = ClusterState.builder(new ClusterName("test")) + .metadata(metadataBuilder) + .nodes(discoBuilder) + .routingTable(routingTableBuilder.build()) + .build(); + NodeIndicesStats nodeIndicesStats = buildNodeIndicesStats( + clusterState.getRoutingNodes().node(dataNode1.getId()), + byteSize, + docCount + ); + + // Calculate usage + Map<String, NodeDataTiersUsage.UsageStats> usageStats = NodesDataTiersUsageTransportAction.aggregateStats( + clusterState.getRoutingNodes().node(dataNode1.getId()), + clusterState.metadata(), + nodeIndicesStats + ); + + // Verify - No results when no tiers present + assertThat(usageStats.size(), is(0)); + } + + public void testCalculateStatsNoIndices() { + // Nodes: 1 Hot + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + DiscoveryNode dataNode1 = newNode(1, DiscoveryNodeRole.DATA_HOT_NODE_ROLE); + discoBuilder.add(dataNode1);
+ discoBuilder.localNodeId(dataNode1.getId()); + + // Indices: none + Metadata.Builder metadataBuilder = Metadata.builder(); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + + // Cluster State and create stats responses + ClusterState clusterState = ClusterState.builder(new ClusterName("test")) + .metadata(metadataBuilder) + .nodes(discoBuilder) + .routingTable(routingTableBuilder.build()) + .build(); + NodeIndicesStats nodeIndicesStats = buildNodeIndicesStats( + clusterState.getRoutingNodes().node(dataNode1.getId()), + byteSize, + docCount + ); + + // Calculate usage + Map<String, NodeDataTiersUsage.UsageStats> usageStats = NodesDataTiersUsageTransportAction.aggregateStats( + clusterState.getRoutingNodes().node(dataNode1.getId()), + clusterState.metadata(), + nodeIndicesStats + ); + + // Verify - No results when the node hosts no index shards + assertThat(usageStats.size(), is(0)); + } + + public void testCalculateStatsTieredIndicesOnly() { + // Nodes: 2 Data, 0 Tiered - Only hosting indices on generic data nodes + int nodeId = 0; + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + + DiscoveryNode dataNode1 = newNode(nodeId++, DiscoveryNodeRole.DATA_ROLE); + discoBuilder.add(dataNode1); + DiscoveryNode dataNode2 = newNode(nodeId, DiscoveryNodeRole.DATA_ROLE); + discoBuilder.add(dataNode2); + + discoBuilder.localNodeId(dataNode1.getId()); + + // Indices: 1 Hot index, 2 Warm indices, 1 Cold index + Metadata.Builder metadataBuilder = Metadata.builder(); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + + IndexMetadata hotIndex1 = indexMetadata("hot_index_1", 3, 1, DataTier.DATA_HOT); + metadataBuilder.put(hotIndex1, false).generateClusterUuidIfNeeded(); + { + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(hotIndex1.getIndex()); + routeTestShardToNodes(hotIndex1, 0, indexRoutingTableBuilder, dataNode1, dataNode2); + routeTestShardToNodes(hotIndex1, 1, indexRoutingTableBuilder, dataNode2, dataNode1); + routingTableBuilder.add(indexRoutingTableBuilder.build()); + } + + IndexMetadata warmIndex1 = indexMetadata("warm_index_1", 1, 1, DataTier.DATA_WARM); + metadataBuilder.put(warmIndex1, false).generateClusterUuidIfNeeded(); + { + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(warmIndex1.getIndex()); + routeTestShardToNodes(warmIndex1, 0, indexRoutingTableBuilder, dataNode1, dataNode2); + routingTableBuilder.add(indexRoutingTableBuilder.build()); + } + IndexMetadata warmIndex2 = indexMetadata("warm_index_2", 1, 1, DataTier.DATA_WARM); + metadataBuilder.put(warmIndex2, false).generateClusterUuidIfNeeded(); + { + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(warmIndex2.getIndex()); + routeTestShardToNodes(warmIndex2, 0, indexRoutingTableBuilder, dataNode2, dataNode1); + routingTableBuilder.add(indexRoutingTableBuilder.build()); + } + + IndexMetadata coldIndex1 = indexMetadata("cold_index_1", 1, 0, DataTier.DATA_COLD); + metadataBuilder.put(coldIndex1, false).generateClusterUuidIfNeeded(); + { + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(coldIndex1.getIndex()); + routeTestShardToNodes(coldIndex1, 0, indexRoutingTableBuilder, dataNode1); + routingTableBuilder.add(indexRoutingTableBuilder.build()); + } + + // Cluster State and create stats responses + ClusterState clusterState = ClusterState.builder(new ClusterName("test")) + .nodes(discoBuilder) + .metadata(metadataBuilder) + .routingTable(routingTableBuilder.build()) +
.build(); + NodeIndicesStats nodeIndicesStats = buildNodeIndicesStats( + clusterState.getRoutingNodes().node(dataNode1.getId()), + byteSize, + docCount + ); + + // Calculate usage + Map<String, NodeDataTiersUsage.UsageStats> usageStats = NodesDataTiersUsageTransportAction.aggregateStats( + clusterState.getRoutingNodes().node(dataNode1.getId()), + clusterState.metadata(), + nodeIndicesStats + ); + + // Verify - Usage stats are reported for each tier that hosts shards on the local node + assertThat(usageStats.size(), is(3)); + + NodeDataTiersUsage.UsageStats hotStats = usageStats.get(DataTier.DATA_HOT); + assertThat(hotStats, is(notNullValue())); + assertThat(hotStats.getPrimaryShardSizes(), equalTo(List.of(byteSize))); + assertThat(hotStats.getTotalShardCount(), is(2)); + assertThat(hotStats.getDocCount(), is(hotStats.getTotalShardCount() * docCount)); + assertThat(hotStats.getTotalSize(), is(hotStats.getTotalShardCount() * byteSize)); + + NodeDataTiersUsage.UsageStats warmStats = usageStats.get(DataTier.DATA_WARM); + assertThat(warmStats, is(notNullValue())); + assertThat(warmStats.getPrimaryShardSizes(), equalTo(List.of(byteSize))); + assertThat(warmStats.getTotalShardCount(), is(2)); + assertThat(warmStats.getDocCount(), is(warmStats.getTotalShardCount() * docCount)); + assertThat(warmStats.getTotalSize(), is(warmStats.getTotalShardCount() * byteSize)); + + NodeDataTiersUsage.UsageStats coldStats = usageStats.get(DataTier.DATA_COLD); + assertThat(coldStats, is(notNullValue())); + assertThat(coldStats.getPrimaryShardSizes(), equalTo(List.of(byteSize))); + assertThat(coldStats.getTotalShardCount(), is(1)); + assertThat(coldStats.getDocCount(), is(coldStats.getTotalShardCount() * docCount)); + assertThat(coldStats.getTotalSize(), is(coldStats.getTotalShardCount() * byteSize)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DownsampleActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DownsampleActionTests.java index 109e8f87627ad..7cb93803de4ee 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DownsampleActionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DownsampleActionTests.java @@ -6,8 +6,16 @@ */ package org.elasticsearch.xpack.core.ilm; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.test.EqualsHashCodeTestUtils; import org.elasticsearch.xcontent.XContentParser; @@ -19,7 +27,9 @@ import static org.elasticsearch.xpack.core.ilm.DownsampleAction.CONDITIONAL_DATASTREAM_CHECK_KEY; import static org.elasticsearch.xpack.core.ilm.DownsampleAction.CONDITIONAL_TIME_SERIES_CHECK_KEY; +import static org.elasticsearch.xpack.core.ilm.DownsampleAction.DOWNSAMPLED_INDEX_PREFIX; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; public class DownsampleActionTests extends AbstractActionTestCase<DownsampleAction> { @@ -132,6 +142,92 @@ public void testToSteps() { assertThat(steps.get(14).getNextStepKey(), equalTo(nextStepKey)); } + public void
testDownsamplingPrerequisitesStep() { + DateHistogramInterval fixedInterval = ConfigTestHelpers.randomInterval(); + DownsampleAction action = new DownsampleAction(fixedInterval, WAIT_TIMEOUT); + String phase = randomAlphaOfLengthBetween(1, 10); + StepKey nextStepKey = new StepKey( + randomAlphaOfLengthBetween(1, 10), + randomAlphaOfLengthBetween(1, 10), + randomAlphaOfLengthBetween(1, 10) + ); + { + // non time series indices skip the action + BranchingStep branchingStep = getFirstBranchingStep(action, phase, nextStepKey); + IndexMetadata indexMetadata = newIndexMeta("test", Settings.EMPTY); + + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().put(indexMetadata, true).build()) + .build(); + + branchingStep.performAction(indexMetadata.getIndex(), state); + assertThat(branchingStep.getNextStepKey(), is(nextStepKey)); + } + { + // time series indices execute the action + BranchingStep branchingStep = getFirstBranchingStep(action, phase, nextStepKey); + Settings settings = Settings.builder() + .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) + .put("index.routing_path", "uid") + .build(); + IndexMetadata indexMetadata = newIndexMeta("test", settings); + + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().put(indexMetadata, true).build()) + .build(); + + branchingStep.performAction(indexMetadata.getIndex(), state); + assertThat(branchingStep.getNextStepKey().name(), is(CheckNotDataStreamWriteIndexStep.NAME)); + } + { + // already downsampled indices for the interval skip the action + BranchingStep branchingStep = getFirstBranchingStep(action, phase, nextStepKey); + Settings settings = Settings.builder() + .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) + .put("index.routing_path", "uid") + .put(IndexMetadata.INDEX_DOWNSAMPLE_STATUS_KEY, IndexMetadata.DownsampleTaskStatus.SUCCESS) + .put(IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_NAME.getKey(), "test") + .build(); + String indexName = DOWNSAMPLED_INDEX_PREFIX + fixedInterval + "-test"; + IndexMetadata indexMetadata = newIndexMeta(indexName, settings); + + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().put(indexMetadata, true).build()) + .build(); + + branchingStep.performAction(indexMetadata.getIndex(), state); + assertThat(branchingStep.getNextStepKey(), is(nextStepKey)); + } + { + // indices with the same name as the target downsample index that are NOT downsample indices skip the action + BranchingStep branchingStep = getFirstBranchingStep(action, phase, nextStepKey); + String indexName = DOWNSAMPLED_INDEX_PREFIX + fixedInterval + "-test"; + IndexMetadata indexMetadata = newIndexMeta(indexName, Settings.EMPTY); + + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().put(indexMetadata, true).build()) + .build(); + + branchingStep.performAction(indexMetadata.getIndex(), state); + assertThat(branchingStep.getNextStepKey(), is(nextStepKey)); + } + } + + private static BranchingStep getFirstBranchingStep(DownsampleAction action, String phase, StepKey nextStepKey) { + List steps = action.toSteps(null, phase, nextStepKey); + assertNotNull(steps); + assertEquals(15, steps.size()); + + assertTrue(steps.get(0) instanceof BranchingStep); + assertThat(steps.get(0).getKey().name(), equalTo(CONDITIONAL_TIME_SERIES_CHECK_KEY)); + + return (BranchingStep) steps.get(0); + } + + public static IndexMetadata newIndexMeta(String name, Settings indexSettings) { + return 
IndexMetadata.builder(name).settings(indexSettings(IndexVersion.current(), 1, 1).put(indexSettings)).build(); + } + public void testEqualsAndHashCode() { EqualsHashCodeTestUtils.checkEqualsAndHashCode(createTestInstance(), this::copy, this::notCopy); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyClientTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyClientTests.java index 162794865ba5a..753edfbe334b9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyClientTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyClientTests.java @@ -8,9 +8,9 @@ package org.elasticsearch.xpack.core.ilm; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.settings.Settings; @@ -57,7 +57,7 @@ public void testExecuteWithHeadersAsyncNoHeaders() throws InterruptedException { SearchRequest request = new SearchRequest("foo"); final var policyClient = new LifecyclePolicySecurityClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, Collections.emptyMap()); - policyClient.execute(SearchAction.INSTANCE, request, listener); + policyClient.execute(TransportSearchAction.TYPE, request, listener); latch.await(); } @@ -89,7 +89,7 @@ public void testExecuteWithHeadersAsyncWrongHeaders() throws InterruptedExceptio headers.put("bar", "bar"); final var policyClient = new LifecyclePolicySecurityClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, headers); - policyClient.execute(SearchAction.INSTANCE, request, listener); + policyClient.execute(TransportSearchAction.TYPE, request, listener); latch.await(); } @@ -123,7 +123,7 @@ public void testExecuteWithHeadersAsyncWithHeaders() throws Exception { headers.put("_xpack_security_authentication", "bar"); final var policyClient = new LifecyclePolicySecurityClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, headers); - policyClient.execute(SearchAction.INSTANCE, request, listener); + policyClient.execute(TransportSearchAction.TYPE, request, listener); latch.await(); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtilsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtilsTests.java index eecca03e55406..3efe2dc04ea19 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtilsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtilsTests.java @@ -111,19 +111,17 @@ public void testCalculateUsage() { new ComposableIndexTemplateMetadata( Collections.singletonMap( "mytemplate", - new ComposableIndexTemplate( - Collections.singletonList("myds"), - new Template( - Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, "mypolicy").build(), - null, - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false) - ) + ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("myds")) + .template( + new Template( + Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, "mypolicy").build(), + null, + null + ) + ) + 
.dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ) ) ) @@ -163,15 +161,13 @@ public void testCalculateUsage() { new ComposableIndexTemplateMetadata( Collections.singletonMap( "mytemplate", - new ComposableIndexTemplate( - Collections.singletonList("myds"), - new Template(Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, "mypolicy").build(), null, null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false) - ) + ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("myds")) + .template( + new Template(Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, "mypolicy").build(), null, null) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ) ) ); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MlTasksTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MlTasksTests.java index ae4f451a49d2f..c11cefed137e9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MlTasksTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MlTasksTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeTaskState; import java.net.InetAddress; +import java.time.Instant; import java.util.Set; import java.util.stream.Collectors; @@ -50,7 +51,10 @@ public void testGetJobState() { ); assertEquals(JobState.OPENING, MlTasks.getJobState("foo", tasksBuilder.build())); - tasksBuilder.updateTaskState(MlTasks.jobTaskId("foo"), new JobTaskState(JobState.OPENED, tasksBuilder.getLastAllocationId(), null)); + tasksBuilder.updateTaskState( + MlTasks.jobTaskId("foo"), + new JobTaskState(JobState.OPENED, tasksBuilder.getLastAllocationId(), null, Instant.now()) + ); assertEquals(JobState.OPENED, MlTasks.getJobState("foo", tasksBuilder.build())); } @@ -327,7 +331,7 @@ public void testNonFailedJobTasksOnNode() { new OpenJobAction.JobParams("foo-1"), new PersistentTasksCustomMetadata.Assignment("node-1", "test assignment") ); - tasksBuilder.updateTaskState(MlTasks.jobTaskId("job-1"), new JobTaskState(JobState.FAILED, 1, "testing")); + tasksBuilder.updateTaskState(MlTasks.jobTaskId("job-1"), new JobTaskState(JobState.FAILED, 1, "testing", Instant.now())); tasksBuilder.addTask( MlTasks.jobTaskId("job-2"), MlTasks.JOB_TASK_NAME, @@ -335,7 +339,7 @@ public void testNonFailedJobTasksOnNode() { new PersistentTasksCustomMetadata.Assignment("node-1", "test assignment") ); if (randomBoolean()) { - tasksBuilder.updateTaskState(MlTasks.jobTaskId("job-2"), new JobTaskState(JobState.OPENED, 2, "testing")); + tasksBuilder.updateTaskState(MlTasks.jobTaskId("job-2"), new JobTaskState(JobState.OPENED, 2, "testing", Instant.now())); } tasksBuilder.addTask( MlTasks.jobTaskId("job-3"), @@ -344,7 +348,7 @@ public void testNonFailedJobTasksOnNode() { new PersistentTasksCustomMetadata.Assignment("node-2", "test assignment") ); if (randomBoolean()) { - tasksBuilder.updateTaskState(MlTasks.jobTaskId("job-3"), new JobTaskState(JobState.FAILED, 3, "testing")); + tasksBuilder.updateTaskState(MlTasks.jobTaskId("job-3"), new JobTaskState(JobState.FAILED, 3, "testing", Instant.now())); } assertThat(MlTasks.nonFailedJobTasksOnNode(tasksBuilder.build(), "node-1"), contains(hasProperty("id", equalTo("job-job-2")))); @@ -514,7 +518,7 @@ private static PersistentTasksCustomMetadata.PersistentTask createDataFrameAn if (state != null) { builder.updateTaskState( 
MlTasks.dataFrameAnalyticsTaskId(jobId), - new DataFrameAnalyticsTaskState(state, builder.getLastAllocationId() - (isStale ? 1 : 0), null) + new DataFrameAnalyticsTaskState(state, builder.getLastAllocationId() - (isStale ? 1 : 0), null, Instant.now()) ); } PersistentTasksCustomMetadata tasks = builder.build(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/InferModelActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/InferModelActionRequestTests.java index 69c1b23a5ff85..fcfc396313016 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/InferModelActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/InferModelActionRequestTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; import org.elasticsearch.xpack.core.ml.action.InferModelAction.Request; import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelPrefixStrings; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ClassificationConfigUpdateTests; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.EmptyConfigUpdateTests; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.FillMaskConfigUpdate; @@ -66,6 +67,9 @@ protected Request createTestInstance() { ); request.setHighPriority(randomBoolean()); + if (randomBoolean()) { + request.setPrefixType(randomFrom(TrainedModelPrefixStrings.PrefixType.values())); + } return request; } @@ -79,8 +83,9 @@ protected Request mutateInstance(Request instance) { var update = instance.getUpdate(); var previouslyLicensed = instance.isPreviouslyLicensed(); var timeout = instance.getInferenceTimeout(); + var prefixType = instance.getPrefixType(); - int change = randomIntBetween(0, 6); + int change = randomIntBetween(0, 7); switch (change) { case 0: modelId = modelId + "foo"; @@ -111,12 +116,17 @@ protected Request mutateInstance(Request instance) { case 6: timeout = TimeValue.timeValueSeconds(timeout.getSeconds() - 1); break; + case 7: + prefixType = TrainedModelPrefixStrings.PrefixType.values()[(prefixType.ordinal() + 1) % TrainedModelPrefixStrings.PrefixType + .values().length]; + break; default: throw new IllegalStateException(); } var r = new Request(modelId, update, objectsToInfer, textInput, timeout, previouslyLicensed); r.setHighPriority(highPriority); + r.setPrefixType(prefixType); return r; } @@ -211,6 +221,18 @@ protected Request mutateInstanceForVersion(Request instance, TransportVersion ve ); r.setHighPriority(false); return r; + } else if (version.before(TransportVersions.ML_TRAINED_MODEL_PREFIX_STRINGS_ADDED)) { + var r = new Request( + instance.getId(), + adjustedUpdate, + instance.getObjectsToInfer(), + instance.getTextInput(), + instance.getInferenceTimeout(), + instance.isPreviouslyLicensed() + ); + r.setHighPriority(instance.isHighPriority()); + r.setPrefixType(TrainedModelPrefixStrings.PrefixType.NONE); + return r; } return instance; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/InferTrainedModelDeploymentRequestsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/InferTrainedModelDeploymentRequestsTests.java index e417235e4a094..e7d7a7e0926d1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/InferTrainedModelDeploymentRequestsTests.java +++ 
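Case 7 of mutateInstance above cycles prefixType to the next enum constant, which guarantees the mutated request differs from the original. The same idiom as a generic helper; the nextValue name is ours, not part of the PR:

final class EnumCycling {
    private EnumCycling() {}

    // Returns the "next" constant, wrapping around, so for any enum with more
    // than one constant the result always differs from the input.
    static <E extends Enum<E>> E nextValue(E current) {
        E[] values = current.getDeclaringClass().getEnumConstants();
        return values[(current.ordinal() + 1) % values.length];
    }
}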
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/InferTrainedModelDeploymentRequestsTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelPrefixStrings; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.EmptyConfigUpdateTests; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfigUpdate; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ZeroShotClassificationConfigUpdateTests; @@ -36,9 +37,10 @@ protected Writeable.Reader instanceRe @Override protected InferTrainedModelDeploymentAction.Request createTestInstance() { boolean createQueryStringRequest = randomBoolean(); + InferTrainedModelDeploymentAction.Request request; if (createQueryStringRequest) { - return InferTrainedModelDeploymentAction.Request.forTextInput( + request = InferTrainedModelDeploymentAction.Request.forTextInput( randomAlphaOfLength(4), randomBoolean() ? null : randomInferenceConfigUpdate(), Arrays.asList(generateRandomStringArray(4, 7, false)), @@ -50,13 +52,16 @@ protected InferTrainedModelDeploymentAction.Request createTestInstance() { () -> randomMap(1, 3, () -> Tuple.tuple(randomAlphaOfLength(7), randomAlphaOfLength(7))) ); - return InferTrainedModelDeploymentAction.Request.forDocs( + request = InferTrainedModelDeploymentAction.Request.forDocs( randomAlphaOfLength(4), randomBoolean() ? null : randomInferenceConfigUpdate(), docs, randomBoolean() ? null : TimeValue.parseTimeValue(randomTimeValue(), "timeout") ); } + request.setHighPriority(randomBoolean()); + request.setPrefixType(randomFrom(TrainedModelPrefixStrings.PrefixType.values())); + return request; } @Override @@ -66,8 +71,7 @@ protected InferTrainedModelDeploymentAction.Request mutateInstance(InferTrainedM @Override protected NamedWriteableRegistry getNamedWriteableRegistry() { - List entries = new ArrayList<>(); - entries.addAll(new MlInferenceNamedXContentProvider().getNamedWriteables()); + List entries = new ArrayList<>(new MlInferenceNamedXContentProvider().getNamedWriteables()); return new NamedWriteableRegistry(entries); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java index e08e61a6554a7..f6c859830119b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java @@ -8,13 +8,11 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.aggregations.AggregationsPlugin; import org.elasticsearch.aggregations.pipeline.DerivativePipelineAggregationBuilder; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -68,7 +66,6 @@ import static org.hamcrest.Matchers.is; import static 
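createTestInstance above used to return from each factory branch directly; the refactor routes both results through one local so the shared setHighPriority/setPrefixType calls apply to whichever branch ran. A minimal abstract illustration of that single-exit shape (the Req interface is hypothetical, standing in for the request type):

import java.util.function.Supplier;

class SingleExitSketch {
    // Hypothetical stand-in for a request type with post-construction setters.
    interface Req {
        void setHighPriority(boolean highPriority);
    }

    // Both branches feed one local, so the shared setter runs exactly once
    // regardless of which factory produced the request.
    static Req build(boolean queryString, Supplier<Req> forTextInput, Supplier<Req> forDocs) {
        Req request = queryString ? forTextInput.get() : forDocs.get();
        request.setHighPriority(true);
        return request;
    }
}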
org.hamcrest.Matchers.not; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class DatafeedUpdateTests extends AbstractXContentSerializingTestCase { @@ -77,9 +74,6 @@ public class DatafeedUpdateTests extends AbstractXContentSerializingTestCase { + + private boolean lenient = randomBoolean(); + + public static TrainedModelPrefixStrings randomInstance() { + boolean noNullMembers = randomBoolean(); + if (noNullMembers) { + return new TrainedModelPrefixStrings(randomAlphaOfLength(5), randomAlphaOfLength(5)); + } else { + boolean firstIsNull = randomBoolean(); + return new TrainedModelPrefixStrings(firstIsNull ? null : randomAlphaOfLength(5), firstIsNull ? randomAlphaOfLength(5) : null); + } + } + + @Override + protected Writeable.Reader instanceReader() { + return TrainedModelPrefixStrings::new; + } + + @Override + protected TrainedModelPrefixStrings createTestInstance() { + return randomInstance(); + } + + @Override + protected boolean supportsUnknownFields() { + return lenient; + } + + @Override + protected TrainedModelPrefixStrings mutateInstance(TrainedModelPrefixStrings instance) throws IOException { + return null; + } + + @Override + protected TrainedModelPrefixStrings doParseInstance(XContentParser parser) throws IOException { + return TrainedModelPrefixStrings.fromXContent(parser, lenient); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigTests.java index ba971d535bf92..16e56b5dc73bd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigTests.java @@ -21,9 +21,9 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.ml.inference.InferenceConfigItemTestCase; import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; -import org.elasticsearch.xpack.core.ml.inference.MlLTRNamedXContentProvider; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.LearnToRankFeatureExtractorBuilder; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.QueryExtractorBuilderTests; +import org.elasticsearch.xpack.core.ml.ltr.MlLTRNamedXContentProvider; import org.junit.Before; import java.io.IOException; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigUpdateTests.java deleted file mode 100644 index 30befc767300b..0000000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigUpdateTests.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
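randomInstance() above never yields an instance with both prefixes null: it either fills both or nulls exactly one side. A restatement of that invariant as a predicate; the ingestPrefix()/searchPrefix() accessor names are assumptions, since the diff shows only the two-argument constructor:

import org.elasticsearch.xpack.core.ml.inference.TrainedModelPrefixStrings;

class PrefixStringsInvariantSketch {
    // Holds for every value randomInstance() can produce: at most one of the
    // two prefix strings is null, never both. (Accessor names assumed.)
    static boolean atLeastOnePrefixSet(TrainedModelPrefixStrings prefixes) {
        return prefixes.ingestPrefix() != null || prefixes.searchPrefix() != null;
    }
}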
- */ -package org.elasticsearch.xpack.core.ml.inference.trainedmodel; - -import org.elasticsearch.TransportVersion; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.search.SearchModule; -import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xpack.core.ml.AbstractBWCSerializationTestCase; -import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; -import org.elasticsearch.xpack.core.ml.inference.MlLTRNamedXContentProvider; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.LearnToRankFeatureExtractorBuilder; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.QueryExtractorBuilder; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.QueryExtractorBuilderTests; -import org.elasticsearch.xpack.core.ml.utils.QueryProvider; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfigTests.randomLearnToRankConfig; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.in; -import static org.hamcrest.Matchers.is; - -public class LearnToRankConfigUpdateTests extends AbstractBWCSerializationTestCase { - - public static LearnToRankConfigUpdate randomLearnToRankConfigUpdate() { - return new LearnToRankConfigUpdate( - randomBoolean() ? null : randomIntBetween(0, 10), - randomBoolean() - ? 
null - : Stream.generate(QueryExtractorBuilderTests::randomInstance).limit(randomInt(5)).collect(Collectors.toList()) - ); - } - - public void testApply() throws IOException { - LearnToRankConfig originalConfig = randomLearnToRankConfig(); - assertThat(originalConfig, equalTo(LearnToRankConfigUpdate.EMPTY_PARAMS.apply(originalConfig))); - assertThat( - new LearnToRankConfig.Builder(originalConfig).setNumTopFeatureImportanceValues(5).build(), - equalTo(new LearnToRankConfigUpdate.Builder().setNumTopFeatureImportanceValues(5).build().apply(originalConfig)) - ); - assertThat( - new LearnToRankConfig.Builder(originalConfig).setNumTopFeatureImportanceValues(1).build(), - equalTo(new LearnToRankConfigUpdate.Builder().setNumTopFeatureImportanceValues(1).build().apply(originalConfig)) - ); - - LearnToRankFeatureExtractorBuilder extractorBuilder = new QueryExtractorBuilder( - "foo", - QueryProvider.fromParsedQuery(QueryBuilders.termQuery("foo", "bar")) - ); - LearnToRankFeatureExtractorBuilder extractorBuilder2 = new QueryExtractorBuilder( - "bar", - QueryProvider.fromParsedQuery(QueryBuilders.termQuery("foo", "bar")) - ); - - LearnToRankConfig config = new LearnToRankConfigUpdate.Builder().setNumTopFeatureImportanceValues(1) - .setFeatureExtractorBuilders(List.of(extractorBuilder2, extractorBuilder)) - .build() - .apply(originalConfig); - assertThat(config.getNumTopFeatureImportanceValues(), equalTo(1)); - assertThat(extractorBuilder2, is(in(config.getFeatureExtractorBuilders()))); - assertThat(extractorBuilder, is(in(config.getFeatureExtractorBuilders()))); - } - - @Override - protected LearnToRankConfigUpdate createTestInstance() { - return randomLearnToRankConfigUpdate(); - } - - @Override - protected LearnToRankConfigUpdate mutateInstance(LearnToRankConfigUpdate instance) { - return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 - } - - @Override - protected Writeable.Reader instanceReader() { - return LearnToRankConfigUpdate::new; - } - - @Override - protected LearnToRankConfigUpdate doParseInstance(XContentParser parser) throws IOException { - return LearnToRankConfigUpdate.fromXContentStrict(parser); - } - - @Override - protected LearnToRankConfigUpdate mutateInstanceForVersion(LearnToRankConfigUpdate instance, TransportVersion version) { - return instance; - } - - @Override - protected NamedXContentRegistry xContentRegistry() { - List namedXContent = new ArrayList<>(); - namedXContent.addAll(new MlInferenceNamedXContentProvider().getNamedXContentParsers()); - namedXContent.addAll(new MlLTRNamedXContentProvider().getNamedXContentParsers()); - namedXContent.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents()); - return new NamedXContentRegistry(namedXContent); - } - - @Override - protected NamedWriteableRegistry writableRegistry() { - List namedWriteables = new ArrayList<>(new MlInferenceNamedXContentProvider().getNamedWriteables()); - namedWriteables.addAll(new MlLTRNamedXContentProvider().getNamedWriteables()); - namedWriteables.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedWriteables()); - return new NamedWriteableRegistry(namedWriteables); - } - - @Override - protected NamedWriteableRegistry getNamedWriteableRegistry() { - return writableRegistry(); - } -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ModelPackageConfigTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ModelPackageConfigTests.java index b5e82a5da75b2..bf1d74f044c1e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ModelPackageConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ModelPackageConfigTests.java @@ -18,6 +18,8 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.AbstractBWCSerializationTestCase; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelPrefixStrings; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelPrefixStringsTests; import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; import java.io.IOException; @@ -45,12 +47,14 @@ public static ModelPackageConfig randomModulePackageConfig() { randomFrom(TrainedModelType.values()).toString(), randomBoolean() ? Arrays.asList(generateRandomStringArray(randomIntBetween(0, 5), 15, false)) : null, randomBoolean() ? randomAlphaOfLength(10) : null, - randomBoolean() ? randomAlphaOfLength(10) : null + randomBoolean() ? randomAlphaOfLength(10) : null, + TrainedModelPrefixStringsTests.randomInstance() + // randomBoolean() ? TrainedModelPrefixStringsTests.randomInstance() : null ); } public static ModelPackageConfig mutateModelPackageConfig(ModelPackageConfig instance) { - switch (between(0, 12)) { + switch (between(0, 13)) { case 0: return new ModelPackageConfig.Builder(instance).setPackedModelId(randomAlphaOfLength(15)).build(); case 1: @@ -87,6 +91,12 @@ public static ModelPackageConfig mutateModelPackageConfig(ModelPackageConfig ins return new ModelPackageConfig.Builder(instance).setVocabularyFile(randomAlphaOfLength(15)).build(); case 12: return new ModelPackageConfig.Builder(instance).setPlatformArchitecture(randomAlphaOfLength(15)).build(); + case 13: { + TrainedModelPrefixStrings mutatedPrefixes = instance.getPrefixStrings() == null + ? TrainedModelPrefixStringsTests.randomInstance() + : null; + return new ModelPackageConfig.Builder(instance).setPrefixStrings(mutatedPrefixes).build(); + } default: throw new AssertionError("Illegal randomisation branch"); } @@ -114,10 +124,14 @@ protected ModelPackageConfig mutateInstance(ModelPackageConfig instance) { @Override protected ModelPackageConfig mutateInstanceForVersion(ModelPackageConfig instance, TransportVersion version) { + var builder = new ModelPackageConfig.Builder(instance); if (version.before(TransportVersions.ML_PACKAGE_LOADER_PLATFORM_ADDED)) { - return new ModelPackageConfig.Builder(instance).setPlatformArchitecture(null).build(); + builder.setPlatformArchitecture(null); + } + if (version.before(TransportVersions.ML_TRAINED_MODEL_PREFIX_STRINGS_ADDED)) { + builder.setPrefixStrings(null); } - return instance; + return builder.build(); } private static Map randomInferenceConfigAsMap() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeySerializationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeySerializationTests.java new file mode 100644 index 0000000000000..d2a02cd053ca1 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeySerializationTests.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
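The reworked mutateInstanceForVersion above accumulates all version-gated removals on a single builder copy and builds once, so each newly gated field costs one more if-block instead of another early return. The pattern in isolation, with names taken directly from the hunk (the import path for ModelPackageConfig is assumed from the test's package):

import org.elasticsearch.TransportVersion;
import org.elasticsearch.TransportVersions;
import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ModelPackageConfig;

class VersionGatedStripSketch {
    // Null out every field the target transport version cannot carry, then
    // build once; adding a new gated field is one more if-block here.
    static ModelPackageConfig adjustForVersion(ModelPackageConfig instance, TransportVersion version) {
        var builder = new ModelPackageConfig.Builder(instance);
        if (version.before(TransportVersions.ML_PACKAGE_LOADER_PLATFORM_ADDED)) {
            builder.setPlatformArchitecture(null);
        }
        if (version.before(TransportVersions.ML_TRAINED_MODEL_PREFIX_STRINGS_ADDED)) {
            builder.setPrefixStrings(null);
        }
        return builder.build();
    }
}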
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.action.apikey; + +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.XPackClientPlugin; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.core.security.action.apikey.ApiKeyTests.randomApiKeyInstance; +import static org.hamcrest.Matchers.nullValue; + +public class ApiKeySerializationTests extends AbstractWireSerializingTestCase { + + public void testSerializationBackwardsCompatibility() throws IOException { + ApiKey testInstance = createTestInstance(); + ApiKey deserializedInstance = copyInstance(testInstance, TransportVersions.V_8_500_064); + try { + // Transport is on a version before invalidation was introduced, so should always be null + assertThat(deserializedInstance.getInvalidation(), nullValue()); + } finally { + dispose(deserializedInstance); + } + } + + @Override + protected ApiKey createTestInstance() { + return randomApiKeyInstance(); + } + + @Override + protected ApiKey mutateInstance(ApiKey instance) throws IOException { + ApiKey copyOfInstance = copyInstance(instance); + // Metadata in test instance is mutable, so mutate it instead of the copy (immutable metadata) to make sure they differ + Object metadataNumberValue = instance.getMetadata().getOrDefault("number", Integer.toString(randomInt())); + instance.getMetadata().put("number", Integer.parseInt(metadataNumberValue.toString()) + randomInt()); + return copyOfInstance; + } + + @Override + protected Writeable.Reader instanceReader() { + return ApiKey::new; + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(new XPackClientPlugin().getNamedWriteables()); + } + + public static Map randomMetadata() { + Map randomMetadata = randomFrom( + Map.of( + "application", + randomAlphaOfLength(5), + "number", + 1, + "numbers", + List.of(1, 3, 5), + "environment", + Map.of("os", "linux", "level", 42, "category", "trusted") + ), + Map.of(randomAlphaOfLengthBetween(3, 8), randomAlphaOfLengthBetween(3, 8)), + Map.of(), + null + ); + + // Make metadata mutable for testing purposes + return randomMetadata == null ? 
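The backwards-compatibility test above relies on copyInstance(instance, version) serializing and deserializing at the pinned transport version, which is why getInvalidation() must read back as null for versions at or before V_8_500_064. A hand-rolled sketch of that round trip, assuming the harness pins the version on both streams via setTransportVersion (ApiKey's stream constructor is confirmed by the instanceReader in the diff):

import org.elasticsearch.TransportVersion;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.xpack.core.security.action.apikey.ApiKey;

class WireRoundTripSketch {
    // Serialize at an explicit transport version, then deserialize at the same
    // version; fields gated on newer versions are dropped in transit.
    static ApiKey roundTrip(ApiKey original, TransportVersion version) throws java.io.IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            out.setTransportVersion(version);
            original.writeTo(out);
            try (StreamInput in = out.bytes().streamInput()) {
                in.setTransportVersion(version);
                return new ApiKey(in);
            }
        }
    }
}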
new HashMap<>() : new HashMap<>(randomMetadata); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java index d6c218c47bc39..9e357915186a5 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java @@ -22,9 +22,11 @@ import java.io.IOException; import java.time.Instant; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTests.randomCrossClusterAccessRoleDescriptor; import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTests.randomUniquelyNamedRoleDescriptors; @@ -40,38 +42,7 @@ public class ApiKeyTests extends ESTestCase { @SuppressWarnings("unchecked") public void testXContent() throws IOException { - final String name = randomAlphaOfLengthBetween(4, 10); - final String id = randomAlphaOfLength(20); - final ApiKey.Type type = randomFrom(ApiKey.Type.values()); - // between 1970 and 2065 - final Instant creation = Instant.ofEpochSecond(randomLongBetween(0, 3000000000L), randomLongBetween(0, 999999999)); - final Instant expiration = randomBoolean() - ? null - : Instant.ofEpochSecond(randomLongBetween(0, 3000000000L), randomLongBetween(0, 999999999)); - final boolean invalidated = randomBoolean(); - final String username = randomAlphaOfLengthBetween(4, 10); - final String realmName = randomAlphaOfLengthBetween(3, 8); - final Map metadata = randomMetadata(); - final List roleDescriptors = type == ApiKey.Type.CROSS_CLUSTER - ? List.of(randomCrossClusterAccessRoleDescriptor()) - : randomFrom(randomUniquelyNamedRoleDescriptors(0, 3), null); - final List limitedByRoleDescriptors = type == ApiKey.Type.CROSS_CLUSTER - ? 
null - : randomUniquelyNamedRoleDescriptors(0, 3); - - final ApiKey apiKey = new ApiKey( - name, - id, - type, - creation, - expiration, - invalidated, - username, - realmName, - metadata, - roleDescriptors, - limitedByRoleDescriptors - ); + final ApiKey apiKey = randomApiKeyInstance(); // The metadata will never be null because the constructor convert it to empty map if a null is passed in assertThat(apiKey.getMetadata(), notNullValue()); @@ -84,51 +55,56 @@ public void testXContent() throws IOException { assertThat(ApiKey.fromXContent(parser), equalTo(apiKey)); } - assertThat(map.get("name"), equalTo(name)); - assertThat(map.get("id"), equalTo(id)); - assertThat(map.get("type"), equalTo(type.value())); - assertThat(Long.valueOf(map.get("creation").toString()), equalTo(creation.toEpochMilli())); - if (expiration != null) { - assertThat(Long.valueOf(map.get("expiration").toString()), equalTo(expiration.toEpochMilli())); + assertThat(map.get("name"), equalTo(apiKey.getName())); + assertThat(map.get("id"), equalTo(apiKey.getId())); + assertThat(map.get("type"), equalTo(apiKey.getType().value())); + assertThat(Long.valueOf(map.get("creation").toString()), equalTo(apiKey.getCreation().toEpochMilli())); + if (apiKey.getExpiration() != null) { + assertThat(Long.valueOf(map.get("expiration").toString()), equalTo(apiKey.getExpiration().toEpochMilli())); } else { assertThat(map.containsKey("expiration"), is(false)); } - assertThat(map.get("invalidated"), is(invalidated)); - assertThat(map.get("username"), equalTo(username)); - assertThat(map.get("realm"), equalTo(realmName)); - assertThat(map.get("metadata"), equalTo(Objects.requireNonNullElseGet(metadata, Map::of))); + assertThat(map.get("invalidated"), is(apiKey.isInvalidated())); + assertThat(map.get("username"), equalTo(apiKey.getUsername())); + assertThat(map.get("realm"), equalTo(apiKey.getRealm())); + assertThat(map.get("metadata"), equalTo(Objects.requireNonNullElseGet(apiKey.getMetadata(), Map::of))); - if (roleDescriptors == null) { + if (apiKey.getRoleDescriptors() == null) { assertThat(map, not(hasKey("role_descriptors"))); assertThat(map, not(hasKey("access"))); } else { final var rdMap = (Map) map.get("role_descriptors"); - assertThat(rdMap.size(), equalTo(roleDescriptors.size())); - for (var roleDescriptor : roleDescriptors) { + assertThat(rdMap.size(), equalTo(apiKey.getRoleDescriptors().size())); + for (var roleDescriptor : apiKey.getRoleDescriptors()) { assertThat(rdMap, hasKey(roleDescriptor.getName())); assertThat(XContentTestUtils.convertToMap(roleDescriptor), equalTo(rdMap.get(roleDescriptor.getName()))); } - if (type == ApiKey.Type.CROSS_CLUSTER) { + if (apiKey.getType() == ApiKey.Type.CROSS_CLUSTER) { final var accessMap = (Map) map.get("access"); final CrossClusterApiKeyRoleDescriptorBuilder roleDescriptorBuilder = CrossClusterApiKeyRoleDescriptorBuilder.parse( XContentTestUtils.convertToXContent(accessMap, XContentType.JSON).utf8ToString() ); - assertThat(roleDescriptorBuilder.build(), equalTo(roleDescriptors.get(0))); + assertThat(roleDescriptorBuilder.build(), equalTo(apiKey.getRoleDescriptors().get(0))); } else { assertThat(map, not(hasKey("access"))); } } final var limitedByList = (List>) map.get("limited_by"); - if (type != ApiKey.Type.CROSS_CLUSTER) { + if (apiKey.getType() != ApiKey.Type.CROSS_CLUSTER) { assertThat(limitedByList.size(), equalTo(1)); final Map limitedByMap = limitedByList.get(0); - assertThat(limitedByMap.size(), equalTo(limitedByRoleDescriptors.size())); - for (RoleDescriptor roleDescriptor : 
limitedByRoleDescriptors) { - assertThat(limitedByMap, hasKey(roleDescriptor.getName())); - assertThat(XContentTestUtils.convertToMap(roleDescriptor), equalTo(limitedByMap.get(roleDescriptor.getName()))); + + int roleDescriptorCount = 0; + for (Set roleDescriptors : apiKey.getLimitedBy().roleDescriptorsList()) { + for (RoleDescriptor roleDescriptor : roleDescriptors) { + assertThat(limitedByMap, hasKey(roleDescriptor.getName())); + assertThat(XContentTestUtils.convertToMap(roleDescriptor), equalTo(limitedByMap.get(roleDescriptor.getName()))); + roleDescriptorCount++; + } } + assertThat(limitedByMap.size(), equalTo(roleDescriptorCount)); } else { assertThat(limitedByList, nullValue()); } @@ -156,7 +132,7 @@ private ApiKey.Type parseTypeString(String typeString) throws IOException { } public static Map randomMetadata() { - return randomFrom( + Map randomMetadata = randomFrom( Map.of( "application", randomAlphaOfLength(5), @@ -171,5 +147,47 @@ public static Map randomMetadata() { Map.of(), null ); + + // Make metadata mutable for testing purposes + return randomMetadata == null ? new HashMap<>() : new HashMap<>(randomMetadata); + } + + public static ApiKey randomApiKeyInstance() { + final String name = randomAlphaOfLengthBetween(4, 10); + final String id = randomAlphaOfLength(20); + final ApiKey.Type type = randomFrom(ApiKey.Type.values()); + // between 1970 and 2065 + final Instant creation = Instant.ofEpochSecond(randomLongBetween(0, 3000000000L), randomLongBetween(0, 999999999)); + final Instant expiration = randomBoolean() + ? null + : Instant.ofEpochSecond(randomLongBetween(0, 3000000000L), randomLongBetween(0, 999999999)); + final boolean invalidated = randomBoolean(); + final Instant invalidation = invalidated + ? Instant.ofEpochSecond(randomLongBetween(0, 3000000000L), randomLongBetween(0, 999999999)) + : null; + final String username = randomAlphaOfLengthBetween(4, 10); + final String realmName = randomAlphaOfLengthBetween(3, 8); + final Map metadata = randomMetadata(); + final List roleDescriptors = type == ApiKey.Type.CROSS_CLUSTER + ? List.of(randomCrossClusterAccessRoleDescriptor()) + : randomFrom(randomUniquelyNamedRoleDescriptors(0, 3), null); + final List limitedByRoleDescriptors = type == ApiKey.Type.CROSS_CLUSTER + ? null + : randomUniquelyNamedRoleDescriptors(0, 3); + + return new ApiKey( + name, + id, + type, + creation, + expiration, + invalidated, + invalidation, + username, + realmName, + metadata, + roleDescriptors, + limitedByRoleDescriptors + ); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java index 3fcf8ea5a0deb..a32ed8f53f5b2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyResponseTests.java @@ -49,6 +49,7 @@ public void testSerialization() throws IOException { Instant.now(), (withExpiration) ? Instant.now() : null, false, + null, randomAlphaOfLength(4), randomAlphaOfLength(5), randomBoolean() ? 
null : Map.of(randomAlphaOfLengthBetween(3, 8), randomAlphaOfLengthBetween(3, 8)), @@ -110,6 +111,7 @@ public void testToXContent() throws IOException { Instant.ofEpochMilli(100000L), Instant.ofEpochMilli(10000000L), false, + null, "user-a", "realm-x", null, @@ -123,6 +125,7 @@ public void testToXContent() throws IOException { Instant.ofEpochMilli(100000L), Instant.ofEpochMilli(10000000L), true, + Instant.ofEpochMilli(100000000L), "user-b", "realm-y", Map.of(), @@ -136,6 +139,7 @@ public void testToXContent() throws IOException { Instant.ofEpochMilli(100000L), null, true, + Instant.ofEpochMilli(100000000L), "user-c", "realm-z", Map.of("foo", "bar"), @@ -159,6 +163,7 @@ public void testToXContent() throws IOException { Instant.ofEpochMilli(100000L), null, true, + Instant.ofEpochMilli(100000000L), "user-c", "realm-z", Map.of("foo", "bar"), @@ -192,6 +197,7 @@ public void testToXContent() throws IOException { "creation": 100000, "expiration": 10000000, "invalidated": true, + "invalidation": 100000000, "username": "user-b", "realm": "realm-y", "metadata": {}, @@ -231,6 +237,7 @@ public void testToXContent() throws IOException { %s "creation": 100000, "invalidated": true, + "invalidation": 100000000, "username": "user-c", "realm": "realm-z", "metadata": { @@ -297,6 +304,7 @@ public void testToXContent() throws IOException { %s "creation": 100000, "invalidated": true, + "invalidation": 100000000, "username": "user-c", "realm": "realm-z", "metadata": { @@ -365,6 +373,7 @@ private ApiKey createApiKeyInfo( Instant creation, Instant expiration, boolean invalidated, + Instant invalidation, String username, String realm, Map metadata, @@ -378,6 +387,7 @@ private ApiKey createApiKeyInfo( creation, expiration, invalidated, + invalidation, username, realm, metadata, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyResponseTests.java index 9ee71415a91ea..677d2201fe1e1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyResponseTests.java @@ -105,6 +105,7 @@ private ApiKey randomApiKeyInfo() { creation, expiration, false, + null, username, realm_name, metadata, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java index d864a89581a18..eebf4ab46b2dd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java @@ -246,10 +246,6 @@ private PutRoleRequest buildRequestWithApplicationPrivilege(String appName, Stri return request; } - private PutRoleRequest buildRandomRequest() { - return buildRandomRequest(true); - } - private PutRoleRequest buildRandomRequest(boolean allowRemoteIndices) { final PutRoleRequest request = new PutRoleRequest(); request.name(randomAlphaOfLengthBetween(4, 9)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRoleTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRoleTests.java 
index 6a3883672023a..7ab450fc2e191 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRoleTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRoleTests.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexAction; import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; import org.elasticsearch.action.bulk.BulkAction; -import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -299,7 +299,7 @@ public void testAuthorize() { .build(); IndicesAccessControl iac = fromRole.authorize( - SearchAction.NAME, + TransportSearchAction.TYPE.name(), Sets.newHashSet("_index", "_alias1"), md.getIndicesLookup(), fieldPermissionsCache @@ -328,7 +328,7 @@ public void testAuthorize() { .add(IndexPrivilege.NONE, "_index1") .build(); iac = limitedByRole.authorize( - SearchAction.NAME, + TransportSearchAction.TYPE.name(), Sets.newHashSet("_index", "_alias1"), md.getIndicesLookup(), fieldPermissionsCache @@ -367,7 +367,12 @@ public void testAuthorize() { } else { role = fromRole.limitedBy(limitedByRole); } - iac = role.authorize(SearchAction.NAME, Sets.newHashSet("_index", "_alias1"), md.getIndicesLookup(), fieldPermissionsCache); + iac = role.authorize( + TransportSearchAction.TYPE.name(), + Sets.newHashSet("_index", "_alias1"), + md.getIndicesLookup(), + fieldPermissionsCache + ); assertThat(iac.isGranted(), is(false)); assertThat(iac.getIndexPermissions("_index"), is(notNullValue())); assertThat(iac.hasIndexPermissions("_index"), is(true)); @@ -440,12 +445,12 @@ public void testCheckClusterAction() { public void testCheckIndicesAction() { Role fromRole = Role.builder(EMPTY_RESTRICTED_INDICES, "a-role").add(IndexPrivilege.READ, "ind-1").build(); - assertThat(fromRole.checkIndicesAction(SearchAction.NAME), is(true)); + assertThat(fromRole.checkIndicesAction(TransportSearchAction.TYPE.name()), is(true)); assertThat(fromRole.checkIndicesAction(CreateIndexAction.NAME), is(false)); { Role limitedByRole = Role.builder(EMPTY_RESTRICTED_INDICES, "limited-role").add(IndexPrivilege.ALL, "ind-1").build(); - assertThat(limitedByRole.checkIndicesAction(SearchAction.NAME), is(true)); + assertThat(limitedByRole.checkIndicesAction(TransportSearchAction.TYPE.name()), is(true)); assertThat(limitedByRole.checkIndicesAction(CreateIndexAction.NAME), is(true)); Role role; if (randomBoolean()) { @@ -453,64 +458,79 @@ public void testCheckIndicesAction() { } else { role = fromRole.limitedBy(limitedByRole); } - assertThat(role.checkIndicesAction(SearchAction.NAME), is(true)); + assertThat(role.checkIndicesAction(TransportSearchAction.TYPE.name()), is(true)); assertThat(role.checkIndicesAction(CreateIndexAction.NAME), is(false)); } { Role limitedByRole = Role.builder(EMPTY_RESTRICTED_INDICES, "limited-role").add(IndexPrivilege.NONE, "ind-1").build(); - assertThat(limitedByRole.checkIndicesAction(SearchAction.NAME), is(false)); + assertThat(limitedByRole.checkIndicesAction(TransportSearchAction.TYPE.name()), is(false)); Role role; if (randomBoolean()) { role = limitedByRole.limitedBy(fromRole); } else { role = fromRole.limitedBy(limitedByRole); } - assertThat(role.checkIndicesAction(SearchAction.NAME), is(false)); + 
assertThat(role.checkIndicesAction(TransportSearchAction.TYPE.name()), is(false)); assertThat(role.checkIndicesAction(CreateIndexAction.NAME), is(false)); } } public void testAllowedIndicesMatcher() { Role fromRole = Role.builder(EMPTY_RESTRICTED_INDICES, "a-role").add(IndexPrivilege.READ, "ind-1*").build(); - assertThat(fromRole.allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("ind-1")), is(true)); - assertThat(fromRole.allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("ind-11")), is(true)); - assertThat(fromRole.allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("ind-2")), is(false)); + assertThat(fromRole.allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction("ind-1")), is(true)); + assertThat(fromRole.allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction("ind-11")), is(true)); + assertThat(fromRole.allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction("ind-2")), is(false)); { Role limitedByRole = Role.builder(EMPTY_RESTRICTED_INDICES, "limited-role").add(IndexPrivilege.READ, "ind-1", "ind-2").build(); - assertThat(limitedByRole.allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("ind-1")), is(true)); - assertThat(limitedByRole.allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("ind-11")), is(false)); - assertThat(limitedByRole.allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("ind-2")), is(true)); + assertThat( + limitedByRole.allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction("ind-1")), + is(true) + ); + assertThat( + limitedByRole.allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction("ind-11")), + is(false) + ); + assertThat( + limitedByRole.allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction("ind-2")), + is(true) + ); Role role; if (randomBoolean()) { role = limitedByRole.limitedBy(fromRole); } else { role = fromRole.limitedBy(limitedByRole); } - assertThat(role.allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("ind-1")), is(true)); - assertThat(role.allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("ind-11")), is(false)); - assertThat(role.allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("ind-2")), is(false)); + assertThat(role.allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction("ind-1")), is(true)); + assertThat(role.allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction("ind-11")), is(false)); + assertThat(role.allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction("ind-2")), is(false)); } { Role limitedByRole = Role.builder(EMPTY_RESTRICTED_INDICES, "limited-role").add(IndexPrivilege.READ, "ind-*").build(); - assertThat(limitedByRole.allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("ind-1")), is(true)); - assertThat(limitedByRole.allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("ind-2")), is(true)); + assertThat( + limitedByRole.allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction("ind-1")), + is(true) + ); + assertThat( + limitedByRole.allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction("ind-2")), + is(true) + ); Role role; if (randomBoolean()) { role = limitedByRole.limitedBy(fromRole); } else { role = fromRole.limitedBy(limitedByRole); } - 
assertThat(role.allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("ind-1")), is(true)); - assertThat(role.allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("ind-2")), is(false)); + assertThat(role.allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction("ind-1")), is(true)); + assertThat(role.allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction("ind-2")), is(false)); } } public void testAllowedIndicesMatcherWithNestedRole() { Role role = Role.builder(EMPTY_RESTRICTED_INDICES, "a-role").add(IndexPrivilege.READ, "ind-1*").build(); - assertThat(role.allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("ind-1")), is(true)); - assertThat(role.allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("ind-11")), is(true)); - assertThat(role.allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("ind-2")), is(false)); + assertThat(role.allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction("ind-1")), is(true)); + assertThat(role.allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction("ind-11")), is(true)); + assertThat(role.allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction("ind-2")), is(false)); final int depth = randomIntBetween(2, 4); boolean index11Excluded = false; @@ -526,9 +546,12 @@ public void testAllowedIndicesMatcherWithNestedRole() { } else { role = role.limitedBy(limitedByRole); } - assertThat(role.allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("ind-1")), is(true)); - assertThat(role.allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("ind-11")), is(false == index11Excluded)); - assertThat(role.allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("ind-2")), is(false)); + assertThat(role.allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction("ind-1")), is(true)); + assertThat( + role.allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction("ind-11")), + is(false == index11Excluded) + ); + assertThat(role.allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction("ind-2")), is(false)); } } @@ -540,13 +563,13 @@ public void testAllowedActionsMatcher() { .build(); Automaton fromRoleAutomaton = fromRole.allowedActionsMatcher("index1"); Predicate fromRolePredicate = Automatons.predicate(fromRoleAutomaton); - assertThat(fromRolePredicate.test(SearchAction.NAME), is(true)); + assertThat(fromRolePredicate.test(TransportSearchAction.TYPE.name()), is(true)); assertThat(fromRolePredicate.test(BulkAction.NAME), is(true)); Role limitedByRole = Role.builder(EMPTY_RESTRICTED_INDICES, "limitedRole").add(IndexPrivilege.READ, "index1", "index2").build(); Automaton limitedByRoleAutomaton = limitedByRole.allowedActionsMatcher("index1"); Predicate limitedByRolePredicated = Automatons.predicate(limitedByRoleAutomaton); - assertThat(limitedByRolePredicated.test(SearchAction.NAME), is(true)); + assertThat(limitedByRolePredicated.test(TransportSearchAction.TYPE.name()), is(true)); assertThat(limitedByRolePredicated.test(BulkAction.NAME), is(false)); Role role; if (randomBoolean()) { @@ -557,17 +580,17 @@ public void testAllowedActionsMatcher() { Automaton roleAutomaton = role.allowedActionsMatcher("index1"); Predicate rolePredicate = Automatons.predicate(roleAutomaton); - assertThat(rolePredicate.test(SearchAction.NAME), is(true)); + 
assertThat(rolePredicate.test(TransportSearchAction.TYPE.name()), is(true)); assertThat(rolePredicate.test(BulkAction.NAME), is(false)); roleAutomaton = role.allowedActionsMatcher("index2"); rolePredicate = Automatons.predicate(roleAutomaton); - assertThat(rolePredicate.test(SearchAction.NAME), is(true)); + assertThat(rolePredicate.test(TransportSearchAction.TYPE.name()), is(true)); assertThat(rolePredicate.test(BulkAction.NAME), is(false)); roleAutomaton = role.allowedActionsMatcher("other"); rolePredicate = Automatons.predicate(roleAutomaton); - assertThat(rolePredicate.test(SearchAction.NAME), is(false)); + assertThat(rolePredicate.test(TransportSearchAction.TYPE.name()), is(false)); assertThat(rolePredicate.test(BulkAction.NAME), is(false)); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilegeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilegeTests.java index 37f8999031417..9dde594653367 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilegeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilegeTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.delete.DeleteAction; import org.elasticsearch.action.index.IndexAction; -import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.update.UpdateAction; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.test.ESTestCase; @@ -59,7 +59,7 @@ public void testOrderingOfPrivilegeNames() throws Exception { } public void testFindPrivilegesThatGrant() { - assertThat(findPrivilegesThatGrant(SearchAction.NAME), equalTo(List.of("read", "all"))); + assertThat(findPrivilegesThatGrant(TransportSearchAction.TYPE.name()), equalTo(List.of("read", "all"))); assertThat(findPrivilegesThatGrant(IndexAction.NAME), equalTo(List.of("create_doc", "create", "index", "write", "all"))); assertThat(findPrivilegesThatGrant(UpdateAction.NAME), equalTo(List.of("index", "write", "all"))); assertThat(findPrivilegesThatGrant(DeleteAction.NAME), equalTo(List.of("delete", "write", "all"))); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 5028066f67ad9..6ee70173f505e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -52,9 +52,9 @@ import org.elasticsearch.action.ingest.GetPipelineAction; import org.elasticsearch.action.ingest.PutPipelineAction; import org.elasticsearch.action.ingest.SimulatePipelineAction; -import org.elasticsearch.action.search.MultiSearchAction; -import org.elasticsearch.action.search.SearchAction; -import org.elasticsearch.action.search.SearchShardsAction; +import org.elasticsearch.action.search.TransportMultiSearchAction; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchShardsAction; import org.elasticsearch.action.support.PlainActionFuture; import 
org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.update.UpdateAction; @@ -615,8 +615,14 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(index)), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(mockIndexAbstraction(index)), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(true) + ); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(true) + ); assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(mockIndexAbstraction(index)), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(mockIndexAbstraction(index)), is(true)); }); @@ -634,8 +640,14 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(index)), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(mockIndexAbstraction(index)), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(true) + ); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(true) + ); assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(mockIndexAbstraction(index)), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(mockIndexAbstraction(index)), is(false)); }); @@ -653,8 +665,14 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(mockIndexAbstraction(index)), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(mockIndexAbstraction(index)), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(index)), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(mockIndexAbstraction(index)), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(true) + ); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(true) + ); 
assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(mockIndexAbstraction(index)), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(mockIndexAbstraction(index)), is(false)); }); @@ -669,8 +687,14 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(index)), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(mockIndexAbstraction(index)), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(true) + ); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(true) + ); assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(mockIndexAbstraction(index)), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(mockIndexAbstraction(index)), is(true)); }); @@ -688,8 +712,14 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(mockIndexAbstraction(index)), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(index)), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(mockIndexAbstraction(index)), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(true) + ); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(true) + ); assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(mockIndexAbstraction(index)), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(mockIndexAbstraction(index)), is(true)); }); @@ -702,8 +732,14 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(mockIndexAbstraction(index)), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(index)), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(mockIndexAbstraction(index)), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(true) + ); + assertThat( + 
+                kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(mockIndexAbstraction(index)),
+                is(true)
+            );
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(mockIndexAbstraction(index)), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(mockIndexAbstraction(index)), is(false));
@@ -725,8 +761,8 @@ public void testKibanaSystemRole() {
             assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(indexAbstraction), is(false));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(indexAbstraction), is(true));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true));
@@ -744,8 +780,8 @@ public void testKibanaSystemRole() {
             assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(indexAbstraction), is(false));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(indexAbstraction), is(true));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true));
@@ -770,8 +806,11 @@ public void testKibanaSystemRole() {
         assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(dotFleetSecretsIndex), is(true));
         assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(dotFleetSecretsIndex), is(true));
         assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(dotFleetSecretsIndex), is(true));
-        assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(dotFleetSecretsIndex), is(false));
-        assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(dotFleetSecretsIndex), is(false));
+        assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(dotFleetSecretsIndex), is(false));
+        assertThat(
+            kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(dotFleetSecretsIndex),
+            is(false)
+        );
         assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(dotFleetSecretsIndex), is(false));
         assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(dotFleetSecretsIndex), is(false));
@@ -787,8 +826,14 @@ public void testKibanaSystemRole() {
             assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(mockIndexAbstraction(index)), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(mockIndexAbstraction(index)), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(mockIndexAbstraction(index)), is(false));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(index)), is(true));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(mockIndexAbstraction(index)), is(true));
+            assertThat(
+                kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)),
+                is(true)
+            );
+            assertThat(
+                kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(mockIndexAbstraction(index)),
+                is(true)
+            );
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(mockIndexAbstraction(index)), is(true));
         });
@@ -802,8 +847,8 @@ public void testKibanaSystemRole() {
             assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(indexAbstraction), is(false));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(indexAbstraction), is(true));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true));
@@ -820,8 +865,8 @@ public void testKibanaSystemRole() {
             assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(indexAbstraction), is(true));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(indexAbstraction), is(true));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true));
@@ -839,8 +884,8 @@ public void testKibanaSystemRole() {
             assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(indexAbstraction), is(true));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(indexAbstraction), is(true));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true));
@@ -858,8 +903,8 @@ public void testKibanaSystemRole() {
             assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(indexAbstraction), is(false));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(indexAbstraction), is(true));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true));
@@ -880,8 +925,14 @@ public void testKibanaSystemRole() {
             assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(mockIndexAbstraction(index)), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(mockIndexAbstraction(index)), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), is(false));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(index)), is(false));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(mockIndexAbstraction(index)), is(false));
+            assertThat(
+                kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)),
+                is(false)
+            );
+            assertThat(
+                kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(mockIndexAbstraction(index)),
+                is(false)
+            );
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(mockIndexAbstraction(index)), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(mockIndexAbstraction(index)), is(false));
         });
@@ -918,8 +969,8 @@ public void testKibanaSystemRole() {
             assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(indexAbstraction), is(false));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(indexAbstraction), is(true));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true));
@@ -937,8 +988,8 @@ public void testKibanaSystemRole() {
             assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(indexAbstraction), is(false));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(indexAbstraction), is(true));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true));
@@ -955,8 +1006,14 @@ public void testKibanaSystemRole() {
             assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(mockIndexAbstraction(index)), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(mockIndexAbstraction(index)), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), is(false));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(index)), is(true));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(mockIndexAbstraction(index)), is(true));
+            assertThat(
+                kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)),
+                is(true)
+            );
+            assertThat(
+                kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(mockIndexAbstraction(index)),
+                is(true)
+            );
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(mockIndexAbstraction(index)), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(mockIndexAbstraction(index)), is(false));
@@ -1029,8 +1086,14 @@ public void testKibanaSystemRole() {
                 || indexName.startsWith(".logs-endpoint.heartbeat-")
                 || indexName.startsWith(".logs-osquery_manager.actions-");
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(indexAbstraction), is(isAlsoReadIndex));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(indexAbstraction), is(isAlsoReadIndex));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(indexAbstraction), is(isAlsoReadIndex));
+            assertThat(
+                kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction),
+                is(isAlsoReadIndex)
+            );
+            assertThat(
+                kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction),
+                is(isAlsoReadIndex)
+            );
 
             // Endpoint diagnostic, APM and Synthetics data streams also have an ILM policy with a delete action, all others should not.
             final boolean isAlsoIlmDeleteIndex = indexName.startsWith(".logs-endpoint.diagnostic.collection-")
@@ -1078,7 +1141,7 @@ public void testKibanaSystemRole() {
             logger.info("index name [{}]", indexName);
             final IndexAbstraction indexAbstraction = mockIndexAbstraction(indexName);
             // Allow indexing
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateAction.NAME).test(indexAbstraction), is(true));
@@ -1116,7 +1179,7 @@ public void testKibanaSystemRole() {
         Arrays.asList("logs-ti_recordedfuture_latest.threat", "logs-ti_anomali_latest.threatstream").forEach(indexName -> {
             final IndexAbstraction indexAbstraction = mockIndexAbstraction(indexName);
             // Allow search and indexing
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateAction.NAME).test(indexAbstraction), is(true));
@@ -1158,8 +1221,8 @@ public void testKibanaSystemRole() {
             assertThat(kibanaRole.indices().allowedIndicesMatcher(PutMappingAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(RolloverAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(indexAbstraction), is(true));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(indexAbstraction), is(true));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(indexAbstraction), is(true));
 
             // Implied by the overall view_index_metadata and monitor privilege
@@ -1177,7 +1240,7 @@ public void testKibanaSystemRole() {
             logger.info("index name [{}]", indexName);
             final IndexAbstraction indexAbstraction = mockIndexAbstraction(indexName);
             // Allow indexing
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateAction.NAME).test(indexAbstraction), is(true));
@@ -1205,8 +1268,8 @@ public void testKibanaSystemRole() {
             assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(indexAbstraction), is(false));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(indexAbstraction), is(true));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true));
@@ -1223,8 +1286,8 @@ public void testKibanaSystemRole() {
             assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(indexAbstraction), is(false));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(indexAbstraction), is(true));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true));
@@ -1243,7 +1306,7 @@ public void testKibanaSystemRole() {
             logger.info("index name [{}]", indexName);
             final IndexAbstraction indexAbstraction = mockIndexAbstraction(indexName);
             // Allow indexing
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateAction.NAME).test(indexAbstraction), is(true));
@@ -1280,8 +1343,8 @@ public void testKibanaSystemRole() {
             assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(indexAbstraction), is(false));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(indexAbstraction), is(true));
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true));
@@ -1318,7 +1381,7 @@ public void testKibanaSystemRole() {
         Arrays.asList("kibana_sample_data_ecommerce", "kibana_sample_data_ecommerce_transform" + randomInt()).forEach(indexName -> {
             final IndexAbstraction indexAbstraction = mockIndexAbstraction(indexName);
             // Allow search and indexing
-            assertThat(kibanaRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(indexAbstraction), is(true));
             assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateAction.NAME).test(indexAbstraction), is(true));
@@ -1340,6 +1403,13 @@ public void testKibanaSystemRole() {
         Arrays.asList("risk-score.risk-score-" + randomAlphaOfLength(randomIntBetween(0, 13)))
             .forEach(indexName -> assertAllIndicesAccessAllowed(kibanaRole, indexName));
+
+        Arrays.asList(".asset-criticality.asset-criticality-" + randomAlphaOfLength(randomIntBetween(0, 13))).forEach(indexName -> {
+            final IndexAbstraction indexAbstraction = mockIndexAbstraction(indexName);
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(indexAbstraction), is(true));
+            assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true));
+            assertViewIndexMetadata(kibanaRole, indexName);
+        });
     }
 
     public void testKibanaAdminRole() {
@@ -1515,12 +1585,18 @@ public void testMonitoringUserRole() {
         assertThat(monitoringUserRole.runAs().check(randomAlphaOfLengthBetween(1, 12)), is(false));
 
-        assertThat(monitoringUserRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("foo")), is(false));
         assertThat(
-            monitoringUserRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(".reporting")),
+            monitoringUserRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction("foo")),
+            is(false)
+        );
+        assertThat(
+            monitoringUserRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(".reporting")),
+            is(false)
+        );
+        assertThat(
+            monitoringUserRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(".kibana")),
            is(false)
         );
-        assertThat(monitoringUserRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(".kibana")), is(false));
         assertThat(
             monitoringUserRole.indices().allowedIndicesMatcher("indices:foo").test(mockIndexAbstraction(randomAlphaOfLengthBetween(8, 24))),
             is(false)
@@ -1549,7 +1625,10 @@ public void testMonitoringUserRole() {
             monitoringUserRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)),
             is(false)
         );
-        assertThat(monitoringUserRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(index)), is(true));
+        assertThat(
+            monitoringUserRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)),
+            is(true)
+        );
         assertThat(monitoringUserRole.indices().allowedIndicesMatcher(GetAction.NAME).test(mockIndexAbstraction(index)), is(true));
         assertThat(monitoringUserRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(mockIndexAbstraction(index)), is(true));
@@ -1644,15 +1723,19 @@ public void testRemoteMonitoringAgentRole() {
         assertThat(remoteMonitoringAgentRole.runAs().check(randomAlphaOfLengthBetween(1, 12)), is(false));
 
         assertThat(
-            remoteMonitoringAgentRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("foo")),
+            remoteMonitoringAgentRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction("foo")),
             is(false)
         );
         assertThat(
-            remoteMonitoringAgentRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(".reporting")),
+            remoteMonitoringAgentRole.indices()
+                .allowedIndicesMatcher(TransportSearchAction.TYPE.name())
+                .test(mockIndexAbstraction(".reporting")),
             is(false)
         );
         assertThat(
-            remoteMonitoringAgentRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(".kibana")),
+            remoteMonitoringAgentRole.indices()
+                .allowedIndicesMatcher(TransportSearchAction.TYPE.name())
+                .test(mockIndexAbstraction(".kibana")),
             is(false)
         );
         assertThat(
@@ -1694,7 +1777,9 @@ public void testRemoteMonitoringAgentRole() {
             is(true)
         );
         assertThat(
-            remoteMonitoringAgentRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(monitoringIndex)),
+            remoteMonitoringAgentRole.indices()
+                .allowedIndicesMatcher(TransportSearchAction.TYPE.name())
+                .test(mockIndexAbstraction(monitoringIndex)),
             is(true)
         );
         assertThat(
@@ -1768,7 +1853,9 @@ public void testRemoteMonitoringAgentRole() {
             is(false)
         );
         assertThat(
-            remoteMonitoringAgentRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(metricbeatIndex)),
+            remoteMonitoringAgentRole.indices()
+                .allowedIndicesMatcher(TransportSearchAction.TYPE.name())
+                .test(mockIndexAbstraction(metricbeatIndex)),
             is(false)
         );
         assertThat(
@@ -1811,15 +1898,21 @@ public void testRemoteMonitoringCollectorRole() {
             is(true)
         );
         assertThat(
-            remoteMonitoringCollectorRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("foo")),
+            remoteMonitoringCollectorRole.indices()
+                .allowedIndicesMatcher(TransportSearchAction.TYPE.name())
+                .test(mockIndexAbstraction("foo")),
             is(false)
         );
         assertThat(
-            remoteMonitoringCollectorRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(".reporting")),
+            remoteMonitoringCollectorRole.indices()
+                .allowedIndicesMatcher(TransportSearchAction.TYPE.name())
+                .test(mockIndexAbstraction(".reporting")),
             is(false)
         );
         assertThat(
-            remoteMonitoringCollectorRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(".kibana")),
+            remoteMonitoringCollectorRole.indices()
+                .allowedIndicesMatcher(TransportSearchAction.TYPE.name())
+                .test(mockIndexAbstraction(".kibana")),
             is(true)
         );
         assertThat(
@@ -1881,7 +1974,9 @@ public void testRemoteMonitoringCollectorRole() {
             is(false)
        );
         assertThat(
-            remoteMonitoringCollectorRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(index)),
+            remoteMonitoringCollectorRole.indices()
+                .allowedIndicesMatcher(TransportSearchAction.TYPE.name())
+                .test(mockIndexAbstraction(index)),
             is(false)
         );
         assertThat(
@@ -1959,13 +2054,13 @@ public void testRemoteMonitoringCollectorRole() {
         assertThat(
             remoteMonitoringCollectorRole.indices()
-                .allowedIndicesMatcher(SearchAction.NAME)
+                .allowedIndicesMatcher(TransportSearchAction.TYPE.name())
                 .test(mockIndexAbstraction(randomFrom(TestRestrictedIndices.SAMPLE_RESTRICTED_NAMES))),
             is(false)
         );
         assertThat(
             remoteMonitoringCollectorRole.indices()
-                .allowedIndicesMatcher(SearchAction.NAME)
+                .allowedIndicesMatcher(TransportSearchAction.TYPE.name())
                 .test(mockIndexAbstraction(XPackPlugin.ASYNC_RESULTS_INDEX + randomAlphaOfLengthBetween(0, 2))),
             is(false)
         );
@@ -2074,12 +2169,18 @@ public void testReportingUserRole() {
         assertThat(reportingUserRole.runAs().check(randomAlphaOfLengthBetween(1, 12)), is(false));
 
-        assertThat(reportingUserRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction("foo")), is(false));
         assertThat(
-            reportingUserRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(".reporting")),
+            reportingUserRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction("foo")),
+            is(false)
+        );
+        assertThat(
+            reportingUserRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(".reporting")),
+            is(false)
+        );
+        assertThat(
+            reportingUserRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(".kibana")),
            is(false)
         );
-        assertThat(reportingUserRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(".kibana")), is(false));
         assertThat(
             reportingUserRole.indices().allowedIndicesMatcher("indices:foo").test(mockIndexAbstraction(randomAlphaOfLengthBetween(8, 24))),
             is(false)
@@ -2094,7 +2195,10 @@ public void testReportingUserRole() {
             reportingUserRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)),
             is(false)
         );
-        assertThat(reportingUserRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(index)), is(false));
+        assertThat(
+            reportingUserRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)),
+            is(false)
+        );
         assertThat(reportingUserRole.indices().allowedIndicesMatcher(GetAction.NAME).test(mockIndexAbstraction(index)), is(false));
         assertThat(reportingUserRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(mockIndexAbstraction(index)), is(false));
         assertThat(reportingUserRole.indices().allowedIndicesMatcher(UpdateAction.NAME).test(mockIndexAbstraction(index)), is(false));
@@ -2163,7 +2267,7 @@ public void testSuperuserRole() {
         FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY);
         SortedMap<String, IndexAbstraction> lookup = metadata.getIndicesLookup();
         IndicesAccessControl iac = superuserRole.indices()
-            .authorize(SearchAction.NAME, Sets.newHashSet("a1", "ba"), lookup, fieldPermissionsCache);
+            .authorize(TransportSearchAction.TYPE.name(), Sets.newHashSet("a1", "ba"), lookup, fieldPermissionsCache);
         assertThat(iac.hasIndexPermissions("a1"), is(true));
         assertThat(iac.hasIndexPermissions("b"), is(true));
         iac = superuserRole.indices().authorize(DeleteIndexAction.NAME, Sets.newHashSet("a1", "ba"), lookup, fieldPermissionsCache);
@@ -2179,7 +2283,7 @@ public void testSuperuserRole() {
         // Read security indices => allowed
         iac = superuserRole.indices()
             .authorize(
-                randomFrom(SearchAction.NAME, GetIndexAction.NAME),
+                randomFrom(TransportSearchAction.TYPE.name(), GetIndexAction.NAME),
                 Sets.newHashSet(TestRestrictedIndices.SECURITY_MAIN_ALIAS),
                 lookup,
                 fieldPermissionsCache
@@ -2198,7 +2302,7 @@ public void testSuperuserRole() {
         assertThat("For " + iac, iac.hasIndexPermissions(TestRestrictedIndices.SECURITY_MAIN_ALIAS), is(false));
         assertThat("For " + iac, iac.hasIndexPermissions(internalSecurityIndex), is(false));
 
-        assertTrue(superuserRole.indices().check(SearchAction.NAME));
+        assertTrue(superuserRole.indices().check(TransportSearchAction.TYPE.name()));
         assertFalse(superuserRole.indices().check("unknown"));
 
         assertThat(superuserRole.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(true));
@@ -2313,8 +2417,14 @@ public void testBeatsAdminRole() {
         assertThat(beatsAdminRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(mockIndexAbstraction(index)), is(true));
         assertThat(beatsAdminRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(mockIndexAbstraction(index)), is(true));
         assertThat(beatsAdminRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), is(true));
-        assertThat(beatsAdminRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(index)), is(true));
-        assertThat(beatsAdminRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(mockIndexAbstraction(index)), is(true));
+        assertThat(
+            beatsAdminRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)),
+            is(true)
+        );
+        assertThat(
+            beatsAdminRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(mockIndexAbstraction(index)),
+            is(true)
+        );
         assertThat(beatsAdminRole.indices().allowedIndicesMatcher(GetAction.NAME).test(mockIndexAbstraction(index)), is(true));
 
         assertNoAccessAllowed(beatsAdminRole, TestRestrictedIndices.SAMPLE_RESTRICTED_NAMES);
@@ -3221,8 +3331,11 @@ private void assertAllIndicesAccessAllowed(Role role, String index) {
         assertThat(role.indices().allowedIndicesMatcher(IndexAction.NAME).test(mockIndexAbstraction(index)), is(true));
         assertThat(role.indices().allowedIndicesMatcher(DeleteAction.NAME).test(mockIndexAbstraction(index)), is(true));
         assertThat(role.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), is(true));
-        assertThat(role.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(index)), is(true));
-        assertThat(role.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(mockIndexAbstraction(index)), is(true));
+        assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), is(true));
+        assertThat(
+            role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(mockIndexAbstraction(index)),
+            is(true)
+        );
         assertThat(role.indices().allowedIndicesMatcher(GetAction.NAME).test(mockIndexAbstraction(index)), is(true));
         // inherits from 'all'
         assertThat(role.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(mockIndexAbstraction(index)), is(true));
@@ -3230,7 +3343,7 @@ private void assertAllIndicesAccessAllowed(Role role, String index) {
     private void assertReadWriteDocsAndMaintenanceButNotDeleteIndexAllowed(Role role, String index) {
         assertThat(role.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(mockIndexAbstraction(index)), is(false));
-        assertThat(role.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(index)), is(true));
+        assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), is(true));
         assertThat(role.indices().allowedIndicesMatcher(GetAction.NAME).test(mockIndexAbstraction(index)), is(true));
         assertThat(role.indices().allowedIndicesMatcher(IndexAction.NAME).test(mockIndexAbstraction(index)), is(true));
         assertThat(role.indices().allowedIndicesMatcher(UpdateAction.NAME).test(mockIndexAbstraction(index)), is(true));
@@ -3244,7 +3357,7 @@ private void assertReadWriteDocsAndMaintenanceButNotDeleteIndexAllowed(Role role
     private void assertReadWriteDocsButNotDeleteIndexAllowed(Role role, String index) {
         assertThat(role.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(mockIndexAbstraction(index)), is(false));
-        assertThat(role.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(index)), is(true));
+        assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), is(true));
         assertThat(role.indices().allowedIndicesMatcher(GetAction.NAME).test(mockIndexAbstraction(index)), is(true));
         assertThat(role.indices().allowedIndicesMatcher(IndexAction.NAME).test(mockIndexAbstraction(index)), is(true));
         assertThat(role.indices().allowedIndicesMatcher(UpdateAction.NAME).test(mockIndexAbstraction(index)), is(true));
@@ -3256,7 +3369,7 @@ private void assertOnlyReadAllowed(Role role, String index) {
         assertThat(role.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(mockIndexAbstraction(index)), is(false));
         assertThat(role.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(mockIndexAbstraction(index)), is(false));
         assertThat(role.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), is(false));
-        assertThat(role.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(index)), is(true));
+        assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), is(true));
         assertThat(role.indices().allowedIndicesMatcher(GetAction.NAME).test(mockIndexAbstraction(index)), is(true));
         assertThat(role.indices().allowedIndicesMatcher(IndexAction.NAME).test(mockIndexAbstraction(index)), is(false));
         assertThat(role.indices().allowedIndicesMatcher(UpdateAction.NAME).test(mockIndexAbstraction(index)), is(false));
@@ -3274,7 +3387,7 @@ private void assertViewIndexMetadata(Role role, String index) {
             GetFieldMappingsAction.NAME + "*",
             GetMappingsAction.NAME,
             ClusterSearchShardsAction.NAME,
-            SearchShardsAction.NAME,
+            TransportSearchShardsAction.TYPE.name(),
             ValidateQueryAction.NAME + "*",
             GetSettingsAction.NAME,
             ExplainLifecycleAction.NAME,
@@ -3295,7 +3408,7 @@ private void assertNoAccessAllowed(Role role, String index) {
         assertThat(role.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(mockIndexAbstraction(index)), is(false));
         assertThat(role.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(mockIndexAbstraction(index)), is(false));
         assertThat(role.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), is(false));
-        assertThat(role.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(index)), is(false));
+        assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), is(false));
         assertThat(role.indices().allowedIndicesMatcher(GetAction.NAME).test(mockIndexAbstraction(index)), is(false));
         assertThat(role.indices().allowedIndicesMatcher(IndexAction.NAME).test(mockIndexAbstraction(index)), is(false));
         assertThat(role.indices().allowedIndicesMatcher(UpdateAction.NAME).test(mockIndexAbstraction(index)), is(false));
@@ -3343,8 +3456,14 @@ public void testLogstashAdminRole() {
         assertThat(logstashAdminRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(mockIndexAbstraction(index)), is(true));
         assertThat(logstashAdminRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(mockIndexAbstraction(index)), is(true));
         assertThat(logstashAdminRole.indices().allowedIndicesMatcher(GetAction.NAME).test(mockIndexAbstraction(index)), is(true));
-        assertThat(logstashAdminRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(index)), is(true));
-        assertThat(logstashAdminRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(mockIndexAbstraction(index)), is(true));
+        assertThat(
+            logstashAdminRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)),
+            is(true)
+        );
+        assertThat(
+            logstashAdminRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(mockIndexAbstraction(index)),
+            is(true)
+        );
         assertThat(
             logstashAdminRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)),
             is(true)
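Note: every change in the test file above is the same mechanical substitution, replacing the removed SearchAction.NAME / MultiSearchAction.NAME string constants with the name() of the corresponding ActionType. A minimal sketch of the equivalence the rewritten assertions rely on (class and constant names are taken from the diff itself; the wrapper class is hypothetical):

import org.elasticsearch.action.search.TransportMultiSearchAction;
import org.elasticsearch.action.search.TransportSearchAction;

public class ActionNameSketch {
    public static void main(String[] args) {
        // Both lookups resolve to the same wire-level action names that the old
        // SearchAction.NAME / MultiSearchAction.NAME constants carried, so the
        // rewritten assertions keep testing the identical privilege strings.
        System.out.println(TransportSearchAction.TYPE.name());      // indices:data/read/search
        System.out.println(TransportMultiSearchAction.TYPE.name()); // indices:data/read/msearch
    }
}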
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/test/TestRestrictedIndices.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/test/TestRestrictedIndices.java
index 92c3a65ffeaa3..9e26444040b03 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/test/TestRestrictedIndices.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/test/TestRestrictedIndices.java
@@ -110,15 +110,10 @@ public class TestRestrictedIndices {
             ".fleet-actions-results",
             "fleet actions results",
             SystemDataStreamDescriptor.Type.EXTERNAL,
-            new ComposableIndexTemplate(
-                List.of(".fleet-actions-results"),
-                null,
-                null,
-                null,
-                null,
-                null,
-                new ComposableIndexTemplate.DataStreamTemplate()
-            ),
+            ComposableIndexTemplate.builder()
+                .indexPatterns(List.of(".fleet-actions-results"))
+                .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate())
+                .build(),
             Map.of(),
             List.of("fleet", "kibana"),
             null
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustManagerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustManagerTests.java
index 66fb5c7642f21..bbf80279b0b2a 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustManagerTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustManagerTests.java
@@ -218,11 +218,7 @@ public void testThatDelegateTrustManagerIsRespected() throws Exception {
             if (cert.endsWith("/ca")) {
                 assertTrusted(trustManager, cert);
             } else {
-                assertNotValid(
-                    trustManager,
-                    cert,
-                    inFipsJvm() ? "unable to process certificates: Unable to find certificate chain." : "PKIX path building failed.*"
-                );
+                assertNotValid(trustManager, cert, inFipsJvm() ? "Unable to find certificate chain." : "PKIX path building failed.*");
             }
         }
     }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistryTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistryTests.java
index ff0e99e4a1e17..db9cf91681199 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistryTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistryTests.java
@@ -344,15 +344,15 @@ public void testAutomaticRollover() throws Exception {
             Metadata.builder(Objects.requireNonNull(state).metadata())
                 .put(
                     entry.getKey(),
-                    new ComposableIndexTemplate(
-                        template.indexPatterns(),
-                        template.template(),
-                        template.composedOf(),
-                        template.priority(),
-                        2L,
-                        template.metadata(),
-                        template.getDataStreamTemplate()
-                    )
+                    ComposableIndexTemplate.builder()
+                        .indexPatterns(template.indexPatterns())
+                        .template(template.template())
+                        .componentTemplates(template.composedOf())
+                        .priority(template.priority())
+                        .version(2L)
+                        .metadata(template.metadata())
+                        .dataStreamTemplate(template.getDataStreamTemplate())
+                        .build()
                 )
         )
             .build();
@@ -831,38 +831,23 @@ public void testFindRolloverTargetDataStreams() {
             )
             .build();
 
-        ComposableIndexTemplate it1 = new ComposableIndexTemplate(
-            List.of("ds1*", "ds2*", "ds3*"),
-            null,
-            null,
-            100L,
-            null,
-            null,
-            new ComposableIndexTemplate.DataStreamTemplate(),
-            null
-        );
+        ComposableIndexTemplate it1 = ComposableIndexTemplate.builder()
+            .indexPatterns(List.of("ds1*", "ds2*", "ds3*"))
+            .priority(100L)
+            .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate())
+            .build();
 
-        ComposableIndexTemplate it2 = new ComposableIndexTemplate(
-            List.of("ds2*"),
-            null,
-            null,
-            200L,
-            null,
-            null,
-            new ComposableIndexTemplate.DataStreamTemplate(),
-            null
-        );
+        ComposableIndexTemplate it2 = ComposableIndexTemplate.builder()
+            .indexPatterns(List.of("ds2*"))
+            .priority(200L)
+            .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate())
+            .build();
 
-        ComposableIndexTemplate it5 = new ComposableIndexTemplate(
-            List.of("ds5*"),
-            null,
-            null,
-            200L,
-            null,
-            null,
-            new ComposableIndexTemplate.DataStreamTemplate(),
-            null
-        );
+        ComposableIndexTemplate it5 = ComposableIndexTemplate.builder()
+            .indexPatterns(List.of("ds5*"))
+            .priority(200L)
+            .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate())
+            .build();
 
         state = ClusterState.builder(state)
             .metadata(Metadata.builder(state.metadata()).put("it1", it1).put("it2", it2).put("it5", it5))
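Note: the ComposableIndexTemplate changes above all trade the long nullable positional constructor for the builder, so only the non-null arguments are spelled out. A short sketch of the pattern, using only builder methods that appear in the diff itself (the wrapper class is hypothetical):

import java.util.List;
import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;

public class BuilderMigrationSketch {
    public static void main(String[] args) {
        // Old form: new ComposableIndexTemplate(patterns, null, null, 100L, null, null, dataStreamTemplate, null)
        // New form: each non-null positional argument becomes a named builder call.
        ComposableIndexTemplate template = ComposableIndexTemplate.builder()
            .indexPatterns(List.of("ds1*"))
            .priority(100L)
            .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate())
            .build();
        System.out.println(template.indexPatterns());
    }
}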
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TermsEnumTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TermsEnumTests.java
index 435d5c2f36bd5..c8983c9a4a5c7 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TermsEnumTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TermsEnumTests.java
@@ -109,8 +109,7 @@ private void checkTermsEnumKeywords(String indexed) throws Exception {
     }
 
     private void indexAndRefresh(String indexName, String id, String field, String value) throws IOException {
-        client().prepareIndex(indexName)
-            .setId(id)
+        prepareIndex(indexName).setId(id)
             .setSource(jsonBuilder().startObject().field(field, value).endObject())
             .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
             .get();
@@ -206,8 +205,9 @@ public void testTermsEnumIPRandomized() throws Exception {
         for (int i = 0; i < numDocs; i++) {
             randomIps[i] = randomIp(randomBoolean());
             bulkRequestBuilder.add(
-                client().prepareIndex(indexName)
-                    .setSource(jsonBuilder().startObject().field("ip_addr", NetworkAddress.format(randomIps[i])).endObject())
+                prepareIndex(indexName).setSource(
+                    jsonBuilder().startObject().field("ip_addr", NetworkAddress.format(randomIps[i])).endObject()
+                )
             );
         }
         assertNoFailures(bulkRequestBuilder.get());
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/GetCheckpointActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/GetCheckpointActionRequestTests.java
index 43ec0a0f1b4f5..e96a7741b4f52 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/GetCheckpointActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/GetCheckpointActionRequestTests.java
@@ -11,9 +11,10 @@
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.common.io.stream.Writeable.Reader;
 import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.tasks.CancellableTask;
 import org.elasticsearch.tasks.TaskId;
-import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.xpack.core.transform.action.GetCheckpointAction.Request;
 
 import java.util.ArrayList;
@@ -26,7 +27,7 @@
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
 
-public class GetCheckpointActionRequestTests extends AbstractWireSerializingTestCase<Request> {
+public class GetCheckpointActionRequestTests extends AbstractWireSerializingTransformTestCase<Request> {
 
     @Override
     protected Request createTestInstance() {
@@ -42,9 +43,11 @@ protected Request mutateInstance(Request instance) {
         List<String> indices = instance.indices() != null ? new ArrayList<>(Arrays.asList(instance.indices())) : new ArrayList<>();
         IndicesOptions indicesOptions = instance.indicesOptions();
+        QueryBuilder query = instance.getQuery();
+        String cluster = instance.getCluster();
         TimeValue timeout = instance.getTimeout();
 
-        switch (between(0, 2)) {
+        switch (between(0, 4)) {
             case 0:
                 indices.add(randomAlphaOfLengthBetween(1, 20));
                 break;
@@ -58,13 +61,19 @@ protected Request mutateInstance(Request instance) {
                 );
                 break;
             case 2:
+                query = query != null ? null : QueryBuilders.matchAllQuery();
+                break;
+            case 3:
+                cluster = cluster != null ? null : randomAlphaOfLengthBetween(1, 10);
+                break;
+            case 4:
                 timeout = timeout != null ? null : TimeValue.timeValueSeconds(randomIntBetween(1, 300));
                 break;
             default:
                 throw new AssertionError("Illegal randomization branch");
         }
 
-        return new Request(indices.toArray(new String[0]), indicesOptions, timeout);
+        return new Request(indices.toArray(new String[0]), indicesOptions, query, cluster, timeout);
     }
 
     public void testCreateTask() {
@@ -74,7 +83,7 @@
     }
 
     public void testCreateTaskWithNullIndices() {
-        Request request = new Request(null, null, null);
+        Request request = new Request(null, null, null, null, null);
         CancellableTask task = request.createTask(123, "type", "action", new TaskId("dummy-node:456"), Map.of());
         assertThat(task.getDescription(), is(equalTo("get_checkpoint[0]")));
     }
@@ -89,6 +98,8 @@ private static Request randomRequest(Integer numIndices) {
                 Boolean.toString(randomBoolean()),
                 SearchRequest.DEFAULT_INDICES_OPTIONS
             ),
+            randomBoolean() ? QueryBuilders.matchAllQuery() : null,
+            randomBoolean() ? randomAlphaOfLengthBetween(1, 10) : null,
            randomBoolean() ? TimeValue.timeValueSeconds(randomIntBetween(1, 300)) : null
         );
     }
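Note: GetCheckpointAction.Request grows from three to five constructor arguments in this change. A sketch of the new call shape, with the argument order taken from randomRequest above (the index name and cluster alias are placeholder values, and the wrapper class is hypothetical):

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.xpack.core.transform.action.GetCheckpointAction;

public class GetCheckpointRequestSketch {
    public static void main(String[] args) {
        // indices, indicesOptions, query, cluster, timeout; the query, cluster
        // and timeout arguments may each be null, as the mutateInstance branches show.
        GetCheckpointAction.Request request = new GetCheckpointAction.Request(
            new String[] { "my-index" },
            SearchRequest.DEFAULT_INDICES_OPTIONS,
            QueryBuilders.matchAllQuery(),
            "my-remote-cluster",
            TimeValue.timeValueSeconds(30)
        );
        System.out.println(request.indices()[0]);
    }
}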
days", "managed": true - } + }, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/7-days@lifecycle.json b/x-pack/plugin/core/template-resources/src/main/resources/7-days@lifecycle.json index 2c5778e5af1db..1a1f74beac516 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/7-days@lifecycle.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/7-days@lifecycle.json @@ -29,5 +29,6 @@ "_meta": { "description": "built-in ILM policy using the hot and warm phases with a retention of 7 days", "managed": true - } + }, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/90-days@lifecycle.json b/x-pack/plugin/core/template-resources/src/main/resources/90-days@lifecycle.json index cae4e7c83a064..e0d2487c8961a 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/90-days@lifecycle.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/90-days@lifecycle.json @@ -33,5 +33,6 @@ "_meta": { "description": "built-in ILM policy using the hot, warm, and cold phases with a retention of 90 days", "managed": true - } + }, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/data-streams@mappings.json b/x-pack/plugin/core/template-resources/src/main/resources/data-streams@mappings.json index f87c0e79b7c45..96bbeca8f7ac8 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/data-streams@mappings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/data-streams@mappings.json @@ -63,5 +63,6 @@ "description": "general mapping conventions for data streams", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/ecs@mappings.json b/x-pack/plugin/core/template-resources/src/main/resources/ecs@mappings.json index fc29fc98dca96..f1d03531e4b6b 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/ecs@mappings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/ecs@mappings.json @@ -190,5 +190,6 @@ "description": "dynamic mappings based on ECS, installed by x-pack", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-mappings.json b/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-mappings.json index 709ce5d3abbd0..2a41662a136a7 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-mappings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-mappings.json @@ -9,7 +9,7 @@ "pipeline": { "default_name": "ent-search-generic-ingestion", "default_extract_binary_content": true, - "default_run_ml_inference": false, + "default_run_ml_inference": true, "default_reduce_whitespace": true }, "version": ${xpack.application.connector.template.version} @@ -18,6 +18,9 @@ "api_key_id": { "type": "keyword" }, + "connector_id": { + "type": "keyword" + }, "configuration": { "type": "object" }, diff --git 
a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-settings.json b/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-settings.json index 65556f88b26f7..22f35b3ac5c99 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-settings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-settings.json @@ -3,7 +3,7 @@ "settings": { "hidden": true, "number_of_shards": "1", - "auto_expand_replicas": "0-3", + "auto_expand_replicas": "0-1", "number_of_replicas": "0" } }, diff --git a/x-pack/plugin/core/template-resources/src/main/resources/kibana-reporting@template.json b/x-pack/plugin/core/template-resources/src/main/resources/kibana-reporting@template.json index a4388d671eb0d..b92942ff010d6 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/kibana-reporting@template.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/kibana-reporting@template.json @@ -173,5 +173,6 @@ "description": "default kibana reporting template installed by elasticsearch", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/logs@default-pipeline.json b/x-pack/plugin/core/template-resources/src/main/resources/logs@default-pipeline.json index 518ff3cece752..d8dc9cca5ea7c 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/logs@default-pipeline.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/logs@default-pipeline.json @@ -20,5 +20,6 @@ "description": "default pipeline for the logs index template installed by x-pack", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/logs@json-pipeline.json b/x-pack/plugin/core/template-resources/src/main/resources/logs@json-pipeline.json index cebeccd344324..e3b0f85642a46 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/logs@json-pipeline.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/logs@json-pipeline.json @@ -44,5 +44,6 @@ "description": "automatic parsing of JSON log messages", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/logs@lifecycle.json b/x-pack/plugin/core/template-resources/src/main/resources/logs@lifecycle.json index 6bce19aaaab49..5b58c138d785f 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/logs@lifecycle.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/logs@lifecycle.json @@ -12,5 +12,6 @@ "_meta": { "description": "default policy for the logs index template installed by x-pack", "managed": true - } + }, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/logs@mappings.json b/x-pack/plugin/core/template-resources/src/main/resources/logs@mappings.json index 7417d4809559d..82cbf7e478a27 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/logs@mappings.json +++ 
b/x-pack/plugin/core/template-resources/src/main/resources/logs@mappings.json @@ -23,5 +23,6 @@ "description": "default mappings for the logs index template installed by x-pack", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/logs@settings.json b/x-pack/plugin/core/template-resources/src/main/resources/logs@settings.json index cc61f195402fe..577768edbed30 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/logs@settings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/logs@settings.json @@ -6,9 +6,6 @@ "name": "logs" }, "codec": "best_compression", - "query": { - "default_field": ["message"] - }, "mapping": { "ignore_malformed": true }, @@ -20,5 +17,6 @@ "description": "default settings for the logs index template installed by x-pack", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/logs@template.json b/x-pack/plugin/core/template-resources/src/main/resources/logs@template.json index b41b2d0453c89..f9b945d75f4f8 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/logs@template.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/logs@template.json @@ -14,5 +14,6 @@ "description": "default logs template installed by x-pack", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/metrics@lifecycle.json b/x-pack/plugin/core/template-resources/src/main/resources/metrics@lifecycle.json index 3c37e8db4a7da..daa07659e559e 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/metrics@lifecycle.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/metrics@lifecycle.json @@ -12,5 +12,6 @@ "_meta": { "description": "default policy for the metrics index template installed by x-pack", "managed": true - } + }, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/metrics@mappings.json b/x-pack/plugin/core/template-resources/src/main/resources/metrics@mappings.json index 5741b441256f9..4e48f6b7adaed 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/metrics@mappings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/metrics@mappings.json @@ -53,5 +53,6 @@ "description": "default mappings for the metrics index template installed by x-pack", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/metrics@settings.json b/x-pack/plugin/core/template-resources/src/main/resources/metrics@settings.json index 1a13139bb18a4..3a0e6feeaede4 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/metrics@settings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/metrics@settings.json @@ -16,5 +16,6 @@ "description": "default settings for the metrics index template installed by x-pack", "managed": true }, - "version": ${xpack.stack.template.version} + "version": 
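Note: the template resources in this part of the patch all gain the same trailing field, "deprecated": ${xpack.stack.template.deprecated}. These files are not literal JSON; the ${...} placeholders are substituted before the document is parsed. A stand-in sketch of that substitution step (this is not the actual Elasticsearch loader, and the variable map below is a hypothetical example):

import java.util.Map;

public class TemplatePlaceholderSketch {
    public static void main(String[] args) {
        String resource = "{ \"managed\": true, \"deprecated\": ${xpack.stack.template.deprecated} }";
        // Hypothetical variable map; the real values come from the registry that installs the template.
        Map<String, String> variables = Map.of("xpack.stack.template.deprecated", "false");
        String json = resource;
        for (Map.Entry<String, String> e : variables.entrySet()) {
            json = json.replace("${" + e.getKey() + "}", e.getValue());
        }
        System.out.println(json); // { "managed": true, "deprecated": false }
    }
}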
${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/metrics@template.json b/x-pack/plugin/core/template-resources/src/main/resources/metrics@template.json index a596314bc9e8c..464df09ffe2ce 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/metrics@template.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/metrics@template.json @@ -12,5 +12,6 @@ "description": "default metrics template installed by x-pack", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/metrics@tsdb-settings.json b/x-pack/plugin/core/template-resources/src/main/resources/metrics@tsdb-settings.json index cbcad39ef78d0..6a64ff9be5473 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/metrics@tsdb-settings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/metrics@tsdb-settings.json @@ -15,5 +15,6 @@ "description": "default settings for the metrics index template installed by x-pack", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/ml/inference_index_mappings.json b/x-pack/plugin/core/template-resources/src/main/resources/ml/inference_index_mappings.json index 77634546e0e6e..2dbc4bac8bd00 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/ml/inference_index_mappings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/ml/inference_index_mappings.json @@ -149,6 +149,9 @@ }, "vocab": { "enabled": false + }, + "prefix_strings": { + "enabled": false } } } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats-mb.json b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats-mb.json index ebe8f4dfbcce1..fab8ca451358f 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats-mb.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats-mb.json @@ -838,6 +838,9 @@ "properties": { "acked": { "type": "long" + }, + "max_events": { + "type": "long" } } } @@ -1928,6 +1931,10 @@ "acked": { "type": "alias", "path": "beat.stats.libbeat.pipeline.queue.acked" + }, + "max_events": { + "type": "alias", + "path": "beat.stats.libbeat.pipeline.queue.max_events" } } } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats.json b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats.json index 14174c6b86dcb..6dee05564cc10 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats.json @@ -854,6 +854,9 @@ "properties": { "acked": { "type": "long" + }, + "max_events": { + "type": "long" } } } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-logstash-mb.json b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-logstash-mb.json index e6efc1ea5a11b..706b582f5c3af 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-logstash-mb.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-logstash-mb.json @@ -523,6 +523,9 @@ 
"name": { "type": "keyword", "ignore_above": 1024 + }, + "ip": { + "type": "ip" } } }, diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-costs.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-costs.json deleted file mode 100644 index 7f54b012f8803..0000000000000 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-costs.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "index_patterns": [ - ".profiling-costs*" - ], - "template": { - "settings": { - "index": { - "number_of_replicas": 0, - "auto_expand_replicas": "0-1", - "refresh_interval": "30s", - "hidden": true - } - }, - "mappings": { - "_source": { - "mode": "synthetic" - }, - "_meta": { - "index-template-version": ${xpack.profiling.template.version}, - "index-version": ${xpack.profiling.index.costs.version} - }, - "dynamic": false, - "properties": { - "ecs.version": { - "type": "keyword", - "index": true - }, - "@timestamp": { // creation date - "type": "date", - "index": true - }, - "provider": { - "type": "keyword", - "index": true - }, - "region": { - "type": "keyword", - "index": true - }, - "instance_type": { - "type": "keyword", - "index": true - }, - "co2_factor": { - "type": "double", - "index": false - }, - "cost_factor": { - "type": "double", - "index": false - } - } - } - }, - "priority": 100, - "_meta": { - "description": "Index template for .profiling-costs" - }, - "version": ${xpack.profiling.template.version} -} diff --git a/x-pack/plugin/core/template-resources/src/main/resources/synthetics@lifecycle.json b/x-pack/plugin/core/template-resources/src/main/resources/synthetics@lifecycle.json index 1e4220725177d..aa2cf5489b45f 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/synthetics@lifecycle.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/synthetics@lifecycle.json @@ -12,5 +12,6 @@ "_meta": { "description": "default policy for the synthetics index template installed by x-pack", "managed": true - } + }, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/synthetics@mappings.json b/x-pack/plugin/core/template-resources/src/main/resources/synthetics@mappings.json index 9e3e56e3261d0..81b85285450c7 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/synthetics@mappings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/synthetics@mappings.json @@ -17,5 +17,6 @@ "description": "default mappings for the synthetics index template installed by x-pack", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/synthetics@settings.json b/x-pack/plugin/core/template-resources/src/main/resources/synthetics@settings.json index 27ced96be36e3..04d68d083bf9f 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/synthetics@settings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/synthetics@settings.json @@ -13,5 +13,6 @@ "description": "default settings for the synthetics index template installed by x-pack", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git 
a/x-pack/plugin/core/template-resources/src/main/resources/synthetics@template.json b/x-pack/plugin/core/template-resources/src/main/resources/synthetics@template.json index 6369bd5a82c15..344426541b8c5 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/synthetics@template.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/synthetics@template.json @@ -12,5 +12,6 @@ "description": "default synthetics template installed by x-pack", "managed": true }, - "version": ${xpack.stack.template.version} + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} } diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/NodesDeprecationCheckAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/NodesDeprecationCheckAction.java index 596fb69d06f69..4448ed4a04866 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/NodesDeprecationCheckAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/NodesDeprecationCheckAction.java @@ -9,8 +9,6 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.nodes.BaseNodeResponse; -import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; -import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -92,17 +90,4 @@ public int hashCode() { } } - public static class RequestBuilder extends NodesOperationRequestBuilder< - NodesDeprecationCheckRequest, - NodesDeprecationCheckResponse, - RequestBuilder> { - - protected RequestBuilder( - ElasticsearchClient client, - ActionType action, - NodesDeprecationCheckRequest request - ) { - super(client, action, request); - } - } } diff --git a/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/60_settings.yml b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/60_settings.yml index 6a33cc47e5c51..5bc00251c9163 100644 --- a/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/60_settings.yml +++ b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/60_settings.yml @@ -93,10 +93,9 @@ --- "Downsample datastream with tier preference": - skip: - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/97150" -# version: " - 8.4.99" -# reason: "rollup renamed to downsample in 8.5.0" + version: " - 8.4.99" + features: default_shards + reason: "rollup renamed to downsample in 8.5.0, avoid globalTemplateIndexSettings with overlapping index pattern" - do: indices.put_index_template: diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java index 5bd20ce51a57d..826c958de4c18 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java @@ -59,95 +59,94 @@ protected Settings nodeSettings(int 
nodeOrdinal, Settings otherSettings) { @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99520") @TestLogging(value = "org.elasticsearch.datastreams.lifecycle:TRACE", reason = "debugging") public void testDataStreamLifecycleDownsampleRollingRestart() throws Exception { - try (InternalTestCluster cluster = internalCluster()) { - final List<String> masterNodes = cluster.startMasterOnlyNodes(1); - cluster.startDataOnlyNodes(3); - ensureStableCluster(cluster.size()); - ensureGreen(); - - final String dataStreamName = "metrics-foo"; - DataStreamLifecycle lifecycle = DataStreamLifecycle.newBuilder() - .downsampling( - new DataStreamLifecycle.Downsampling( - List.of( - new DataStreamLifecycle.Downsampling.Round( - TimeValue.timeValueMillis(0), - new DownsampleConfig(new DateHistogramInterval("5m")) - ) + final InternalTestCluster cluster = internalCluster(); + final List<String> masterNodes = cluster.startMasterOnlyNodes(1); + cluster.startDataOnlyNodes(3); + ensureStableCluster(cluster.size()); + ensureGreen(); + + final String dataStreamName = "metrics-foo"; + DataStreamLifecycle lifecycle = DataStreamLifecycle.newBuilder() + .downsampling( + new DataStreamLifecycle.Downsampling( + List.of( + new DataStreamLifecycle.Downsampling.Round( + TimeValue.timeValueMillis(0), + new DownsampleConfig(new DateHistogramInterval("5m")) ) ) ) - .build(); - DataStreamLifecycleDriver.setupTSDBDataStreamAndIngestDocs( - client(), - dataStreamName, - "1986-01-08T23:40:53.384Z", - "2022-01-08T23:40:53.384Z", - lifecycle, - DOC_COUNT, - "1990-09-09T18:00:00" - ); - - // before we rollover we update the index template to remove the start/end time boundaries (they're there just to ease with - // testing so DSL doesn't have to wait for the end_time to lapse) - putTSDBIndexTemplate(client(), dataStreamName, null, null, lifecycle); - client().execute(RolloverAction.INSTANCE, new RolloverRequest(dataStreamName, null)).actionGet(); - - // DSL runs every second and it has to tail forcemerge the index (2 seconds) and mark it as read-only (2s) before it starts - // downsampling. This sleep here tries to get as close as possible to having disruption during the downsample execution.
- long sleepTime = randomLongBetween(3000, 4500); - logger.info("-> giving data stream lifecycle [{}] millis to make some progress before starting the disruption", sleepTime); - Thread.sleep(sleepTime); - final CountDownLatch disruptionStart = new CountDownLatch(1); - final CountDownLatch disruptionEnd = new CountDownLatch(1); - List<String> backingIndices = getBackingIndices(client(), dataStreamName); - // first generation index - String sourceIndex = backingIndices.get(0); - new Thread(new Disruptor(cluster, sourceIndex, new DisruptionListener() { - @Override - public void disruptionStart() { - disruptionStart.countDown(); - } - - @Override - public void disruptionEnd() { - disruptionEnd.countDown(); - } - }, masterNodes.get(0), (ignored) -> { - try { - cluster.rollingRestart(new InternalTestCluster.RestartCallback() { - @Override - public boolean validateClusterForming() { - return true; - } - }); - } catch (Exception e) { - throw new RuntimeException(e); - } - })).start(); - - waitUntil( - () -> cluster.client().admin().cluster().preparePendingClusterTasks().get().pendingTasks().isEmpty(), - 60, - TimeUnit.SECONDS - ); - ensureStableCluster(cluster.numDataAndMasterNodes()); - - final String targetIndex = "downsample-5m-" + sourceIndex; - assertBusy(() -> { - try { - GetSettingsResponse getSettingsResponse = client().admin() - .indices() - .getSettings(new GetSettingsRequest().indices(targetIndex)) - .actionGet(); - Settings indexSettings = getSettingsResponse.getIndexToSettings().get(targetIndex); - assertThat(indexSettings, is(notNullValue())); - assertThat(IndexMetadata.INDEX_DOWNSAMPLE_STATUS.get(indexSettings), is(IndexMetadata.DownsampleTaskStatus.SUCCESS)); - } catch (Exception e) { - throw new AssertionError(e); - } - }, 60, TimeUnit.SECONDS); - } + ) + .build(); + DataStreamLifecycleDriver.setupTSDBDataStreamAndIngestDocs( + client(), + dataStreamName, + "1986-01-08T23:40:53.384Z", + "2022-01-08T23:40:53.384Z", + lifecycle, + DOC_COUNT, + "1990-09-09T18:00:00" + ); + + // before we rollover we update the index template to remove the start/end time boundaries (they're there just to ease with + // testing so DSL doesn't have to wait for the end_time to lapse) + putTSDBIndexTemplate(client(), dataStreamName, null, null, lifecycle); + client().execute(RolloverAction.INSTANCE, new RolloverRequest(dataStreamName, null)).actionGet(); + + // DSL runs every second and it has to tail forcemerge the index (2 seconds) and mark it as read-only (2s) before it starts + // downsampling. This sleep here tries to get as close as possible to having disruption during the downsample execution.
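The substantive change in the hunks above is that the test stops wrapping internalCluster() in try-with-resources: the InternalTestCluster returned there is the suite-shared cluster owned by the test framework, so an individual test must not close it. The latch handshake with the Disruptor thread is unchanged. A minimal JDK-only sketch of that handshake, with the rolling restart simulated by a sleep (class and message names below are illustrative, not from this PR):

import java.util.concurrent.CountDownLatch;

// Sketch of the disruption handshake used by these tests: the disruptor
// signals when the disruption begins and ends, and the main flow starts the
// long-running task only once the disruption is known to be underway.
public class DisruptionHandshake {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch disruptionStart = new CountDownLatch(1);
        CountDownLatch disruptionEnd = new CountDownLatch(1);

        Thread disruptor = new Thread(() -> {
            disruptionStart.countDown();      // signal: disruption is underway
            try {
                Thread.sleep(100);            // stand-in for a rolling restart
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            disruptionEnd.countDown();        // signal: cluster is back
        });
        disruptor.start();

        disruptionStart.await();              // wait until the disruption has started
        System.out.println("downsample task runs while the disruption is active");
        disruptionEnd.await();                // wait for recovery before asserting
        System.out.println("assert on the target index here");
    }
}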
+ long sleepTime = randomLongBetween(3000, 4500); + logger.info("-> giving data stream lifecycle [{}] millis to make some progress before starting the disruption", sleepTime); + Thread.sleep(sleepTime); + final CountDownLatch disruptionStart = new CountDownLatch(1); + final CountDownLatch disruptionEnd = new CountDownLatch(1); + List<String> backingIndices = getBackingIndices(client(), dataStreamName); + // first generation index + String sourceIndex = backingIndices.get(0); + new Thread(new Disruptor(cluster, sourceIndex, new DisruptionListener() { + @Override + public void disruptionStart() { + disruptionStart.countDown(); + } + + @Override + public void disruptionEnd() { + disruptionEnd.countDown(); + } + }, masterNodes.get(0), (ignored) -> { + try { + cluster.rollingRestart(new InternalTestCluster.RestartCallback() { + @Override + public boolean validateClusterForming() { + return true; + } + }); + } catch (Exception e) { + throw new RuntimeException(e); + } + })).start(); + + waitUntil( + () -> cluster.client().admin().cluster().preparePendingClusterTasks().get().pendingTasks().isEmpty(), + 60, + TimeUnit.SECONDS + ); + ensureStableCluster(cluster.numDataAndMasterNodes()); + + final String targetIndex = "downsample-5m-" + sourceIndex; + assertBusy(() -> { + try { + GetSettingsResponse getSettingsResponse = client().admin() + .indices() + .getSettings(new GetSettingsRequest().indices(targetIndex)) + .actionGet(); + Settings indexSettings = getSettingsResponse.getIndexToSettings().get(targetIndex); + assertThat(indexSettings, is(notNullValue())); + assertThat(IndexMetadata.INDEX_DOWNSAMPLE_STATUS.get(indexSettings), is(IndexMetadata.DownsampleTaskStatus.SUCCESS)); + } catch (Exception e) { + throw new AssertionError(e); + } + }, 60, TimeUnit.SECONDS); } interface DisruptionListener { diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java index cb945e8ffa418..57024acee809f 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java @@ -30,6 +30,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.cluster.metadata.ClusterChangedEventUtils.indicesCreated; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.backingIndexEqualTo; import static org.elasticsearch.xpack.downsample.DataStreamLifecycleDriver.getBackingIndices; import static org.elasticsearch.xpack.downsample.DataStreamLifecycleDriver.putTSDBIndexTemplate; @@ -82,11 +83,11 @@ public void testDownsampling() throws Exception { Set<String> witnessedDownsamplingIndices = new HashSet<>(); clusterService().addListener(event -> { - if (event.indicesCreated().contains(oneSecondDownsampleIndex) + if (indicesCreated(event).contains(oneSecondDownsampleIndex) || event.indicesDeleted().stream().anyMatch(index -> index.getName().equals(oneSecondDownsampleIndex))) { witnessedDownsamplingIndices.add(oneSecondDownsampleIndex); } - if (event.indicesCreated().contains(tenSecondsDownsampleIndex)) { + if (indicesCreated(event).contains(tenSecondsDownsampleIndex)) { witnessedDownsamplingIndices.add(tenSecondsDownsampleIndex); } }); @@ -152,11 +153,11 @@ public void
testDownsamplingOnlyExecutesTheLastMatchingRound() throws Exception Set<String> witnessedDownsamplingIndices = new HashSet<>(); clusterService().addListener(event -> { - if (event.indicesCreated().contains(oneSecondDownsampleIndex) + if (indicesCreated(event).contains(oneSecondDownsampleIndex) || event.indicesDeleted().stream().anyMatch(index -> index.getName().equals(oneSecondDownsampleIndex))) { witnessedDownsamplingIndices.add(oneSecondDownsampleIndex); } - if (event.indicesCreated().contains(tenSecondsDownsampleIndex)) { + if (indicesCreated(event).contains(tenSecondsDownsampleIndex)) { witnessedDownsamplingIndices.add(tenSecondsDownsampleIndex); } }); @@ -217,11 +218,11 @@ public void testUpdateDownsampleRound() throws Exception { Set<String> witnessedDownsamplingIndices = new HashSet<>(); clusterService().addListener(event -> { - if (event.indicesCreated().contains(oneSecondDownsampleIndex) + if (indicesCreated(event).contains(oneSecondDownsampleIndex) || event.indicesDeleted().stream().anyMatch(index -> index.getName().equals(oneSecondDownsampleIndex))) { witnessedDownsamplingIndices.add(oneSecondDownsampleIndex); } - if (event.indicesCreated().contains(tenSecondsDownsampleIndex)) { + if (indicesCreated(event).contains(tenSecondsDownsampleIndex)) { witnessedDownsamplingIndices.add(tenSecondsDownsampleIndex); } }); diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDriver.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDriver.java index d704f3bf93c54..db6ab6d01613d 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDriver.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDriver.java @@ -138,16 +138,12 @@ private static void putComposableIndexTemplate( ) throws IOException { PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); request.indexTemplate( - new ComposableIndexTemplate( - patterns, - new Template(settings, mappings == null ? null : mappings, null, lifecycle), - null, - null, - null, - metadata, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(patterns) + .template(new Template(settings, mappings == null ?
null : mappings, null, lifecycle)) + .metadata(metadata) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client.execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); } diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java index 30fb751d1805c..d6549a9618d36 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java @@ -18,7 +18,6 @@ import org.elasticsearch.action.downsample.DownsampleAction; import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; @@ -51,6 +50,7 @@ import java.util.function.Consumer; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomInterval; @@ -149,127 +149,125 @@ public void setup(final String sourceIndex, int numOfShards, int numOfReplicas, } public void testDownsampleIndexWithDataNodeRestart() throws Exception { - try (InternalTestCluster cluster = internalCluster()) { - final List masterNodes = cluster.startMasterOnlyNodes(1); - cluster.startDataOnlyNodes(3); - ensureStableCluster(cluster.size()); - ensureGreen(); - - final String sourceIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - final String targetIndex = randomAlphaOfLength(11).toLowerCase(Locale.ROOT); - long startTime = LocalDateTime.parse("2020-09-09T18:00:00").atZone(ZoneId.of("UTC")).toInstant().toEpochMilli(); - setup(sourceIndex, 1, 0, startTime); - final DownsampleConfig config = new DownsampleConfig(randomInterval()); - final DownsampleActionSingleNodeTests.SourceSupplier sourceSupplier = () -> { - final String ts = randomDateForInterval(config.getInterval(), startTime); - double counterValue = DATE_FORMATTER.parseMillis(ts); - final List dimensionValues = new ArrayList<>(5); - for (int j = 0; j < randomIntBetween(1, 5); j++) { - dimensionValues.add(randomAlphaOfLength(6)); - } - return XContentFactory.jsonBuilder() - .startObject() - .field(FIELD_TIMESTAMP, ts) - .field(FIELD_DIMENSION_1, randomFrom(dimensionValues)) - .field(FIELD_DIMENSION_2, randomIntBetween(1, 10)) - .field(FIELD_METRIC_COUNTER, counterValue) - .endObject(); - }; - int indexedDocs = bulkIndex(sourceIndex, sourceSupplier, DOC_COUNT); - prepareSourceIndex(sourceIndex); - final CountDownLatch disruptionStart = new CountDownLatch(1); - final CountDownLatch disruptionEnd = new CountDownLatch(1); - - new Thread(new Disruptor(cluster, sourceIndex, new DisruptionListener() { - @Override - public void disruptionStart() { - disruptionStart.countDown(); - } + final InternalTestCluster cluster = internalCluster(); + final List masterNodes = cluster.startMasterOnlyNodes(1); + cluster.startDataOnlyNodes(3); + 
ensureStableCluster(cluster.size()); + ensureGreen(); + + final String sourceIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + final String targetIndex = randomAlphaOfLength(11).toLowerCase(Locale.ROOT); + long startTime = LocalDateTime.parse("2020-09-09T18:00:00").atZone(ZoneId.of("UTC")).toInstant().toEpochMilli(); + setup(sourceIndex, 1, 0, startTime); + final DownsampleConfig config = new DownsampleConfig(randomInterval()); + final DownsampleActionSingleNodeTests.SourceSupplier sourceSupplier = () -> { + final String ts = randomDateForInterval(config.getInterval(), startTime); + double counterValue = DATE_FORMATTER.parseMillis(ts); + final List dimensionValues = new ArrayList<>(5); + for (int j = 0; j < randomIntBetween(1, 5); j++) { + dimensionValues.add(randomAlphaOfLength(6)); + } + return XContentFactory.jsonBuilder() + .startObject() + .field(FIELD_TIMESTAMP, ts) + .field(FIELD_DIMENSION_1, randomFrom(dimensionValues)) + .field(FIELD_DIMENSION_2, randomIntBetween(1, 10)) + .field(FIELD_METRIC_COUNTER, counterValue) + .endObject(); + }; + int indexedDocs = bulkIndex(sourceIndex, sourceSupplier, DOC_COUNT); + prepareSourceIndex(sourceIndex); + final CountDownLatch disruptionStart = new CountDownLatch(1); + final CountDownLatch disruptionEnd = new CountDownLatch(1); + + new Thread(new Disruptor(cluster, sourceIndex, new DisruptionListener() { + @Override + public void disruptionStart() { + disruptionStart.countDown(); + } - @Override - public void disruptionEnd() { - disruptionEnd.countDown(); - } - }, masterNodes.get(0), (node) -> { - try { - cluster.restartNode(node, new InternalTestCluster.RestartCallback() { - @Override - public boolean validateClusterForming() { - return true; - } - }); - } catch (Exception e) { - throw new RuntimeException(e); - } - })).start(); - startDownsampleTaskDuringDisruption(sourceIndex, targetIndex, config, disruptionStart, disruptionEnd); - waitUntil(() -> cluster.client().admin().cluster().preparePendingClusterTasks().get().pendingTasks().isEmpty()); - ensureStableCluster(cluster.numDataAndMasterNodes()); - assertTargetIndex(cluster, sourceIndex, targetIndex, indexedDocs); - } + @Override + public void disruptionEnd() { + disruptionEnd.countDown(); + } + }, masterNodes.get(0), (node) -> { + try { + cluster.restartNode(node, new InternalTestCluster.RestartCallback() { + @Override + public boolean validateClusterForming() { + return true; + } + }); + } catch (Exception e) { + throw new RuntimeException(e); + } + })).start(); + startDownsampleTaskDuringDisruption(sourceIndex, targetIndex, config, disruptionStart, disruptionEnd); + waitUntil(() -> cluster.client().admin().cluster().preparePendingClusterTasks().get().pendingTasks().isEmpty()); + ensureStableCluster(cluster.numDataAndMasterNodes()); + assertTargetIndex(cluster, sourceIndex, targetIndex, indexedDocs); } @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100653") public void testDownsampleIndexWithRollingRestart() throws Exception { - try (InternalTestCluster cluster = internalCluster()) { - final List masterNodes = cluster.startMasterOnlyNodes(1); - cluster.startDataOnlyNodes(3); - ensureStableCluster(cluster.size()); - ensureGreen(); - - final String sourceIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - final String targetIndex = randomAlphaOfLength(11).toLowerCase(Locale.ROOT); - long startTime = LocalDateTime.parse("2020-09-09T18:00:00").atZone(ZoneId.of("UTC")).toInstant().toEpochMilli(); - setup(sourceIndex, 1, 0, startTime); - final 
DownsampleConfig config = new DownsampleConfig(randomInterval()); - final DownsampleActionSingleNodeTests.SourceSupplier sourceSupplier = () -> { - final String ts = randomDateForInterval(config.getInterval(), startTime); - double counterValue = DATE_FORMATTER.parseMillis(ts); - final List dimensionValues = new ArrayList<>(5); - for (int j = 0; j < randomIntBetween(1, 5); j++) { - dimensionValues.add(randomAlphaOfLength(6)); - } - return XContentFactory.jsonBuilder() - .startObject() - .field(FIELD_TIMESTAMP, ts) - .field(FIELD_DIMENSION_1, randomFrom(dimensionValues)) - .field(FIELD_DIMENSION_2, randomIntBetween(1, 10)) - .field(FIELD_METRIC_COUNTER, counterValue) - .endObject(); - }; - int indexedDocs = bulkIndex(sourceIndex, sourceSupplier, DOC_COUNT); - prepareSourceIndex(sourceIndex); - final CountDownLatch disruptionStart = new CountDownLatch(1); - final CountDownLatch disruptionEnd = new CountDownLatch(1); - - new Thread(new Disruptor(cluster, sourceIndex, new DisruptionListener() { - @Override - public void disruptionStart() { - disruptionStart.countDown(); - } + final InternalTestCluster cluster = internalCluster(); + final List masterNodes = cluster.startMasterOnlyNodes(1); + cluster.startDataOnlyNodes(3); + ensureStableCluster(cluster.size()); + ensureGreen(); + + final String sourceIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + final String targetIndex = randomAlphaOfLength(11).toLowerCase(Locale.ROOT); + long startTime = LocalDateTime.parse("2020-09-09T18:00:00").atZone(ZoneId.of("UTC")).toInstant().toEpochMilli(); + setup(sourceIndex, 1, 0, startTime); + final DownsampleConfig config = new DownsampleConfig(randomInterval()); + final DownsampleActionSingleNodeTests.SourceSupplier sourceSupplier = () -> { + final String ts = randomDateForInterval(config.getInterval(), startTime); + double counterValue = DATE_FORMATTER.parseMillis(ts); + final List dimensionValues = new ArrayList<>(5); + for (int j = 0; j < randomIntBetween(1, 5); j++) { + dimensionValues.add(randomAlphaOfLength(6)); + } + return XContentFactory.jsonBuilder() + .startObject() + .field(FIELD_TIMESTAMP, ts) + .field(FIELD_DIMENSION_1, randomFrom(dimensionValues)) + .field(FIELD_DIMENSION_2, randomIntBetween(1, 10)) + .field(FIELD_METRIC_COUNTER, counterValue) + .endObject(); + }; + int indexedDocs = bulkIndex(sourceIndex, sourceSupplier, DOC_COUNT); + prepareSourceIndex(sourceIndex); + final CountDownLatch disruptionStart = new CountDownLatch(1); + final CountDownLatch disruptionEnd = new CountDownLatch(1); + + new Thread(new Disruptor(cluster, sourceIndex, new DisruptionListener() { + @Override + public void disruptionStart() { + disruptionStart.countDown(); + } - @Override - public void disruptionEnd() { - disruptionEnd.countDown(); - } - }, masterNodes.get(0), (ignored) -> { - try { - cluster.rollingRestart(new InternalTestCluster.RestartCallback() { - @Override - public boolean validateClusterForming() { - return true; - } - }); - } catch (Exception e) { - throw new RuntimeException(e); - } - })).start(); + @Override + public void disruptionEnd() { + disruptionEnd.countDown(); + } + }, masterNodes.get(0), (ignored) -> { + try { + cluster.rollingRestart(new InternalTestCluster.RestartCallback() { + @Override + public boolean validateClusterForming() { + return true; + } + }); + } catch (Exception e) { + throw new RuntimeException(e); + } + })).start(); - startDownsampleTaskDuringDisruption(sourceIndex, targetIndex, config, disruptionStart, disruptionEnd); - waitUntil(() -> 
cluster.client().admin().cluster().preparePendingClusterTasks().get().pendingTasks().isEmpty()); - ensureStableCluster(cluster.numDataAndMasterNodes()); - assertTargetIndex(cluster, sourceIndex, targetIndex, indexedDocs); - } + startDownsampleTaskDuringDisruption(sourceIndex, targetIndex, config, disruptionStart, disruptionEnd); + waitUntil(() -> cluster.client().admin().cluster().preparePendingClusterTasks().get().pendingTasks().isEmpty()); + ensureStableCluster(cluster.numDataAndMasterNodes()); + assertTargetIndex(cluster, sourceIndex, targetIndex, indexedDocs); } /** @@ -301,65 +299,64 @@ private void startDownsampleTaskDuringDisruption( } public void testDownsampleIndexWithFullClusterRestart() throws Exception { - try (InternalTestCluster cluster = internalCluster()) { - final List masterNodes = cluster.startMasterOnlyNodes(1); - cluster.startDataOnlyNodes(3); - ensureStableCluster(cluster.size()); - ensureGreen(); - - final String sourceIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - final String downsampleIndex = randomAlphaOfLength(11).toLowerCase(Locale.ROOT); - long startTime = LocalDateTime.parse("2020-09-09T18:00:00").atZone(ZoneId.of("UTC")).toInstant().toEpochMilli(); - setup(sourceIndex, 1, 0, startTime); - final DownsampleConfig config = new DownsampleConfig(randomInterval()); - final DownsampleActionSingleNodeTests.SourceSupplier sourceSupplier = () -> { - final String ts = randomDateForInterval(config.getInterval(), startTime); - double counterValue = DATE_FORMATTER.parseMillis(ts); - final List dimensionValues = new ArrayList<>(5); - for (int j = 0; j < randomIntBetween(1, 5); j++) { - dimensionValues.add(randomAlphaOfLength(6)); - } - return XContentFactory.jsonBuilder() - .startObject() - .field(FIELD_TIMESTAMP, ts) - .field(FIELD_DIMENSION_1, randomFrom(dimensionValues)) - .field(FIELD_DIMENSION_2, randomIntBetween(1, 10)) - .field(FIELD_METRIC_COUNTER, counterValue) - .endObject(); - }; - int indexedDocs = bulkIndex(sourceIndex, sourceSupplier, DOC_COUNT); - prepareSourceIndex(sourceIndex); - final CountDownLatch disruptionStart = new CountDownLatch(1); - final CountDownLatch disruptionEnd = new CountDownLatch(1); - - new Thread(new Disruptor(cluster, sourceIndex, new DisruptionListener() { - @Override - public void disruptionStart() { - disruptionStart.countDown(); - } + final InternalTestCluster cluster = internalCluster(); + final List masterNodes = cluster.startMasterOnlyNodes(1); + cluster.startDataOnlyNodes(3); + ensureStableCluster(cluster.size()); + ensureGreen(); + + final String sourceIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + final String downsampleIndex = randomAlphaOfLength(11).toLowerCase(Locale.ROOT); + long startTime = LocalDateTime.parse("2020-09-09T18:00:00").atZone(ZoneId.of("UTC")).toInstant().toEpochMilli(); + setup(sourceIndex, 1, 0, startTime); + final DownsampleConfig config = new DownsampleConfig(randomInterval()); + final DownsampleActionSingleNodeTests.SourceSupplier sourceSupplier = () -> { + final String ts = randomDateForInterval(config.getInterval(), startTime); + double counterValue = DATE_FORMATTER.parseMillis(ts); + final List dimensionValues = new ArrayList<>(5); + for (int j = 0; j < randomIntBetween(1, 5); j++) { + dimensionValues.add(randomAlphaOfLength(6)); + } + return XContentFactory.jsonBuilder() + .startObject() + .field(FIELD_TIMESTAMP, ts) + .field(FIELD_DIMENSION_1, randomFrom(dimensionValues)) + .field(FIELD_DIMENSION_2, randomIntBetween(1, 10)) + .field(FIELD_METRIC_COUNTER, counterValue) + 
.endObject(); + }; + int indexedDocs = bulkIndex(sourceIndex, sourceSupplier, DOC_COUNT); + prepareSourceIndex(sourceIndex); + final CountDownLatch disruptionStart = new CountDownLatch(1); + final CountDownLatch disruptionEnd = new CountDownLatch(1); + + new Thread(new Disruptor(cluster, sourceIndex, new DisruptionListener() { + @Override + public void disruptionStart() { + disruptionStart.countDown(); + } - @Override - public void disruptionEnd() { - disruptionEnd.countDown(); - } - }, masterNodes.get(0), (ignored) -> { - try { - cluster.fullRestart(new InternalTestCluster.RestartCallback() { - @Override - public boolean validateClusterForming() { - return true; - } - }); - } catch (Exception e) { - throw new RuntimeException(e); - } - })).start(); + @Override + public void disruptionEnd() { + disruptionEnd.countDown(); + } + }, masterNodes.get(0), (ignored) -> { + try { + cluster.fullRestart(new InternalTestCluster.RestartCallback() { + @Override + public boolean validateClusterForming() { + return true; + } + }); + } catch (Exception e) { + throw new RuntimeException(e); + } + })).start(); - startDownsampleTaskDuringDisruption(sourceIndex, downsampleIndex, config, disruptionStart, disruptionEnd); - waitUntil(() -> cluster.client().admin().cluster().preparePendingClusterTasks().get().pendingTasks().isEmpty()); - ensureStableCluster(cluster.numDataAndMasterNodes()); - assertTargetIndex(cluster, sourceIndex, downsampleIndex, indexedDocs); - } + startDownsampleTaskDuringDisruption(sourceIndex, downsampleIndex, config, disruptionStart, disruptionEnd); + waitUntil(() -> cluster.client().admin().cluster().preparePendingClusterTasks().get().pendingTasks().isEmpty()); + ensureStableCluster(cluster.numDataAndMasterNodes()); + assertTargetIndex(cluster, sourceIndex, downsampleIndex, indexedDocs); } private void assertTargetIndex(final InternalTestCluster cluster, final String sourceIndex, final String targetIndex, int indexedDocs) { @@ -369,20 +366,26 @@ private void assertTargetIndex(final InternalTestCluster cluster, final String s .getIndex(new GetIndexRequest().indices(targetIndex)) .actionGet(); assertEquals(1, getIndexResponse.indices().length); - final SearchResponse sourceIndexSearch = cluster.client() - .prepareSearch(sourceIndex) - .setQuery(new MatchAllQueryBuilder()) - .setSize(Math.min(DOC_COUNT, indexedDocs)) - .setTrackTotalHitsUpTo(Integer.MAX_VALUE) - .get(); - assertEquals(indexedDocs, sourceIndexSearch.getHits().getHits().length); - final SearchResponse targetIndexSearch = cluster.client() - .prepareSearch(targetIndex) - .setQuery(new MatchAllQueryBuilder()) - .setSize(Math.min(DOC_COUNT, indexedDocs)) - .setTrackTotalHitsUpTo(Integer.MAX_VALUE) - .get(); - assertTrue(targetIndexSearch.getHits().getHits().length > 0); + assertResponse( + cluster.client() + .prepareSearch(sourceIndex) + .setQuery(new MatchAllQueryBuilder()) + .setSize(Math.min(DOC_COUNT, indexedDocs)) + .setTrackTotalHitsUpTo(Integer.MAX_VALUE), + sourceIndexSearch -> { + assertEquals(indexedDocs, sourceIndexSearch.getHits().getHits().length); + } + ); + assertResponse( + cluster.client() + .prepareSearch(targetIndex) + .setQuery(new MatchAllQueryBuilder()) + .setSize(Math.min(DOC_COUNT, indexedDocs)) + .setTrackTotalHitsUpTo(Integer.MAX_VALUE), + targetIndexSearch -> { + assertTrue(targetIndexSearch.getHits().getHits().length > 0); + } + ); } private int bulkIndex(final String indexName, final DownsampleActionSingleNodeTests.SourceSupplier sourceSupplier, int docCount) diff --git 
a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleTransportFailureIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleTransportFailureIT.java index 59a0cd34a1db0..d94d609cf3470 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleTransportFailureIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleTransportFailureIT.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.downsample.DownsampleAction; import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; @@ -52,6 +51,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 2, numClientNodes = 1, supportsDedicatedMasters = false) public class DownsampleTransportFailureIT extends ESIntegTestCase { @@ -228,12 +228,15 @@ private void assertDownsampleFailure(final String nodeName) { } private void assertDocumentsExist(final String nodeName, final String indexName) { - final SearchResponse searchResponse = client(nodeName).prepareSearch(indexName) - .setQuery(new MatchAllQueryBuilder()) - .setTrackTotalHitsUpTo(Integer.MAX_VALUE) - .setSize(DOCUMENTS.size()) - .get(); - assertEquals(DOCUMENTS.size(), searchResponse.getHits().getHits().length); + assertResponse( + client(nodeName).prepareSearch(indexName) + .setQuery(new MatchAllQueryBuilder()) + .setTrackTotalHitsUpTo(Integer.MAX_VALUE) + .setSize(DOCUMENTS.size()), + searchResponse -> { + assertEquals(DOCUMENTS.size(), searchResponse.getHits().getHits().length); + } + ); } private void assertIndexExists(final String nodeName, final String indexName) { diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/ILMDownsampleDisruptionIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/ILMDownsampleDisruptionIT.java index 553d0f912407d..a023f171ad209 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/ILMDownsampleDisruptionIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/ILMDownsampleDisruptionIT.java @@ -19,7 +19,6 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; @@ -61,6 +60,7 @@ import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static 
org.elasticsearch.xpack.core.rollup.ConfigTestHelpers.randomInterval; import static org.hamcrest.Matchers.equalTo; @@ -140,64 +140,63 @@ public void setup(final String sourceIndex, int numOfShards, int numOfReplicas, } public void testILMDownsampleRollingRestart() throws Exception { - try (InternalTestCluster cluster = internalCluster()) { - final List<String> masterNodes = cluster.startMasterOnlyNodes(1); - cluster.startDataOnlyNodes(3); - ensureStableCluster(cluster.size()); - ensureGreen(); - - final String sourceIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - long startTime = LocalDateTime.parse("1993-09-09T18:00:00").atZone(ZoneId.of("UTC")).toInstant().toEpochMilli(); - setup(sourceIndex, 1, 0, startTime); - final DownsampleConfig config = new DownsampleConfig(randomInterval()); - final SourceSupplier sourceSupplier = () -> { - final String ts = randomDateForInterval(config.getInterval(), startTime); - double counterValue = DATE_FORMATTER.parseMillis(ts); - final List<String> dimensionValues = new ArrayList<>(5); - for (int j = 0; j < randomIntBetween(1, 5); j++) { - dimensionValues.add(randomAlphaOfLength(6)); - } - return XContentFactory.jsonBuilder() - .startObject() - .field(FIELD_TIMESTAMP, ts) - .field(FIELD_DIMENSION_1, randomFrom(dimensionValues)) - .field(FIELD_DIMENSION_2, randomIntBetween(1, 10)) - .field(FIELD_METRIC_COUNTER, counterValue) - .endObject(); - }; - int indexedDocs = bulkIndex(sourceIndex, sourceSupplier, DOC_COUNT); - final CountDownLatch disruptionStart = new CountDownLatch(1); - final CountDownLatch disruptionEnd = new CountDownLatch(1); - - new Thread(new Disruptor(cluster, sourceIndex, new DisruptionListener() { - @Override - public void disruptionStart() { - disruptionStart.countDown(); - } + final InternalTestCluster cluster = internalCluster(); + final List<String> masterNodes = cluster.startMasterOnlyNodes(1); + cluster.startDataOnlyNodes(3); + ensureStableCluster(cluster.size()); + ensureGreen(); + + final String sourceIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + long startTime = LocalDateTime.parse("1993-09-09T18:00:00").atZone(ZoneId.of("UTC")).toInstant().toEpochMilli(); + setup(sourceIndex, 1, 0, startTime); + final DownsampleConfig config = new DownsampleConfig(randomInterval()); + final SourceSupplier sourceSupplier = () -> { + final String ts = randomDateForInterval(config.getInterval(), startTime); + double counterValue = DATE_FORMATTER.parseMillis(ts); + final List<String> dimensionValues = new ArrayList<>(5); + for (int j = 0; j < randomIntBetween(1, 5); j++) { + dimensionValues.add(randomAlphaOfLength(6)); + } + return XContentFactory.jsonBuilder() + .startObject() + .field(FIELD_TIMESTAMP, ts) + .field(FIELD_DIMENSION_1, randomFrom(dimensionValues)) + .field(FIELD_DIMENSION_2, randomIntBetween(1, 10)) + .field(FIELD_METRIC_COUNTER, counterValue) + .endObject(); + }; + int indexedDocs = bulkIndex(sourceIndex, sourceSupplier, DOC_COUNT); + final CountDownLatch disruptionStart = new CountDownLatch(1); + final CountDownLatch disruptionEnd = new CountDownLatch(1); + + new Thread(new Disruptor(cluster, sourceIndex, new DisruptionListener() { + @Override + public void disruptionStart() { + disruptionStart.countDown(); + } - @Override - public void disruptionEnd() { - disruptionEnd.countDown(); - } - }, masterNodes.get(0), (ignored) -> { - try { - cluster.rollingRestart(new InternalTestCluster.RestartCallback() { - @Override - public boolean validateClusterForming() { - return true; - } - }); - } catch (Exception e) { - throw new RuntimeException(e);
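These test files also move their search assertions from a bare SearchResponse obtained via .get() to ElasticsearchAssertions.assertResponse(builder, response -> { ... }), which scopes the response to the assertion lambda so the helper can release it afterwards. A simplified, JDK-only analogue of that idiom follows; the real helper executes the request itself and works with Elasticsearch's ref-counted responses, so RefCounted and the other names below are illustrative only:

import java.util.function.Consumer;

// Sketch of the resource-scoping idiom behind assertResponse(...): the helper
// owns acquisition and release, the caller only writes assertions.
public class ScopedResponse {
    interface RefCounted extends AutoCloseable {
        void close(); // stand-in for decRef()
    }

    static <T extends RefCounted> void assertResponse(T response, Consumer<T> assertions) {
        try {
            assertions.accept(response);
        } finally {
            response.close(); // the helper releases; callers cannot leak the ref
        }
    }

    public static void main(String[] args) {
        RefCounted fake = () -> System.out.println("released");
        assertResponse(fake, r -> System.out.println("asserting on response"));
    }
}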
- } - })).start(); + @Override + public void disruptionEnd() { + disruptionEnd.countDown(); + } + }, masterNodes.get(0), (ignored) -> { + try { + cluster.rollingRestart(new InternalTestCluster.RestartCallback() { + @Override + public boolean validateClusterForming() { + return true; + } + }); + } catch (Exception e) { + throw new RuntimeException(e); + } + })).start(); - final String targetIndex = "downsample-1h-" + sourceIndex; - startDownsampleTaskViaIlm(sourceIndex, targetIndex, disruptionStart, disruptionEnd); - waitUntil(() -> cluster.client().admin().cluster().preparePendingClusterTasks().get().pendingTasks().isEmpty()); - ensureStableCluster(cluster.numDataAndMasterNodes()); - assertTargetIndex(cluster, targetIndex, indexedDocs); - } + final String targetIndex = "downsample-1h-" + sourceIndex; + startDownsampleTaskViaIlm(sourceIndex, targetIndex, disruptionStart, disruptionEnd); + waitUntil(() -> cluster.client().admin().cluster().preparePendingClusterTasks().get().pendingTasks().isEmpty()); + ensureStableCluster(cluster.numDataAndMasterNodes()); + assertTargetIndex(cluster, targetIndex, indexedDocs); } private void startDownsampleTaskViaIlm( @@ -239,13 +238,16 @@ private void assertTargetIndex(final InternalTestCluster cluster, final String t .getIndex(new GetIndexRequest().indices(targetIndex)) .actionGet(); assertEquals(1, getIndexResponse.indices().length); - final SearchResponse targetIndexSearch = cluster.client() - .prepareSearch(targetIndex) - .setQuery(new MatchAllQueryBuilder()) - .setSize(Math.min(DOC_COUNT, indexedDocs)) - .setTrackTotalHitsUpTo(Integer.MAX_VALUE) - .get(); - assertTrue(targetIndexSearch.getHits().getHits().length > 0); + assertResponse( + cluster.client() + .prepareSearch(targetIndex) + .setQuery(new MatchAllQueryBuilder()) + .setSize(Math.min(DOC_COUNT, indexedDocs)) + .setTrackTotalHitsUpTo(Integer.MAX_VALUE), + targetIndexSearch -> { + assertTrue(targetIndexSearch.getHits().getHits().length > 0); + } + ); } private int bulkIndex(final String indexName, final SourceSupplier sourceSupplier, int docCount) throws IOException { diff --git a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java index 9bf580673df2e..c0abab1234133 100644 --- a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java +++ b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java @@ -1434,16 +1434,11 @@ private String createDataStream() throws Exception { null ); - ComposableIndexTemplate template = new ComposableIndexTemplate( - List.of(dataStreamName + "*"), - indexTemplate, - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStreamName + "*")) + .template(indexTemplate) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(dataStreamName + "_template") .indexTemplate(template); assertAcked(client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet()); diff --git a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleDataStreamTests.java 
b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleDataStreamTests.java index 1719ce2b13c53..95640f4625849 100644 --- a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleDataStreamTests.java +++ b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleDataStreamTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStream; @@ -62,6 +61,7 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.DEFAULT_TIMESTAMP_FIELD; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; public class DownsampleDataStreamTests extends ESSingleNodeTestCase { @@ -145,35 +145,39 @@ public void testDataStreamDownsample() throws ExecutionException, InterruptedExc new DateHistogramAggregationBuilder("dateHistogram").field("@timestamp").fixedInterval(DateHistogramInterval.MINUTE) ) ); - final SearchResponse searchResponse = client().search(searchRequest).actionGet(); - Arrays.stream(searchResponse.getHits().getHits()) - .limit(10) - .forEach(hit -> assertThat(hit.getIndex(), equalTo(rolloverResponse.getNewIndex()))); - assertThat(searchResponse.getHits().getHits()[10].getIndex(), equalTo(downsampleTargetIndex)); - final InternalDateHistogram dateHistogram = searchResponse.getAggregations().get("dateHistogram"); - // NOTE: due to unpredictable values for the @timestamp field we don't know how many buckets we have in the - // date histogram. We know, anyway, that we will have 10 documents in the first two buckets, 10 documents in the last two buckets. - // The actual number of documents on each of the first two and last two buckets depends on the timestamp value generated when - // indexing - // documents, which might cross the minute boundary of the fixed_interval date histogram aggregation. - // Then we check there is a variable number of intermediate buckets with exactly 0 documents. This is a result of the way - // downsampling - // deals with a fixed interval granularity that is larger than the date histogram fixed interval (1 minute (date histogram - // fixed_interval) - // < 1 hour (downsample fixed_interval)). 
- final int totalBuckets = dateHistogram.getBuckets().size(); - assertThat(dateHistogram.getBuckets().get(0).getDocCount() + dateHistogram.getBuckets().get(1).getDocCount(), equalTo(10L)); - dateHistogram.getBuckets() - .stream() - .skip(2) - .limit(totalBuckets - 3) - .map(InternalDateHistogram.Bucket::getDocCount) - .toList() - .forEach(docCount -> assertThat(docCount, equalTo(0L))); - assertThat( - dateHistogram.getBuckets().get(totalBuckets - 2).getDocCount() + dateHistogram.getBuckets().get(totalBuckets - 1).getDocCount(), - equalTo(10L) - ); + assertResponse(client().search(searchRequest), searchResponse -> { + Arrays.stream(searchResponse.getHits().getHits()) + .limit(10) + .forEach(hit -> assertThat(hit.getIndex(), equalTo(rolloverResponse.getNewIndex()))); + assertThat(searchResponse.getHits().getHits()[10].getIndex(), equalTo(downsampleTargetIndex)); + final InternalDateHistogram dateHistogram = searchResponse.getAggregations().get("dateHistogram"); + // NOTE: due to unpredictable values for the @timestamp field we don't know how many buckets we have in the + // date histogram. We know, anyway, that we will have 10 documents in the first two buckets, 10 documents in the last two + // buckets. + // The actual number of documents on each of the first two and last two buckets depends on the timestamp value generated when + // indexing + // documents, which might cross the minute boundary of the fixed_interval date histogram aggregation. + // Then we check there is a variable number of intermediate buckets with exactly 0 documents. This is a result of the way + // downsampling + // deals with a fixed interval granularity that is larger than the date histogram fixed interval (1 minute (date histogram + // fixed_interval) + // < 1 hour (downsample fixed_interval)). 
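The long comment retained in this hunk describes the expected bucket shape: with a 1-minute date histogram over one raw-resolution index and one hour-downsampled index, the 10 raw documents fall into the first two buckets, the 10 downsampled-period documents into the last two, and the interior buckets are empty. A small standalone illustration of that shape; the concrete counts are one possible split, not taken from a real run:

// Illustration of the bucket assertions above (JDK only): only the edge
// buckets hold documents, and the split across each pair of edges can vary.
public class BucketShape {
    public static void main(String[] args) {
        long[] buckets = { 7, 3, 0, 0, 0, 0, 4, 6 }; // one possible outcome
        int n = buckets.length;
        check(buckets[0] + buckets[1] == 10, "first two buckets hold the 10 raw docs");
        for (int i = 2; i < n - 2; i++) {
            check(buckets[i] == 0, "interior buckets are empty");
        }
        check(buckets[n - 2] + buckets[n - 1] == 10, "last two buckets hold the 10 downsampled docs");
        System.out.println("bucket shape matches the test's expectation");
    }

    private static void check(boolean condition, String message) {
        if (condition == false) throw new AssertionError(message);
    }
}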
+ final int totalBuckets = dateHistogram.getBuckets().size(); + assertThat(dateHistogram.getBuckets().get(0).getDocCount() + dateHistogram.getBuckets().get(1).getDocCount(), equalTo(10L)); + dateHistogram.getBuckets() + .stream() + .skip(2) + .limit(totalBuckets - 3) + .map(InternalDateHistogram.Bucket::getDocCount) + .toList() + .forEach(docCount -> assertThat(docCount, equalTo(0L))); + assertThat( + dateHistogram.getBuckets().get(totalBuckets - 2).getDocCount() + dateHistogram.getBuckets() + .get(totalBuckets - 1) + .getDocCount(), + equalTo(10L) + ); + }); } private void putComposableIndexTemplate(final String id, final List<String> patterns) throws IOException { @@ -202,7 +206,11 @@ private void putComposableIndexTemplate(final String id, final List<String> patt null ); request.indexTemplate( - new ComposableIndexTemplate(patterns, template, null, null, null, null, new ComposableIndexTemplate.DataStreamTemplate(), null) + ComposableIndexTemplate.builder() + .indexPatterns(patterns) + .template(template) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); } diff --git a/x-pack/plugin/enrich/src/internalClusterTest/java/org/elasticsearch/xpack/enrich/EnrichMultiNodeIT.java b/x-pack/plugin/enrich/src/internalClusterTest/java/org/elasticsearch/xpack/enrich/EnrichMultiNodeIT.java index 5416741c8743d..b81a5e6b902b3 100644 --- a/x-pack/plugin/enrich/src/internalClusterTest/java/org/elasticsearch/xpack/enrich/EnrichMultiNodeIT.java +++ b/x-pack/plugin/enrich/src/internalClusterTest/java/org/elasticsearch/xpack/enrich/EnrichMultiNodeIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.Strings; import org.elasticsearch.ingest.common.IngestCommonPlugin; import org.elasticsearch.plugins.Plugin; @@ -53,6 +54,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsInRelativeOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; @@ -82,6 +84,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { // TODO Change this to run with security enabled // https://github.com/elastic/elasticsearch/issues/75940 .put(XPackSettings.SECURITY_ENABLED.getKey(), false) + .put("thread_pool.write.size", 2) .build(); } @@ -135,6 +138,25 @@ public void testEnrich() { enrich(keys, randomFrom(nodes)); } + public void testStressEnrich() { + List<String> nodes = internalCluster().startNodes( + 3, + Settings.builder().put("enrich.coordinator_proxy.max_concurrent_requests", 1).build() + ); + int indices = randomIntBetween(5, 10); + final Map<String, List<String>> keys = Maps.newHashMapWithExpectedSize(indices); + for (int i = 0; i < indices; i++) { + final String indexName = "index-" + i; + List<String> k = createSourceIndex(indexName, 64); + final String policyName = "policy-" + i; + createAndExecutePolicy(policyName, indexName); + final String pipelineName = "pipeline-" + i; + createPipeline(policyName, pipelineName); + keys.put(pipelineName, k); + } + enrich(keys, randomFrom(nodes), 50); + } + public void testEnrichDedicatedIngestNode() {
internalCluster().startNode(); String ingestOnlyNode = internalCluster().startNode(ingestOnlyNode()); @@ -210,13 +232,19 @@ public void testExecutePolicyNeverOnElectedMaster() throws Exception { } private static void enrich(List<String> keys, String coordinatingNode) { - int numDocs = 256; + enrich(Map.of(PIPELINE_NAME, keys), coordinatingNode, 256); + } + + private static void enrich(Map<String, List<String>> keys, String coordinatingNode, int numDocs) { + final String[] executedPipeline = new String[2 * numDocs]; BulkRequest bulkRequest = new BulkRequest("my-index"); for (int i = 0; i < numDocs; i++) { + final String pipeline = randomFrom(keys.keySet()); + executedPipeline[i] = pipeline; IndexRequest indexRequest = new IndexRequest(); indexRequest.id(Integer.toString(i)); - indexRequest.setPipeline(PIPELINE_NAME); - indexRequest.source(Map.of(MATCH_FIELD, randomFrom(keys))); + indexRequest.setPipeline(pipeline); + indexRequest.source(Map.of(MATCH_FIELD, randomFrom(keys.get(pipeline)))); bulkRequest.add(indexRequest); } BulkResponse bulkResponse = client(coordinatingNode).bulk(bulkRequest).actionGet(); @@ -231,7 +259,7 @@ private static void enrich(List<String> keys, String coordinatingNode) { Map<String, Object> source = getResponse.getSourceAsMap(); Map<?, ?> userEntry = (Map<?, ?>) source.get("user"); assertThat(userEntry.size(), equalTo(DECORATE_FIELDS.length + 1)); - assertThat(keys.contains(userEntry.get(MATCH_FIELD)), is(true)); + assertThat(keys.get(executedPipeline[i]), containsInRelativeOrder(userEntry.get(MATCH_FIELD))); for (String field : DECORATE_FIELDS) { assertThat(userEntry.get(field), notNullValue()); } @@ -250,6 +278,10 @@ private static void enrich(List<String> keys, String coordinatingNode) { } private static List<String> createSourceIndex(int numDocs) { + return createSourceIndex(SOURCE_INDEX_NAME, numDocs); + } + + private static List<String> createSourceIndex(String indexName, int numDocs) { Set<String> keys = new HashSet<>(); for (int i = 0; i < numDocs; i++) { String key; @@ -257,7 +289,7 @@ private static List<String> createSourceIndex(int numDocs) { key = randomAlphaOfLength(16); } while (keys.add(key) == false); - IndexRequest indexRequest = new IndexRequest(SOURCE_INDEX_NAME); + IndexRequest indexRequest = new IndexRequest(indexName); indexRequest.create(true); indexRequest.id(key); indexRequest.source( @@ -274,23 +306,27 @@ private static List<String> createSourceIndex(int numDocs) { ); client().index(indexRequest).actionGet(); } - indicesAdmin().refresh(new RefreshRequest(SOURCE_INDEX_NAME)).actionGet(); + indicesAdmin().refresh(new RefreshRequest(indexName)).actionGet(); return List.copyOf(keys); } private static void createAndExecutePolicy() { + createAndExecutePolicy(POLICY_NAME, SOURCE_INDEX_NAME); + } + + private static void createAndExecutePolicy(String policyName, String indexName) { EnrichPolicy enrichPolicy = new EnrichPolicy( EnrichPolicy.MATCH_TYPE, null, - List.of(SOURCE_INDEX_NAME), + List.of(indexName), MATCH_FIELD, List.of(DECORATE_FIELDS) ); - PutEnrichPolicyAction.Request request = new PutEnrichPolicyAction.Request(POLICY_NAME, enrichPolicy); + PutEnrichPolicyAction.Request request = new PutEnrichPolicyAction.Request(policyName, enrichPolicy); client().execute(PutEnrichPolicyAction.INSTANCE, request).actionGet(); final ActionFuture<ExecuteEnrichPolicyAction.Response> policyExecuteFuture = client().execute( ExecuteEnrichPolicyAction.INSTANCE, - new ExecuteEnrichPolicyAction.Request(POLICY_NAME) + new ExecuteEnrichPolicyAction.Request(policyName) ); // Make sure we can deserialize enrich policy execution task status final List<TaskInfo> tasks =
clusterAdmin().prepareListTasks().setActions(EnrichPolicyExecutor.TASK_ACTION).get().getTasks(); @@ -307,11 +343,15 @@ private static void createAndExecutePolicy() { } private static void createPipeline() { + createPipeline(POLICY_NAME, PIPELINE_NAME); + } + + private static void createPipeline(String policyName, String pipelineName) { String pipelineBody = Strings.format(""" { "processors": [ { "enrich": { "policy_name": "%s", "field": "%s", "target_field": "user" } } ] - }""", POLICY_NAME, MATCH_FIELD); - PutPipelineRequest request = new PutPipelineRequest(PIPELINE_NAME, new BytesArray(pipelineBody), XContentType.JSON); + }""", policyName, MATCH_FIELD); + PutPipelineRequest request = new PutPipelineRequest(pipelineName, new BytesArray(pipelineBody), XContentType.JSON); clusterAdmin().putPipeline(request).actionGet(); } } diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichCoordinatorProxyAction.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichCoordinatorProxyAction.java index 2a880c9a22cdd..5e0e7a6314d67 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichCoordinatorProxyAction.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichCoordinatorProxyAction.java @@ -30,6 +30,7 @@ import java.util.ArrayList; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -238,13 +239,24 @@ static BiConsumer final List<Tuple<Integer, SearchRequest>> enrichIndexRequestsAndSlots = entry.getValue(); ActionListener<MultiSearchResponse> listener = ActionListener.wrap(response -> { shardResponses.put(enrichIndexName, new Tuple<>(response, null)); + response.incRef(); // will be released during reduce if (counter.incrementAndGet() == itemsPerIndex.size()) { - consumer.accept(reduce(request.requests().size(), itemsPerIndex, shardResponses), null); + var res = reduce(request.requests().size(), itemsPerIndex, shardResponses); + try { + consumer.accept(res, null); + } finally { + res.decRef(); + } } }, e -> { shardResponses.put(enrichIndexName, new Tuple<>(null, e)); if (counter.incrementAndGet() == itemsPerIndex.size()) { - consumer.accept(reduce(request.requests().size(), itemsPerIndex, shardResponses), null); + var res = reduce(request.requests().size(), itemsPerIndex, shardResponses); + try { + consumer.accept(res, null); + } finally { + res.decRef(); + } } }); @@ -261,14 +273,23 @@ static MultiSearchResponse reduce( Map<String, Tuple<MultiSearchResponse, Exception>> shardResponses ) { MultiSearchResponse.Item[] items = new MultiSearchResponse.Item[numRequest]; - for (Map.Entry<String, Tuple<MultiSearchResponse, Exception>> rspEntry : shardResponses.entrySet()) { + for (Iterator<Map.Entry<String, Tuple<MultiSearchResponse, Exception>>> iterator = shardResponses.entrySet() + .iterator(); iterator.hasNext();) { + Map.Entry<String, Tuple<MultiSearchResponse, Exception>> rspEntry = iterator.next(); List<Tuple<Integer, SearchRequest>> reqSlots = itemsPerIndex.get(rspEntry.getKey()); if (rspEntry.getValue().v1() != null) { MultiSearchResponse shardResponse = rspEntry.getValue().v1(); for (int i = 0; i < shardResponse.getResponses().length; i++) { int slot = reqSlots.get(i).v1(); - items[slot] = shardResponse.getResponses()[i]; + var res = shardResponse.getResponses()[i]; + items[slot] = res; + var r = res.getResponse(); + if (r != null) { + r.incRef(); + } } + iterator.remove(); + shardResponse.decRef(); } else if (rspEntry.getValue().v2() != null) { Exception e = rspEntry.getValue().v2(); for (Tuple<Integer, SearchRequest> originSlot : reqSlots) { diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutorTests.java
b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutorTests.java index e7a022e841a85..457da7f65294b 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutorTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutorTests.java @@ -277,7 +277,7 @@ protected void ); // Launch a fake policy run that will block until firstTaskBlock is counted down. - PlainActionFuture<ExecuteEnrichPolicyAction.Response> firstTaskResult = PlainActionFuture.newFuture(); + PlainActionFuture<ExecuteEnrichPolicyAction.Response> firstTaskResult = new PlainActionFuture<>(); testExecutor.coordinatePolicyExecution( new ExecuteEnrichPolicyAction.Request(testPolicyName).setWaitForCompletion(false), firstTaskResult diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java index 134db88ed9e66..f95c4959be771 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java @@ -80,11 +80,14 @@ import java.util.Locale; import java.util.Map; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; @@ -124,17 +127,19 @@ public void testRunner() throws Exception { }""", XContentType.JSON).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)).actionGet(); assertEquals(RestStatus.CREATED, indexRequest.status()); - SearchResponse sourceSearchResponse = client().search( - new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map<String, Object> sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); - assertNotNull(sourceDocMap); - assertThat(sourceDocMap.get("field1"), is(equalTo("value1"))); - assertThat(sourceDocMap.get("field2"), is(equalTo(2))); - assertThat(sourceDocMap.get("field3"), is(equalTo("ignored"))); - assertThat(sourceDocMap.get("field4"), is(equalTo("ignored"))); - assertThat(sourceDocMap.get("field5"), is(equalTo("value5"))); + assertResponse( + client().search(new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()))), + searchResponse -> { + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map<String, Object> sourceDocMap = searchResponse.getHits().getAt(0).getSourceAsMap(); + assertNotNull(sourceDocMap); + assertThat(sourceDocMap.get("field1"), is(equalTo("value1"))); + assertThat(sourceDocMap.get("field2"), is(equalTo(2))); + assertThat(sourceDocMap.get("field3"), is(equalTo("ignored"))); + assertThat(sourceDocMap.get("field4"), is(equalTo("ignored"))); + assertThat(sourceDocMap.get("field5"), is(equalTo("value5"))); + } + ); List<String> enrichFields = List.of("field2", "field5"); EnrichPolicy policy = new
EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of(sourceIndex), "field1", enrichFields); @@ -177,18 +182,21 @@ public void testRunner() throws Exception { } """); // Validate document structure - SearchResponse enrichSearchResponse = client().search( - new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - - assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); - assertNotNull(enrichDocument); - assertThat(enrichDocument.size(), is(equalTo(3))); - assertThat(enrichDocument.get("field1"), is(equalTo("value1"))); - assertThat(enrichDocument.get("field2"), is(equalTo(2))); - assertThat(enrichDocument.get("field5"), is(equalTo("value5"))); - + assertResponse( + client().search( + new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) + ), + enrichSearchResponse -> { + + assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); + assertNotNull(enrichDocument); + assertThat(enrichDocument.size(), is(equalTo(3))); + assertThat(enrichDocument.get("field1"), is(equalTo("value1"))); + assertThat(enrichDocument.get("field2"), is(equalTo(2))); + assertThat(enrichDocument.get("field5"), is(equalTo("value5"))); + } + ); // Validate segments validateSegments(createdEnrichIndex, 1); @@ -203,15 +211,16 @@ public void testRunnerGeoMatchType() throws Exception { .actionGet(); assertEquals(RestStatus.CREATED, indexRequest.status()); - SearchResponse sourceSearchResponse = client().search( - new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); - assertNotNull(sourceDocMap); - assertThat(sourceDocMap.get("location"), is(equalTo("POINT(10.0 10.0)"))); - assertThat(sourceDocMap.get("zipcode"), is(equalTo(90210))); - + assertResponse( + client().search(new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()))), + sourceSearchResponse -> { + assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); + assertNotNull(sourceDocMap); + assertThat(sourceDocMap.get("location"), is(equalTo("POINT(10.0 10.0)"))); + assertThat(sourceDocMap.get("zipcode"), is(equalTo(90210))); + } + ); List enrichFields = List.of("zipcode"); EnrichPolicy policy = new EnrichPolicy(EnrichPolicy.GEO_MATCH_TYPE, null, List.of(sourceIndex), "location", enrichFields); String policyName = "test1"; @@ -248,17 +257,20 @@ public void testRunnerGeoMatchType() throws Exception { } """); // Validate document structure - SearchResponse enrichSearchResponse = client().search( - new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - - assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); - assertNotNull(enrichDocument); - assertThat(enrichDocument.size(), is(equalTo(2))); - assertThat(enrichDocument.get("location"), 
is(equalTo("POINT(10.0 10.0)"))); - assertThat(enrichDocument.get("zipcode"), is(equalTo(90210))); - + assertResponse( + client().search( + new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) + ), + enrichSearchResponse -> { + + assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); + assertNotNull(enrichDocument); + assertThat(enrichDocument.size(), is(equalTo(2))); + assertThat(enrichDocument.get("location"), is(equalTo("POINT(10.0 10.0)"))); + assertThat(enrichDocument.get("zipcode"), is(equalTo(90210))); + } + ); // Validate segments validateSegments(createdEnrichIndex, 1); @@ -290,15 +302,16 @@ private void testNumberRangeMatchType(String rangeType) throws Exception { .actionGet(); assertEquals(RestStatus.CREATED, indexRequest.status()); - SearchResponse sourceSearchResponse = client().search( - new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); - assertNotNull(sourceDocMap); - assertThat(sourceDocMap.get("range"), is(equalTo(Map.of("lt", 10, "gt", 1)))); - assertThat(sourceDocMap.get("zipcode"), is(equalTo(90210))); - + assertResponse( + client().search(new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()))), + sourceSearchResponse -> { + assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); + assertNotNull(sourceDocMap); + assertThat(sourceDocMap.get("range"), is(equalTo(Map.of("lt", 10, "gt", 1)))); + assertThat(sourceDocMap.get("zipcode"), is(equalTo(90210))); + } + ); List enrichFields = List.of("zipcode"); EnrichPolicy policy = new EnrichPolicy(EnrichPolicy.RANGE_TYPE, null, List.of(sourceIndex), "range", enrichFields, null); String policyName = "test1"; @@ -337,17 +350,20 @@ private void testNumberRangeMatchType(String rangeType) throws Exception { """, rangeType + "_range")); // Validate document structure - SearchResponse enrichSearchResponse = client().search( - new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - - assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); - assertNotNull(enrichDocument); - assertThat(enrichDocument.size(), is(equalTo(2))); - assertThat(enrichDocument.get("range"), is(equalTo(Map.of("lt", 10, "gt", 1)))); - assertThat(enrichDocument.get("zipcode"), is(equalTo(90210))); - + assertResponse( + client().search( + new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) + ), + enrichSearchResponse -> { + + assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); + assertNotNull(enrichDocument); + assertThat(enrichDocument.size(), is(equalTo(2))); + assertThat(enrichDocument.get("range"), is(equalTo(Map.of("lt", 10, "gt", 1)))); + assertThat(enrichDocument.get("zipcode"), is(equalTo(90210))); + } + ); // Validate segments 
validateSegments(createdEnrichIndex, 1); @@ -381,15 +397,18 @@ public void testRunnerRangeTypeWithIpRange() throws Exception { assertNotNull(subnetField); assertThat(subnetField.get("type"), is(equalTo("ip_range"))); - SearchResponse sourceSearchResponse = client().search( - new SearchRequest(sourceIndexName).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); - assertNotNull(sourceDocMap); - assertThat(sourceDocMap.get("subnet"), is(equalTo("10.0.0.0/8"))); - assertThat(sourceDocMap.get("department"), is(equalTo("research"))); - + assertResponse( + client().search( + new SearchRequest(sourceIndexName).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) + ), + sourceSearchResponse -> { + assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); + assertNotNull(sourceDocMap); + assertThat(sourceDocMap.get("subnet"), is(equalTo("10.0.0.0/8"))); + assertThat(sourceDocMap.get("department"), is(equalTo("research"))); + } + ); List enrichFields = List.of("department"); EnrichPolicy policy = new EnrichPolicy(EnrichPolicy.RANGE_TYPE, null, List.of(sourceIndexName), "subnet", enrichFields); String policyName = "test1"; @@ -427,19 +446,22 @@ public void testRunnerRangeTypeWithIpRange() throws Exception { } """); // Validate document structure and lookup of element in range - SearchResponse enrichSearchResponse = client().search( - new SearchRequest(".enrich-test1").source( - SearchSourceBuilder.searchSource().query(QueryBuilders.matchQuery("subnet", "10.0.0.1")) - ) - ).actionGet(); - - assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); - assertNotNull(enrichDocument); - assertThat(enrichDocument.size(), is(equalTo(2))); - assertThat(enrichDocument.get("subnet"), is(equalTo("10.0.0.0/8"))); - assertThat(enrichDocument.get("department"), is(equalTo("research"))); - + assertResponse( + client().search( + new SearchRequest(".enrich-test1").source( + SearchSourceBuilder.searchSource().query(QueryBuilders.matchQuery("subnet", "10.0.0.1")) + ) + ), + enrichSearchResponse -> { + + assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); + assertNotNull(enrichDocument); + assertThat(enrichDocument.size(), is(equalTo(2))); + assertThat(enrichDocument.get("subnet"), is(equalTo("10.0.0.0/8"))); + assertThat(enrichDocument.get("department"), is(equalTo("research"))); + } + ); // Validate segments validateSegments(createdEnrichIndex, 1); @@ -465,20 +487,24 @@ public void testRunnerMultiSource() throws Exception { }""", idx, idx), XContentType.JSON).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) ).actionGet(); assertEquals(RestStatus.CREATED, indexRequest.status()); - - SearchResponse sourceSearchResponse = client().search( - new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); - assertNotNull(sourceDocMap); - 
assertThat(sourceDocMap.get("idx"), is(equalTo(idx))); - assertThat(sourceDocMap.get("key"), is(equalTo("key" + idx))); - assertThat(sourceDocMap.get("field1"), is(equalTo("value1"))); - assertThat(sourceDocMap.get("field2"), is(equalTo(2))); - assertThat(sourceDocMap.get("field3"), is(equalTo("ignored"))); - assertThat(sourceDocMap.get("field4"), is(equalTo("ignored"))); - assertThat(sourceDocMap.get("field5"), is(equalTo("value5"))); + final int targetIdx = idx; + assertResponse( + client().search( + new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) + ), + sourceSearchResponse -> { + assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); + assertNotNull(sourceDocMap); + assertThat(sourceDocMap.get("idx"), is(equalTo(targetIdx))); + assertThat(sourceDocMap.get("key"), is(equalTo("key" + targetIdx))); + assertThat(sourceDocMap.get("field1"), is(equalTo("value1"))); + assertThat(sourceDocMap.get("field2"), is(equalTo(2))); + assertThat(sourceDocMap.get("field3"), is(equalTo("ignored"))); + assertThat(sourceDocMap.get("field4"), is(equalTo("ignored"))); + assertThat(sourceDocMap.get("field5"), is(equalTo("value5"))); + } + ); } String sourceIndexPattern = baseSourceName + "*"; @@ -531,18 +557,21 @@ public void testRunnerMultiSource() throws Exception { } """); // Validate document structure - SearchResponse enrichSearchResponse = client().search( - new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(3L)); - Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); - assertNotNull(enrichDocument); - assertThat(enrichDocument.size(), is(equalTo(5))); - assertThat(enrichDocument.get("key"), is(equalTo("key0"))); - assertThat(enrichDocument.get("field1"), is(equalTo("value1"))); - assertThat(enrichDocument.get("field2"), is(equalTo(2))); - assertThat(enrichDocument.get("field5"), is(equalTo("value5"))); - + assertResponse( + client().search( + new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) + ), + enrichSearchResponse -> { + assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(3L)); + Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); + assertNotNull(enrichDocument); + assertThat(enrichDocument.size(), is(equalTo(5))); + assertThat(enrichDocument.get("key"), is(equalTo("key0"))); + assertThat(enrichDocument.get("field1"), is(equalTo("value1"))); + assertThat(enrichDocument.get("field2"), is(equalTo(2))); + assertThat(enrichDocument.get("field5"), is(equalTo("value5"))); + } + ); // Validate segments validateSegments(createdEnrichIndex, 3); @@ -569,27 +598,32 @@ public void testRunnerMultiSourceDocIdCollisions() throws Exception { }""", idx, idx), XContentType.JSON).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) ).actionGet(); assertEquals(RestStatus.CREATED, indexRequest.status()); - - SearchResponse sourceSearchResponse = client().search( - new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); - 
assertNotNull(sourceDocMap); - assertThat(sourceDocMap.get("idx"), is(equalTo(idx))); - assertThat(sourceDocMap.get("key"), is(equalTo("key" + idx))); - assertThat(sourceDocMap.get("field1"), is(equalTo("value1"))); - assertThat(sourceDocMap.get("field2"), is(equalTo(2))); - assertThat(sourceDocMap.get("field3"), is(equalTo("ignored"))); - assertThat(sourceDocMap.get("field4"), is(equalTo("ignored"))); - assertThat(sourceDocMap.get("field5"), is(equalTo("value5"))); - - SearchResponse routingSearchResponse = client().search( - new SearchRequest(sourceIndex).source( - SearchSourceBuilder.searchSource().query(QueryBuilders.matchQuery("_routing", collidingDocId + idx)) - ) - ).actionGet(); - assertEquals(1L, routingSearchResponse.getHits().getTotalHits().value); + final int targetIdx = idx; + assertResponse( + client().search( + new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) + ), + sourceSearchResponse -> { + assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); + assertNotNull(sourceDocMap); + assertThat(sourceDocMap.get("idx"), is(equalTo(targetIdx))); + assertThat(sourceDocMap.get("key"), is(equalTo("key" + targetIdx))); + assertThat(sourceDocMap.get("field1"), is(equalTo("value1"))); + assertThat(sourceDocMap.get("field2"), is(equalTo(2))); + assertThat(sourceDocMap.get("field3"), is(equalTo("ignored"))); + assertThat(sourceDocMap.get("field4"), is(equalTo("ignored"))); + assertThat(sourceDocMap.get("field5"), is(equalTo("value5"))); + } + ); + assertHitCount( + client().search( + new SearchRequest(sourceIndex).source( + SearchSourceBuilder.searchSource().query(QueryBuilders.matchQuery("_routing", collidingDocId + idx)) + ) + ), + 1L + ); } String sourceIndexPattern = baseSourceName + "*"; @@ -641,26 +675,32 @@ public void testRunnerMultiSourceDocIdCollisions() throws Exception { } """); // Validate document structure - SearchResponse enrichSearchResponse = client().search( - new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(3L)); - Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); - assertNotNull(enrichDocument); - assertThat(enrichDocument.size(), is(equalTo(5))); - assertThat(enrichDocument.get("key"), is(equalTo("key0"))); - assertThat(enrichDocument.get("field1"), is(equalTo("value1"))); - assertThat(enrichDocument.get("field2"), is(equalTo(2))); - assertThat(enrichDocument.get("field5"), is(equalTo("value5"))); - + assertResponse( + client().search( + new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) + ), + enrichSearchResponse -> { + assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(3L)); + Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); + assertNotNull(enrichDocument); + assertThat(enrichDocument.size(), is(equalTo(5))); + assertThat(enrichDocument.get("key"), is(equalTo("key0"))); + assertThat(enrichDocument.get("field1"), is(equalTo("value1"))); + assertThat(enrichDocument.get("field2"), is(equalTo(2))); + assertThat(enrichDocument.get("field5"), is(equalTo("value5"))); + } + ); // Validate removal of routing values for (int idx = 0; idx < numberOfSourceIndices; idx++) { - SearchResponse 
routingSearchResponse = client().search( - new SearchRequest(".enrich-test1").source( - SearchSourceBuilder.searchSource().query(QueryBuilders.matchQuery("_routing", collidingDocId + idx)) - ) - ).actionGet(); - assertEquals(0L, routingSearchResponse.getHits().getTotalHits().value); + final int targetIdx = idx; + assertHitCount( + client().search( + new SearchRequest(".enrich-test1").source( + SearchSourceBuilder.searchSource().query(QueryBuilders.matchQuery("_routing", collidingDocId + targetIdx)) + ) + ), + 0 + ); } // Validate segments @@ -689,19 +729,24 @@ public void testRunnerMultiSourceEnrichKeyCollisions() throws Exception { ).actionGet(); assertEquals(RestStatus.CREATED, indexRequest.status()); - SearchResponse sourceSearchResponse = client().search( - new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); - assertNotNull(sourceDocMap); - assertThat(sourceDocMap.get("idx"), is(equalTo(idx))); - assertThat(sourceDocMap.get("key"), is(equalTo("key"))); - assertThat(sourceDocMap.get("field1"), is(equalTo("value1"))); - assertThat(sourceDocMap.get("field2"), is(equalTo(2))); - assertThat(sourceDocMap.get("field3"), is(equalTo("ignored"))); - assertThat(sourceDocMap.get("field4"), is(equalTo("ignored"))); - assertThat(sourceDocMap.get("field5"), is(equalTo("value5"))); + final int targetIdx = idx; + assertResponse( + client().search( + new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) + ), + sourceSearchResponse -> { + assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); + assertNotNull(sourceDocMap); + assertThat(sourceDocMap.get("idx"), is(equalTo(targetIdx))); + assertThat(sourceDocMap.get("key"), is(equalTo("key"))); + assertThat(sourceDocMap.get("field1"), is(equalTo("value1"))); + assertThat(sourceDocMap.get("field2"), is(equalTo(2))); + assertThat(sourceDocMap.get("field3"), is(equalTo("ignored"))); + assertThat(sourceDocMap.get("field4"), is(equalTo("ignored"))); + assertThat(sourceDocMap.get("field5"), is(equalTo("value5"))); + } + ); } String sourceIndexPattern = baseSourceName + "*"; @@ -753,18 +798,21 @@ public void testRunnerMultiSourceEnrichKeyCollisions() throws Exception { } """); // Validate document structure - SearchResponse enrichSearchResponse = client().search( - new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(3L)); - Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); - assertNotNull(enrichDocument); - assertThat(enrichDocument.size(), is(equalTo(5))); - assertThat(enrichDocument.get("key"), is(equalTo("key"))); - assertThat(enrichDocument.get("field1"), is(equalTo("value1"))); - assertThat(enrichDocument.get("field2"), is(equalTo(2))); - assertThat(enrichDocument.get("field5"), is(equalTo("value5"))); - + assertResponse( + client().search( + new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) + ), + enrichSearchResponse -> { + assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(3L)); + Map 
enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); + assertNotNull(enrichDocument); + assertThat(enrichDocument.size(), is(equalTo(5))); + assertThat(enrichDocument.get("key"), is(equalTo("key"))); + assertThat(enrichDocument.get("field1"), is(equalTo("value1"))); + assertThat(enrichDocument.get("field2"), is(equalTo(2))); + assertThat(enrichDocument.get("field5"), is(equalTo("value5"))); + } + ); // Validate segments validateSegments(createdEnrichIndex, 3); @@ -994,17 +1042,19 @@ public void testRunnerObjectSourceMapping() throws Exception { ).actionGet(); assertEquals(RestStatus.CREATED, indexRequest.status()); - SearchResponse sourceSearchResponse = client().search( - new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); - assertNotNull(sourceDocMap); - Map dataField = ((Map) sourceDocMap.get("data")); - assertNotNull(dataField); - assertThat(dataField.get("field1"), is(equalTo("value1"))); - assertThat(dataField.get("field2"), is(equalTo(2))); - assertThat(dataField.get("field3"), is(equalTo("ignored"))); + assertResponse( + client().search(new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()))), + sourceSearchResponse -> { + assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); + assertNotNull(sourceDocMap); + Map dataField = ((Map) sourceDocMap.get("data")); + assertNotNull(dataField); + assertThat(dataField.get("field1"), is(equalTo("value1"))); + assertThat(dataField.get("field2"), is(equalTo(2))); + assertThat(dataField.get("field3"), is(equalTo("ignored"))); + } + ); String policyName = "test1"; List enrichFields = List.of("data.field2", "missingField"); @@ -1046,20 +1096,24 @@ public void testRunnerObjectSourceMapping() throws Exception { } } """); - SearchResponse enrichSearchResponse = client().search( - new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - - assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); - assertNotNull(enrichDocument); - assertThat(enrichDocument.size(), is(equalTo(1))); - Map resultDataField = ((Map) enrichDocument.get("data")); - assertNotNull(resultDataField); - assertThat(resultDataField.size(), is(equalTo(2))); - assertThat(resultDataField.get("field1"), is(equalTo("value1"))); - assertThat(resultDataField.get("field2"), is(equalTo(2))); - assertNull(resultDataField.get("field3")); + assertResponse( + client().search( + new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) + ), + enrichSearchResponse -> { + + assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); + assertNotNull(enrichDocument); + assertThat(enrichDocument.size(), is(equalTo(1))); + Map resultDataField = ((Map) enrichDocument.get("data")); + assertNotNull(resultDataField); + assertThat(resultDataField.size(), is(equalTo(2))); + assertThat(resultDataField.get("field1"), is(equalTo("value1"))); + 
assertThat(resultDataField.get("field2"), is(equalTo(2))); + assertNull(resultDataField.get("field3")); + } + ); // Validate segments validateSegments(createdEnrichIndex, 1); @@ -1103,18 +1157,19 @@ public void testRunnerExplicitObjectSourceMapping() throws Exception { ).actionGet(); assertEquals(RestStatus.CREATED, indexRequest.status()); - SearchResponse sourceSearchResponse = client().search( - new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); - assertNotNull(sourceDocMap); - Map dataField = ((Map) sourceDocMap.get("data")); - assertNotNull(dataField); - assertThat(dataField.get("field1"), is(equalTo("value1"))); - assertThat(dataField.get("field2"), is(equalTo(2))); - assertThat(dataField.get("field3"), is(equalTo("ignored"))); - + assertResponse( + client().search(new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()))), + sourceSearchResponse -> { + assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); + assertNotNull(sourceDocMap); + Map dataField = ((Map) sourceDocMap.get("data")); + assertNotNull(dataField); + assertThat(dataField.get("field1"), is(equalTo("value1"))); + assertThat(dataField.get("field2"), is(equalTo(2))); + assertThat(dataField.get("field3"), is(equalTo("ignored"))); + } + ); String policyName = "test1"; List enrichFields = List.of("data.field2", "missingField"); EnrichPolicy policy = new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of(sourceIndex), "data.field1", enrichFields); @@ -1155,21 +1210,24 @@ public void testRunnerExplicitObjectSourceMapping() throws Exception { } } """); - SearchResponse enrichSearchResponse = client().search( - new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - - assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); - assertNotNull(enrichDocument); - assertThat(enrichDocument.size(), is(equalTo(1))); - Map resultDataField = ((Map) enrichDocument.get("data")); - assertNotNull(resultDataField); - assertThat(resultDataField.size(), is(equalTo(2))); - assertThat(resultDataField.get("field1"), is(equalTo("value1"))); - assertThat(resultDataField.get("field2"), is(equalTo(2))); - assertNull(resultDataField.get("field3")); - + assertResponse( + client().search( + new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) + ), + enrichSearchResponse -> { + + assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); + assertNotNull(enrichDocument); + assertThat(enrichDocument.size(), is(equalTo(1))); + Map resultDataField = ((Map) enrichDocument.get("data")); + assertNotNull(resultDataField); + assertThat(resultDataField.size(), is(equalTo(2))); + assertThat(resultDataField.get("field1"), is(equalTo("value1"))); + assertThat(resultDataField.get("field2"), is(equalTo(2))); + assertNull(resultDataField.get("field3")); + } + ); // Validate segments validateSegments(createdEnrichIndex, 1); @@ 
-1213,18 +1271,19 @@ public void testRunnerExplicitObjectSourceMappingRangePolicy() throws Exception }""", XContentType.JSON).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)).actionGet(); assertEquals(RestStatus.CREATED, indexRequest.status()); - SearchResponse sourceSearchResponse = client().search( - new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); - assertNotNull(sourceDocMap); - Map dataField = ((Map) sourceDocMap.get("data")); - assertNotNull(dataField); - assertThat(dataField.get("subnet"), is(equalTo("10.0.0.0/8"))); - assertThat(dataField.get("department"), is(equalTo("research"))); - assertThat(dataField.get("field3"), is(equalTo("ignored"))); - + assertResponse( + client().search(new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()))), + sourceSearchResponse -> { + assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); + assertNotNull(sourceDocMap); + Map dataField = ((Map) sourceDocMap.get("data")); + assertNotNull(dataField); + assertThat(dataField.get("subnet"), is(equalTo("10.0.0.0/8"))); + assertThat(dataField.get("department"), is(equalTo("research"))); + assertThat(dataField.get("field3"), is(equalTo("ignored"))); + } + ); String policyName = "test1"; List enrichFields = List.of("data.department", "missingField"); EnrichPolicy policy = new EnrichPolicy(EnrichPolicy.RANGE_TYPE, null, List.of(sourceIndex), "data.subnet", enrichFields); @@ -1265,22 +1324,26 @@ public void testRunnerExplicitObjectSourceMappingRangePolicy() throws Exception } } """); - SearchResponse enrichSearchResponse = client().search( - new SearchRequest(".enrich-test1").source( - SearchSourceBuilder.searchSource().query(QueryBuilders.matchQuery("data.subnet", "10.0.0.1")) - ) - ).actionGet(); - - assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); - assertNotNull(enrichDocument); - assertThat(enrichDocument.size(), is(equalTo(1))); - Map resultDataField = ((Map) enrichDocument.get("data")); - assertNotNull(resultDataField); - assertThat(resultDataField.size(), is(equalTo(2))); - assertThat(resultDataField.get("subnet"), is(equalTo("10.0.0.0/8"))); - assertThat(resultDataField.get("department"), is(equalTo("research"))); - assertNull(resultDataField.get("field3")); + assertResponse( + client().search( + new SearchRequest(".enrich-test1").source( + SearchSourceBuilder.searchSource().query(QueryBuilders.matchQuery("data.subnet", "10.0.0.1")) + ) + ), + enrichSearchResponse -> { + + assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); + assertNotNull(enrichDocument); + assertThat(enrichDocument.size(), is(equalTo(1))); + Map resultDataField = ((Map) enrichDocument.get("data")); + assertNotNull(resultDataField); + assertThat(resultDataField.size(), is(equalTo(2))); + assertThat(resultDataField.get("subnet"), is(equalTo("10.0.0.0/8"))); + assertThat(resultDataField.get("department"), is(equalTo("research"))); + assertNull(resultDataField.get("field3")); + } + ); // Validate segments 
validateSegments(createdEnrichIndex, 1); @@ -1330,20 +1393,21 @@ public void testRunnerTwoObjectLevelsSourceMapping() throws Exception { }""", XContentType.JSON).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)).actionGet(); assertEquals(RestStatus.CREATED, indexRequest.status()); - SearchResponse sourceSearchResponse = client().search( - new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); - assertNotNull(sourceDocMap); - Map dataField = ((Map) sourceDocMap.get("data")); - assertNotNull(dataField); - Map fieldsField = ((Map) dataField.get("fields")); - assertNotNull(fieldsField); - assertThat(fieldsField.get("field1"), is(equalTo("value1"))); - assertThat(fieldsField.get("field2"), is(equalTo(2))); - assertThat(fieldsField.get("field3"), is(equalTo("ignored"))); - + assertResponse( + client().search(new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()))), + sourceSearchResponse -> { + assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); + assertNotNull(sourceDocMap); + Map dataField = ((Map) sourceDocMap.get("data")); + assertNotNull(dataField); + Map fieldsField = ((Map) dataField.get("fields")); + assertNotNull(fieldsField); + assertThat(fieldsField.get("field1"), is(equalTo("value1"))); + assertThat(fieldsField.get("field2"), is(equalTo(2))); + assertThat(fieldsField.get("field3"), is(equalTo("ignored"))); + } + ); String policyName = "test1"; List enrichFields = List.of("data.fields.field2", "missingField"); EnrichPolicy policy = new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of(sourceIndex), "data.fields.field1", enrichFields); @@ -1389,22 +1453,26 @@ public void testRunnerTwoObjectLevelsSourceMapping() throws Exception { } """); - SearchResponse enrichSearchResponse = client().search( - new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - - assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); - assertNotNull(enrichDocument); - assertThat(enrichDocument.size(), is(equalTo(1))); - Map resultDataField = ((Map) enrichDocument.get("data")); - assertNotNull(resultDataField); - Map resultFieldsField = ((Map) resultDataField.get("fields")); - assertNotNull(resultFieldsField); - assertThat(resultFieldsField.size(), is(equalTo(2))); - assertThat(resultFieldsField.get("field1"), is(equalTo("value1"))); - assertThat(resultFieldsField.get("field2"), is(equalTo(2))); - assertNull(resultFieldsField.get("field3")); + assertResponse( + client().search( + new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) + ), + enrichSearchResponse -> { + + assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); + assertNotNull(enrichDocument); + assertThat(enrichDocument.size(), is(equalTo(1))); + Map resultDataField = ((Map) enrichDocument.get("data")); + assertNotNull(resultDataField); + Map resultFieldsField = ((Map) resultDataField.get("fields")); 
+ assertNotNull(resultFieldsField); + assertThat(resultFieldsField.size(), is(equalTo(2))); + assertThat(resultFieldsField.get("field1"), is(equalTo("value1"))); + assertThat(resultFieldsField.get("field2"), is(equalTo(2))); + assertNull(resultFieldsField.get("field3")); + } + ); // Validate segments validateSegments(createdEnrichIndex, 1); @@ -1454,19 +1522,21 @@ public void testRunnerTwoObjectLevelsSourceMappingRangePolicy() throws Exception }""", XContentType.JSON).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)).actionGet(); assertEquals(RestStatus.CREATED, indexRequest.status()); - SearchResponse sourceSearchResponse = client().search( - new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); - assertNotNull(sourceDocMap); - Map dataField = ((Map) sourceDocMap.get("data")); - assertNotNull(dataField); - Map fieldsField = ((Map) dataField.get("fields")); - assertNotNull(fieldsField); - assertThat(fieldsField.get("subnet"), is(equalTo("10.0.0.0/8"))); - assertThat(fieldsField.get("department"), is(equalTo("research"))); - assertThat(fieldsField.get("field3"), is(equalTo("ignored"))); + assertResponse( + client().search(new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()))), + sourceSearchResponse -> { + assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); + assertNotNull(sourceDocMap); + Map dataField = ((Map) sourceDocMap.get("data")); + assertNotNull(dataField); + Map fieldsField = ((Map) dataField.get("fields")); + assertNotNull(fieldsField); + assertThat(fieldsField.get("subnet"), is(equalTo("10.0.0.0/8"))); + assertThat(fieldsField.get("department"), is(equalTo("research"))); + assertThat(fieldsField.get("field3"), is(equalTo("ignored"))); + } + ); String policyName = "test1"; List enrichFields = List.of("data.fields.department", "missingField"); @@ -1512,23 +1582,25 @@ public void testRunnerTwoObjectLevelsSourceMappingRangePolicy() throws Exception } } """); - SearchResponse enrichSearchResponse = client().search( - new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - - assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); - assertNotNull(enrichDocument); - assertThat(enrichDocument.size(), is(equalTo(1))); - Map resultDataField = ((Map) enrichDocument.get("data")); - assertNotNull(resultDataField); - Map resultFieldsField = ((Map) resultDataField.get("fields")); - assertNotNull(resultFieldsField); - assertThat(resultFieldsField.size(), is(equalTo(2))); - assertThat(resultFieldsField.get("subnet"), is(equalTo("10.0.0.0/8"))); - assertThat(resultFieldsField.get("department"), is(equalTo("research"))); - assertNull(resultFieldsField.get("field3")); - + assertResponse( + client().search( + new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) + ), + enrichSearchResponse -> { + assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); 
+ assertNotNull(enrichDocument); + assertThat(enrichDocument.size(), is(equalTo(1))); + Map resultDataField = ((Map) enrichDocument.get("data")); + assertNotNull(resultDataField); + Map resultFieldsField = ((Map) resultDataField.get("fields")); + assertNotNull(resultFieldsField); + assertThat(resultFieldsField.size(), is(equalTo(2))); + assertThat(resultFieldsField.get("subnet"), is(equalTo("10.0.0.0/8"))); + assertThat(resultFieldsField.get("department"), is(equalTo("research"))); + assertNull(resultFieldsField.get("field3")); + } + ); // Validate segments validateSegments(createdEnrichIndex, 1); @@ -1718,16 +1790,17 @@ public void testRunnerDottedKeyNameSourceMapping() throws Exception { ).actionGet(); assertEquals(RestStatus.CREATED, indexRequest.status()); - SearchResponse sourceSearchResponse = client().search( - new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); - assertNotNull(sourceDocMap); - assertThat(sourceDocMap.get("data.field1"), is(equalTo("value1"))); - assertThat(sourceDocMap.get("data.field2"), is(equalTo(2))); - assertThat(sourceDocMap.get("data.field3"), is(equalTo("ignored"))); - + assertResponse( + client().search(new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()))), + sourceSearchResponse -> { + assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); + assertNotNull(sourceDocMap); + assertThat(sourceDocMap.get("data.field1"), is(equalTo("value1"))); + assertThat(sourceDocMap.get("data.field2"), is(equalTo(2))); + assertThat(sourceDocMap.get("data.field3"), is(equalTo("ignored"))); + } + ); String policyName = "test1"; List enrichFields = List.of("data.field2", "missingField"); EnrichPolicy policy = new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of(sourceIndex), "data.field1", enrichFields); @@ -1768,17 +1841,21 @@ public void testRunnerDottedKeyNameSourceMapping() throws Exception { } } """); - SearchResponse enrichSearchResponse = client().search( - new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - - assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); - assertNotNull(enrichDocument); - assertThat(enrichDocument.size(), is(equalTo(2))); - assertThat(enrichDocument.get("data.field1"), is(equalTo("value1"))); - assertThat(enrichDocument.get("data.field2"), is(equalTo(2))); - assertNull(enrichDocument.get("data.field3")); + assertResponse( + client().search( + new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) + ), + enrichSearchResponse -> { + + assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); + assertNotNull(enrichDocument); + assertThat(enrichDocument.size(), is(equalTo(2))); + assertThat(enrichDocument.get("data.field1"), is(equalTo("value1"))); + assertThat(enrichDocument.get("data.field2"), is(equalTo(2))); + assertNull(enrichDocument.get("data.field3")); + } + ); // Validate segments 
validateSegments(createdEnrichIndex, 1); @@ -1799,18 +1876,19 @@ public void testRunnerWithForceMergeRetry() throws Exception { }""", XContentType.JSON).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)).actionGet(); assertEquals(RestStatus.CREATED, indexRequest.status()); - SearchResponse sourceSearchResponse = client().search( - new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); - assertNotNull(sourceDocMap); - assertThat(sourceDocMap.get("field1"), is(equalTo("value1"))); - assertThat(sourceDocMap.get("field2"), is(equalTo(2))); - assertThat(sourceDocMap.get("field3"), is(equalTo("ignored"))); - assertThat(sourceDocMap.get("field4"), is(equalTo("ignored"))); - assertThat(sourceDocMap.get("field5"), is(equalTo("value5"))); - + assertResponse( + client().search(new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()))), + sourceSearchResponse -> { + assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); + assertNotNull(sourceDocMap); + assertThat(sourceDocMap.get("field1"), is(equalTo("value1"))); + assertThat(sourceDocMap.get("field2"), is(equalTo(2))); + assertThat(sourceDocMap.get("field3"), is(equalTo("ignored"))); + assertThat(sourceDocMap.get("field4"), is(equalTo("ignored"))); + assertThat(sourceDocMap.get("field5"), is(equalTo("value5"))); + } + ); List enrichFields = List.of("field2", "field5"); EnrichPolicy policy = new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of(sourceIndex), "field1", enrichFields); String policyName = "test1"; @@ -1930,24 +2008,30 @@ protected void ensureSingleSegment(String destinationIndexName, int attempt) { } """); // Validate document structure - SearchResponse allEnrichDocs = client().search( - new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - assertThat(allEnrichDocs.getHits().getTotalHits().value, equalTo(2L)); + assertHitCount( + client().search( + new SearchRequest(".enrich-test1").source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) + ), + 2L + ); for (String keyValue : List.of("value1", "value1.1")) { - SearchResponse enrichSearchResponse = client().search( - new SearchRequest(".enrich-test1").source( - SearchSourceBuilder.searchSource().query(QueryBuilders.matchQuery("field1", keyValue)) - ) - ).actionGet(); - - assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); - assertNotNull(enrichDocument); - assertThat(enrichDocument.size(), is(equalTo(3))); - assertThat(enrichDocument.get("field1"), is(equalTo(keyValue))); - assertThat(enrichDocument.get("field2"), is(equalTo(2))); - assertThat(enrichDocument.get("field5"), is(equalTo("value5"))); + assertResponse( + client().search( + new SearchRequest(".enrich-test1").source( + SearchSourceBuilder.searchSource().query(QueryBuilders.matchQuery("field1", keyValue)) + ) + ), + enrichSearchResponse -> { + + assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); + 
assertNotNull(enrichDocument); + assertThat(enrichDocument.size(), is(equalTo(3))); + assertThat(enrichDocument.get("field1"), is(equalTo(keyValue))); + assertThat(enrichDocument.get("field2"), is(equalTo(2))); + assertThat(enrichDocument.get("field5"), is(equalTo("value5"))); + } + ); } // Validate segments @@ -2059,15 +2143,14 @@ public void testRunRangePolicyWithObjectFieldAsMatchField() throws Exception { assertThat(e.getMessage(), equalTo("Field 'field1' has type [object] which doesn't appear to be a range type")); } - public void testEnrichFieldsConflictMappingTypes() { + public void testEnrichFieldsConflictMappingTypes() throws Exception { createIndex("source-1", Settings.EMPTY, "_doc", "user", "type=keyword", "name", "type=text", "zipcode", "type=long"); - client().prepareIndex("source-1") - .setSource("user", "u1", "name", "n", "zipcode", 90000) + prepareIndex("source-1").setSource("user", "u1", "name", "n", "zipcode", 90000) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); createIndex("source-2", Settings.EMPTY, "_doc", "user", "type=keyword", "zipcode", "type=long"); - client().prepareIndex("source-2").setSource(""" + prepareIndex("source-2").setSource(""" { "user": "u2", "name": { @@ -2109,23 +2192,21 @@ public void testEnrichFieldsConflictMappingTypes() { } """); // Validate document structure - SearchResponse searchResponse = client().search(new SearchRequest(".enrich-test1")).actionGet(); - ElasticsearchAssertions.assertHitCount(searchResponse, 2L); - Map hit0 = searchResponse.getHits().getAt(0).getSourceAsMap(); - assertThat(hit0, equalTo(Map.of("user", "u1", "name", "n", "zipcode", 90000))); - Map hit1 = searchResponse.getHits().getAt(1).getSourceAsMap(); - assertThat(hit1, equalTo(Map.of("user", "u2", "name", Map.of("first", "f", "last", "l"), "zipcode", 90001))); + assertResponse(client().search(new SearchRequest(".enrich-test1")), searchResponse -> { + ElasticsearchAssertions.assertHitCount(searchResponse, 2L); + Map hit0 = searchResponse.getHits().getAt(0).getSourceAsMap(); + assertThat(hit0, equalTo(Map.of("user", "u1", "name", "n", "zipcode", 90000))); + Map hit1 = searchResponse.getHits().getAt(1).getSourceAsMap(); + assertThat(hit1, equalTo(Map.of("user", "u2", "name", Map.of("first", "f", "last", "l"), "zipcode", 90001))); + }); } - public void testEnrichMappingConflictFormats() { + public void testEnrichMappingConflictFormats() throws ExecutionException, InterruptedException { createIndex("source-1", Settings.EMPTY, "_doc", "user", "type=keyword", "date", "type=date,format=yyyy"); - client().prepareIndex("source-1") - .setSource("user", "u1", "date", "2023") - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get(); + prepareIndex("source-1").setSource("user", "u1", "date", "2023").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); createIndex("source-2", Settings.EMPTY, "_doc", "user", "type=keyword", "date", "type=date,format=yyyy-MM"); - client().prepareIndex("source-2").setSource(""" + prepareIndex("source-2").setSource(""" { "user": "u2", "date": "2023-05" @@ -2153,18 +2234,18 @@ public void testEnrichMappingConflictFormats() { } """); // Validate document structure - SearchResponse searchResponse = client().search(new SearchRequest(".enrich-test1")).actionGet(); - ElasticsearchAssertions.assertHitCount(searchResponse, 2L); - Map hit0 = searchResponse.getHits().getAt(0).getSourceAsMap(); - assertThat(hit0, equalTo(Map.of("user", "u1", "date", "2023"))); - Map hit1 = searchResponse.getHits().getAt(1).getSourceAsMap(); - 
assertThat(hit1, equalTo(Map.of("user", "u2", "date", "2023-05"))); + assertResponse(client().search(new SearchRequest(".enrich-test1")), searchResponse -> { + ElasticsearchAssertions.assertHitCount(searchResponse, 2L); + Map hit0 = searchResponse.getHits().getAt(0).getSourceAsMap(); + assertThat(hit0, equalTo(Map.of("user", "u1", "date", "2023"))); + Map hit1 = searchResponse.getHits().getAt(1).getSourceAsMap(); + assertThat(hit1, equalTo(Map.of("user", "u2", "date", "2023-05"))); + }); } - public void testEnrichObjectField() { + public void testEnrichObjectField() throws ExecutionException, InterruptedException { createIndex("source-1", Settings.EMPTY, "_doc", "id", "type=keyword", "name.first", "type=keyword", "name.last", "type=keyword"); - client().prepareIndex("source-1") - .setSource("user", "u1", "name.first", "F1", "name.last", "L1") + prepareIndex("source-1").setSource("user", "u1", "name.first", "F1", "name.last", "L1") .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); EnrichPolicy policy = new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of("source-1"), "user", List.of("name")); @@ -2190,10 +2271,11 @@ public void testEnrichObjectField() { } } """); - SearchResponse searchResponse = client().search(new SearchRequest(".enrich-test1")).actionGet(); - ElasticsearchAssertions.assertHitCount(searchResponse, 1L); - Map hit0 = searchResponse.getHits().getAt(0).getSourceAsMap(); - assertThat(hit0, equalTo(Map.of("user", "u1", "name.first", "F1", "name.last", "L1"))); + assertResponse(client().search(new SearchRequest(".enrich-test1")), searchResponse -> { + ElasticsearchAssertions.assertHitCount(searchResponse, 1L); + Map hit0 = searchResponse.getHits().getAt(0).getSourceAsMap(); + assertThat(hit0, equalTo(Map.of("user", "u1", "name.first", "F1", "name.last", "L1"))); + }); } public void testEnrichNestedField() throws Exception { @@ -2267,17 +2349,19 @@ public void testRunnerValidatesIndexIntegrity() throws Exception { }""", XContentType.JSON).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)).actionGet(); assertEquals(RestStatus.CREATED, indexRequest.status()); - SearchResponse sourceSearchResponse = client().search( - new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); - assertNotNull(sourceDocMap); - assertThat(sourceDocMap.get("field1"), is(equalTo("value1"))); - assertThat(sourceDocMap.get("field2"), is(equalTo(2))); - assertThat(sourceDocMap.get("field3"), is(equalTo("ignored"))); - assertThat(sourceDocMap.get("field4"), is(equalTo("ignored"))); - assertThat(sourceDocMap.get("field5"), is(equalTo("value5"))); + assertResponse( + client().search(new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()))), + sourceSearchResponse -> { + assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); + assertNotNull(sourceDocMap); + assertThat(sourceDocMap.get("field1"), is(equalTo("value1"))); + assertThat(sourceDocMap.get("field2"), is(equalTo(2))); + assertThat(sourceDocMap.get("field3"), is(equalTo("ignored"))); + assertThat(sourceDocMap.get("field4"), is(equalTo("ignored"))); + assertThat(sourceDocMap.get("field5"), is(equalTo("value5"))); + } + ); List enrichFields = 
List.of("field2", "field5"); EnrichPolicy policy = new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of(sourceIndex), "field1", enrichFields); diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/CoordinatorTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/CoordinatorTests.java index c28236dc688a4..e986f6e9e0656 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/CoordinatorTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/CoordinatorTests.java @@ -92,24 +92,34 @@ public void testCoordinateLookups() { for (int i = 0; i < 5; i++) { responseItems[i] = new MultiSearchResponse.Item(emptyResponse, null); } - lookupFunction.capturedConsumers.get(0).accept(new MultiSearchResponse(responseItems, 1L), null); - assertThat(coordinator.queue.size(), equalTo(0)); - assertThat(coordinator.getRemoteRequestsCurrent(), equalTo(1)); - assertThat(lookupFunction.capturedRequests.size(), equalTo(2)); - - // Replying last response, resulting in an empty queue and no outstanding requests. - responseItems = new MultiSearchResponse.Item[5]; - for (int i = 0; i < 5; i++) { - responseItems[i] = new MultiSearchResponse.Item(emptyResponse, null); - } - lookupFunction.capturedConsumers.get(1).accept(new MultiSearchResponse(responseItems, 1L), null); - assertThat(coordinator.queue.size(), equalTo(0)); - assertThat(coordinator.getRemoteRequestsCurrent(), equalTo(0)); - assertThat(lookupFunction.capturedRequests.size(), equalTo(2)); + final MultiSearchResponse res1 = new MultiSearchResponse(responseItems, 1L); + try { + lookupFunction.capturedConsumers.get(0).accept(res1, null); + assertThat(coordinator.queue.size(), equalTo(0)); + assertThat(coordinator.getRemoteRequestsCurrent(), equalTo(1)); + assertThat(lookupFunction.capturedRequests.size(), equalTo(2)); - // All individual action listeners for the search requests should have been invoked: - for (ActionListener searchActionListener : searchActionListeners) { - Mockito.verify(searchActionListener).onResponse(Mockito.eq(emptyResponse)); + // Replying last response, resulting in an empty queue and no outstanding requests. 
+ responseItems = new MultiSearchResponse.Item[5]; + for (int i = 0; i < 5; i++) { + responseItems[i] = new MultiSearchResponse.Item(emptyResponse, null); + } + var res2 = new MultiSearchResponse(responseItems, 1L); + try { + lookupFunction.capturedConsumers.get(1).accept(res2, null); + assertThat(coordinator.queue.size(), equalTo(0)); + assertThat(coordinator.getRemoteRequestsCurrent(), equalTo(0)); + assertThat(lookupFunction.capturedRequests.size(), equalTo(2)); + + // All individual action listeners for the search requests should have been invoked: + for (ActionListener searchActionListener : searchActionListeners) { + Mockito.verify(searchActionListener).onResponse(Mockito.eq(emptyResponse)); + } + } finally { + res2.decRef(); + } + } finally { + res1.decRef(); } } @@ -187,14 +197,19 @@ public void testCoordinateLookupsMultiSearchItemError() { for (int i = 0; i < 5; i++) { responseItems[i] = new MultiSearchResponse.Item(null, e); } - lookupFunction.capturedConsumers.get(0).accept(new MultiSearchResponse(responseItems, 1L), null); - assertThat(coordinator.queue.size(), equalTo(0)); - assertThat(coordinator.getRemoteRequestsCurrent(), equalTo(0)); - assertThat(lookupFunction.capturedRequests.size(), equalTo(1)); - - // All individual action listeners for the search requests should have been invoked: - for (ActionListener searchActionListener : searchActionListeners) { - Mockito.verify(searchActionListener).onFailure(Mockito.eq(e)); + var res = new MultiSearchResponse(responseItems, 1L); + try { + lookupFunction.capturedConsumers.get(0).accept(res, null); + assertThat(coordinator.queue.size(), equalTo(0)); + assertThat(coordinator.getRemoteRequestsCurrent(), equalTo(0)); + assertThat(lookupFunction.capturedRequests.size(), equalTo(1)); + + // All individual action listeners for the search requests should have been invoked: + for (ActionListener searchActionListener : searchActionListeners) { + Mockito.verify(searchActionListener).onFailure(Mockito.eq(e)); + } + } finally { + res.decRef(); } } @@ -239,16 +254,16 @@ public void testNoBlockingWhenQueueing() throws Exception { assertThat(lookupFunction.capturedConsumers.size(), is(1)); // Fulfill the captured consumer which will schedule the next item in the queue. 
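Every `MultiSearchResponse` this test constructs now owns a reference that must be released, which is why each construction is wrapped in try/finally with `decRef()` in the finally block, nesting included. The discipline in isolation, grounded in the same calls the diff itself uses:

```java
import org.elasticsearch.action.search.MultiSearchResponse;

import java.util.function.Consumer;

final class RefCountSketch {
    // Create the response (one reference), let the caller consume it, then
    // release the reference on every exit path, including assertion failures.
    static void withResponse(MultiSearchResponse.Item[] items, Consumer<MultiSearchResponse> consumer) {
        MultiSearchResponse response = new MultiSearchResponse(items, 1L);
        try {
            consumer.accept(response);
        } finally {
            response.decRef();
        }
    }
}
```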
- lookupFunction.capturedConsumers.get(0) - .accept( - new MultiSearchResponse(new MultiSearchResponse.Item[] { new MultiSearchResponse.Item(emptySearchResponse(), null) }, 1L), - null - ); - - // Ensure queue was drained and that the item in it was scheduled - assertThat(coordinator.queue.size(), equalTo(0)); - assertThat(lookupFunction.capturedRequests.size(), equalTo(2)); - assertThat(lookupFunction.capturedRequests.get(1).requests().get(0), sameInstance(searchRequest)); + var res = new MultiSearchResponse(new MultiSearchResponse.Item[] { new MultiSearchResponse.Item(emptySearchResponse(), null) }, 1L); + try { + lookupFunction.capturedConsumers.get(0).accept(res, null); + // Ensure queue was drained and that the item in it was scheduled + assertThat(coordinator.queue.size(), equalTo(0)); + assertThat(lookupFunction.capturedRequests.size(), equalTo(2)); + assertThat(lookupFunction.capturedRequests.get(1).requests().get(0), sameInstance(searchRequest)); + } finally { + res.decRef(); + } } public void testLookupFunction() { @@ -302,29 +317,48 @@ public void testReduce() { Map>> itemsPerIndex = new HashMap<>(); Map> shardResponses = new HashMap<>(); - MultiSearchResponse.Item item1 = new MultiSearchResponse.Item(emptySearchResponse(), null); - itemsPerIndex.put("index1", List.of(new Tuple<>(0, null), new Tuple<>(1, null), new Tuple<>(2, null))); - shardResponses.put("index1", new Tuple<>(new MultiSearchResponse(new MultiSearchResponse.Item[] { item1, item1, item1 }, 1), null)); - - Exception failure = new RuntimeException(); - itemsPerIndex.put("index2", List.of(new Tuple<>(3, null), new Tuple<>(4, null), new Tuple<>(5, null))); - shardResponses.put("index2", new Tuple<>(null, failure)); - - MultiSearchResponse.Item item2 = new MultiSearchResponse.Item(emptySearchResponse(), null); - itemsPerIndex.put("index3", List.of(new Tuple<>(6, null), new Tuple<>(7, null), new Tuple<>(8, null))); - shardResponses.put("index3", new Tuple<>(new MultiSearchResponse(new MultiSearchResponse.Item[] { item2, item2, item2 }, 1), null)); - - MultiSearchResponse result = Coordinator.reduce(9, itemsPerIndex, shardResponses); - assertThat(result.getResponses().length, equalTo(9)); - assertThat(result.getResponses()[0], sameInstance(item1)); - assertThat(result.getResponses()[1], sameInstance(item1)); - assertThat(result.getResponses()[2], sameInstance(item1)); - assertThat(result.getResponses()[3].getFailure(), sameInstance(failure)); - assertThat(result.getResponses()[4].getFailure(), sameInstance(failure)); - assertThat(result.getResponses()[5].getFailure(), sameInstance(failure)); - assertThat(result.getResponses()[6], sameInstance(item2)); - assertThat(result.getResponses()[7], sameInstance(item2)); - assertThat(result.getResponses()[8], sameInstance(item2)); + try { + MultiSearchResponse.Item item1 = new MultiSearchResponse.Item(emptySearchResponse(), null); + itemsPerIndex.put("index1", List.of(new Tuple<>(0, null), new Tuple<>(1, null), new Tuple<>(2, null))); + shardResponses.put( + "index1", + new Tuple<>(new MultiSearchResponse(new MultiSearchResponse.Item[] { item1, item1, item1 }, 1), null) + ); + + Exception failure = new RuntimeException(); + itemsPerIndex.put("index2", List.of(new Tuple<>(3, null), new Tuple<>(4, null), new Tuple<>(5, null))); + shardResponses.put("index2", new Tuple<>(null, failure)); + + MultiSearchResponse.Item item2 = new MultiSearchResponse.Item(emptySearchResponse(), null); + itemsPerIndex.put("index3", List.of(new Tuple<>(6, null), new Tuple<>(7, null), new Tuple<>(8, 
null))); + shardResponses.put( + "index3", + new Tuple<>(new MultiSearchResponse(new MultiSearchResponse.Item[] { item2, item2, item2 }, 1), null) + ); + + MultiSearchResponse result = Coordinator.reduce(9, itemsPerIndex, shardResponses); + try { + assertThat(result.getResponses().length, equalTo(9)); + assertThat(result.getResponses()[0], sameInstance(item1)); + assertThat(result.getResponses()[1], sameInstance(item1)); + assertThat(result.getResponses()[2], sameInstance(item1)); + assertThat(result.getResponses()[3].getFailure(), sameInstance(failure)); + assertThat(result.getResponses()[4].getFailure(), sameInstance(failure)); + assertThat(result.getResponses()[5].getFailure(), sameInstance(failure)); + assertThat(result.getResponses()[6], sameInstance(item2)); + assertThat(result.getResponses()[7], sameInstance(item2)); + assertThat(result.getResponses()[8], sameInstance(item2)); + } finally { + result.decRef(); + } + } finally { + for (Tuple value : shardResponses.values()) { + var res = value.v1(); + if (res != null) { + res.decRef(); + } + } + } } private static SearchResponse emptySearchResponse() { @@ -360,7 +394,12 @@ public void testAllSearchesExecuted() throws Exception { for (int i = 0; i < items.length; i++) { items[i] = new MultiSearchResponse.Item(emptySearchResponse(), null); } - responseConsumer.accept(new MultiSearchResponse(items, 0L), null); + var res = new MultiSearchResponse(items, 0L); + try { + responseConsumer.accept(res, null); + } finally { + res.decRef(); + } }), 5, 2, 20); try { diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchActionTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchActionTests.java index aa9a67bb0d283..c3ee4a19dd543 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchActionTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchActionTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.MultiSearchRequest; -import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.MatchAllQueryBuilder; @@ -24,6 +23,7 @@ import java.util.Collection; import java.util.List; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -58,17 +58,22 @@ public void testExecute() throws Exception { request.add(searchRequest); } - MultiSearchResponse result = client().execute( - EnrichShardMultiSearchAction.INSTANCE, - new EnrichShardMultiSearchAction.Request(request) - ).actionGet(); - assertThat(result.getResponses().length, equalTo(numSearches)); - for (int i = 0; i < numSearches; i++) { - assertThat(result.getResponses()[i].isFailure(), is(false)); - assertThat(result.getResponses()[i].getResponse().getHits().getTotalHits().value, equalTo(1L)); - assertThat(result.getResponses()[i].getResponse().getHits().getHits()[0].getSourceAsMap().size(), equalTo(1)); - assertThat(result.getResponses()[i].getResponse().getHits().getHits()[0].getSourceAsMap().get("key1"), equalTo("value1")); - } + assertResponse( + 
client().execute(EnrichShardMultiSearchAction.INSTANCE, new EnrichShardMultiSearchAction.Request(request)), + response -> { + assertThat(response.getResponses().length, equalTo(numSearches)); + for (int i = 0; i < numSearches; i++) { + assertThat(response.getResponses()[i].isFailure(), is(false)); + assertThat(response.getResponses()[i].getResponse().getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getResponses()[i].getResponse().getHits().getHits()[0].getSourceAsMap().size(), equalTo(1)); + assertThat( + response.getResponses()[i].getResponse().getHits().getHits()[0].getSourceAsMap().get("key1"), + equalTo("value1") + ); + } + } + ); + } public void testNonEnrichIndex() throws Exception { diff --git a/x-pack/plugin/ent-search/qa/rest/build.gradle b/x-pack/plugin/ent-search/qa/rest/build.gradle index cc202aa01c69e..c9b1557d74a9c 100644 --- a/x-pack/plugin/ent-search/qa/rest/build.gradle +++ b/x-pack/plugin/ent-search/qa/rest/build.gradle @@ -7,7 +7,7 @@ dependencies { restResources { restApi { - include '_common', 'bulk', 'cluster', 'nodes', 'indices', 'index', 'query_ruleset', 'search_application', 'xpack', 'security', 'search' + include '_common', 'bulk', 'cluster', 'connector', 'nodes', 'indices', 'index', 'query_ruleset', 'search_application', 'xpack', 'security', 'search' } } diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/300_connector_put.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/300_connector_put.yml new file mode 100644 index 0000000000000..464b64a2b24a3 --- /dev/null +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/300_connector_put.yml @@ -0,0 +1,103 @@ + +setup: + - skip: + version: " - 8.11.99" + reason: Introduced in 8.12.0 + +--- +'Create Connector': + - do: + connector.put: + connector_id: test-connector + body: + index_name: search-test + name: my-connector + language: pl + is_native: false + service_type: super-connector + + - match: { result: 'created' } + + - do: + connector.get: + connector_id: test-connector + + - match: { connector_id: test-connector } + - match: { index_name: search-test } + - match: { name: my-connector } + - match: { language: pl } + - match: { is_native: false } + - match: { service_type: super-connector } + +--- +'Create Connector - Default values are initialized correctly': + - do: + connector.put: + connector_id: test-connector-with-defaults + body: + index_name: search-test + + - match: { result: 'created' } + + - do: + connector.get: + connector_id: test-connector-with-defaults + + - match: { connector_id: test-connector-with-defaults } + - match: { index_name: search-test } + - match: { is_native: false } + - match: { sync_now: false } + - match: { status: created } + - match: { configuration: {} } + - match: { custom_scheduling: {} } + - match: { filtering.0.domain: DEFAULT } + +--- +'Create Connector - Native connector is initialized correctly': + - do: + connector.put: + connector_id: test-connector-native + body: + index_name: search-test + is_native: true + + - match: { result: 'created' } + + - do: + connector.get: + connector_id: test-connector-native + + - match: { connector_id: test-connector-native } + - match: { index_name: search-test } + - match: { is_native: true } + - match: { sync_now: false } + - match: { status: needs_configuration } + - match: { configuration: {} } + - match: { custom_scheduling: {} } + - match: { filtering.0.domain: DEFAULT } + +--- 
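The re-creation case below expects `created` from the first PUT and `updated` from a second PUT to the same `connector_id`. Assuming the endpoint persists the connector as a document and surfaces the index operation's outcome, a common pattern for Elasticsearch PUT APIs but not something this diff confirms, the mapping is a one-liner:

```java
import org.elasticsearch.action.DocWriteResponse;

final class PutResultSketch {
    // DocWriteResponse.Result.CREATED / UPDATED lower-case to the strings the
    // YAML `result` assertions match on.
    static String restResult(DocWriteResponse response) {
        return response.getResult().getLowercase();
    }
}
```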
+'Create Connector - Resource already exists': + - do: + connector.put: + connector_id: test-connector-recreating + body: + index_name: search-test + name: my-connector + language: pl + is_native: false + service_type: super-connector + + - match: { result: 'created' } + + - do: + connector.put: + connector_id: test-connector-recreating + body: + index_name: search-test + name: my-connector + language: pl + is_native: false + service_type: super-connector + + - match: { result: 'updated' } diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/310_connector_list.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/310_connector_list.yml new file mode 100644 index 0000000000000..36cd1c283f7e8 --- /dev/null +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/310_connector_list.yml @@ -0,0 +1,108 @@ +setup: + - skip: + version: " - 8.11.99" + reason: Introduced in 8.12.0 + + + - do: + connector.put: + connector_id: connector-a + body: + index_name: search-1-test + name: my-connector + language: pl + is_native: false + service_type: super-connector + - do: + connector.put: + connector_id: connector-c + body: + index_name: search-3-test + name: my-connector + language: nl + is_native: false + service_type: super-connector + - do: + connector.put: + connector_id: connector-b + body: + index_name: search-2-test + name: my-connector + language: en + is_native: true + service_type: super-connector + +--- +"List Connectors": + - do: + connector.list: { } + + - match: { count: 3 } + + # Alphabetical order by connector_id for results + - match: { results.0.connector_id: "connector-a" } + - match: { results.0.index_name: "search-1-test" } + - match: { results.0.language: "pl" } + + - match: { results.1.connector_id: "connector-b" } + - match: { results.1.index_name: "search-2-test" } + - match: { results.1.language: "en" } + + - match: { results.2.connector_id: "connector-c" } + - match: { results.2.index_name: "search-3-test" } + - match: { results.2.language: "nl" } + + +--- +"List Connectors - with from": + - do: + connector.list: + from: 1 + + - match: { count: 3 } + + # Alphabetical order by connector_id for results + - match: { results.0.connector_id: "connector-b" } + - match: { results.0.index_name: "search-2-test" } + - match: { results.0.language: "en" } + + - match: { results.1.connector_id: "connector-c" } + - match: { results.1.index_name: "search-3-test" } + - match: { results.1.language: "nl" } + +--- +"List Connector- with size": + - do: + connector.list: + size: 2 + + - match: { count: 3 } + + # Alphabetical order by connector_id for results + - match: { results.0.connector_id: "connector-a" } + - match: { results.0.index_name: "search-1-test" } + - match: { results.0.language: "pl" } + + - match: { results.1.connector_id: "connector-b" } + - match: { results.1.index_name: "search-2-test" } + - match: { results.1.language: "en" } + +--- +"List Connector - empty": + - do: + connector.delete: + connector_id: connector-a + + - do: + connector.delete: + connector_id: connector-b + + - do: + connector.delete: + connector_id: connector-c + + - do: + connector.list: { } + + - match: { count: 0 } + diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/320_connector_delete.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/320_connector_delete.yml new file mode 100644 index 
0000000000000..275936084144b --- /dev/null +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/320_connector_delete.yml @@ -0,0 +1,34 @@ +setup: + - skip: + version: " - 8.11.99" + reason: Introduced in 8.12.0 + + - do: + connector.put: + connector_id: test-connector-to-delete + body: + index_name: search-1-test + name: my-connector + language: pl + is_native: false + service_type: super-connector + +--- +"Delete Connector": + - do: + connector.delete: + connector_id: test-connector-to-delete + + - match: { acknowledged: true } + + - do: + catch: "missing" + connector.get: + connector_id: test-connector-to-delete + +--- +"Delete Connector - Connector does not exist": + - do: + catch: "missing" + connector.delete: + connector_id: test-nonexistent-connector diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/400_connector_sync_job_post.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/400_connector_sync_job_post.yml new file mode 100644 index 0000000000000..055221b917cb1 --- /dev/null +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/400_connector_sync_job_post.yml @@ -0,0 +1,74 @@ +setup: + - skip: + version: " - 8.11.99" + reason: Introduced in 8.12.0 + - do: + connector.put: + connector_id: test-connector + body: + index_name: search-test + name: my-connector + language: de + is_native: false + service_type: super-connector + +--- +'Create connector sync job': + - do: + connector_sync_job.post: + body: + id: test-connector + job_type: full + trigger_method: on_demand + - set: { id: id } + - match: { id: $id } + +--- +'Create connector sync job with missing job type': + - do: + connector_sync_job.post: + body: + id: test-connector + trigger_method: on_demand + - set: { id: id } + - match: { id: $id } + +--- +'Create connector sync job with missing trigger method': + - do: + connector_sync_job.post: + body: + id: test-connector + job_type: full + - set: { id: id } + - match: { id: $id } + +--- +'Create connector sync job with non-existing connector id': + - do: + connector_sync_job.post: + body: + id: non-existing-id + job_type: full + trigger_method: on_demand + catch: missing + +--- +'Create connector sync job with invalid job type': + - do: + connector_sync_job.post: + body: + id: test-connector + job_type: invalid_job_type + trigger_method: on_demand + catch: bad_request + +--- +'Create connector sync job with invalid trigger method': + - do: + connector_sync_job.post: + body: + id: test-connector + job_type: full + trigger_method: invalid_trigger_method + catch: bad_request diff --git a/x-pack/plugin/ent-search/src/main/java/module-info.java b/x-pack/plugin/ent-search/src/main/java/module-info.java index f46bd4f1b5285..d8cbceda4d8a3 100644 --- a/x-pack/plugin/ent-search/src/main/java/module-info.java +++ b/x-pack/plugin/ent-search/src/main/java/module-info.java @@ -33,6 +33,10 @@ exports org.elasticsearch.xpack.application.search.action; exports org.elasticsearch.xpack.application.rules; exports org.elasticsearch.xpack.application.rules.action; + exports org.elasticsearch.xpack.application.connector; + exports org.elasticsearch.xpack.application.connector.action; + exports org.elasticsearch.xpack.application.connector.syncjob; + exports org.elasticsearch.xpack.application.connector.syncjob.action; provides org.elasticsearch.features.FeatureSpecification with 
org.elasticsearch.xpack.application.EnterpriseSearchFeatures; } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java index 7836f798af70f..819c345392c65 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java @@ -40,7 +40,23 @@ import org.elasticsearch.xpack.application.analytics.action.TransportPostAnalyticsEventAction; import org.elasticsearch.xpack.application.analytics.action.TransportPutAnalyticsCollectionAction; import org.elasticsearch.xpack.application.analytics.ingest.AnalyticsEventIngestConfig; +import org.elasticsearch.xpack.application.connector.ConnectorAPIFeature; import org.elasticsearch.xpack.application.connector.ConnectorTemplateRegistry; +import org.elasticsearch.xpack.application.connector.action.DeleteConnectorAction; +import org.elasticsearch.xpack.application.connector.action.GetConnectorAction; +import org.elasticsearch.xpack.application.connector.action.ListConnectorAction; +import org.elasticsearch.xpack.application.connector.action.PutConnectorAction; +import org.elasticsearch.xpack.application.connector.action.RestDeleteConnectorAction; +import org.elasticsearch.xpack.application.connector.action.RestGetConnectorAction; +import org.elasticsearch.xpack.application.connector.action.RestListConnectorAction; +import org.elasticsearch.xpack.application.connector.action.RestPutConnectorAction; +import org.elasticsearch.xpack.application.connector.action.TransportDeleteConnectorAction; +import org.elasticsearch.xpack.application.connector.action.TransportGetConnectorAction; +import org.elasticsearch.xpack.application.connector.action.TransportListConnectorAction; +import org.elasticsearch.xpack.application.connector.action.TransportPutConnectorAction; +import org.elasticsearch.xpack.application.connector.syncjob.action.PostConnectorSyncJobAction; +import org.elasticsearch.xpack.application.connector.syncjob.action.RestPostConnectorSyncJobAction; +import org.elasticsearch.xpack.application.connector.syncjob.action.TransportPostConnectorSyncJobAction; import org.elasticsearch.xpack.application.rules.QueryRulesConfig; import org.elasticsearch.xpack.application.rules.QueryRulesIndexService; import org.elasticsearch.xpack.application.rules.RuleQueryBuilder; @@ -80,6 +96,7 @@ import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -98,6 +115,8 @@ public class EnterpriseSearch extends Plugin implements ActionPlugin, SystemInde public static final String QUERY_RULES_API_ENDPOINT = "_query_rules"; + public static final String CONNECTOR_API_ENDPOINT = "_connector"; + private static final Logger logger = LogManager.getLogger(EnterpriseSearch.class); public static final String FEATURE_NAME = "ent_search"; @@ -120,29 +139,47 @@ protected XPackLicenseState getLicenseState() { return List.of(usageAction, infoAction); } - return List.of( - // Behavioral Analytics - new ActionHandler<>(PutAnalyticsCollectionAction.INSTANCE, TransportPutAnalyticsCollectionAction.class), - new ActionHandler<>(GetAnalyticsCollectionAction.INSTANCE, TransportGetAnalyticsCollectionAction.class), - new 
ActionHandler<>(DeleteAnalyticsCollectionAction.INSTANCE, TransportDeleteAnalyticsCollectionAction.class), - new ActionHandler<>(PostAnalyticsEventAction.INSTANCE, TransportPostAnalyticsEventAction.class), - - // Search Applications - new ActionHandler<>(DeleteSearchApplicationAction.INSTANCE, TransportDeleteSearchApplicationAction.class), - new ActionHandler<>(GetSearchApplicationAction.INSTANCE, TransportGetSearchApplicationAction.class), - new ActionHandler<>(ListSearchApplicationAction.INSTANCE, TransportListSearchApplicationAction.class), - new ActionHandler<>(PutSearchApplicationAction.INSTANCE, TransportPutSearchApplicationAction.class), - new ActionHandler<>(QuerySearchApplicationAction.INSTANCE, TransportQuerySearchApplicationAction.class), - new ActionHandler<>(RenderSearchApplicationQueryAction.INSTANCE, TransportRenderSearchApplicationQueryAction.class), - - // Query rules - new ActionHandler<>(DeleteQueryRulesetAction.INSTANCE, TransportDeleteQueryRulesetAction.class), - new ActionHandler<>(GetQueryRulesetAction.INSTANCE, TransportGetQueryRulesetAction.class), - new ActionHandler<>(ListQueryRulesetsAction.INSTANCE, TransportListQueryRulesetsAction.class), - new ActionHandler<>(PutQueryRulesetAction.INSTANCE, TransportPutQueryRulesetAction.class), - usageAction, - infoAction + List> actionHandlers = new ArrayList<>( + List.of( + // Behavioral Analytics + new ActionHandler<>(PutAnalyticsCollectionAction.INSTANCE, TransportPutAnalyticsCollectionAction.class), + new ActionHandler<>(GetAnalyticsCollectionAction.INSTANCE, TransportGetAnalyticsCollectionAction.class), + new ActionHandler<>(DeleteAnalyticsCollectionAction.INSTANCE, TransportDeleteAnalyticsCollectionAction.class), + new ActionHandler<>(PostAnalyticsEventAction.INSTANCE, TransportPostAnalyticsEventAction.class), + + // Search Applications + new ActionHandler<>(DeleteSearchApplicationAction.INSTANCE, TransportDeleteSearchApplicationAction.class), + new ActionHandler<>(GetSearchApplicationAction.INSTANCE, TransportGetSearchApplicationAction.class), + new ActionHandler<>(ListSearchApplicationAction.INSTANCE, TransportListSearchApplicationAction.class), + new ActionHandler<>(PutSearchApplicationAction.INSTANCE, TransportPutSearchApplicationAction.class), + new ActionHandler<>(QuerySearchApplicationAction.INSTANCE, TransportQuerySearchApplicationAction.class), + new ActionHandler<>(RenderSearchApplicationQueryAction.INSTANCE, TransportRenderSearchApplicationQueryAction.class), + + // Query rules + new ActionHandler<>(DeleteQueryRulesetAction.INSTANCE, TransportDeleteQueryRulesetAction.class), + new ActionHandler<>(GetQueryRulesetAction.INSTANCE, TransportGetQueryRulesetAction.class), + new ActionHandler<>(ListQueryRulesetsAction.INSTANCE, TransportListQueryRulesetsAction.class), + new ActionHandler<>(PutQueryRulesetAction.INSTANCE, TransportPutQueryRulesetAction.class), + + usageAction, + infoAction + ) ); + + // Connectors + if (ConnectorAPIFeature.isEnabled()) { + actionHandlers.addAll( + List.of( + new ActionHandler<>(DeleteConnectorAction.INSTANCE, TransportDeleteConnectorAction.class), + new ActionHandler<>(GetConnectorAction.INSTANCE, TransportGetConnectorAction.class), + new ActionHandler<>(ListConnectorAction.INSTANCE, TransportListConnectorAction.class), + new ActionHandler<>(PutConnectorAction.INSTANCE, TransportPutConnectorAction.class) + ) + ); + actionHandlers.add(new ActionHandler<>(PostConnectorSyncJobAction.INSTANCE, TransportPostConnectorSyncJobAction.class)); + } + + return 
Collections.unmodifiableList(actionHandlers); } @Override @@ -160,27 +197,44 @@ public List getRestHandlers( return Collections.emptyList(); } - return List.of( - // Behavioral Analytics - new RestPutAnalyticsCollectionAction(getLicenseState()), - new RestGetAnalyticsCollectionAction(getLicenseState()), - new RestDeleteAnalyticsCollectionAction(getLicenseState()), - new RestPostAnalyticsEventAction(getLicenseState()), - - // Search Applications - new RestDeleteSearchApplicationAction(getLicenseState()), - new RestGetSearchApplicationAction(getLicenseState()), - new RestListSearchApplicationAction(getLicenseState()), - new RestPutSearchApplicationAction(getLicenseState()), - new RestQuerySearchApplicationAction(getLicenseState()), - new RestRenderSearchApplicationQueryAction(getLicenseState()), - - // Query rules - new RestDeleteQueryRulesetAction(getLicenseState()), - new RestGetQueryRulesetAction(getLicenseState()), - new RestListQueryRulesetsAction(getLicenseState()), - new RestPutQueryRulesetAction(getLicenseState()) + List restHandlers = new ArrayList<>( + List.of( + // Behavioral Analytics + new RestPutAnalyticsCollectionAction(getLicenseState()), + new RestGetAnalyticsCollectionAction(getLicenseState()), + new RestDeleteAnalyticsCollectionAction(getLicenseState()), + new RestPostAnalyticsEventAction(getLicenseState()), + + // Search Applications + new RestDeleteSearchApplicationAction(getLicenseState()), + new RestGetSearchApplicationAction(getLicenseState()), + new RestListSearchApplicationAction(getLicenseState()), + new RestPutSearchApplicationAction(getLicenseState()), + new RestQuerySearchApplicationAction(getLicenseState()), + new RestRenderSearchApplicationQueryAction(getLicenseState()), + + // Query rules + new RestDeleteQueryRulesetAction(getLicenseState()), + new RestGetQueryRulesetAction(getLicenseState()), + new RestListQueryRulesetsAction(getLicenseState()), + new RestPutQueryRulesetAction(getLicenseState()) + ) ); + + // Connectors + if (ConnectorAPIFeature.isEnabled()) { + restHandlers.addAll( + List.of( + new RestDeleteConnectorAction(), + new RestGetConnectorAction(), + new RestListConnectorAction(), + new RestPutConnectorAction() + ) + ); + restHandlers.add(new RestPostConnectorSyncJobAction()); + } + + return Collections.unmodifiableList(restHandlers); } @Override @@ -192,6 +246,7 @@ public Collection createComponents(PluginServices services) { // Behavioral analytics components final AnalyticsTemplateRegistry analyticsTemplateRegistry = new AnalyticsTemplateRegistry( services.clusterService(), + services.featureService(), services.threadPool(), services.client(), services.xContentRegistry() diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java index 3afdeb4897992..81e072479d402 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java @@ -10,6 +10,7 @@ import org.elasticsearch.Version; import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.xpack.application.analytics.AnalyticsTemplateRegistry; import org.elasticsearch.xpack.application.connector.ConnectorTemplateRegistry; import java.util.Map; @@ -17,6 +18,11 @@ public class EnterpriseSearchFeatures 
implements FeatureSpecification { @Override public Map getHistoricalFeatures() { - return Map.of(ConnectorTemplateRegistry.CONNECTOR_TEMPLATES_FEATURE, Version.V_8_10_0); + return Map.of( + ConnectorTemplateRegistry.CONNECTOR_TEMPLATES_FEATURE, + Version.V_8_10_0, + AnalyticsTemplateRegistry.ANALYTICS_TEMPLATE_FEATURE, + Version.V_8_12_0 + ); } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistry.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistry.java index 7472063e92e11..a1446606a21af 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistry.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistry.java @@ -6,13 +6,14 @@ */ package org.elasticsearch.xpack.application.analytics; -import org.elasticsearch.Version; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParserConfiguration; @@ -35,8 +36,7 @@ public class AnalyticsTemplateRegistry extends IndexTemplateRegistry { - // This registry requires all nodes to be at least 8.12.0 - static final Version MIN_NODE_VERSION = Version.V_8_12_0; + public static final NodeFeature ANALYTICS_TEMPLATE_FEATURE = new NodeFeature("behavioral_analytics.templates"); // This number must be incremented when we make changes to built-in templates. static final int REGISTRY_VERSION = 3; @@ -103,13 +103,17 @@ protected List getIngestPipelines() { ) ); + private final FeatureService featureService; + public AnalyticsTemplateRegistry( ClusterService clusterService, + FeatureService featureService, ThreadPool threadPool, Client client, NamedXContentRegistry xContentRegistry ) { super(Settings.EMPTY, clusterService, threadPool, client, xContentRegistry); + this.featureService = featureService; } @Override @@ -140,8 +144,6 @@ protected boolean requiresMasterNode() { @Override protected boolean isClusterReady(ClusterChangedEvent event) { - // Ensure templates are installed only once all nodes are updated to 8.8.0. - Version minNodeVersion = event.state().nodes().getMinNodeVersion(); - return minNodeVersion.onOrAfter(MIN_NODE_VERSION); + return featureService.clusterHasFeature(event.state(), ANALYTICS_TEMPLATE_FEATURE); } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/Connector.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/Connector.java new file mode 100644 index 0000000000000..bdee310612e18 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/Connector.java @@ -0,0 +1,674 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.time.Instant; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * Represents a Connector in the Elasticsearch ecosystem. Connectors are used for integrating + * and synchronizing external data sources with Elasticsearch. Each Connector instance encapsulates + * various settings and state information, including: + *
<ul>
+ *     <li>A unique identifier for distinguishing different connectors.</li>
+ *     <li>API key for authenticating with Elasticsearch, ensuring secure access.</li>
+ *     <li>A configuration mapping which holds specific settings and parameters for the connector's operation.</li>
+ *     <li>A {@link ConnectorCustomSchedule} object that defines custom scheduling.</li>
+ *     <li>A description providing an overview or purpose of the connector.</li>
+ *     <li>An error string capturing the latest error encountered during the connector's operation, if any.</li>
+ *     <li>A {@link ConnectorFeatures} object encapsulating the set of features enabled for this connector.</li>
+ *     <li>A list of {@link ConnectorFiltering} objects for applying filtering rules to the data processed by the connector.</li>
+ *     <li>The name of the Elasticsearch index where the synchronized data is stored or managed.</li>
+ *     <li>A boolean flag 'isNative' indicating whether the connector is a native Elasticsearch connector.</li>
+ *     <li>The language associated with the connector.</li>
+ *     <li>A {@link ConnectorSyncInfo} object containing synchronization state and history information.</li>
+ *     <li>The name of the connector.</li>
+ *     <li>A {@link ConnectorIngestPipeline} object specifying the data ingestion pipeline configuration.</li>
+ *     <li>A {@link ConnectorScheduling} object with the scheduling configuration to trigger data sync.</li>
+ *     <li>The type of connector.</li>
+ *     <li>A {@link ConnectorStatus} indicating the current status of the connector.</li>
+ *     <li>A sync cursor, used for incremental syncs.</li>
+ *     <li>A boolean flag 'syncNow', which, when set, triggers an immediate synchronization operation.</li>
+ * </ul>
    + */ +public class Connector implements NamedWriteable, ToXContentObject { + + public static final String NAME = Connector.class.getName().toUpperCase(Locale.ROOT); + + private final String connectorId; + @Nullable + private final String apiKeyId; + @Nullable + private final Map configuration; // TODO: add explicit types + @Nullable + private final Map customScheduling; + @Nullable + private final String description; + @Nullable + private final String error; + @Nullable + private final ConnectorFeatures features; + @Nullable + private final List filtering; + @Nullable + private final String indexName; + + private final boolean isNative; + @Nullable + private final String language; + @Nullable + private final ConnectorSyncInfo syncInfo; + @Nullable + private final String name; + @Nullable + private final ConnectorIngestPipeline pipeline; + @Nullable + private final ConnectorScheduling scheduling; + @Nullable + private final String serviceType; + private final ConnectorStatus status; + @Nullable + private final Object syncCursor; + private final boolean syncNow; + + /** + * Constructor for Connector. + * + * @param connectorId Unique identifier for the connector. + * @param apiKeyId API key ID used for authentication/authorization against ES. + * @param configuration Configuration settings for the connector. + * @param customScheduling Custom scheduling settings for the connector. + * @param description Description of the connector. + * @param error Information about the last error encountered by the connector, if any. + * @param features Features enabled for the connector. + * @param filtering Filtering settings applied by the connector. + * @param indexName Name of the index associated with the connector. + * @param isNative Flag indicating whether the connector is a native type. + * @param language The language supported by the connector. + * @param syncInfo Information about the synchronization state of the connector. + * @param name Name of the connector. + * @param pipeline Ingest pipeline configuration. + * @param scheduling Scheduling settings for regular data synchronization. + * @param serviceType Type of service the connector integrates with. + * @param status Current status of the connector. + * @param syncCursor Position or state indicating the current point of synchronization. + * @param syncNow Flag indicating whether an immediate synchronization is requested. 
+ */ + private Connector( + String connectorId, + String apiKeyId, + Map configuration, + Map customScheduling, + String description, + String error, + ConnectorFeatures features, + List filtering, + String indexName, + boolean isNative, + String language, + ConnectorSyncInfo syncInfo, + String name, + ConnectorIngestPipeline pipeline, + ConnectorScheduling scheduling, + String serviceType, + ConnectorStatus status, + Object syncCursor, + boolean syncNow + ) { + this.connectorId = Objects.requireNonNull(connectorId, "connectorId cannot be null"); + this.apiKeyId = apiKeyId; + this.configuration = configuration; + this.customScheduling = customScheduling; + this.description = description; + this.error = error; + this.features = features; + this.filtering = filtering; + this.indexName = indexName; + this.isNative = isNative; + this.language = language; + this.syncInfo = syncInfo; + this.name = name; + this.pipeline = pipeline; + this.scheduling = scheduling; + this.serviceType = serviceType; + this.status = Objects.requireNonNull(status, "connector status cannot be null"); + this.syncCursor = syncCursor; + this.syncNow = syncNow; + } + + public Connector(StreamInput in) throws IOException { + this.connectorId = in.readString(); + this.apiKeyId = in.readOptionalString(); + this.configuration = in.readMap(StreamInput::readGenericValue); + this.customScheduling = in.readMap(ConnectorCustomSchedule::new); + this.description = in.readOptionalString(); + this.error = in.readOptionalString(); + this.features = in.readOptionalWriteable(ConnectorFeatures::new); + this.filtering = in.readOptionalCollectionAsList(ConnectorFiltering::new); + this.indexName = in.readOptionalString(); + this.isNative = in.readBoolean(); + this.language = in.readOptionalString(); + this.syncInfo = in.readOptionalWriteable(ConnectorSyncInfo::new); + this.name = in.readOptionalString(); + this.pipeline = in.readOptionalWriteable(ConnectorIngestPipeline::new); + this.scheduling = in.readOptionalWriteable(ConnectorScheduling::new); + this.serviceType = in.readOptionalString(); + this.status = in.readEnum(ConnectorStatus.class); + this.syncCursor = in.readGenericValue(); + this.syncNow = in.readBoolean(); + } + + public static final ParseField ID_FIELD = new ParseField("connector_id"); + static final ParseField API_KEY_ID_FIELD = new ParseField("api_key_id"); + public static final ParseField CONFIGURATION_FIELD = new ParseField("configuration"); + static final ParseField CUSTOM_SCHEDULING_FIELD = new ParseField("custom_scheduling"); + static final ParseField DESCRIPTION_FIELD = new ParseField("description"); + static final ParseField ERROR_FIELD = new ParseField("error"); + static final ParseField FEATURES_FIELD = new ParseField("features"); + public static final ParseField FILTERING_FIELD = new ParseField("filtering"); + public static final ParseField INDEX_NAME_FIELD = new ParseField("index_name"); + static final ParseField IS_NATIVE_FIELD = new ParseField("is_native"); + public static final ParseField LANGUAGE_FIELD = new ParseField("language"); + static final ParseField NAME_FIELD = new ParseField("name"); + public static final ParseField PIPELINE_FIELD = new ParseField("pipeline"); + static final ParseField SCHEDULING_FIELD = new ParseField("scheduling"); + public static final ParseField SERVICE_TYPE_FIELD = new ParseField("service_type"); + static final ParseField STATUS_FIELD = new ParseField("status"); + static final ParseField SYNC_CURSOR_FIELD = new ParseField("sync_cursor"); + static final ParseField SYNC_NOW_FIELD = 
new ParseField("sync_now"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("connector", true, (args) -> { + int i = 0; + return new Builder().setConnectorId((String) args[i++]) + .setApiKeyId((String) args[i++]) + .setConfiguration((Map) args[i++]) + .setCustomScheduling((Map) args[i++]) + .setDescription((String) args[i++]) + .setError((String) args[i++]) + .setFeatures((ConnectorFeatures) args[i++]) + .setFiltering((List) args[i++]) + .setIndexName((String) args[i++]) + .setIsNative((Boolean) args[i++]) + .setLanguage((String) args[i++]) + .setSyncInfo( + new ConnectorSyncInfo.Builder().setLastAccessControlSyncError((String) args[i++]) + .setLastAccessControlSyncScheduledAt((Instant) args[i++]) + .setLastAccessControlSyncStatus((ConnectorSyncStatus) args[i++]) + .setLastDeletedDocumentCount((Long) args[i++]) + .setLastIncrementalSyncScheduledAt((Instant) args[i++]) + .setLastIndexedDocumentCount((Long) args[i++]) + .setLastSeen((Instant) args[i++]) + .setLastSyncError((String) args[i++]) + .setLastSyncScheduledAt((Instant) args[i++]) + .setLastSyncStatus((ConnectorSyncStatus) args[i++]) + .setLastSynced((Instant) args[i++]) + .build() + ) + .setName((String) args[i++]) + .setPipeline((ConnectorIngestPipeline) args[i++]) + .setScheduling((ConnectorScheduling) args[i++]) + .setServiceType((String) args[i++]) + .setStatus((ConnectorStatus) args[i++]) + .setSyncCursor(args[i++]) + .setSyncNow((Boolean) args[i]) + .build(); + }); + + static { + PARSER.declareString(constructorArg(), ID_FIELD); + PARSER.declareString(optionalConstructorArg(), API_KEY_ID_FIELD); + PARSER.declareField( + optionalConstructorArg(), + (parser, context) -> parser.map(), + CONFIGURATION_FIELD, + ObjectParser.ValueType.OBJECT + ); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> p.map(HashMap::new, ConnectorCustomSchedule::fromXContent), + CUSTOM_SCHEDULING_FIELD, + ObjectParser.ValueType.OBJECT + ); + PARSER.declareString(optionalConstructorArg(), DESCRIPTION_FIELD); + PARSER.declareString(optionalConstructorArg(), ERROR_FIELD); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> ConnectorFeatures.fromXContent(p), + FEATURES_FIELD, + ObjectParser.ValueType.OBJECT + ); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ConnectorFiltering.fromXContent(p), FILTERING_FIELD); + PARSER.declareString(optionalConstructorArg(), INDEX_NAME_FIELD); + PARSER.declareBoolean(optionalConstructorArg(), IS_NATIVE_FIELD); + PARSER.declareString(optionalConstructorArg(), LANGUAGE_FIELD); + + PARSER.declareString(optionalConstructorArg(), ConnectorSyncInfo.LAST_ACCESS_CONTROL_SYNC_ERROR); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> Instant.parse(p.text()), + ConnectorSyncInfo.LAST_ACCESS_CONTROL_SYNC_SCHEDULED_AT_FIELD, + ObjectParser.ValueType.STRING + ); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> ConnectorSyncStatus.connectorSyncStatus(p.text()), + ConnectorSyncInfo.LAST_ACCESS_CONTROL_SYNC_STATUS_FIELD, + ObjectParser.ValueType.STRING + ); + PARSER.declareLong(optionalConstructorArg(), ConnectorSyncInfo.LAST_DELETED_DOCUMENT_COUNT_FIELD); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> Instant.parse(p.text()), + ConnectorSyncInfo.LAST_INCREMENTAL_SYNC_SCHEDULED_AT_FIELD, + ObjectParser.ValueType.STRING + ); + PARSER.declareLong(optionalConstructorArg(), ConnectorSyncInfo.LAST_INDEXED_DOCUMENT_COUNT_FIELD); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> 
Instant.parse(p.text()), + ConnectorSyncInfo.LAST_SEEN_FIELD, + ObjectParser.ValueType.STRING + ); + PARSER.declareString(optionalConstructorArg(), ConnectorSyncInfo.LAST_SYNC_ERROR_FIELD); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> Instant.parse(p.text()), + ConnectorSyncInfo.LAST_SYNC_SCHEDULED_AT_FIELD, + ObjectParser.ValueType.STRING + ); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> ConnectorSyncStatus.connectorSyncStatus(p.text()), + ConnectorSyncInfo.LAST_SYNC_STATUS_FIELD, + ObjectParser.ValueType.STRING + ); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> Instant.parse(p.text()), + ConnectorSyncInfo.LAST_SYNCED_FIELD, + ObjectParser.ValueType.STRING + ); + + PARSER.declareString(optionalConstructorArg(), NAME_FIELD); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> ConnectorIngestPipeline.fromXContent(p), + PIPELINE_FIELD, + ObjectParser.ValueType.OBJECT + ); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> ConnectorScheduling.fromXContent(p), + SCHEDULING_FIELD, + ObjectParser.ValueType.OBJECT + ); + PARSER.declareString(optionalConstructorArg(), SERVICE_TYPE_FIELD); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> ConnectorStatus.connectorStatus(p.text()), + STATUS_FIELD, + ObjectParser.ValueType.STRING + ); + PARSER.declareField( + optionalConstructorArg(), + (parser, context) -> parser.map(), + SYNC_CURSOR_FIELD, + ObjectParser.ValueType.OBJECT_OR_NULL + ); + PARSER.declareBoolean(optionalConstructorArg(), SYNC_NOW_FIELD); + } + + public static Connector fromXContentBytes(BytesReference source, XContentType xContentType) { + try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { + return Connector.fromXContent(parser); + } catch (IOException e) { + throw new ElasticsearchParseException("Failed to parse a connector document.", e); + } + } + + public static Connector fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(ID_FIELD.getPreferredName(), connectorId); + if (apiKeyId != null) { + builder.field(API_KEY_ID_FIELD.getPreferredName(), apiKeyId); + } + if (configuration != null) { + builder.field(CONFIGURATION_FIELD.getPreferredName(), configuration); + } + if (customScheduling != null) { + builder.field(CUSTOM_SCHEDULING_FIELD.getPreferredName(), customScheduling); + } + if (description != null) { + builder.field(DESCRIPTION_FIELD.getPreferredName(), description); + } + if (error != null) { + builder.field(ERROR_FIELD.getPreferredName(), error); + } + if (features != null) { + builder.field(FEATURES_FIELD.getPreferredName(), features); + } + if (filtering != null) { + builder.xContentList(FILTERING_FIELD.getPreferredName(), filtering); + } + if (indexName != null) { + builder.field(INDEX_NAME_FIELD.getPreferredName(), indexName); + } + builder.field(IS_NATIVE_FIELD.getPreferredName(), isNative); + if (language != null) { + builder.field(LANGUAGE_FIELD.getPreferredName(), language); + } + if (syncInfo != null) { + syncInfo.toXContent(builder, params); + } + if (name != null) { + builder.field(NAME_FIELD.getPreferredName(), name); + } + if (pipeline != null) { + builder.field(PIPELINE_FIELD.getPreferredName(), pipeline); + } + if (scheduling != null) { + builder.field(SCHEDULING_FIELD.getPreferredName(), scheduling); + } + if 
(serviceType != null) { + builder.field(SERVICE_TYPE_FIELD.getPreferredName(), serviceType); + } + if (syncCursor != null) { + builder.field(SYNC_CURSOR_FIELD.getPreferredName(), syncCursor); + } + builder.field(STATUS_FIELD.getPreferredName(), status.toString()); + builder.field(SYNC_NOW_FIELD.getPreferredName(), syncNow); + + } + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(connectorId); + out.writeOptionalString(apiKeyId); + out.writeMap(configuration, StreamOutput::writeGenericValue); + out.writeMap(customScheduling, StreamOutput::writeWriteable); + out.writeOptionalString(description); + out.writeOptionalString(error); + out.writeOptionalWriteable(features); + out.writeOptionalCollection(filtering); + out.writeOptionalString(indexName); + out.writeBoolean(isNative); + out.writeOptionalString(language); + out.writeOptionalWriteable(syncInfo); + out.writeOptionalString(name); + out.writeOptionalWriteable(pipeline); + out.writeOptionalWriteable(scheduling); + out.writeOptionalString(serviceType); + out.writeEnum(status); + out.writeGenericValue(syncCursor); + out.writeBoolean(syncNow); + } + + public String getConnectorId() { + return connectorId; + } + + public List getFiltering() { + return filtering; + } + + public String getIndexName() { + return indexName; + } + + public String getLanguage() { + return language; + } + + public ConnectorIngestPipeline getPipeline() { + return pipeline; + } + + public String getServiceType() { + return serviceType; + } + + public Map getConfiguration() { + return configuration; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Connector connector = (Connector) o; + return isNative == connector.isNative + && syncNow == connector.syncNow + && Objects.equals(connectorId, connector.connectorId) + && Objects.equals(apiKeyId, connector.apiKeyId) + && Objects.equals(configuration, connector.configuration) + && Objects.equals(customScheduling, connector.customScheduling) + && Objects.equals(description, connector.description) + && Objects.equals(error, connector.error) + && Objects.equals(features, connector.features) + && Objects.equals(filtering, connector.filtering) + && Objects.equals(indexName, connector.indexName) + && Objects.equals(language, connector.language) + && Objects.equals(syncInfo, connector.syncInfo) + && Objects.equals(name, connector.name) + && Objects.equals(pipeline, connector.pipeline) + && Objects.equals(scheduling, connector.scheduling) + && Objects.equals(serviceType, connector.serviceType) + && status == connector.status + && Objects.equals(syncCursor, connector.syncCursor); + } + + @Override + public int hashCode() { + return Objects.hash( + connectorId, + apiKeyId, + configuration, + customScheduling, + description, + error, + features, + filtering, + indexName, + isNative, + language, + syncInfo, + name, + pipeline, + scheduling, + serviceType, + status, + syncCursor, + syncNow + ); + } + + @Override + public String getWriteableName() { + return NAME; + } + + public static class Builder { + + private String connectorId; + private String apiKeyId; + private Map configuration = Collections.emptyMap(); + private Map customScheduling = Collections.emptyMap(); + private String description; + private String error; + private ConnectorFeatures features; + private List filtering = List.of(ConnectorFiltering.getDefaultConnectorFilteringConfig()); + 
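The Builder's field initializers, starting above and continuing below, supply exactly the defaults the REST tests assert on: a default filtering configuration, default scheduling, status `CREATED`, and `isNative`/`syncNow` off. A hypothetical usage mirroring the 'Default values are initialized correctly' YAML case:

```java
import org.elasticsearch.xpack.application.connector.Connector;

final class BuilderDefaultsSketch {
    // Only the id and index name are supplied (both values hypothetical);
    // everything else falls back to the Builder's initializers.
    static Connector connectorWithDefaults() {
        return new Connector.Builder()
            .setConnectorId("test-connector-with-defaults")
            .setIndexName("search-test")
            .build();
    }
}
```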
private String indexName; + private boolean isNative = false; + private String language; + private ConnectorSyncInfo syncInfo = new ConnectorSyncInfo.Builder().build(); + private String name; + private ConnectorIngestPipeline pipeline; + private ConnectorScheduling scheduling = ConnectorScheduling.getDefaultConnectorScheduling(); + private String serviceType; + private ConnectorStatus status = ConnectorStatus.CREATED; + private Object syncCursor; + private boolean syncNow = false; + + public Builder setConnectorId(String connectorId) { + this.connectorId = connectorId; + return this; + } + + public Builder setApiKeyId(String apiKeyId) { + this.apiKeyId = apiKeyId; + return this; + } + + public Builder setConfiguration(Map configuration) { + this.configuration = configuration; + return this; + } + + public Builder setCustomScheduling(Map customScheduling) { + this.customScheduling = customScheduling; + return this; + } + + public Builder setDescription(String description) { + this.description = description; + return this; + } + + public Builder setError(String error) { + this.error = error; + return this; + } + + public Builder setFeatures(ConnectorFeatures features) { + this.features = features; + return this; + } + + public Builder setFiltering(List filtering) { + this.filtering = filtering; + return this; + } + + public Builder setIndexName(String indexName) { + this.indexName = indexName; + return this; + } + + public Builder setIsNative(boolean isNative) { + this.isNative = isNative; + if (isNative) { + this.status = ConnectorStatus.NEEDS_CONFIGURATION; + } + return this; + } + + public Builder setLanguage(String language) { + this.language = language; + return this; + } + + public Builder setSyncInfo(ConnectorSyncInfo syncInfo) { + this.syncInfo = syncInfo; + return this; + } + + public Builder setName(String name) { + this.name = Objects.requireNonNullElse(name, ""); + return this; + } + + public Builder setPipeline(ConnectorIngestPipeline pipeline) { + this.pipeline = pipeline; + return this; + } + + public Builder setScheduling(ConnectorScheduling scheduling) { + this.scheduling = scheduling; + return this; + } + + public Builder setServiceType(String serviceType) { + this.serviceType = serviceType; + return this; + } + + public Builder setStatus(ConnectorStatus status) { + this.status = status; + return this; + } + + public Builder setSyncCursor(Object syncCursor) { + this.syncCursor = syncCursor; + return this; + } + + public Builder setSyncNow(boolean syncNow) { + this.syncNow = syncNow; + return this; + } + + public Connector build() { + return new Connector( + connectorId, + apiKeyId, + configuration, + customScheduling, + description, + error, + features, + filtering, + indexName, + isNative, + language, + syncInfo, + name, + pipeline, + scheduling, + serviceType, + status, + syncCursor, + syncNow + ); + } + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorAPIFeature.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorAPIFeature.java new file mode 100644 index 0000000000000..40dcf02a2bf19 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorAPIFeature.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
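
To make the shape of the new document model concrete, here is a minimal sketch of assembling a connector via the `Connector.Builder` defined above. All field values are hypothetical; unset fields fall back to the builder defaults (default filtering, default scheduling, `CREATED` status), and note that `setIsNative(true)` also moves the status to `NEEDS_CONFIGURATION`, as the builder code shows.

```java
// Illustrative only: ids and names are placeholders, not values from this change.
Connector connector = new Connector.Builder()
    .setConnectorId("connector-1")        // document id in the connector index
    .setIndexName("search-my-data")       // index that synced documents land in
    .setIsNative(true)                    // side effect: status -> NEEDS_CONFIGURATION
    .setLanguage("en")
    .setName("My MongoDB connector")
    .setServiceType("mongodb")
    .build();
```
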
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector; + +import org.elasticsearch.common.util.FeatureFlag; + +/** + * Connector API feature flag. When the feature is complete, this flag will be removed. + */ +public class ConnectorAPIFeature { + + private static final FeatureFlag CONNECTOR_API_FEATURE_FLAG = new FeatureFlag("connector_api"); + + public static boolean isEnabled() { + return CONNECTOR_API_FEATURE_FLAG.isEnabled(); + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorCustomSchedule.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorCustomSchedule.java new file mode 100644 index 0000000000000..81239610c3186 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorCustomSchedule.java @@ -0,0 +1,353 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.scheduler.Cron; + +import java.io.IOException; +import java.time.Instant; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public class ConnectorCustomSchedule implements Writeable, ToXContentObject { + + private final ConfigurationOverrides configurationOverrides; + private final boolean enabled; + private final Cron interval; + @Nullable + private final Instant lastSynced; + private final String name; + + /** + * Constructor for ConnectorCustomSchedule. + * + * @param configurationOverrides Configuration overrides {@link ConfigurationOverrides} specifies custom settings overrides. + * @param enabled Flag indicating whether the custom schedule is active or not. + * @param interval The interval at which the custom schedule runs, specified in a cron-like format. + * @param lastSynced The timestamp of the last successful synchronization performed under this custom schedule, if any. + * @param name The name of the custom schedule, used for identification and reference purposes. 
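
As a sketch of how these pieces fit together, a custom schedule could be assembled with the `Builder` and `ConfigurationOverrides` classes defined further down in this file; the cron expression, override values, and schedule name are purely illustrative.

```java
// Illustrative sketch using the builders defined below in this file.
ConnectorCustomSchedule nightly = new ConnectorCustomSchedule.Builder()
    .setConfigurationOverrides(
        new ConnectorCustomSchedule.ConfigurationOverrides.Builder()
            .setMaxCrawlDepth(2)                           // hypothetical override
            .setSeedUrls(List.of("https://example.com"))   // hypothetical seed URL
            .build()
    )
    .setEnabled(true)
    .setInterval(new Cron("0 0 0 * * ?"))   // every day at midnight
    .setName("nightly-crawl")               // hypothetical schedule name
    .build();                               // lastSynced stays null until a sync runs
```
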
+ */ + private ConnectorCustomSchedule( + ConfigurationOverrides configurationOverrides, + boolean enabled, + Cron interval, + Instant lastSynced, + String name + ) { + this.configurationOverrides = Objects.requireNonNull(configurationOverrides, CONFIG_OVERRIDES_FIELD.getPreferredName()); + this.enabled = enabled; + this.interval = Objects.requireNonNull(interval, INTERVAL_FIELD.getPreferredName()); + this.lastSynced = lastSynced; + this.name = Objects.requireNonNull(name, NAME_FIELD.getPreferredName()); + } + + public ConnectorCustomSchedule(StreamInput in) throws IOException { + this.configurationOverrides = new ConfigurationOverrides(in); + this.enabled = in.readBoolean(); + this.interval = new Cron(in.readString()); + this.lastSynced = in.readOptionalInstant(); + this.name = in.readString(); + } + + private static final ParseField CONFIG_OVERRIDES_FIELD = new ParseField("configuration_overrides"); + private static final ParseField ENABLED_FIELD = new ParseField("enabled"); + private static final ParseField INTERVAL_FIELD = new ParseField("interval"); + private static final ParseField LAST_SYNCED_FIELD = new ParseField("last_synced"); + + private static final ParseField NAME_FIELD = new ParseField("name"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "connector_custom_schedule", + true, + args -> new Builder().setConfigurationOverrides((ConfigurationOverrides) args[0]) + .setEnabled((Boolean) args[1]) + .setInterval(new Cron((String) args[2])) + .setLastSynced((Instant) args[3]) + .setName((String) args[4]) + .build() + ); + + static { + PARSER.declareField( + constructorArg(), + (parser, context) -> ConfigurationOverrides.fromXContent(parser), + CONFIG_OVERRIDES_FIELD, + ObjectParser.ValueType.OBJECT + ); + PARSER.declareBoolean(constructorArg(), ENABLED_FIELD); + PARSER.declareString(constructorArg(), INTERVAL_FIELD); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> p.currentToken() == XContentParser.Token.VALUE_NULL ? 
null : Instant.parse(p.text()), + ConnectorSyncInfo.LAST_SYNCED_FIELD, + ObjectParser.ValueType.STRING_OR_NULL + ); + PARSER.declareString(constructorArg(), NAME_FIELD); + } + + public static ConnectorCustomSchedule fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + public static ConnectorCustomSchedule fromXContentBytes(BytesReference source, XContentType xContentType) { + try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { + return ConnectorCustomSchedule.fromXContent(parser); + } catch (IOException e) { + throw new ElasticsearchParseException("Failed to parse a connector custom schedule.", e); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(CONFIG_OVERRIDES_FIELD.getPreferredName(), configurationOverrides); + builder.field(ENABLED_FIELD.getPreferredName(), enabled); + builder.field(INTERVAL_FIELD.getPreferredName(), interval); + if (lastSynced != null) { + builder.field(LAST_SYNCED_FIELD.getPreferredName(), lastSynced); + } + builder.field(NAME_FIELD.getPreferredName(), name); + } + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeWriteable(configurationOverrides); + out.writeBoolean(enabled); + out.writeString(interval.toString()); + out.writeOptionalInstant(lastSynced); + out.writeString(name); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ConnectorCustomSchedule that = (ConnectorCustomSchedule) o; + return enabled == that.enabled + && Objects.equals(configurationOverrides, that.configurationOverrides) + && Objects.equals(interval, that.interval) + && Objects.equals(lastSynced, that.lastSynced) + && Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + return Objects.hash(configurationOverrides, enabled, interval, lastSynced, name); + } + + public static class Builder { + + private ConfigurationOverrides configurationOverrides; + private boolean enabled; + private Cron interval; + private Instant lastSynced; + private String name; + + public Builder setConfigurationOverrides(ConfigurationOverrides configurationOverrides) { + this.configurationOverrides = configurationOverrides; + return this; + } + + public Builder setEnabled(boolean enabled) { + this.enabled = enabled; + return this; + } + + public Builder setInterval(Cron interval) { + this.interval = interval; + return this; + } + + public Builder setLastSynced(Instant lastSynced) { + this.lastSynced = lastSynced; + return this; + } + + public Builder setName(String name) { + this.name = name; + return this; + } + + public ConnectorCustomSchedule build() { + return new ConnectorCustomSchedule(configurationOverrides, enabled, interval, lastSynced, name); + } + } + + public static class ConfigurationOverrides implements Writeable, ToXContentObject { + @Nullable + private final Integer maxCrawlDepth; + @Nullable + private final Boolean sitemapDiscoveryDisabled; + @Nullable + private final List domainAllowList; + @Nullable + private final List sitemapUrls; + @Nullable + private final List seedUrls; + + private ConfigurationOverrides( + Integer maxCrawlDepth, + Boolean sitemapDiscoveryDisabled, + List domainAllowList, + List sitemapUrls, + List seedUrls + ) { + this.maxCrawlDepth = maxCrawlDepth; + 
this.sitemapDiscoveryDisabled = sitemapDiscoveryDisabled; + this.domainAllowList = domainAllowList; + this.sitemapUrls = sitemapUrls; + this.seedUrls = seedUrls; + } + + public ConfigurationOverrides(StreamInput in) throws IOException { + this.maxCrawlDepth = in.readOptionalInt(); + this.sitemapDiscoveryDisabled = in.readOptionalBoolean(); + this.domainAllowList = in.readOptionalStringCollectionAsList(); + this.sitemapUrls = in.readOptionalStringCollectionAsList(); + this.seedUrls = in.readOptionalStringCollectionAsList(); + } + + private static final ParseField MAX_CRAWL_DEPTH_FIELD = new ParseField("max_crawl_depth"); + private static final ParseField SITEMAP_DISCOVERY_DISABLED_FIELD = new ParseField("sitemap_discovery_disabled"); + private static final ParseField DOMAIN_ALLOWLIST_FIELD = new ParseField("domain_allowlist"); + private static final ParseField SITEMAP_URLS_FIELD = new ParseField("sitemap_urls"); + private static final ParseField SEED_URLS_FIELD = new ParseField("seed_urls"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "configuration_override", + true, + args -> new Builder().setMaxCrawlDepth((Integer) args[0]) + .setSitemapDiscoveryDisabled((Boolean) args[1]) + .setDomainAllowList((List) args[2]) + .setSitemapUrls((List) args[3]) + .setSeedUrls((List) args[4]) + .build() + ); + + static { + PARSER.declareInt(optionalConstructorArg(), MAX_CRAWL_DEPTH_FIELD); + PARSER.declareBoolean(optionalConstructorArg(), SITEMAP_DISCOVERY_DISABLED_FIELD); + PARSER.declareStringArray(optionalConstructorArg(), DOMAIN_ALLOWLIST_FIELD); + PARSER.declareStringArray(optionalConstructorArg(), SITEMAP_URLS_FIELD); + PARSER.declareStringArray(optionalConstructorArg(), SEED_URLS_FIELD); + } + + public static ConfigurationOverrides fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (maxCrawlDepth != null) { + builder.field(MAX_CRAWL_DEPTH_FIELD.getPreferredName(), maxCrawlDepth); + } + if (sitemapDiscoveryDisabled != null) { + builder.field(SITEMAP_DISCOVERY_DISABLED_FIELD.getPreferredName(), sitemapDiscoveryDisabled); + } + if (domainAllowList != null) { + builder.stringListField(DOMAIN_ALLOWLIST_FIELD.getPreferredName(), domainAllowList); + } + if (sitemapUrls != null) { + builder.stringListField(SITEMAP_URLS_FIELD.getPreferredName(), sitemapUrls); + } + if (seedUrls != null) { + builder.stringListField(SEED_URLS_FIELD.getPreferredName(), seedUrls); + } + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalInt(maxCrawlDepth); + out.writeOptionalBoolean(sitemapDiscoveryDisabled); + out.writeOptionalStringCollection(domainAllowList); + out.writeOptionalStringCollection(sitemapUrls); + out.writeOptionalStringCollection(seedUrls); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ConfigurationOverrides that = (ConfigurationOverrides) o; + return Objects.equals(maxCrawlDepth, that.maxCrawlDepth) + && Objects.equals(sitemapDiscoveryDisabled, that.sitemapDiscoveryDisabled) + && Objects.equals(domainAllowList, that.domainAllowList) + && Objects.equals(sitemapUrls, that.sitemapUrls) + && Objects.equals(seedUrls, that.seedUrls); + } + + @Override + public int 
hashCode() { + return Objects.hash(maxCrawlDepth, sitemapDiscoveryDisabled, domainAllowList, sitemapUrls, seedUrls); + } + + public static class Builder { + + private Integer maxCrawlDepth; + private Boolean sitemapDiscoveryDisabled; + private List domainAllowList; + private List sitemapUrls; + private List seedUrls; + + public Builder setMaxCrawlDepth(Integer maxCrawlDepth) { + this.maxCrawlDepth = maxCrawlDepth; + return this; + } + + public Builder setSitemapDiscoveryDisabled(Boolean sitemapDiscoveryDisabled) { + this.sitemapDiscoveryDisabled = sitemapDiscoveryDisabled; + return this; + } + + public Builder setDomainAllowList(List domainAllowList) { + this.domainAllowList = domainAllowList; + return this; + } + + public Builder setSitemapUrls(List sitemapUrls) { + this.sitemapUrls = sitemapUrls; + return this; + } + + public Builder setSeedUrls(List seedUrls) { + this.seedUrls = seedUrls; + return this; + } + + public ConfigurationOverrides build() { + return new ConfigurationOverrides(maxCrawlDepth, sitemapDiscoveryDisabled, domainAllowList, sitemapUrls, seedUrls); + } + } + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorFeatures.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorFeatures.java new file mode 100644 index 0000000000000..51aa110342fe9 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorFeatures.java @@ -0,0 +1,376 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * The {@link ConnectorFeatures} class represents feature flags for a connector. + */ +public class ConnectorFeatures implements Writeable, ToXContentObject { + + @Nullable + private final FeatureEnabled documentLevelSecurityEnabled; + @Nullable + private final Boolean filteringAdvancedConfigEnabled; + @Nullable + private final Boolean filteringRulesEnabled; + @Nullable + private final FeatureEnabled incrementalSyncEnabled; + @Nullable + private final SyncRulesFeatures syncRulesFeatures; + + /** + * Constructs a new instance of ConnectorFeatures. + * + * @param documentLevelSecurityEnabled A flag indicating whether document-level security is enabled. + * @param filteringAdvancedConfig A flag indicating whether advanced filtering configuration is enabled. 
+ * @param filteringRules A flag indicating whether filtering rules are enabled. + * @param incrementalSyncEnabled A flag indicating whether incremental sync is enabled. + * @param syncRulesFeatures An {@link SyncRulesFeatures} object indicating whether basic and advanced sync rules are enabled. + */ + private ConnectorFeatures( + FeatureEnabled documentLevelSecurityEnabled, + Boolean filteringAdvancedConfig, + Boolean filteringRules, + FeatureEnabled incrementalSyncEnabled, + SyncRulesFeatures syncRulesFeatures + ) { + this.documentLevelSecurityEnabled = documentLevelSecurityEnabled; + this.filteringAdvancedConfigEnabled = filteringAdvancedConfig; + this.filteringRulesEnabled = filteringRules; + this.incrementalSyncEnabled = incrementalSyncEnabled; + this.syncRulesFeatures = syncRulesFeatures; + } + + public ConnectorFeatures(StreamInput in) throws IOException { + this.documentLevelSecurityEnabled = in.readOptionalWriteable(FeatureEnabled::new); + this.filteringAdvancedConfigEnabled = in.readOptionalBoolean(); + this.filteringRulesEnabled = in.readOptionalBoolean(); + this.incrementalSyncEnabled = in.readOptionalWriteable(FeatureEnabled::new); + this.syncRulesFeatures = in.readOptionalWriteable(SyncRulesFeatures::new); + } + + private static final ParseField DOCUMENT_LEVEL_SECURITY_ENABLED_FIELD = new ParseField("document_level_security"); + private static final ParseField FILTERING_ADVANCED_CONFIG_ENABLED_FIELD = new ParseField("filtering_advanced_config"); + private static final ParseField FILTERING_RULES_ENABLED_FIELD = new ParseField("filtering_rules"); + private static final ParseField INCREMENTAL_SYNC_ENABLED_FIELD = new ParseField("incremental_sync"); + private static final ParseField SYNC_RULES_FIELD = new ParseField("sync_rules"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "connector_features", + true, + args -> { + return new Builder().setDocumentLevelSecurityEnabled((FeatureEnabled) args[0]) + .setFilteringAdvancedConfig((Boolean) args[1]) + .setFilteringRules((Boolean) args[2]) + .setIncrementalSyncEnabled((FeatureEnabled) args[3]) + .setSyncRulesFeatures((SyncRulesFeatures) args[4]) + .build(); + } + ); + + static { + PARSER.declareObject(optionalConstructorArg(), (p, c) -> FeatureEnabled.fromXContent(p), DOCUMENT_LEVEL_SECURITY_ENABLED_FIELD); + PARSER.declareBoolean(optionalConstructorArg(), FILTERING_ADVANCED_CONFIG_ENABLED_FIELD); + PARSER.declareBoolean(optionalConstructorArg(), FILTERING_RULES_ENABLED_FIELD); + PARSER.declareObject(optionalConstructorArg(), (p, c) -> FeatureEnabled.fromXContent(p), INCREMENTAL_SYNC_ENABLED_FIELD); + PARSER.declareObject(optionalConstructorArg(), (p, c) -> SyncRulesFeatures.fromXContent(p), SYNC_RULES_FIELD); + } + + public static ConnectorFeatures fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + public static ConnectorFeatures fromXContentBytes(BytesReference source, XContentType xContentType) { + try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { + return ConnectorFeatures.fromXContent(parser); + } catch (IOException e) { + throw new ElasticsearchParseException("Failed to parse a connector features.", e); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + if (documentLevelSecurityEnabled != null) { + builder.field(DOCUMENT_LEVEL_SECURITY_ENABLED_FIELD.getPreferredName(), 
documentLevelSecurityEnabled); + } + if (filteringAdvancedConfigEnabled != null) { + builder.field(FILTERING_ADVANCED_CONFIG_ENABLED_FIELD.getPreferredName(), filteringAdvancedConfigEnabled); + } + if (filteringRulesEnabled != null) { + builder.field(FILTERING_RULES_ENABLED_FIELD.getPreferredName(), filteringRulesEnabled); + } + if (incrementalSyncEnabled != null) { + builder.field(INCREMENTAL_SYNC_ENABLED_FIELD.getPreferredName(), incrementalSyncEnabled); + } + if (syncRulesFeatures != null) { + builder.field(SYNC_RULES_FIELD.getPreferredName(), syncRulesFeatures); + } + } + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalWriteable(documentLevelSecurityEnabled); + out.writeOptionalBoolean(filteringAdvancedConfigEnabled); + out.writeOptionalBoolean(filteringRulesEnabled); + out.writeOptionalWriteable(incrementalSyncEnabled); + out.writeOptionalWriteable(syncRulesFeatures); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ConnectorFeatures features = (ConnectorFeatures) o; + return Objects.equals(documentLevelSecurityEnabled, features.documentLevelSecurityEnabled) + && Objects.equals(filteringAdvancedConfigEnabled, features.filteringAdvancedConfigEnabled) + && Objects.equals(filteringRulesEnabled, features.filteringRulesEnabled) + && Objects.equals(incrementalSyncEnabled, features.incrementalSyncEnabled) + && Objects.equals(syncRulesFeatures, features.syncRulesFeatures); + } + + @Override + public int hashCode() { + return Objects.hash( + documentLevelSecurityEnabled, + filteringAdvancedConfigEnabled, + filteringRulesEnabled, + incrementalSyncEnabled, + syncRulesFeatures + ); + } + + public static class Builder { + + private FeatureEnabled documentLevelSecurityEnabled; + private Boolean filteringAdvancedConfig; + private Boolean filteringRules; + private FeatureEnabled incrementalSyncEnabled; + private SyncRulesFeatures syncRulesFeatures; + + public Builder setDocumentLevelSecurityEnabled(FeatureEnabled documentLevelSecurityEnabled) { + this.documentLevelSecurityEnabled = documentLevelSecurityEnabled; + return this; + } + + public Builder setFilteringAdvancedConfig(Boolean filteringAdvancedConfig) { + this.filteringAdvancedConfig = filteringAdvancedConfig; + return this; + } + + public Builder setFilteringRules(Boolean filteringRules) { + this.filteringRules = filteringRules; + return this; + } + + public Builder setIncrementalSyncEnabled(FeatureEnabled incrementalSyncEnabled) { + this.incrementalSyncEnabled = incrementalSyncEnabled; + return this; + } + + public Builder setSyncRulesFeatures(SyncRulesFeatures syncRulesFeatures) { + this.syncRulesFeatures = syncRulesFeatures; + return this; + } + + public ConnectorFeatures build() { + return new ConnectorFeatures( + documentLevelSecurityEnabled, + filteringAdvancedConfig, + filteringRules, + incrementalSyncEnabled, + syncRulesFeatures + ); + } + } + + /** + * The {@link FeatureEnabled} class serves as a helper for serializing and deserializing + * feature representations within the Connector context. This class specifically addresses + * the handling of features represented in a nested JSON structure: + * + *
    +     *     "my_feature": {"enabled": true}
    +     * 
    + */ + public static class FeatureEnabled implements ToXContentObject, Writeable { + + private final boolean enabled; + + public FeatureEnabled(boolean enabled) { + this.enabled = enabled; + } + + public FeatureEnabled(StreamInput in) throws IOException { + this.enabled = in.readBoolean(); + } + + private static final ParseField ENABLED_FIELD = new ParseField("enabled"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "connector_feature_enabled", + true, + args -> new FeatureEnabled((boolean) args[0]) + ); + + static { + PARSER.declareBoolean(optionalConstructorArg(), ENABLED_FIELD); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(ENABLED_FIELD.getPreferredName(), enabled); + } + builder.endObject(); + return builder; + } + + public static FeatureEnabled fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(enabled); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + FeatureEnabled that = (FeatureEnabled) o; + return enabled == that.enabled; + } + + @Override + public int hashCode() { + return Objects.hash(enabled); + } + } + + /** + * The {@link SyncRulesFeatures} class represents the feature configuration for advanced and basic + * sync rules in a structured and serializable format. + */ + public static class SyncRulesFeatures implements ToXContentObject, Writeable { + + private final FeatureEnabled syncRulesAdvancedEnabled; + private final FeatureEnabled syncRulesBasicEnabled; + + private SyncRulesFeatures(FeatureEnabled syncRulesAdvancedEnabled, FeatureEnabled syncRulesBasicEnabled) { + this.syncRulesAdvancedEnabled = syncRulesAdvancedEnabled; + this.syncRulesBasicEnabled = syncRulesBasicEnabled; + } + + public SyncRulesFeatures(StreamInput in) throws IOException { + this.syncRulesAdvancedEnabled = in.readOptionalWriteable(FeatureEnabled::new); + this.syncRulesBasicEnabled = in.readOptionalWriteable(FeatureEnabled::new); + } + + private static final ParseField SYNC_RULES_ADVANCED_ENABLED_FIELD = new ParseField("advanced"); + private static final ParseField SYNC_RULES_BASIC_ENABLED_FIELD = new ParseField("basic"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "sync_rules_features", + true, + args -> new Builder().setSyncRulesAdvancedEnabled((FeatureEnabled) args[0]) + .setSyncRulesBasicEnabled((FeatureEnabled) args[1]) + .build() + ); + + static { + PARSER.declareObject(optionalConstructorArg(), (p, c) -> FeatureEnabled.fromXContent(p), SYNC_RULES_ADVANCED_ENABLED_FIELD); + PARSER.declareObject(optionalConstructorArg(), (p, c) -> FeatureEnabled.fromXContent(p), SYNC_RULES_BASIC_ENABLED_FIELD); + } + + public static SyncRulesFeatures fromXContent(XContentParser p) throws IOException { + return PARSER.parse(p, null); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + if (syncRulesAdvancedEnabled != null) { + builder.field(SYNC_RULES_ADVANCED_ENABLED_FIELD.getPreferredName(), syncRulesAdvancedEnabled); + } + if (syncRulesBasicEnabled != null) { + builder.field(SYNC_RULES_BASIC_ENABLED_FIELD.getPreferredName(), syncRulesBasicEnabled); + } + } + 
builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalWriteable(syncRulesAdvancedEnabled); + out.writeOptionalWriteable(syncRulesBasicEnabled); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SyncRulesFeatures that = (SyncRulesFeatures) o; + return Objects.equals(syncRulesAdvancedEnabled, that.syncRulesAdvancedEnabled) + && Objects.equals(syncRulesBasicEnabled, that.syncRulesBasicEnabled); + } + + @Override + public int hashCode() { + return Objects.hash(syncRulesAdvancedEnabled, syncRulesBasicEnabled); + } + + public static class Builder { + + private FeatureEnabled syncRulesAdvancedEnabled; + private FeatureEnabled syncRulesBasicEnabled; + + public Builder setSyncRulesAdvancedEnabled(FeatureEnabled syncRulesAdvancedEnabled) { + this.syncRulesAdvancedEnabled = syncRulesAdvancedEnabled; + return this; + } + + public Builder setSyncRulesBasicEnabled(FeatureEnabled syncRulesBasicEnabled) { + this.syncRulesBasicEnabled = syncRulesBasicEnabled; + return this; + } + + public SyncRulesFeatures build() { + return new SyncRulesFeatures(syncRulesAdvancedEnabled, syncRulesBasicEnabled); + } + } + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorFiltering.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorFiltering.java new file mode 100644 index 0000000000000..8ade6cdbcc0b1 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorFiltering.java @@ -0,0 +1,219 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
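
For reference, the features document that `ConnectorFeatures.fromXContentBytes` accepts looks like the following sketch. Every top-level key is optional and the flag values are illustrative; the field names match the `ParseField` constants declared above.

```java
// Illustrative: parsing a features document (all keys optional, values hypothetical).
String json = """
    {
      "document_level_security": { "enabled": true },
      "incremental_sync": { "enabled": false },
      "sync_rules": {
        "advanced": { "enabled": false },
        "basic": { "enabled": true }
      }
    }
    """;
ConnectorFeatures features = ConnectorFeatures.fromXContentBytes(
    new BytesArray(json),    // org.elasticsearch.common.bytes.BytesArray
    XContentType.JSON
);
```
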
+ */ + +package org.elasticsearch.xpack.application.connector; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.application.connector.filtering.FilteringAdvancedSnippet; +import org.elasticsearch.xpack.application.connector.filtering.FilteringPolicy; +import org.elasticsearch.xpack.application.connector.filtering.FilteringRule; +import org.elasticsearch.xpack.application.connector.filtering.FilteringRuleCondition; +import org.elasticsearch.xpack.application.connector.filtering.FilteringRules; +import org.elasticsearch.xpack.application.connector.filtering.FilteringValidationInfo; +import org.elasticsearch.xpack.application.connector.filtering.FilteringValidationState; + +import java.io.IOException; +import java.time.Instant; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Represents filtering configurations for a connector, encapsulating both active and draft rules. + * The {@link ConnectorFiltering} class stores the current active filtering rules, a domain associated + * with these rules, and any draft filtering rules that are yet to be applied. + */ +public class ConnectorFiltering implements Writeable, ToXContentObject { + + private final FilteringRules active; + private final String domain; + private final FilteringRules draft; + + /** + * Constructs a new ConnectorFiltering instance. + * + * @param active The active filtering rules. + * @param domain The domain associated with the filtering. + * @param draft The draft filtering rules. 
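
As the factory at the end of this file shows, new connectors start from a permissive default: one include-everything regex rule mirrored in both the `active` and `draft` sections. A quick sketch of obtaining and serializing it:

```java
// Default config: domain "DEFAULT", one INCLUDE rule matching ".*" in active and draft.
ConnectorFiltering defaultFiltering = ConnectorFiltering.getDefaultConnectorFilteringConfig();

// Serializing it shows the { "active": ..., "domain": ..., "draft": ... } shape.
XContentBuilder builder = XContentFactory.jsonBuilder();
defaultFiltering.toXContent(builder, ToXContent.EMPTY_PARAMS);
String json = Strings.toString(builder);
```
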
+ */ + public ConnectorFiltering(FilteringRules active, String domain, FilteringRules draft) { + this.active = active; + this.domain = domain; + this.draft = draft; + } + + public ConnectorFiltering(StreamInput in) throws IOException { + this.active = new FilteringRules(in); + this.domain = in.readString(); + this.draft = new FilteringRules(in); + } + + private static final ParseField ACTIVE_FIELD = new ParseField("active"); + private static final ParseField DOMAIN_FIELD = new ParseField("domain"); + private static final ParseField DRAFT_FIELD = new ParseField("draft"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "connector_filtering", + true, + args -> new ConnectorFiltering.Builder().setActive((FilteringRules) args[0]) + .setDomain((String) args[1]) + .setDraft((FilteringRules) args[2]) + .build() + ); + + static { + PARSER.declareObject(constructorArg(), (p, c) -> FilteringRules.fromXContent(p), ACTIVE_FIELD); + PARSER.declareString(constructorArg(), DOMAIN_FIELD); + PARSER.declareObject(constructorArg(), (p, c) -> FilteringRules.fromXContent(p), DRAFT_FIELD); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(ACTIVE_FIELD.getPreferredName(), active); + builder.field(DOMAIN_FIELD.getPreferredName(), domain); + builder.field(DRAFT_FIELD.getPreferredName(), draft); + } + builder.endObject(); + return builder; + } + + public static ConnectorFiltering fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + public static ConnectorFiltering fromXContentBytes(BytesReference source, XContentType xContentType) { + try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { + return ConnectorFiltering.fromXContent(parser); + } catch (IOException e) { + throw new ElasticsearchParseException("Failed to parse a connector filtering.", e); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + active.writeTo(out); + out.writeString(domain); + draft.writeTo(out); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ConnectorFiltering that = (ConnectorFiltering) o; + return Objects.equals(active, that.active) && Objects.equals(domain, that.domain) && Objects.equals(draft, that.draft); + } + + @Override + public int hashCode() { + return Objects.hash(active, domain, draft); + } + + public static class Builder { + + private FilteringRules active; + private String domain; + private FilteringRules draft; + + public Builder setActive(FilteringRules active) { + this.active = active; + return this; + } + + public Builder setDomain(String domain) { + this.domain = domain; + return this; + } + + public Builder setDraft(FilteringRules draft) { + this.draft = draft; + return this; + } + + public ConnectorFiltering build() { + return new ConnectorFiltering(active, domain, draft); + } + } + + public static ConnectorFiltering getDefaultConnectorFilteringConfig() { + + Instant currentTimestamp = Instant.now(); + + return new ConnectorFiltering.Builder().setActive( + new FilteringRules.Builder().setAdvancedSnippet( + new FilteringAdvancedSnippet.Builder().setAdvancedSnippetCreatedAt(currentTimestamp) + .setAdvancedSnippetUpdatedAt(currentTimestamp) + .setAdvancedSnippetValue(Collections.emptyMap()) + .build() + ) + .setRules( + List.of( + 
new FilteringRule.Builder().setCreatedAt(currentTimestamp) + .setField("_") + .setId("DEFAULT") + .setOrder(0) + .setPolicy(FilteringPolicy.INCLUDE) + .setRule(FilteringRuleCondition.REGEX) + .setUpdatedAt(currentTimestamp) + .setValue(".*") + .build() + ) + ) + .setFilteringValidationInfo( + new FilteringValidationInfo.Builder().setValidationErrors(Collections.emptyList()) + .setValidationState(FilteringValidationState.VALID) + .build() + ) + .build() + ) + .setDomain("DEFAULT") + .setDraft( + new FilteringRules.Builder().setAdvancedSnippet( + new FilteringAdvancedSnippet.Builder().setAdvancedSnippetCreatedAt(currentTimestamp) + .setAdvancedSnippetUpdatedAt(currentTimestamp) + .setAdvancedSnippetValue(Collections.emptyMap()) + .build() + ) + .setRules( + List.of( + new FilteringRule.Builder().setCreatedAt(currentTimestamp) + .setField("_") + .setId("DEFAULT") + .setOrder(0) + .setPolicy(FilteringPolicy.INCLUDE) + .setRule(FilteringRuleCondition.REGEX) + .setUpdatedAt(currentTimestamp) + .setValue(".*") + .build() + ) + ) + .setFilteringValidationInfo( + new FilteringValidationInfo.Builder().setValidationErrors(Collections.emptyList()) + .setValidationState(FilteringValidationState.VALID) + .build() + ) + .build() + ) + .build(); + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java new file mode 100644 index 0000000000000..a8c9749d3fbc1 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java @@ -0,0 +1,215 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DelegatingActionListener; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.OriginSettingClient; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentType; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.function.BiConsumer; + +import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.ClientHelper.CONNECTORS_ORIGIN; + +/** + * A service that manages persistent {@link Connector} configurations. 
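
A sketch of the basic write/read round trip through this service. In the plugin the `Client` would come from injected components; `ActionListener.wrap` and the `logger` are used here only to keep the callbacks compact, and the connector id is a placeholder.

```java
ConnectorIndexService service = new ConnectorIndexService(client);  // client: injected Client

// Index (or overwrite) a connector document, then fetch it back by id.
service.putConnector(connector, ActionListener.wrap(
    resp -> logger.info("stored connector [{}]", connector.getConnectorId()),
    e -> logger.error("store failed", e)
));

service.getConnector("connector-1", ActionListener.wrap(
    fetched -> logger.info("found connector [{}]", fetched.getConnectorId()),
    e -> logger.error("get failed (ResourceNotFoundException if missing)", e)
));
```
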
+ */ +public class ConnectorIndexService { + + private final Client clientWithOrigin; + + public static final String CONNECTOR_INDEX_NAME = ConnectorTemplateRegistry.CONNECTOR_INDEX_NAME_PATTERN; + + /** + * @param client A client for executing actions on the connector index + */ + public ConnectorIndexService(Client client) { + this.clientWithOrigin = new OriginSettingClient(client, CONNECTORS_ORIGIN); + } + + /** + * Creates or updates the {@link Connector} in the underlying index. + * + * @param connector The connector object. + * @param listener The action listener to invoke on response/failure. + */ + public void putConnector(Connector connector, ActionListener listener) { + try { + final IndexRequest indexRequest = new IndexRequest(CONNECTOR_INDEX_NAME).opType(DocWriteRequest.OpType.INDEX) + .id(connector.getConnectorId()) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .source(connector.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS)); + clientWithOrigin.index(indexRequest, listener); + } catch (Exception e) { + listener.onFailure(e); + } + } + + /** + * Gets the {@link Connector} from the underlying index. + * + * @param connectorId The id of the connector object. + * @param listener The action listener to invoke on response/failure. + */ + public void getConnector(String connectorId, ActionListener listener) { + try { + final GetRequest getRequest = new GetRequest(CONNECTOR_INDEX_NAME).id(connectorId).realtime(true); + + clientWithOrigin.get(getRequest, new DelegatingIndexNotFoundActionListener<>(connectorId, listener, (l, getResponse) -> { + if (getResponse.isExists() == false) { + l.onFailure(new ResourceNotFoundException(connectorId)); + return; + } + try { + final Connector connector = Connector.fromXContentBytes(getResponse.getSourceAsBytesRef(), XContentType.JSON); + l.onResponse(connector); + } catch (Exception e) { + listener.onFailure(e); + } + })); + } catch (Exception e) { + listener.onFailure(e); + } + } + + /** + * Deletes the {@link Connector} in the underlying index. + * + * @param connectorId The id of the connector object. + * @param listener The action listener to invoke on response/failure. + */ + public void deleteConnector(String connectorId, ActionListener listener) { + + final DeleteRequest deleteRequest = new DeleteRequest(CONNECTOR_INDEX_NAME).id(connectorId) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + + try { + clientWithOrigin.delete( + deleteRequest, + new DelegatingIndexNotFoundActionListener<>(connectorId, listener, (l, deleteResponse) -> { + if (deleteResponse.getResult() == DocWriteResponse.Result.NOT_FOUND) { + l.onFailure(new ResourceNotFoundException(connectorId)); + return; + } + l.onResponse(deleteResponse); + }) + ); + } catch (Exception e) { + listener.onFailure(e); + } + + } + + /** + * List the {@link Connector} in ascending order of their ids. + * + * @param from From index to start the search from. + * @param size The maximum number of {@link Connector}s to return. + * @param listener The action listener to invoke on response/failure. 
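
For example, fetching the first page of twenty connectors might look like the sketch below; `ConnectorResult` is the record defined later in this class, and a missing backing index yields an empty result rather than an error.

```java
// First page: from=0, size=20, ordered by connector id ascending.
service.listConnectors(0, 20, ActionListener.wrap(
    result -> logger.info("page has {} connectors out of {} total",
        result.connectors().size(), result.totalResults()),
    e -> logger.error("listing connectors failed", e)
));
```
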
+ */ + public void listConnectors(int from, int size, ActionListener listener) { + try { + final SearchSourceBuilder source = new SearchSourceBuilder().from(from) + .size(size) + .query(new MatchAllQueryBuilder()) + .fetchSource(true) + .sort(Connector.ID_FIELD.getPreferredName(), SortOrder.ASC); + final SearchRequest req = new SearchRequest(CONNECTOR_INDEX_NAME).source(source); + clientWithOrigin.search(req, new ActionListener<>() { + @Override + public void onResponse(SearchResponse searchResponse) { + try { + listener.onResponse(mapSearchResponseToConnectorList(searchResponse)); + } catch (Exception e) { + listener.onFailure(e); + } + } + + @Override + public void onFailure(Exception e) { + if (e instanceof IndexNotFoundException) { + listener.onResponse(new ConnectorIndexService.ConnectorResult(Collections.emptyList(), 0L)); + return; + } + listener.onFailure(e); + } + }); + } catch (Exception e) { + listener.onFailure(e); + } + } + + private static ConnectorIndexService.ConnectorResult mapSearchResponseToConnectorList(SearchResponse response) { + final List connectorResults = Arrays.stream(response.getHits().getHits()) + .map(ConnectorIndexService::hitToConnector) + .toList(); + return new ConnectorIndexService.ConnectorResult(connectorResults, (int) response.getHits().getTotalHits().value); + } + + private static Connector hitToConnector(SearchHit searchHit) { + + // todo: don't return sensitive data from configuration in list endpoint + + return Connector.fromXContentBytes(searchHit.getSourceRef(), XContentType.JSON); + } + + public record ConnectorResult(List connectors, long totalResults) {} + + /** + * Listeners that checks failures for IndexNotFoundException, and transforms them in ResourceNotFoundException, + * invoking onFailure on the delegate listener + */ + static class DelegatingIndexNotFoundActionListener extends DelegatingActionListener { + + private final BiConsumer, T> bc; + private final String connectorId; + + DelegatingIndexNotFoundActionListener(String connectorId, ActionListener delegate, BiConsumer, T> bc) { + super(delegate); + this.bc = bc; + this.connectorId = connectorId; + } + + @Override + public void onResponse(T t) { + bc.accept(delegate, t); + } + + @Override + public void onFailure(Exception e) { + Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof IndexNotFoundException) { + delegate.onFailure(new ResourceNotFoundException("connector [" + connectorId + "] not found")); + return; + } + delegate.onFailure(e); + } + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIngestPipeline.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIngestPipeline.java new file mode 100644 index 0000000000000..620e19968309d --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIngestPipeline.java @@ -0,0 +1,161 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
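
As a preview of the class that follows, an ingest pipeline configuration could be built like this; the pipeline name is a placeholder, and all four fields are mandatory (the private constructor null-checks each one).

```java
// Illustrative: all four fields are required by the private constructor.
ConnectorIngestPipeline pipeline = new ConnectorIngestPipeline.Builder()
    .setExtractBinaryContent(true)     // extract text from binary attachments
    .setName("my-ingest-pipeline")     // hypothetical pipeline name
    .setReduceWhitespace(true)         // collapse extraneous whitespace
    .setRunMlInference(false)          // skip ML inference processors
    .build();
```
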
+ */ + +package org.elasticsearch.xpack.application.connector; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + +public class ConnectorIngestPipeline implements Writeable, ToXContentObject { + + private final Boolean extractBinaryContent; + private final String name; + private final Boolean reduceWhitespace; + private final Boolean runMlInference; + + /** + * Constructs a new instance of ConnectorIngestPipeline. + * + * @param extractBinaryContent A Boolean flag indicating whether to extract binary content during ingestion. + * @param name The name of the ingest pipeline. + * @param reduceWhitespace A Boolean flag indicating whether to reduce extraneous whitespace in the ingested content. + * @param runMlInference A Boolean flag indicating whether to run machine learning inference on the ingested content. + */ + private ConnectorIngestPipeline(Boolean extractBinaryContent, String name, Boolean reduceWhitespace, Boolean runMlInference) { + this.extractBinaryContent = Objects.requireNonNull(extractBinaryContent, EXTRACT_BINARY_CONTENT_FIELD.getPreferredName()); + this.name = Objects.requireNonNull(name, NAME_FIELD.getPreferredName()); + this.reduceWhitespace = Objects.requireNonNull(reduceWhitespace, REDUCE_WHITESPACE_FIELD.getPreferredName()); + this.runMlInference = Objects.requireNonNull(runMlInference, RUN_ML_INFERENCE_FIELD.getPreferredName()); + } + + public ConnectorIngestPipeline(StreamInput in) throws IOException { + this.extractBinaryContent = in.readBoolean(); + this.name = in.readString(); + this.reduceWhitespace = in.readBoolean(); + this.runMlInference = in.readBoolean(); + } + + private static final ParseField EXTRACT_BINARY_CONTENT_FIELD = new ParseField("extract_binary_content"); + private static final ParseField NAME_FIELD = new ParseField("name"); + private static final ParseField REDUCE_WHITESPACE_FIELD = new ParseField("reduce_whitespace"); + private static final ParseField RUN_ML_INFERENCE_FIELD = new ParseField("run_ml_inference"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "connector_ingest_pipeline", + true, + args -> new Builder().setExtractBinaryContent((Boolean) args[0]) + .setName((String) args[1]) + .setReduceWhitespace((Boolean) args[2]) + .setRunMlInference((Boolean) args[3]) + .build() + ); + + static { + PARSER.declareBoolean(constructorArg(), EXTRACT_BINARY_CONTENT_FIELD); + PARSER.declareString(constructorArg(), NAME_FIELD); + PARSER.declareBoolean(constructorArg(), REDUCE_WHITESPACE_FIELD); + PARSER.declareBoolean(constructorArg(), RUN_ML_INFERENCE_FIELD); + } + + public static ConnectorIngestPipeline fromXContentBytes(BytesReference source, XContentType xContentType) { + try (XContentParser parser = 
XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { + return ConnectorIngestPipeline.fromXContent(parser); + } catch (IOException e) { + throw new ElasticsearchParseException("Failed to parse: " + source.utf8ToString(), e); + } + } + + public static ConnectorIngestPipeline fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(EXTRACT_BINARY_CONTENT_FIELD.getPreferredName(), extractBinaryContent); + builder.field(NAME_FIELD.getPreferredName(), name); + builder.field(REDUCE_WHITESPACE_FIELD.getPreferredName(), reduceWhitespace); + builder.field(RUN_ML_INFERENCE_FIELD.getPreferredName(), runMlInference); + } + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(extractBinaryContent); + out.writeString(name); + out.writeBoolean(reduceWhitespace); + out.writeBoolean(runMlInference); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ConnectorIngestPipeline that = (ConnectorIngestPipeline) o; + return Objects.equals(extractBinaryContent, that.extractBinaryContent) + && Objects.equals(name, that.name) + && Objects.equals(reduceWhitespace, that.reduceWhitespace) + && Objects.equals(runMlInference, that.runMlInference); + } + + @Override + public int hashCode() { + return Objects.hash(extractBinaryContent, name, reduceWhitespace, runMlInference); + } + + public static class Builder { + + private Boolean extractBinaryContent; + private String name; + private Boolean reduceWhitespace; + private Boolean runMlInference; + + public Builder setExtractBinaryContent(Boolean extractBinaryContent) { + this.extractBinaryContent = extractBinaryContent; + return this; + } + + public Builder setName(String name) { + this.name = name; + return this; + } + + public Builder setReduceWhitespace(Boolean reduceWhitespace) { + this.reduceWhitespace = reduceWhitespace; + return this; + } + + public Builder setRunMlInference(Boolean runMlInference) { + this.runMlInference = runMlInference; + return this; + } + + public ConnectorIngestPipeline build() { + return new ConnectorIngestPipeline(extractBinaryContent, name, reduceWhitespace, runMlInference); + } + } + +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorScheduling.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorScheduling.java new file mode 100644 index 0000000000000..233bea5d4a842 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorScheduling.java @@ -0,0 +1,249 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
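
As a sketch of the scheduling model that follows: three independent `ScheduleConfig` entries (access control, full, incremental), each pairing an enabled flag with a cron interval. The defaults at the bottom of the file disable all three with a midnight cron; an enabled hourly full sync would look like this (cron expression illustrative):

```java
// Defaults: access_control, full and incremental all disabled at "0 0 0 * * ?".
ConnectorScheduling defaults = ConnectorScheduling.getDefaultConnectorScheduling();

// An enabled full sync at the top of every hour (illustrative cron expression).
ConnectorScheduling.ScheduleConfig hourlyFull = new ConnectorScheduling.ScheduleConfig.Builder()
    .setEnabled(true)
    .setInterval(new Cron("0 0 * * * ?"))
    .build();
```
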
+ */ + +package org.elasticsearch.xpack.application.connector; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.scheduler.Cron; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + +public class ConnectorScheduling implements Writeable, ToXContentObject { + + private final ScheduleConfig accessControl; + private final ScheduleConfig full; + private final ScheduleConfig incremental; + + private static final ParseField ACCESS_CONTROL_FIELD = new ParseField("access_control"); + private static final ParseField FULL_FIELD = new ParseField("full"); + private static final ParseField INCREMENTAL_FIELD = new ParseField("incremental"); + + /** + * @param accessControl connector access control sync schedule represented as {@link ScheduleConfig} + * @param full connector full sync schedule represented as {@link ScheduleConfig} + * @param incremental connector incremental sync schedule represented as {@link ScheduleConfig} + */ + private ConnectorScheduling(ScheduleConfig accessControl, ScheduleConfig full, ScheduleConfig incremental) { + this.accessControl = Objects.requireNonNull(accessControl, ACCESS_CONTROL_FIELD.getPreferredName()); + this.full = Objects.requireNonNull(full, FULL_FIELD.getPreferredName()); + this.incremental = Objects.requireNonNull(incremental, INCREMENTAL_FIELD.getPreferredName()); + } + + public ConnectorScheduling(StreamInput in) throws IOException { + this.accessControl = new ScheduleConfig(in); + this.full = new ScheduleConfig(in); + this.incremental = new ScheduleConfig(in); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "connector_scheduling", + true, + args -> new Builder().setAccessControl((ScheduleConfig) args[0]) + .setFull((ScheduleConfig) args[1]) + .setIncremental((ScheduleConfig) args[2]) + .build() + ); + + static { + PARSER.declareField( + constructorArg(), + (p, c) -> ScheduleConfig.fromXContent(p), + ACCESS_CONTROL_FIELD, + ObjectParser.ValueType.OBJECT + ); + PARSER.declareField(constructorArg(), (p, c) -> ScheduleConfig.fromXContent(p), FULL_FIELD, ObjectParser.ValueType.OBJECT); + PARSER.declareField(constructorArg(), (p, c) -> ScheduleConfig.fromXContent(p), INCREMENTAL_FIELD, ObjectParser.ValueType.OBJECT); + } + + public static ConnectorScheduling fromXContentBytes(BytesReference source, XContentType xContentType) { + try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { + return ConnectorScheduling.fromXContent(parser); + } catch (IOException e) { + throw new ElasticsearchParseException("Failed to parse: " + source.utf8ToString(), e); + } + } + + public static ConnectorScheduling fromXContent(XContentParser parser) 
throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(ACCESS_CONTROL_FIELD.getPreferredName(), accessControl); + builder.field(FULL_FIELD.getPreferredName(), full); + builder.field(INCREMENTAL_FIELD.getPreferredName(), incremental); + } + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + accessControl.writeTo(out); + full.writeTo(out); + incremental.writeTo(out); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ConnectorScheduling that = (ConnectorScheduling) o; + return Objects.equals(accessControl, that.accessControl) + && Objects.equals(full, that.full) + && Objects.equals(incremental, that.incremental); + } + + @Override + public int hashCode() { + return Objects.hash(accessControl, full, incremental); + } + + public static class Builder { + + private ScheduleConfig accessControl; + private ScheduleConfig full; + private ScheduleConfig incremental; + + public Builder setAccessControl(ScheduleConfig accessControl) { + this.accessControl = accessControl; + return this; + } + + public Builder setFull(ScheduleConfig full) { + this.full = full; + return this; + } + + public Builder setIncremental(ScheduleConfig incremental) { + this.incremental = incremental; + return this; + } + + public ConnectorScheduling build() { + return new ConnectorScheduling(accessControl, full, incremental); + } + } + + public static class ScheduleConfig implements Writeable, ToXContentObject { + private final boolean enabled; + private final Cron interval; + + private static final ParseField ENABLED_FIELD = new ParseField("enabled"); + private static final ParseField INTERVAL_FIELD = new ParseField("interval"); + + /** + * @param enabled flag to disable/enable scheduling + * @param interval CRON expression representing the sync schedule + */ + private ScheduleConfig(boolean enabled, Cron interval) { + this.enabled = enabled; + this.interval = Objects.requireNonNull(interval, INTERVAL_FIELD.getPreferredName()); + } + + public ScheduleConfig(StreamInput in) throws IOException { + this.enabled = in.readBoolean(); + this.interval = new Cron(in.readString()); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "schedule_config", + true, + args -> new Builder().setEnabled((boolean) args[0]).setInterval(new Cron((String) args[1])).build() + ); + + static { + PARSER.declareBoolean(constructorArg(), ENABLED_FIELD); + PARSER.declareString(constructorArg(), INTERVAL_FIELD); + } + + public static ScheduleConfig fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + public static ConstructingObjectParser getParser() { + return PARSER; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(ENABLED_FIELD.getPreferredName(), enabled); + builder.field(INTERVAL_FIELD.getPreferredName(), interval); + } + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(enabled); + out.writeString(interval.toString()); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != 
o.getClass()) return false; + ScheduleConfig that = (ScheduleConfig) o; + return enabled == that.enabled && Objects.equals(interval, that.interval); + } + + @Override + public int hashCode() { + return Objects.hash(enabled, interval); + } + + public static class Builder { + + private boolean enabled; + private Cron interval; + + public Builder setEnabled(boolean enabled) { + this.enabled = enabled; + return this; + } + + public Builder setInterval(Cron interval) { + this.interval = interval; + return this; + } + + public ScheduleConfig build() { + return new ScheduleConfig(enabled, interval); + } + } + } + + public static ConnectorScheduling getDefaultConnectorScheduling() { + return new ConnectorScheduling.Builder().setAccessControl( + new ConnectorScheduling.ScheduleConfig.Builder().setEnabled(false).setInterval(new Cron("0 0 0 * * ?")).build() + ) + .setFull(new ConnectorScheduling.ScheduleConfig.Builder().setEnabled(false).setInterval(new Cron("0 0 0 * * ?")).build()) + .setIncremental(new ConnectorScheduling.ScheduleConfig.Builder().setEnabled(false).setInterval(new Cron("0 0 0 * * ?")).build()) + .build(); + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorStatus.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorStatus.java new file mode 100644 index 0000000000000..5ebbab668890b --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorStatus.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector; + +import java.util.Locale; + +/** + * Enum representing the various states of a Connector: + *
<ul>
+ *     <li><b>CREATED</b>: The connector has been created but is not yet configured.</li>
+ *     <li><b>NEEDS_CONFIGURATION</b>: The connector requires further configuration to become operational.</li>
+ *     <li><b>CONFIGURED</b>: The connector has been configured but has not yet established a connection.</li>
+ *     <li><b>CONNECTED</b>: The connector is successfully connected and operational.</li>
+ *     <li><b>ERROR</b>: The connector encountered an error and may not be operational.</li>
+ * </ul>
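+ * <p>
+ * For example, {@code ConnectorStatus.connectorStatus("connected")} resolves to {@code CONNECTED}; the lookup below is case-insensitive.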
    + */ +public enum ConnectorStatus { + CREATED, + NEEDS_CONFIGURATION, + CONFIGURED, + CONNECTED, + ERROR; + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + + public static ConnectorStatus connectorStatus(String status) { + for (ConnectorStatus connectorStatus : ConnectorStatus.values()) { + if (connectorStatus.name().equalsIgnoreCase(status)) { + return connectorStatus; + } + } + throw new IllegalArgumentException("Unknown ConnectorStatus: " + status); + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorSyncInfo.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorSyncInfo.java new file mode 100644 index 0000000000000..10a2d54e29300 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorSyncInfo.java @@ -0,0 +1,286 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.time.Instant; +import java.util.Objects; + +public class ConnectorSyncInfo implements Writeable, ToXContentFragment { + @Nullable + private final String lastAccessControlSyncError; + @Nullable + private final Instant lastAccessControlSyncScheduledAt; + @Nullable + private final ConnectorSyncStatus lastAccessControlSyncStatus; + @Nullable + private final Long lastDeletedDocumentCount; + @Nullable + private final Instant lastIncrementalSyncScheduledAt; + @Nullable + private final Long lastIndexedDocumentCount; + @Nullable + private final Instant lastSeen; + @Nullable + private final String lastSyncError; + @Nullable + private final Instant lastSyncScheduledAt; + @Nullable + private final ConnectorSyncStatus lastSyncStatus; + @Nullable + private final Instant lastSynced; + + /** + * @param lastAccessControlSyncError The last error message related to access control sync, if any. + * @param lastAccessControlSyncScheduledAt The timestamp when the last access control sync was scheduled. + * @param lastAccessControlSyncStatus The status of the last access control sync. + * @param lastDeletedDocumentCount The count of documents last deleted during sync. + * @param lastIncrementalSyncScheduledAt The timestamp when the last incremental sync was scheduled. + * @param lastIndexedDocumentCount The count of documents last indexed during sync. + * @param lastSeen The timestamp when the connector was last active or seen. + * @param lastSyncError The last error message encountered during sync, if any. + * @param lastSyncScheduledAt The timestamp when the last sync was scheduled. + * @param lastSyncStatus The status of the last sync. + * @param lastSynced The timestamp when the connector was last successfully synchronized. 
+ */ + private ConnectorSyncInfo( + String lastAccessControlSyncError, + Instant lastAccessControlSyncScheduledAt, + ConnectorSyncStatus lastAccessControlSyncStatus, + Long lastDeletedDocumentCount, + Instant lastIncrementalSyncScheduledAt, + Long lastIndexedDocumentCount, + Instant lastSeen, + String lastSyncError, + Instant lastSyncScheduledAt, + ConnectorSyncStatus lastSyncStatus, + Instant lastSynced + ) { + this.lastAccessControlSyncError = lastAccessControlSyncError; + this.lastAccessControlSyncScheduledAt = lastAccessControlSyncScheduledAt; + this.lastAccessControlSyncStatus = lastAccessControlSyncStatus; + this.lastDeletedDocumentCount = lastDeletedDocumentCount; + this.lastIncrementalSyncScheduledAt = lastIncrementalSyncScheduledAt; + this.lastIndexedDocumentCount = lastIndexedDocumentCount; + this.lastSeen = lastSeen; + this.lastSyncError = lastSyncError; + this.lastSyncScheduledAt = lastSyncScheduledAt; + this.lastSyncStatus = lastSyncStatus; + this.lastSynced = lastSynced; + } + + public ConnectorSyncInfo(StreamInput in) throws IOException { + this.lastAccessControlSyncError = in.readOptionalString(); + this.lastAccessControlSyncScheduledAt = in.readOptionalInstant(); + this.lastAccessControlSyncStatus = in.readOptionalEnum(ConnectorSyncStatus.class); + this.lastDeletedDocumentCount = in.readOptionalLong(); + this.lastIncrementalSyncScheduledAt = in.readOptionalInstant(); + this.lastIndexedDocumentCount = in.readOptionalLong(); + this.lastSeen = in.readOptionalInstant(); + this.lastSyncError = in.readOptionalString(); + this.lastSyncScheduledAt = in.readOptionalInstant(); + this.lastSyncStatus = in.readOptionalEnum(ConnectorSyncStatus.class); + this.lastSynced = in.readOptionalInstant(); + } + + static final ParseField LAST_ACCESS_CONTROL_SYNC_ERROR = new ParseField("last_access_control_sync_error"); + static final ParseField LAST_ACCESS_CONTROL_SYNC_STATUS_FIELD = new ParseField("last_access_control_sync_status"); + static final ParseField LAST_ACCESS_CONTROL_SYNC_SCHEDULED_AT_FIELD = new ParseField("last_access_control_sync_scheduled_at"); + static final ParseField LAST_DELETED_DOCUMENT_COUNT_FIELD = new ParseField("last_deleted_document_count"); + static final ParseField LAST_INCREMENTAL_SYNC_SCHEDULED_AT_FIELD = new ParseField("last_incremental_sync_scheduled_at"); + static final ParseField LAST_INDEXED_DOCUMENT_COUNT_FIELD = new ParseField("last_indexed_document_count"); + static final ParseField LAST_SEEN_FIELD = new ParseField("last_seen"); + static final ParseField LAST_SYNC_ERROR_FIELD = new ParseField("last_sync_error"); + static final ParseField LAST_SYNC_SCHEDULED_AT_FIELD = new ParseField("last_sync_scheduled_at"); + static final ParseField LAST_SYNC_STATUS_FIELD = new ParseField("last_sync_status"); + static final ParseField LAST_SYNCED_FIELD = new ParseField("last_synced"); + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + + if (lastAccessControlSyncError != null) { + builder.field(LAST_ACCESS_CONTROL_SYNC_ERROR.getPreferredName(), lastAccessControlSyncError); + } + if (lastAccessControlSyncStatus != null) { + builder.field(LAST_ACCESS_CONTROL_SYNC_STATUS_FIELD.getPreferredName(), lastAccessControlSyncStatus); + } + if (lastAccessControlSyncScheduledAt != null) { + builder.field(LAST_ACCESS_CONTROL_SYNC_SCHEDULED_AT_FIELD.getPreferredName(), lastAccessControlSyncScheduledAt); + } + if (lastDeletedDocumentCount != null) { + builder.field(LAST_DELETED_DOCUMENT_COUNT_FIELD.getPreferredName(), 
lastDeletedDocumentCount); + } + if (lastIncrementalSyncScheduledAt != null) { + builder.field(LAST_INCREMENTAL_SYNC_SCHEDULED_AT_FIELD.getPreferredName(), lastIncrementalSyncScheduledAt); + } + if (lastIndexedDocumentCount != null) { + builder.field(LAST_INDEXED_DOCUMENT_COUNT_FIELD.getPreferredName(), lastIndexedDocumentCount); + } + if (lastSeen != null) { + builder.field(LAST_SEEN_FIELD.getPreferredName(), lastSeen); + } + if (lastSyncError != null) { + builder.field(LAST_SYNC_ERROR_FIELD.getPreferredName(), lastSyncError); + } + if (lastSyncScheduledAt != null) { + builder.field(LAST_SYNC_SCHEDULED_AT_FIELD.getPreferredName(), lastSyncScheduledAt); + } + if (lastSyncStatus != null) { + builder.field(LAST_SYNC_STATUS_FIELD.getPreferredName(), lastSyncStatus); + } + if (lastSynced != null) { + builder.field(LAST_SYNCED_FIELD.getPreferredName(), lastSynced); + } + + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalString(lastAccessControlSyncError); + out.writeOptionalInstant(lastAccessControlSyncScheduledAt); + out.writeOptionalEnum(lastAccessControlSyncStatus); + out.writeOptionalLong(lastDeletedDocumentCount); + out.writeOptionalInstant(lastIncrementalSyncScheduledAt); + out.writeOptionalLong(lastIndexedDocumentCount); + out.writeOptionalInstant(lastSeen); + out.writeOptionalString(lastSyncError); + out.writeOptionalInstant(lastSyncScheduledAt); + out.writeOptionalEnum(lastSyncStatus); + out.writeOptionalInstant(lastSynced); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ConnectorSyncInfo that = (ConnectorSyncInfo) o; + return Objects.equals(lastAccessControlSyncError, that.lastAccessControlSyncError) + && Objects.equals(lastAccessControlSyncScheduledAt, that.lastAccessControlSyncScheduledAt) + && lastAccessControlSyncStatus == that.lastAccessControlSyncStatus + && Objects.equals(lastDeletedDocumentCount, that.lastDeletedDocumentCount) + && Objects.equals(lastIncrementalSyncScheduledAt, that.lastIncrementalSyncScheduledAt) + && Objects.equals(lastIndexedDocumentCount, that.lastIndexedDocumentCount) + && Objects.equals(lastSeen, that.lastSeen) + && Objects.equals(lastSyncError, that.lastSyncError) + && Objects.equals(lastSyncScheduledAt, that.lastSyncScheduledAt) + && lastSyncStatus == that.lastSyncStatus + && Objects.equals(lastSynced, that.lastSynced); + } + + @Override + public int hashCode() { + return Objects.hash( + lastAccessControlSyncError, + lastAccessControlSyncScheduledAt, + lastAccessControlSyncStatus, + lastDeletedDocumentCount, + lastIncrementalSyncScheduledAt, + lastIndexedDocumentCount, + lastSeen, + lastSyncError, + lastSyncScheduledAt, + lastSyncStatus, + lastSynced + ); + } + + public static class Builder { + + private String lastAccessControlSyncError; + private Instant lastAccessControlSyncScheduledAt; + private ConnectorSyncStatus lastAccessControlSyncStatus; + private Long lastDeletedDocumentCount; + private Instant lastIncrementalSyncScheduledAt; + private Long lastIndexedDocumentCount; + private Instant lastSeen; + private String lastSyncError; + private Instant lastSyncScheduledAt; + private ConnectorSyncStatus lastSyncStatus; + private Instant lastSynced; + + public Builder setLastAccessControlSyncError(String lastAccessControlSyncError) { + this.lastAccessControlSyncError = lastAccessControlSyncError; + return this; + } + + public Builder setLastAccessControlSyncScheduledAt(Instant 
lastAccessControlSyncScheduledAt) { + this.lastAccessControlSyncScheduledAt = lastAccessControlSyncScheduledAt; + return this; + } + + public Builder setLastAccessControlSyncStatus(ConnectorSyncStatus lastAccessControlSyncStatus) { + this.lastAccessControlSyncStatus = lastAccessControlSyncStatus; + return this; + } + + public Builder setLastDeletedDocumentCount(Long lastDeletedDocumentCount) { + this.lastDeletedDocumentCount = lastDeletedDocumentCount; + return this; + } + + public Builder setLastIncrementalSyncScheduledAt(Instant lastIncrementalSyncScheduledAt) { + this.lastIncrementalSyncScheduledAt = lastIncrementalSyncScheduledAt; + return this; + } + + public Builder setLastIndexedDocumentCount(Long lastIndexedDocumentCount) { + this.lastIndexedDocumentCount = lastIndexedDocumentCount; + return this; + } + + public Builder setLastSeen(Instant lastSeen) { + this.lastSeen = lastSeen; + return this; + } + + public Builder setLastSyncError(String lastSyncError) { + this.lastSyncError = lastSyncError; + return this; + } + + public Builder setLastSyncScheduledAt(Instant lastSyncScheduledAt) { + this.lastSyncScheduledAt = lastSyncScheduledAt; + return this; + } + + public Builder setLastSyncStatus(ConnectorSyncStatus lastSyncStatus) { + this.lastSyncStatus = lastSyncStatus; + return this; + } + + public Builder setLastSynced(Instant lastSynced) { + this.lastSynced = lastSynced; + return this; + } + + public ConnectorSyncInfo build() { + return new ConnectorSyncInfo( + lastAccessControlSyncError, + lastAccessControlSyncScheduledAt, + lastAccessControlSyncStatus, + lastDeletedDocumentCount, + lastIncrementalSyncScheduledAt, + lastIndexedDocumentCount, + lastSeen, + lastSyncError, + lastSyncScheduledAt, + lastSyncStatus, + lastSynced + ); + } + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorSyncStatus.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorSyncStatus.java new file mode 100644 index 0000000000000..30fca79f78876 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorSyncStatus.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector; + +import java.util.Locale; + +/** + * Enum representing the synchronization status of a Connector: + *
<ul>
+ *     <li><b>CANCELING</b>: The synchronization process is being canceled.</li>
+ *     <li><b>CANCELED</b>: The synchronization process has been canceled before completion.</li>
+ *     <li><b>COMPLETED</b>: The synchronization process has completed successfully.</li>
+ *     <li><b>ERROR</b>: The synchronization process encountered an error and may not have completed successfully.</li>
+ *     <li><b>IN_PROGRESS</b>: The synchronization process is currently in progress.</li>
+ *     <li><b>PENDING</b>: The synchronization process is scheduled and waiting to start.</li>
+ *     <li><b>SUSPENDED</b>: The synchronization process has been suspended.</li>
+ * </ul>
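+ * <p>
+ * For example, {@code ConnectorSyncStatus.fromString("in_progress")} resolves to {@code IN_PROGRESS}; both lookup methods below are case-insensitive.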
    + */ +public enum ConnectorSyncStatus { + CANCELING, + CANCELED, + COMPLETED, + ERROR, + IN_PROGRESS, + PENDING, + SUSPENDED; + + public static ConnectorSyncStatus fromString(String syncStatusString) { + for (ConnectorSyncStatus syncStatus : ConnectorSyncStatus.values()) { + if (syncStatus.toString().equalsIgnoreCase(syncStatusString)) { + return syncStatus; + } + } + + throw new IllegalArgumentException("Unknown sync status '" + syncStatusString + "'."); + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + + public static ConnectorSyncStatus connectorSyncStatus(String status) { + for (ConnectorSyncStatus connectorSyncStatus : ConnectorSyncStatus.values()) { + if (connectorSyncStatus.name().equalsIgnoreCase(status)) { + return connectorSyncStatus; + } + } + throw new IllegalArgumentException("Unknown ConnectorSyncStatus: " + status); + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java index 29c17fedaced0..642295061d17a 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java @@ -36,14 +36,14 @@ public class ConnectorTemplateRegistry extends IndexTemplateRegistry { public static final NodeFeature CONNECTOR_TEMPLATES_FEATURE = new NodeFeature("elastic-connectors.templates"); // This number must be incremented when we make changes to built-in templates. - static final int REGISTRY_VERSION = 1; + static final int REGISTRY_VERSION = 3; // Connector indices constants - public static final String CONNECTOR_INDEX_NAME_PATTERN = ".elastic-connectors-v" + REGISTRY_VERSION; + public static final String CONNECTOR_INDEX_NAME_PATTERN = ".elastic-connectors-v1"; public static final String CONNECTOR_TEMPLATE_NAME = "elastic-connectors"; - public static final String CONNECTOR_SYNC_JOBS_INDEX_NAME_PATTERN = ".elastic-connectors-sync-jobs-v" + REGISTRY_VERSION; + public static final String CONNECTOR_SYNC_JOBS_INDEX_NAME_PATTERN = ".elastic-connectors-sync-jobs-v1"; public static final String CONNECTOR_SYNC_JOBS_TEMPLATE_NAME = "elastic-connectors-sync-jobs"; public static final String ACCESS_CONTROL_INDEX_NAME_PATTERN = ".search-acl-filter-*"; diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/DeleteConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/DeleteConnectorAction.java new file mode 100644 index 0000000000000..fab57921772d9 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/DeleteConnectorAction.java @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.action; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + +public class DeleteConnectorAction extends ActionType { + + public static final DeleteConnectorAction INSTANCE = new DeleteConnectorAction(); + public static final String NAME = "cluster:admin/xpack/connector/delete"; + + private DeleteConnectorAction() { + super(NAME, AcknowledgedResponse::readFrom); + } + + public static class Request extends ActionRequest implements ToXContentObject { + + private final String connectorId; + + private static final ParseField CONNECTOR_ID_FIELD = new ParseField("connector_id"); + + public Request(StreamInput in) throws IOException { + super(in); + this.connectorId = in.readString(); + } + + public Request(String connectorId) { + this.connectorId = connectorId; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + + if (Strings.isNullOrEmpty(connectorId)) { + validationException = addValidationError("connector_id missing", validationException); + } + + return validationException; + } + + public String getConnectorId() { + return connectorId; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(connectorId); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Objects.equals(connectorId, request.connectorId); + } + + @Override + public int hashCode() { + return Objects.hash(connectorId); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(CONNECTOR_ID_FIELD.getPreferredName(), connectorId); + builder.endObject(); + return builder; + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "delete_connector_request", + false, + (p) -> new Request((String) p[0]) + ); + static { + PARSER.declareString(constructorArg(), CONNECTOR_ID_FIELD); + } + + public static DeleteConnectorAction.Request parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/GetConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/GetConnectorAction.java new file mode 100644 index 0000000000000..61d5947489322 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/GetConnectorAction.java @@ -0,0 +1,151 @@ 
+/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector.action; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.application.connector.Connector; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + +public class GetConnectorAction extends ActionType { + + public static final GetConnectorAction INSTANCE = new GetConnectorAction(); + public static final String NAME = "cluster:admin/xpack/connector/get"; + + private GetConnectorAction() { + super(NAME, GetConnectorAction.Response::new); + } + + public static class Request extends ActionRequest implements ToXContentObject { + + private final String connectorId; + + private static final ParseField CONNECTOR_ID_FIELD = new ParseField("connector_id"); + + public Request(String connectorId) { + this.connectorId = connectorId; + } + + public Request(StreamInput in) throws IOException { + super(in); + this.connectorId = in.readString(); + } + + public String getConnectorId() { + return connectorId; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + + if (Strings.isNullOrEmpty(connectorId)) { + validationException = addValidationError("connector_id missing", validationException); + } + + return validationException; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(connectorId); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Objects.equals(connectorId, request.connectorId); + } + + @Override + public int hashCode() { + return Objects.hash(connectorId); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(CONNECTOR_ID_FIELD.getPreferredName(), connectorId); + builder.endObject(); + return builder; + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "get_connector_request", + false, + (p) -> new Request((String) p[0]) + + ); + static { + PARSER.declareString(constructorArg(), CONNECTOR_ID_FIELD); + } + + public static Request parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private final Connector connector; + + public Response(Connector connector) { + this.connector = connector; + 
} + + public Response(StreamInput in) throws IOException { + super(in); + this.connector = new Connector(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + connector.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return connector.toXContent(builder, params); + } + + public static GetConnectorAction.Response fromXContent(XContentParser parser) throws IOException { + return new GetConnectorAction.Response(Connector.fromXContent(parser)); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return Objects.equals(connector, response.connector); + } + + @Override + public int hashCode() { + return Objects.hash(connector); + } + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/ListConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/ListConnectorAction.java new file mode 100644 index 0000000000000..70cee8b064c71 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/ListConnectorAction.java @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector.action; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.application.connector.Connector; +import org.elasticsearch.xpack.core.action.util.PageParams; +import org.elasticsearch.xpack.core.action.util.QueryPage; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + +public class ListConnectorAction extends ActionType { + + public static final ListConnectorAction INSTANCE = new ListConnectorAction(); + public static final String NAME = "cluster:admin/xpack/connector/list"; + + public ListConnectorAction() { + super(NAME, ListConnectorAction.Response::new); + } + + public static class Request extends ActionRequest implements ToXContentObject { + + private final PageParams pageParams; + + private static final ParseField PAGE_PARAMS_FIELD = new ParseField("pageParams"); + + public Request(StreamInput in) throws IOException { + super(in); + this.pageParams = new PageParams(in); + } + + public Request(PageParams pageParams) { + this.pageParams = pageParams; + } + + public PageParams getPageParams() { + return pageParams; + } + + @Override + public ActionRequestValidationException validate() { + // Pagination validation is done as part of PageParams constructor + return null; + } + + @Override + 
public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + pageParams.writeTo(out); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ListConnectorAction.Request that = (ListConnectorAction.Request) o; + return Objects.equals(pageParams, that.pageParams); + } + + @Override + public int hashCode() { + return Objects.hash(pageParams); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "list_connector_request", + p -> new ListConnectorAction.Request((PageParams) p[0]) + ); + + static { + PARSER.declareObject(constructorArg(), (p, c) -> PageParams.fromXContent(p), PAGE_PARAMS_FIELD); + } + + public static ListConnectorAction.Request parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(PAGE_PARAMS_FIELD.getPreferredName(), pageParams); + builder.endObject(); + return builder; + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + public static final ParseField RESULT_FIELD = new ParseField("results"); + + final QueryPage queryPage; + + public Response(StreamInput in) throws IOException { + super(in); + this.queryPage = new QueryPage<>(in, Connector::new); + } + + public Response(List items, Long totalResults) { + this.queryPage = new QueryPage<>(items, totalResults, RESULT_FIELD); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + queryPage.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return queryPage.toXContent(builder, params); + } + + public QueryPage queryPage() { + return queryPage; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ListConnectorAction.Response that = (ListConnectorAction.Response) o; + return queryPage.equals(that.queryPage); + } + + @Override + public int hashCode() { + return queryPage.hashCode(); + } + } + +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PutConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PutConnectorAction.java new file mode 100644 index 0000000000000..6abb5ef548be5 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PutConnectorAction.java @@ -0,0 +1,273 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public class PutConnectorAction extends ActionType { + + public static final PutConnectorAction INSTANCE = new PutConnectorAction(); + public static final String NAME = "cluster:admin/xpack/connector/put"; + + public PutConnectorAction() { + super(NAME, PutConnectorAction.Response::new); + } + + public static class Request extends ActionRequest implements ToXContentObject { + + private final String connectorId; + + @Nullable + private final String description; + @Nullable + private final String indexName; + @Nullable + private final Boolean isNative; + @Nullable + private final String language; + @Nullable + private final String name; + @Nullable + private final String serviceType; + + public Request( + String connectorId, + String description, + String indexName, + Boolean isNative, + String language, + String name, + String serviceType + ) { + this.connectorId = connectorId; + this.description = description; + this.indexName = indexName; + this.isNative = isNative; + this.language = language; + this.name = name; + this.serviceType = serviceType; + } + + public Request(StreamInput in) throws IOException { + super(in); + this.connectorId = in.readString(); + this.description = in.readOptionalString(); + this.indexName = in.readOptionalString(); + this.isNative = in.readOptionalBoolean(); + this.language = in.readOptionalString(); + this.name = in.readOptionalString(); + this.serviceType = in.readOptionalString(); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "connector_put_request", + false, + ((args, connectorId) -> new Request( + connectorId, + (String) args[0], + (String) args[1], + (Boolean) args[2], + (String) args[3], + (String) args[4], + (String) args[5] + )) + ); + + static { + PARSER.declareString(optionalConstructorArg(), new ParseField("description")); + PARSER.declareString(optionalConstructorArg(), new ParseField("index_name")); + PARSER.declareBoolean(optionalConstructorArg(), new ParseField("is_native")); + PARSER.declareString(optionalConstructorArg(), new ParseField("language")); + PARSER.declareString(optionalConstructorArg(), new ParseField("name")); + PARSER.declareString(optionalConstructorArg(), 
new ParseField("service_type")); + } + + public static Request fromXContentBytes(String connectorId, BytesReference source, XContentType xContentType) { + try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { + return Request.fromXContent(parser, connectorId); + } catch (IOException e) { + throw new ElasticsearchParseException("Failed to parse: " + source.utf8ToString(), e); + } + } + + public static Request fromXContent(XContentParser parser, String connectorId) throws IOException { + return PARSER.parse(parser, connectorId); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + if (description != null) { + builder.field("description", description); + } + if (indexName != null) { + builder.field("index_name", indexName); + } + if (isNative != null) { + builder.field("is_native", isNative); + } + if (language != null) { + builder.field("language", language); + } + if (name != null) { + builder.field("name", name); + } + if (serviceType != null) { + builder.field("service_type", serviceType); + } + } + builder.endObject(); + return builder; + } + + @Override + public ActionRequestValidationException validate() { + + ActionRequestValidationException validationException = null; + + if (Strings.isNullOrEmpty(getConnectorId())) { + validationException = addValidationError("connector_id cannot be null or empty", validationException); + } + + return validationException; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(connectorId); + out.writeOptionalString(description); + out.writeOptionalString(indexName); + out.writeOptionalBoolean(isNative); + out.writeOptionalString(language); + out.writeOptionalString(name); + out.writeOptionalString(serviceType); + } + + public String getConnectorId() { + return connectorId; + } + + public String getDescription() { + return description; + } + + public String getIndexName() { + return indexName; + } + + public Boolean getIsNative() { + return isNative; + } + + public String getLanguage() { + return language; + } + + public String getName() { + return name; + } + + public String getServiceType() { + return serviceType; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Objects.equals(connectorId, request.connectorId) + && Objects.equals(description, request.description) + && Objects.equals(indexName, request.indexName) + && Objects.equals(isNative, request.isNative) + && Objects.equals(language, request.language) + && Objects.equals(name, request.name) + && Objects.equals(serviceType, request.serviceType); + } + + @Override + public int hashCode() { + return Objects.hash(connectorId, description, indexName, isNative, language, name, serviceType); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + final DocWriteResponse.Result result; + + public Response(StreamInput in) throws IOException { + super(in); + result = DocWriteResponse.Result.readFrom(in); + } + + public Response(DocWriteResponse.Result result) { + this.result = result; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + this.result.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + 
builder.startObject(); + builder.field("result", this.result.getLowercase()); + builder.endObject(); + return builder; + } + + public RestStatus status() { + return switch (result) { + case CREATED -> RestStatus.CREATED; + case NOT_FOUND -> RestStatus.NOT_FOUND; + default -> RestStatus.OK; + }; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return result == response.result; + } + + @Override + public int hashCode() { + return Objects.hash(result); + } + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestDeleteConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestDeleteConnectorAction.java new file mode 100644 index 0000000000000..02153710a99a0 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestDeleteConnectorAction.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector.action; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.application.EnterpriseSearch; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.DELETE; + +public class RestDeleteConnectorAction extends BaseRestHandler { + + @Override + public String getName() { + return "connector_delete_action"; + } + + @Override + public List routes() { + return List.of(new Route(DELETE, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{connector_id}")); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + DeleteConnectorAction.Request request = new DeleteConnectorAction.Request(restRequest.param("connector_id")); + return channel -> client.execute(DeleteConnectorAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestGetConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestGetConnectorAction.java new file mode 100644 index 0000000000000..50691bf4d5ea8 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestGetConnectorAction.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.action; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.application.EnterpriseSearch; + +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +public class RestGetConnectorAction extends BaseRestHandler { + + @Override + public String getName() { + return "connector_get_action"; + } + + @Override + public List routes() { + return List.of(new Route(GET, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{connector_id}")); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + GetConnectorAction.Request request = new GetConnectorAction.Request(restRequest.param("connector_id")); + return channel -> client.execute(GetConnectorAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestListConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestListConnectorAction.java new file mode 100644 index 0000000000000..59d984438ebf6 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestListConnectorAction.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.action; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.application.EnterpriseSearch; +import org.elasticsearch.xpack.core.action.util.PageParams; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +public class RestListConnectorAction extends BaseRestHandler { + + @Override + public String getName() { + return "connector_list_action"; + } + + @Override + public List routes() { + return List.of(new Route(GET, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT)); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + int from = restRequest.paramAsInt("from", PageParams.DEFAULT_FROM); + int size = restRequest.paramAsInt("size", PageParams.DEFAULT_SIZE); + ListConnectorAction.Request request = new ListConnectorAction.Request(new PageParams(from, size)); + + return channel -> client.execute(ListConnectorAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestPutConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestPutConnectorAction.java new file mode 100644 index 0000000000000..e87719943fc29 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestPutConnectorAction.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.action; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.application.EnterpriseSearch; + +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.PUT; + +public class RestPutConnectorAction extends BaseRestHandler { + + @Override + public String getName() { + return "connector_put_action"; + } + + @Override + public List routes() { + return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{connector_id}")); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + PutConnectorAction.Request request = PutConnectorAction.Request.fromXContentBytes( + restRequest.param("connector_id"), + restRequest.content(), + restRequest.getXContentType() + ); + return channel -> client.execute( + PutConnectorAction.INSTANCE, + request, + new RestToXContentListener<>(channel, PutConnectorAction.Response::status, r -> null) + ); + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/TransportDeleteConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/TransportDeleteConnectorAction.java new file mode 100644 index 0000000000000..f83f340ec1ae7 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/TransportDeleteConnectorAction.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.application.connector.action;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.client.internal.Client;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.application.connector.ConnectorIndexService;
+
+public class TransportDeleteConnectorAction extends HandledTransportAction<DeleteConnectorAction.Request, AcknowledgedResponse> {
+
+    protected final ConnectorIndexService connectorIndexService;
+
+    @Inject
+    public TransportDeleteConnectorAction(
+        TransportService transportService,
+        ClusterService clusterService,
+        ActionFilters actionFilters,
+        Client client
+    ) {
+        super(
+            DeleteConnectorAction.NAME,
+            transportService,
+            actionFilters,
+            DeleteConnectorAction.Request::new,
+            EsExecutors.DIRECT_EXECUTOR_SERVICE
+        );
+        this.connectorIndexService = new ConnectorIndexService(client);
+    }
+
+    @Override
+    protected void doExecute(Task task, DeleteConnectorAction.Request request, ActionListener<AcknowledgedResponse> listener) {
+        String connectorId = request.getConnectorId();
+        connectorIndexService.deleteConnector(connectorId, listener.map(v -> AcknowledgedResponse.TRUE));
+    }
+}
diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/TransportGetConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/TransportGetConnectorAction.java
new file mode 100644
index 0000000000000..44359ac55d5d0
--- /dev/null
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/TransportGetConnectorAction.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.application.connector.action;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.client.internal.Client;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.application.connector.ConnectorIndexService;
+
+public class TransportGetConnectorAction extends HandledTransportAction<GetConnectorAction.Request, GetConnectorAction.Response> {
+    protected final ConnectorIndexService connectorIndexService;
+
+    @Inject
+    public TransportGetConnectorAction(
+        TransportService transportService,
+        ClusterService clusterService,
+        ActionFilters actionFilters,
+        Client client
+    ) {
+        super(
+            GetConnectorAction.NAME,
+            transportService,
+            actionFilters,
+            GetConnectorAction.Request::new,
+            EsExecutors.DIRECT_EXECUTOR_SERVICE
+        );
+        this.connectorIndexService = new ConnectorIndexService(client);
+    }
+
+    @Override
+    protected void doExecute(Task task, GetConnectorAction.Request request, ActionListener<GetConnectorAction.Response> listener) {
+        connectorIndexService.getConnector(request.getConnectorId(), listener.map(GetConnectorAction.Response::new));
+    }
+}
diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/TransportListConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/TransportListConnectorAction.java
new file mode 100644
index 0000000000000..cfe05965da37b
--- /dev/null
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/TransportListConnectorAction.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.application.connector.action;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.client.internal.Client;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.application.connector.ConnectorIndexService;
+import org.elasticsearch.xpack.core.action.util.PageParams;
+
+public class TransportListConnectorAction extends HandledTransportAction<ListConnectorAction.Request, ListConnectorAction.Response> {
+    protected final ConnectorIndexService connectorIndexService;
+
+    @Inject
+    public TransportListConnectorAction(
+        TransportService transportService,
+        ClusterService clusterService,
+        ActionFilters actionFilters,
+        Client client
+    ) {
+        super(
+            ListConnectorAction.NAME,
+            transportService,
+            actionFilters,
+            ListConnectorAction.Request::new,
+            EsExecutors.DIRECT_EXECUTOR_SERVICE
+        );
+        this.connectorIndexService = new ConnectorIndexService(client);
+    }
+
+    @Override
+    protected void doExecute(Task task, ListConnectorAction.Request request, ActionListener<ListConnectorAction.Response> listener) {
+        final PageParams pageParams = request.getPageParams();
+        connectorIndexService.listConnectors(
+            pageParams.getFrom(),
+            pageParams.getSize(),
+            listener.map(r -> new ListConnectorAction.Response(r.connectors(), r.totalResults()))
+        );
+    }
+
+}
diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/TransportPutConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/TransportPutConnectorAction.java
new file mode 100644
index 0000000000000..013a8f4a8334d
--- /dev/null
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/TransportPutConnectorAction.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.application.connector.action;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.client.internal.Client;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.application.connector.Connector;
+import org.elasticsearch.xpack.application.connector.ConnectorIndexService;
+
+import java.util.Objects;
+
+public class TransportPutConnectorAction extends HandledTransportAction<PutConnectorAction.Request, PutConnectorAction.Response> {
+
+    protected final ConnectorIndexService connectorIndexService;
+
+    @Inject
+    public TransportPutConnectorAction(
+        TransportService transportService,
+        ClusterService clusterService,
+        ActionFilters actionFilters,
+        Client client
+    ) {
+        super(
+            PutConnectorAction.NAME,
+            transportService,
+            actionFilters,
+            PutConnectorAction.Request::new,
+            EsExecutors.DIRECT_EXECUTOR_SERVICE
+        );
+        this.connectorIndexService = new ConnectorIndexService(client);
+    }
+
+    @Override
+    protected void doExecute(Task task, PutConnectorAction.Request request, ActionListener<PutConnectorAction.Response> listener) {
+
+        Boolean isNative = Objects.requireNonNullElse(request.getIsNative(), false);
+
+        Connector connector = new Connector.Builder().setConnectorId(request.getConnectorId())
+            .setDescription(request.getDescription())
+            .setIndexName(request.getIndexName())
+            .setIsNative(isNative)
+            .setLanguage(request.getLanguage())
+            .setName(request.getName())
+            .setServiceType(request.getServiceType())
+            .build();
+
+        connectorIndexService.putConnector(connector, listener.map(r -> new PutConnectorAction.Response(r.getResult())));
+    }
+}
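The four transport actions above share one shape: unwrap the request, delegate to `ConnectorIndexService`, and adapt the service result into the action response via `ActionListener.map`. A minimal sketch of the calling side, assuming the usual `ActionType` singleton (`DeleteConnectorAction.INSTANCE`) and a single-argument `Request` constructor, neither of which appears in this diff:

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.internal.Client;

public final class DeleteConnectorCaller {
    // Hypothetical caller; INSTANCE and the Request constructor are assumptions, not shown in the diff.
    static void deleteConnector(Client client, String connectorId) {
        client.execute(
            DeleteConnectorAction.INSTANCE,                 // assumed ActionType<AcknowledgedResponse> singleton
            new DeleteConnectorAction.Request(connectorId), // assumed single-arg constructor
            ActionListener.wrap(
                (AcknowledgedResponse r) -> System.out.println("acknowledged: " + r.isAcknowledged()),
                e -> System.err.println("delete failed: " + e.getMessage())
            )
        );
    }
}
```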
diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringAdvancedSnippet.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringAdvancedSnippet.java
new file mode 100644
index 0000000000000..ca7d3bfa6d9c8
--- /dev/null
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringAdvancedSnippet.java
@@ -0,0 +1,142 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.application.connector.filtering;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.ObjectParser;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.ToXContentObject;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.time.Instant;
+import java.util.Map;
+import java.util.Objects;
+
+import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
+
+/**
+ * Represents an advanced snippet used in filtering processes, providing detailed criteria or rules.
+ * This class includes timestamps for the creation and last update of the snippet, along with the
+ * actual snippet content represented as a map.
+ */
+public class FilteringAdvancedSnippet implements Writeable, ToXContentObject {
+
+    private final Instant advancedSnippetCreatedAt;
+    private final Instant advancedSnippetUpdatedAt;
+    private final Map<String, Object> advancedSnippetValue;
+
+    /**
+     * @param advancedSnippetCreatedAt The creation timestamp of the advanced snippet.
+     * @param advancedSnippetUpdatedAt The update timestamp of the advanced snippet.
+     * @param advancedSnippetValue The value of the advanced snippet.
+     */
+    private FilteringAdvancedSnippet(
+        Instant advancedSnippetCreatedAt,
+        Instant advancedSnippetUpdatedAt,
+        Map<String, Object> advancedSnippetValue
+    ) {
+        this.advancedSnippetCreatedAt = advancedSnippetCreatedAt;
+        this.advancedSnippetUpdatedAt = advancedSnippetUpdatedAt;
+        this.advancedSnippetValue = advancedSnippetValue;
+    }
+
+    public FilteringAdvancedSnippet(StreamInput in) throws IOException {
+        this.advancedSnippetCreatedAt = in.readInstant();
+        this.advancedSnippetUpdatedAt = in.readInstant();
+        this.advancedSnippetValue = in.readMap(StreamInput::readString, StreamInput::readGenericValue);
+    }
+
+    private static final ParseField CREATED_AT_FIELD = new ParseField("created_at");
+    private static final ParseField UPDATED_AT_FIELD = new ParseField("updated_at");
+    private static final ParseField VALUE_FIELD = new ParseField("value");
+
+    @SuppressWarnings("unchecked")
+    private static final ConstructingObjectParser<FilteringAdvancedSnippet, Void> PARSER = new ConstructingObjectParser<>(
+        "connector_filtering_advanced_snippet",
+        true,
+        args -> new Builder().setAdvancedSnippetCreatedAt((Instant) args[0])
+            .setAdvancedSnippetUpdatedAt((Instant) args[1])
+            .setAdvancedSnippetValue((Map<String, Object>) args[2])
+            .build()
+    );
+
+    static {
+        PARSER.declareField(constructorArg(), (p, c) -> Instant.parse(p.text()), CREATED_AT_FIELD, ObjectParser.ValueType.STRING);
+        PARSER.declareField(constructorArg(), (p, c) -> Instant.parse(p.text()), UPDATED_AT_FIELD, ObjectParser.ValueType.STRING);
+        PARSER.declareField(constructorArg(), (p, c) -> p.map(), VALUE_FIELD, ObjectParser.ValueType.OBJECT);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        {
+            builder.field(CREATED_AT_FIELD.getPreferredName(), advancedSnippetCreatedAt);
+            builder.field(UPDATED_AT_FIELD.getPreferredName(), advancedSnippetUpdatedAt);
+            builder.field(VALUE_FIELD.getPreferredName(), advancedSnippetValue);
+        }
+        builder.endObject();
+        return builder;
+    }
+
+    public static FilteringAdvancedSnippet fromXContent(XContentParser parser) throws IOException {
+        return PARSER.parse(parser, null);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeInstant(advancedSnippetCreatedAt);
+        out.writeInstant(advancedSnippetUpdatedAt);
+        out.writeMap(advancedSnippetValue, StreamOutput::writeString, StreamOutput::writeGenericValue);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        FilteringAdvancedSnippet that = (FilteringAdvancedSnippet) o;
+        return Objects.equals(advancedSnippetCreatedAt, that.advancedSnippetCreatedAt)
+            && Objects.equals(advancedSnippetUpdatedAt, that.advancedSnippetUpdatedAt)
+            && Objects.equals(advancedSnippetValue, that.advancedSnippetValue);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(advancedSnippetCreatedAt, advancedSnippetUpdatedAt, advancedSnippetValue);
+    }
+
+    public static class Builder {
+
+        private Instant advancedSnippetCreatedAt;
+        private Instant advancedSnippetUpdatedAt;
+        private Map<String, Object> advancedSnippetValue;
+
+        public Builder setAdvancedSnippetCreatedAt(Instant advancedSnippetCreatedAt) {
+            this.advancedSnippetCreatedAt = advancedSnippetCreatedAt;
+            return this;
+        }
+
+        public Builder setAdvancedSnippetUpdatedAt(Instant advancedSnippetUpdatedAt) {
+            this.advancedSnippetUpdatedAt = advancedSnippetUpdatedAt;
+            return this;
+        }
+
+        public Builder setAdvancedSnippetValue(Map<String, Object> advancedSnippetValue) {
+            this.advancedSnippetValue = advancedSnippetValue;
+            return this;
+        }
+
+        public FilteringAdvancedSnippet build() {
+            return new FilteringAdvancedSnippet(advancedSnippetCreatedAt, advancedSnippetUpdatedAt, advancedSnippetValue);
+        }
+    }
+}
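`FilteringAdvancedSnippet` is a plain value holder: two `Instant` timestamps plus an arbitrary map, with the `Builder` as the only public construction path. A small usage sketch; the field values and wrapper class name are illustrative:

```java
import java.time.Instant;
import java.util.Map;

public final class AdvancedSnippetExample {
    static FilteringAdvancedSnippet example() {
        Instant now = Instant.parse("2023-11-09T00:00:00Z");      // illustrative timestamp
        Map<String, Object> value = Map.of("max_crawl_depth", 2); // illustrative snippet content
        // toXContent renders this as {"created_at": ..., "updated_at": ..., "value": {...}}
        return new FilteringAdvancedSnippet.Builder()
            .setAdvancedSnippetCreatedAt(now)
            .setAdvancedSnippetUpdatedAt(now)
            .setAdvancedSnippetValue(value)
            .build();
    }
}
```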
diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringPolicy.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringPolicy.java
new file mode 100644
index 0000000000000..48170cfc8fae4
--- /dev/null
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringPolicy.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.application.connector.filtering;
+
+import java.util.Locale;
+
+public enum FilteringPolicy {
+    EXCLUDE,
+    INCLUDE;
+
+    @Override
+    public String toString() {
+        return name().toLowerCase(Locale.ROOT);
+    }
+
+    public static FilteringPolicy filteringPolicy(String policy) {
+        for (FilteringPolicy filteringPolicy : FilteringPolicy.values()) {
+            if (filteringPolicy.name().equalsIgnoreCase(policy)) {
+                return filteringPolicy;
+            }
+        }
+        throw new IllegalArgumentException("Unknown FilteringPolicy: " + policy);
+    }
+}
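`FilteringPolicy` parses leniently by enum name and serializes in lower case, a convention the other enums in this package follow as well. A sketch of the round-trip, using only the methods defined above (the wrapper class name is illustrative):

```java
public final class FilteringPolicyExample {
    static void roundTrip() {
        FilteringPolicy policy = FilteringPolicy.filteringPolicy("Include"); // case-insensitive lookup
        assert policy == FilteringPolicy.INCLUDE;
        assert "include".equals(policy.toString()); // lower-case wire/XContent form
        // FilteringPolicy.filteringPolicy("unknown") would throw IllegalArgumentException
    }
}
```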
diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringRule.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringRule.java
new file mode 100644
index 0000000000000..cfcc639b8b613
--- /dev/null
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringRule.java
@@ -0,0 +1,237 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.application.connector.filtering;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.ObjectParser;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.ToXContentObject;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.time.Instant;
+import java.util.Objects;
+
+import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
+
+/**
+ * Represents a single rule used for filtering in a data processing or querying context.
+ * Each {@link FilteringRule} includes details such as its creation and update timestamps,
+ * the specific field it applies to, an identifier, and its order in a set of rules.
+ * Additionally, it encapsulates the filtering policy, the condition under which the rule applies,
+ * and the value associated with the rule.
+ */
+public class FilteringRule implements Writeable, ToXContentObject {
+
+    private final Instant createdAt;
+    private final String field;
+    private final String id;
+    private final Integer order;
+    private final FilteringPolicy policy;
+    private final FilteringRuleCondition rule;
+    private final Instant updatedAt;
+    private final String value;
+
+    /**
+     * Constructs a new FilteringRule instance.
+     *
+     * @param createdAt The creation timestamp of the filtering rule.
+     * @param field The field associated with the filtering rule.
+     * @param id The identifier of the filtering rule.
+     * @param order The order of the filtering rule.
+     * @param policy The {@link FilteringPolicy} of the filtering rule.
+     * @param rule The specific {@link FilteringRuleCondition}
+     * @param updatedAt The update timestamp of the filtering rule.
+     * @param value The value associated with the filtering rule.
+     */
+    public FilteringRule(
+        Instant createdAt,
+        String field,
+        String id,
+        Integer order,
+        FilteringPolicy policy,
+        FilteringRuleCondition rule,
+        Instant updatedAt,
+        String value
+    ) {
+        this.createdAt = createdAt;
+        this.field = field;
+        this.id = id;
+        this.order = order;
+        this.policy = policy;
+        this.rule = rule;
+        this.updatedAt = updatedAt;
+        this.value = value;
+    }
+
+    public FilteringRule(StreamInput in) throws IOException {
+        this.createdAt = in.readInstant();
+        this.field = in.readString();
+        this.id = in.readString();
+        this.order = in.readInt();
+        this.policy = in.readEnum(FilteringPolicy.class);
+        this.rule = in.readEnum(FilteringRuleCondition.class);
+        this.updatedAt = in.readInstant();
+        this.value = in.readString();
+    }
+
+    private static final ParseField CREATED_AT_FIELD = new ParseField("created_at");
+    private static final ParseField FIELD_FIELD = new ParseField("field");
+    private static final ParseField ID_FIELD = new ParseField("id");
+    private static final ParseField ORDER_FIELD = new ParseField("order");
+    private static final ParseField POLICY_FIELD = new ParseField("policy");
+    private static final ParseField RULE_FIELD = new ParseField("rule");
+    private static final ParseField UPDATED_AT_FIELD = new ParseField("updated_at");
+    private static final ParseField VALUE_FIELD = new ParseField("value");
+
+    private static final ConstructingObjectParser<FilteringRule, Void> PARSER = new ConstructingObjectParser<>(
+        "connector_filtering_rule",
+        true,
+        args -> new Builder().setCreatedAt((Instant) args[0])
+            .setField((String) args[1])
+            .setId((String) args[2])
+            .setOrder((Integer) args[3])
+            .setPolicy((FilteringPolicy) args[4])
+            .setRule((FilteringRuleCondition) args[5])
+            .setUpdatedAt((Instant) args[6])
+            .setValue((String) args[7])
+            .build()
+    );
+
+    static {
+        PARSER.declareField(constructorArg(), (p, c) -> Instant.parse(p.text()), CREATED_AT_FIELD, ObjectParser.ValueType.STRING);
+        PARSER.declareString(constructorArg(), FIELD_FIELD);
+        PARSER.declareString(constructorArg(), ID_FIELD);
+        PARSER.declareInt(constructorArg(), ORDER_FIELD);
+        PARSER.declareField(
+            constructorArg(),
+            (p, c) -> FilteringPolicy.filteringPolicy(p.text()),
+            POLICY_FIELD,
+            ObjectParser.ValueType.STRING
+        );
+        PARSER.declareField(
+            constructorArg(),
+            (p, c) -> FilteringRuleCondition.filteringRuleCondition(p.text()),
+            RULE_FIELD,
+            ObjectParser.ValueType.STRING
+        );
+        PARSER.declareField(constructorArg(), (p, c) -> Instant.parse(p.text()), UPDATED_AT_FIELD, ObjectParser.ValueType.STRING);
+        PARSER.declareString(constructorArg(), VALUE_FIELD);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field(CREATED_AT_FIELD.getPreferredName(), createdAt);
+        builder.field(FIELD_FIELD.getPreferredName(), field);
+        builder.field(ID_FIELD.getPreferredName(), id);
+        builder.field(ORDER_FIELD.getPreferredName(), order);
+        builder.field(POLICY_FIELD.getPreferredName(), policy.toString());
+        builder.field(RULE_FIELD.getPreferredName(), rule.toString());
+        builder.field(UPDATED_AT_FIELD.getPreferredName(), updatedAt);
+        builder.field(VALUE_FIELD.getPreferredName(), value);
+        builder.endObject();
+        return builder;
+    }
+
+    public static FilteringRule fromXContent(XContentParser parser) throws IOException {
+        return PARSER.parse(parser, null);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeInstant(createdAt);
+        out.writeString(field);
+        out.writeString(id);
+        out.writeInt(order);
+        out.writeEnum(policy);
+        out.writeEnum(rule);
+        out.writeInstant(updatedAt);
+        out.writeString(value);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        FilteringRule that = (FilteringRule) o;
+        return Objects.equals(createdAt, that.createdAt)
+            && Objects.equals(field, that.field)
+            && Objects.equals(id, that.id)
+            && Objects.equals(order, that.order)
+            && policy == that.policy
+            && rule == that.rule
+            && Objects.equals(updatedAt, that.updatedAt)
+            && Objects.equals(value, that.value);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(createdAt, field, id, order, policy, rule, updatedAt, value);
+    }
+
+    public static class Builder {
+
+        private Instant createdAt;
+        private String field;
+        private String id;
+        private Integer order;
+        private FilteringPolicy policy;
+        private FilteringRuleCondition rule;
+        private Instant updatedAt;
+        private String value;
+
+        public Builder setCreatedAt(Instant createdAt) {
+            this.createdAt = createdAt;
+            return this;
+        }
+
+        public Builder setField(String field) {
+            this.field = field;
+            return this;
+        }
+
+        public Builder setId(String id) {
+            this.id = id;
+            return this;
+        }
+
+        public Builder setOrder(Integer order) {
+            this.order = order;
+            return this;
+        }
+
+        public Builder setPolicy(FilteringPolicy policy) {
+            this.policy = policy;
+            return this;
+        }
+
+        public Builder setRule(FilteringRuleCondition rule) {
+            this.rule = rule;
+            return this;
+        }
+
+        public Builder setUpdatedAt(Instant updatedAt) {
+            this.updatedAt = updatedAt;
+            return this;
+        }
+
+        public Builder setValue(String value) {
+            this.value = value;
+            return this;
+        }
+
+        public FilteringRule build() {
+            return new FilteringRule(createdAt, field, id, order, policy, rule, updatedAt, value);
+        }
+    }
+}
diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringRuleCondition.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringRuleCondition.java
new file mode 100644
index 0000000000000..967107961b0d4
--- /dev/null
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringRuleCondition.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.application.connector.filtering;
+
+public enum FilteringRuleCondition {
+    CONTAINS("contains"),
+    ENDS_WITH("ends_with"),
+    EQUALS("equals"),
+    GT(">"),
+    LT("<"),
+    REGEX("regex"),
+    STARTS_WITH("starts_with");
+
+    private final String value;
+
+    FilteringRuleCondition(String value) {
+        this.value = value;
+    }
+
+    @Override
+    public String toString() {
+        return this.value;
+    }
+
+    public static FilteringRuleCondition filteringRuleCondition(String condition) {
+        for (FilteringRuleCondition filteringRuleCondition : FilteringRuleCondition.values()) {
+            if (filteringRuleCondition.value.equals(condition)) {
+                return filteringRuleCondition;
+            }
+        }
+        throw new IllegalArgumentException("Unknown FilteringRuleCondition: " + condition);
+    }
+}
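One subtlety worth noting: unlike `FilteringPolicy`, `FilteringRuleCondition` resolves by its wire value rather than the enum constant name, so `GT` must be looked up as `">"`. A sketch of assembling a rule with the `Builder` above; all field values and the wrapper class name are illustrative:

```java
import java.time.Instant;

public final class FilteringRuleExample {
    static FilteringRule excludeLargeFiles() {
        Instant now = Instant.parse("2023-11-09T00:00:00Z"); // illustrative
        return new FilteringRule.Builder()
            .setCreatedAt(now)
            .setField("size")                 // illustrative document field
            .setId("exclude-large-files")     // illustrative rule id
            .setOrder(0)
            .setPolicy(FilteringPolicy.EXCLUDE)
            .setRule(FilteringRuleCondition.filteringRuleCondition(">")) // resolves to GT by wire value
            .setUpdatedAt(now)
            .setValue("1048576")
            .build();
    }
}
```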
diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringRules.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringRules.java
new file mode 100644
index 0000000000000..dc96006f40349
--- /dev/null
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringRules.java
@@ -0,0 +1,145 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.application.connector.filtering;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.ToXContentObject;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+
+import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
+
+/**
+ * The {@link FilteringRules} class encapsulates the rules and configurations for filtering operations in a connector.
+ * It includes an advanced snippet for complex filtering logic, a list of individual filtering rules, and validation
+ * information for these rules.
+ */
+public class FilteringRules implements Writeable, ToXContentObject {
+
+    private final FilteringAdvancedSnippet advancedSnippet;
+    private final List<FilteringRule> rules;
+
+    private final FilteringValidationInfo filteringValidationInfo;
+
+    /**
+     * Constructs a new FilteringRules instance.
+     *
+     * @param advancedSnippet The {@link FilteringAdvancedSnippet} object.
+     * @param rules The list of {@link FilteringRule} objects.
+     * @param filteringValidationInfo The {@link FilteringValidationInfo} object.
+     */
+    public FilteringRules(
+        FilteringAdvancedSnippet advancedSnippet,
+        List<FilteringRule> rules,
+        FilteringValidationInfo filteringValidationInfo
+    ) {
+        this.advancedSnippet = advancedSnippet;
+        this.rules = rules;
+        this.filteringValidationInfo = filteringValidationInfo;
+    }
+
+    public FilteringRules(StreamInput in) throws IOException {
+        this.advancedSnippet = new FilteringAdvancedSnippet(in);
+        this.rules = in.readCollectionAsList(FilteringRule::new);
+        this.filteringValidationInfo = new FilteringValidationInfo(in);
+    }
+
+    private static final ParseField ADVANCED_SNIPPET_FIELD = new ParseField("advanced_snippet");
+    private static final ParseField RULES_FIELD = new ParseField("rules");
+    private static final ParseField VALIDATION_FIELD = new ParseField("validation");
+
+    @SuppressWarnings("unchecked")
+    private static final ConstructingObjectParser<FilteringRules, Void> PARSER = new ConstructingObjectParser<>(
+        "connector_filtering_rules",
+        true,
+        args -> new Builder().setAdvancedSnippet((FilteringAdvancedSnippet) args[0])
+            .setRules((List<FilteringRule>) args[1])
+            .setFilteringValidationInfo((FilteringValidationInfo) args[2])
+            .build()
+    );
+
+    static {
+        PARSER.declareObject(constructorArg(), (p, c) -> FilteringAdvancedSnippet.fromXContent(p), ADVANCED_SNIPPET_FIELD);
+        PARSER.declareObjectArray(constructorArg(), (p, c) -> FilteringRule.fromXContent(p), RULES_FIELD);
+        PARSER.declareObject(constructorArg(), (p, c) -> FilteringValidationInfo.fromXContent(p), VALIDATION_FIELD);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        {
+            builder.field(ADVANCED_SNIPPET_FIELD.getPreferredName(), advancedSnippet);
+            builder.xContentList(RULES_FIELD.getPreferredName(), rules);
+            builder.field(VALIDATION_FIELD.getPreferredName(), filteringValidationInfo);
+        }
+        builder.endObject();
+        return builder;
+    }
+
+    public static FilteringRules fromXContent(XContentParser parser) throws IOException {
+        return PARSER.parse(parser, null);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        advancedSnippet.writeTo(out);
+        out.writeCollection(rules);
+        filteringValidationInfo.writeTo(out);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        FilteringRules that = (FilteringRules) o;
+        return Objects.equals(advancedSnippet, that.advancedSnippet)
+            && Objects.equals(rules, that.rules)
+            && Objects.equals(filteringValidationInfo, that.filteringValidationInfo);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(advancedSnippet, rules, filteringValidationInfo);
+    }
+
+    public static class Builder {
+
+        private FilteringAdvancedSnippet advancedSnippet;
+        private List<FilteringRule> rules;
+        private FilteringValidationInfo filteringValidationInfo;
+
+        public Builder setAdvancedSnippet(FilteringAdvancedSnippet advancedSnippet) {
+            this.advancedSnippet = advancedSnippet;
+            return this;
+        }
+
+        public Builder setRules(List<FilteringRule> rules) {
+            this.rules = rules;
+            return this;
+        }
+
+        public Builder setFilteringValidationInfo(FilteringValidationInfo filteringValidationInfo) {
+            this.filteringValidationInfo = filteringValidationInfo;
+            return this;
+        }
+
+        public FilteringRules build() {
+            return new FilteringRules(advancedSnippet, rules, filteringValidationInfo);
+        }
+    }
+}
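Because all three parts of `FilteringRules` are declared as required constructor arguments, a parseable document must carry `advanced_snippet`, `rules` and `validation`. A hedged round-trip sketch; `JsonXContent` and `XContentParserConfiguration` are standard x-content plumbing rather than part of this diff, and the JSON values are illustrative:

```java
import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xcontent.XContentParserConfiguration;
import org.elasticsearch.xcontent.json.JsonXContent;

public final class FilteringRulesParseExample {
    static FilteringRules parse() throws Exception {
        String json = """
            {
              "advanced_snippet": { "created_at": "2023-11-09T00:00:00Z", "updated_at": "2023-11-09T00:00:00Z", "value": {} },
              "rules": [],
              "validation": { "errors": [], "state": "valid" }
            }""";
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, json)) {
            return FilteringRules.fromXContent(parser);
        }
    }
}
```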
diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringValidation.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringValidation.java
new file mode 100644
index 0000000000000..bb2de688f6705
--- /dev/null
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringValidation.java
@@ -0,0 +1,117 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.application.connector.filtering;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.ToXContentObject;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+
+import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
+
+/**
+ * Represents the details of a validation process, including identifiers and descriptive messages.
+ * This class is used to encapsulate information about specific validation checks, where each validation
+ * is associated with a list of IDs and corresponding messages that detail the validation results.
+ */
+public class FilteringValidation implements Writeable, ToXContentObject {
+    private final List<String> ids;
+    private final List<String> messages;
+
+    /**
+     * Constructs a new FilteringValidation instance.
+     *
+     * @param ids The list of identifiers associated with the validation.
+     * @param messages The list of messages describing the validation results.
+     */
+    public FilteringValidation(List<String> ids, List<String> messages) {
+        this.ids = ids;
+        this.messages = messages;
+    }
+
+    public FilteringValidation(StreamInput in) throws IOException {
+        this.ids = in.readStringCollectionAsList();
+        this.messages = in.readStringCollectionAsList();
+    }
+
+    private static final ParseField IDS_FIELD = new ParseField("ids");
+    private static final ParseField MESSAGES_FIELD = new ParseField("messages");
+
+    @SuppressWarnings("unchecked")
+    private static final ConstructingObjectParser<FilteringValidation, Void> PARSER = new ConstructingObjectParser<>(
+        "connector_filtering_validation",
+        true,
+        args -> new Builder().setIds((List<String>) args[0]).setMessages((List<String>) args[1]).build()
+    );
+
+    static {
+        PARSER.declareStringArray(constructorArg(), IDS_FIELD);
+        PARSER.declareStringArray(constructorArg(), MESSAGES_FIELD);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        {
+            builder.stringListField(IDS_FIELD.getPreferredName(), ids);
+            builder.stringListField(MESSAGES_FIELD.getPreferredName(), messages);
+        }
+        builder.endObject();
+        return builder;
+    }
+
+    public static FilteringValidation fromXContent(XContentParser parser) throws IOException {
+        return PARSER.parse(parser, null);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeStringCollection(ids);
+        out.writeStringCollection(messages);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        FilteringValidation that = (FilteringValidation) o;
+        return Objects.equals(ids, that.ids) && Objects.equals(messages, that.messages);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(ids, messages);
+    }
+
+    public static class Builder {
+
+        private List<String> ids;
+        private List<String> messages;
+
+        public Builder setIds(List<String> ids) {
+            this.ids = ids;
+            return this;
+        }
+
+        public Builder setMessages(List<String> messages) {
+            this.messages = messages;
+            return this;
+        }
+
+        public FilteringValidation build() {
+            return new FilteringValidation(ids, messages);
+        }
+    }
+}
diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringValidationInfo.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringValidationInfo.java
new file mode 100644
index 0000000000000..c0cd80d867592
--- /dev/null
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringValidationInfo.java
@@ -0,0 +1,127 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.application.connector.filtering;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.ObjectParser;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.ToXContentObject;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+
+import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
+
+/**
+ * Encapsulates validation information for filtering rules, including any errors encountered
+ * during validation and the overall state of validation.
+ *
+ * This class holds a list of validation errors, each represented by a {@link FilteringValidation} object,
+ * and the validation state, indicated by a {@link FilteringValidationState}.
+ */
+public class FilteringValidationInfo implements Writeable, ToXContentObject {
+
+    private final List<FilteringValidation> validationErrors;
+    private final FilteringValidationState validationState;
+
+    /**
+     * @param validationErrors The list of {@link FilteringValidation} errors for the filtering rules.
+     * @param validationState The {@link FilteringValidationState} of the filtering rules.
+     */
+    public FilteringValidationInfo(List<FilteringValidation> validationErrors, FilteringValidationState validationState) {
+        this.validationErrors = validationErrors;
+        this.validationState = validationState;
+    }
+
+    public FilteringValidationInfo(StreamInput in) throws IOException {
+        this.validationErrors = in.readCollectionAsList(FilteringValidation::new);
+        this.validationState = in.readEnum(FilteringValidationState.class);
+    }
+
+    private static final ParseField ERRORS_FIELD = new ParseField("errors");
+    private static final ParseField STATE_FIELD = new ParseField("state");
+
+    @SuppressWarnings("unchecked")
+    private static final ConstructingObjectParser<FilteringValidationInfo, Void> PARSER = new ConstructingObjectParser<>(
+        "filtering_validation_info",
+        true,
+        args -> new Builder().setValidationErrors((List<FilteringValidation>) args[0])
+            .setValidationState((FilteringValidationState) args[1])
+            .build()
+    );
+
+    static {
+        PARSER.declareObjectArray(constructorArg(), (p, c) -> FilteringValidation.fromXContent(p), ERRORS_FIELD);
+        PARSER.declareField(
+            constructorArg(),
+            (p, c) -> FilteringValidationState.filteringValidationState(p.text()),
+            STATE_FIELD,
+            ObjectParser.ValueType.STRING
+        );
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        {
+            builder.field(ERRORS_FIELD.getPreferredName(), validationErrors);
+            builder.field(STATE_FIELD.getPreferredName(), validationState);
+        }
+        builder.endObject();
+        return builder;
+    }
+
+    public static FilteringValidationInfo fromXContent(XContentParser parser) throws IOException {
+        return PARSER.parse(parser, null);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeCollection(validationErrors);
+        out.writeEnum(validationState);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        FilteringValidationInfo that = (FilteringValidationInfo) o;
+        return Objects.equals(validationErrors, that.validationErrors) && validationState == that.validationState;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(validationErrors, validationState);
+    }
+
+    public static class Builder {
+
+        private List<FilteringValidation> validationErrors;
+        private FilteringValidationState validationState;
+
+        public Builder setValidationErrors(List<FilteringValidation> validationErrors) {
+            this.validationErrors = validationErrors;
+            return this;
+        }
+
+        public Builder setValidationState(FilteringValidationState validationState) {
+            this.validationState = validationState;
+            return this;
+        }
+
+        public FilteringValidationInfo build() {
+            return new FilteringValidationInfo(validationErrors, validationState);
+        }
+    }
+}
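`FilteringValidationInfo` pairs the error list with an overall state, so an "all clear" instance is simply an empty error list plus `VALID`, mirroring what the parser above produces for `{"errors": [], "state": "valid"}`. A sketch (wrapper class name illustrative):

```java
import java.util.List;

public final class ValidationInfoExample {
    static FilteringValidationInfo allClear() {
        return new FilteringValidationInfo.Builder()
            .setValidationErrors(List.of())                      // no validation errors recorded
            .setValidationState(FilteringValidationState.VALID)  // overall state
            .build();
    }
}
```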
diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringValidationState.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringValidationState.java
new file mode 100644
index 0000000000000..e2d370e3b9ed8
--- /dev/null
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/filtering/FilteringValidationState.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.application.connector.filtering;
+
+import java.util.Locale;
+
+public enum FilteringValidationState {
+    EDITED,
+    INVALID,
+    VALID;
+
+    @Override
+    public String toString() {
+        return name().toLowerCase(Locale.ROOT);
+    }
+
+    public static FilteringValidationState filteringValidationState(String validationState) {
+        for (FilteringValidationState filteringValidationState : FilteringValidationState.values()) {
+            if (filteringValidationState.name().equalsIgnoreCase(validationState)) {
+                return filteringValidationState;
+            }
+        }
+        throw new IllegalArgumentException("Unknown FilteringValidationState: " + validationState);
+    }
+}
diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJob.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJob.java
new file mode 100644
index 0000000000000..6c0e9635d986d
--- /dev/null
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJob.java
@@ -0,0 +1,485 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.application.connector.syncjob;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.core.Nullable;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.ToXContentObject;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xpack.application.connector.Connector;
+import org.elasticsearch.xpack.application.connector.ConnectorSyncStatus;
+
+import java.io.IOException;
+import java.time.Instant;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * Represents a sync job in the Elasticsearch ecosystem. Sync jobs refer to a unit of work, which syncs data from a 3rd party
+ * data source into an Elasticsearch index using the Connectors service. A ConnectorSyncJob always refers
+ * to a corresponding {@link Connector}. Each ConnectorSyncJob instance encapsulates various settings and state information, including:
+ *
+ * <ul>
+ *     <li>A timestamp, when the sync job cancellation was requested.</li>
+ *     <li>A timestamp, when the sync job was cancelled.</li>
+ *     <li>A timestamp, when the sync job was completed.</li>
+ *     <li>A subset of the {@link Connector} fields the sync job is referring to.</li>
+ *     <li>A timestamp, when the sync job was created.</li>
+ *     <li>The number of documents deleted by the sync job.</li>
+ *     <li>An error, which might have appeared during the sync job execution.</li>
+ *     <li>A unique identifier for distinguishing different connectors.</li>
+ *     <li>The number of documents indexed by the sync job.</li>
+ *     <li>The volume of the indexed documents.</li>
+ *     <li>The {@link ConnectorSyncJobType} of the sync job.</li>
+ *     <li>A timestamp, when the sync job was last seen by the Connectors service.</li>
+ *     <li>A {@link Map} containing metadata of the sync job.</li>
+ *     <li>A timestamp, when the sync job was started.</li>
+ *     <li>The {@link ConnectorSyncStatus} of the connector.</li>
+ *     <li>The total number of documents present in the index after the sync job completes.</li>
+ *     <li>The {@link ConnectorSyncJobTriggerMethod} of the sync job.</li>
+ *     <li>The hostname of the worker to run the sync job.</li>
+ * </ul>
+ */
+public class ConnectorSyncJob implements Writeable, ToXContentObject {
+
+    static final ParseField CANCELATION_REQUESTED_AT_FIELD = new ParseField("cancelation_requested_at");
+
+    static final ParseField CANCELED_AT_FIELD = new ParseField("canceled_at");
+
+    static final ParseField COMPLETED_AT_FIELD = new ParseField("completed_at");
+
+    static final ParseField CONNECTOR_FIELD = new ParseField("connector");
+
+    static final ParseField CREATED_AT_FIELD = new ParseField("created_at");
+
+    static final ParseField DELETED_DOCUMENT_COUNT = new ParseField("deleted_document_count");
+
+    static final ParseField ERROR_FIELD = new ParseField("error");
+
+    public static final ParseField ID_FIELD = new ParseField("id");
+
+    static final ParseField INDEXED_DOCUMENT_COUNT_FIELD = new ParseField("indexed_document_count");
+
+    static final ParseField INDEXED_DOCUMENT_VOLUME_FIELD = new ParseField("indexed_document_volume");
+
+    public static final ParseField JOB_TYPE_FIELD = new ParseField("job_type");
+
+    static final ParseField LAST_SEEN_FIELD = new ParseField("last_seen");
+
+    static final ParseField METADATA_FIELD = new ParseField("metadata");
+
+    static final ParseField STARTED_AT_FIELD = new ParseField("started_at");
+
+    static final ParseField STATUS_FIELD = new ParseField("status");
+
+    static final ParseField TOTAL_DOCUMENT_COUNT_FIELD = new ParseField("total_document_count");
+
+    public static final ParseField TRIGGER_METHOD_FIELD = new ParseField("trigger_method");
+
+    static final ParseField WORKER_HOSTNAME_FIELD = new ParseField("worker_hostname");
+
+    static final ConnectorSyncStatus DEFAULT_INITIAL_STATUS = ConnectorSyncStatus.PENDING;
+
+    static final ConnectorSyncJobType DEFAULT_JOB_TYPE = ConnectorSyncJobType.FULL;
+
+    static final ConnectorSyncJobTriggerMethod DEFAULT_TRIGGER_METHOD = ConnectorSyncJobTriggerMethod.ON_DEMAND;
+
+    private final Instant cancelationRequestedAt;
+
+    @Nullable
+    private final Instant canceledAt;
+
+    @Nullable
+    private final Instant completedAt;
+
+    private final Connector connector;
+
+    private final Instant createdAt;
+
+    private final long deletedDocumentCount;
+
+    @Nullable
+    private final String error;
+
+    private final String id;
+
+    private final long indexedDocumentCount;
+
+    private final long indexedDocumentVolume;
+
+    private final ConnectorSyncJobType jobType;
+
+    @Nullable
+    private final Instant lastSeen;
+
+    private final Map<String, Object> metadata;
+
+    @Nullable
+    private final Instant startedAt;
+
+    private final ConnectorSyncStatus status;
+
+    @Nullable
+    private final Long totalDocumentCount;
+
+    private final ConnectorSyncJobTriggerMethod triggerMethod;
+
+    @Nullable
+    private final String workerHostname;
+
+    /**
+     *
+     * @param cancelationRequestedAt Timestamp when the sync job cancellation was requested.
+     * @param canceledAt Timestamp, when the sync job was cancelled.
+     * @param completedAt Timestamp, when the sync job was completed.
+     * @param connector Subset of connector fields the sync job is referring to.
+     * @param createdAt Timestamp, when the sync job was created.
+     * @param deletedDocumentCount Number of documents deleted by the sync job.
+     * @param error Error, which might have appeared during the sync job execution.
+     * @param id Unique identifier for distinguishing different connectors.
+     * @param indexedDocumentCount Number of documents indexed by the sync job.
+     * @param indexedDocumentVolume Volume of the indexed documents.
+     * @param jobType Job type of the sync job.
+     * @param lastSeen Timestamp, when the sync was last seen by the Connectors service.
+     * @param metadata Map containing metadata of the sync job.
+     * @param startedAt Timestamp, when the sync job was started.
+     * @param status Sync status of the connector.
+     * @param totalDocumentCount Total number of documents present in the index after the sync job completes.
+     * @param triggerMethod Trigger method of the sync job.
+     * @param workerHostname Hostname of the worker to run the sync job.
+     */
+    private ConnectorSyncJob(
+        Instant cancelationRequestedAt,
+        Instant canceledAt,
+        Instant completedAt,
+        Connector connector,
+        Instant createdAt,
+        long deletedDocumentCount,
+        String error,
+        String id,
+        long indexedDocumentCount,
+        long indexedDocumentVolume,
+        ConnectorSyncJobType jobType,
+        Instant lastSeen,
+        Map<String, Object> metadata,
+        Instant startedAt,
+        ConnectorSyncStatus status,
+        Long totalDocumentCount,
+        ConnectorSyncJobTriggerMethod triggerMethod,
+        String workerHostname
+    ) {
+        this.cancelationRequestedAt = cancelationRequestedAt;
+        this.canceledAt = canceledAt;
+        this.completedAt = completedAt;
+        this.connector = connector;
+        this.createdAt = createdAt;
+        this.deletedDocumentCount = deletedDocumentCount;
+        this.error = error;
+        this.id = Objects.requireNonNull(id, "[id] cannot be null");
+        this.indexedDocumentCount = indexedDocumentCount;
+        this.indexedDocumentVolume = indexedDocumentVolume;
+        this.jobType = Objects.requireNonNullElse(jobType, ConnectorSyncJobType.FULL);
+        this.lastSeen = lastSeen;
+        this.metadata = Objects.requireNonNullElse(metadata, Collections.emptyMap());
+        this.startedAt = startedAt;
+        this.status = status;
+        this.totalDocumentCount = totalDocumentCount;
+        this.triggerMethod = Objects.requireNonNullElse(triggerMethod, ConnectorSyncJobTriggerMethod.ON_DEMAND);
+        this.workerHostname = workerHostname;
+    }
+
+    public ConnectorSyncJob(StreamInput in) throws IOException {
+        this.cancelationRequestedAt = in.readOptionalInstant();
+        this.canceledAt = in.readOptionalInstant();
+        this.completedAt = in.readOptionalInstant();
+        this.connector = in.readNamedWriteable(Connector.class);
+        this.createdAt = in.readInstant();
+        this.deletedDocumentCount = in.readLong();
+        this.error = in.readOptionalString();
+        this.id = in.readString();
+        this.indexedDocumentCount = in.readLong();
+        this.indexedDocumentVolume = in.readLong();
+        this.jobType = in.readEnum(ConnectorSyncJobType.class);
+        this.lastSeen = in.readOptionalInstant();
+        this.metadata = in.readMap(StreamInput::readString, StreamInput::readGenericValue);
+        this.startedAt = in.readOptionalInstant();
+        this.status = in.readEnum(ConnectorSyncStatus.class);
+        this.totalDocumentCount = in.readOptionalLong();
+        this.triggerMethod = in.readEnum(ConnectorSyncJobTriggerMethod.class);
+        this.workerHostname = in.readOptionalString();
+    }
+
+    public String getId() {
+        return id;
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        {
+            builder.field(CANCELATION_REQUESTED_AT_FIELD.getPreferredName(), cancelationRequestedAt);
+            builder.field(CANCELED_AT_FIELD.getPreferredName(), canceledAt);
+            builder.field(COMPLETED_AT_FIELD.getPreferredName(), completedAt);
+
+            builder.startObject(CONNECTOR_FIELD.getPreferredName());
+            {
+                builder.field(Connector.ID_FIELD.getPreferredName(), connector.getConnectorId());
+                builder.field(Connector.FILTERING_FIELD.getPreferredName(), connector.getFiltering());
+                builder.field(Connector.INDEX_NAME_FIELD.getPreferredName(), connector.getIndexName());
+                builder.field(Connector.LANGUAGE_FIELD.getPreferredName(), connector.getLanguage());
+                builder.field(Connector.PIPELINE_FIELD.getPreferredName(), connector.getPipeline());
+                builder.field(Connector.SERVICE_TYPE_FIELD.getPreferredName(), connector.getServiceType());
+                builder.field(Connector.CONFIGURATION_FIELD.getPreferredName(), connector.getConfiguration());
+            }
+            builder.endObject();
+
+            builder.field(CREATED_AT_FIELD.getPreferredName(), createdAt);
+            builder.field(DELETED_DOCUMENT_COUNT.getPreferredName(), deletedDocumentCount);
+            builder.field(ERROR_FIELD.getPreferredName(), error);
+            builder.field(ID_FIELD.getPreferredName(), id);
+            builder.field(INDEXED_DOCUMENT_COUNT_FIELD.getPreferredName(), indexedDocumentCount);
+            builder.field(INDEXED_DOCUMENT_VOLUME_FIELD.getPreferredName(), indexedDocumentVolume);
+            builder.field(JOB_TYPE_FIELD.getPreferredName(), jobType);
+            builder.field(LAST_SEEN_FIELD.getPreferredName(), lastSeen);
+            builder.field(METADATA_FIELD.getPreferredName(), metadata);
+            builder.field(STARTED_AT_FIELD.getPreferredName(), startedAt);
+            builder.field(STATUS_FIELD.getPreferredName(), status);
+            builder.field(TOTAL_DOCUMENT_COUNT_FIELD.getPreferredName(), totalDocumentCount);
+            builder.field(TRIGGER_METHOD_FIELD.getPreferredName(), triggerMethod);
+            builder.field(WORKER_HOSTNAME_FIELD.getPreferredName(), workerHostname);
+        }
+        builder.endObject();
+        return builder;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeOptionalInstant(cancelationRequestedAt);
+        out.writeOptionalInstant(canceledAt);
+        out.writeOptionalInstant(completedAt);
+        out.writeNamedWriteable(connector);
+        out.writeInstant(createdAt);
+        out.writeLong(deletedDocumentCount);
+        out.writeOptionalString(error);
+        out.writeString(id);
+        out.writeLong(indexedDocumentCount);
+        out.writeLong(indexedDocumentVolume);
+        out.writeEnum(jobType);
+        out.writeOptionalInstant(lastSeen);
+        out.writeMap(metadata, StreamOutput::writeString, StreamOutput::writeGenericValue);
+        out.writeOptionalInstant(startedAt);
+        out.writeEnum(status);
+        out.writeOptionalLong(totalDocumentCount);
+        out.writeEnum(triggerMethod);
+        out.writeOptionalString(workerHostname);
+    }
+
+    public boolean equals(Object other) {
+        if (this == other) return true;
+        if (other == null || getClass() != other.getClass()) return false;
+
+        ConnectorSyncJob connectorSyncJob = (ConnectorSyncJob) other;
+
+        return Objects.equals(cancelationRequestedAt, connectorSyncJob.cancelationRequestedAt)
+            && Objects.equals(canceledAt, connectorSyncJob.canceledAt)
+            && Objects.equals(completedAt, connectorSyncJob.completedAt)
+            && Objects.equals(connector, connectorSyncJob.connector)
+            && Objects.equals(createdAt, connectorSyncJob.createdAt)
+            && Objects.equals(deletedDocumentCount, connectorSyncJob.deletedDocumentCount)
+            && Objects.equals(error, connectorSyncJob.error)
+            && Objects.equals(id, connectorSyncJob.id)
+            && Objects.equals(indexedDocumentCount, connectorSyncJob.indexedDocumentCount)
+            && Objects.equals(indexedDocumentVolume, connectorSyncJob.indexedDocumentVolume)
+            && Objects.equals(jobType, connectorSyncJob.jobType)
+            && Objects.equals(lastSeen, connectorSyncJob.lastSeen)
+            && Objects.equals(metadata, connectorSyncJob.metadata)
+            && Objects.equals(startedAt, connectorSyncJob.startedAt)
+            && Objects.equals(status, connectorSyncJob.status)
+            && Objects.equals(totalDocumentCount, connectorSyncJob.totalDocumentCount)
+            && Objects.equals(triggerMethod, connectorSyncJob.triggerMethod)
+            && Objects.equals(workerHostname, connectorSyncJob.workerHostname);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(
+            cancelationRequestedAt,
+            canceledAt,
+            completedAt,
+            connector,
+            createdAt,
+            deletedDocumentCount,
+            error,
+            id,
+            indexedDocumentCount,
+            indexedDocumentVolume,
+            jobType,
+            lastSeen,
+            metadata,
+            startedAt,
+            status,
+            totalDocumentCount,
+            triggerMethod,
+            workerHostname
+        );
+    }
+
+    public static class Builder {
+        private Instant cancellationRequestedAt;
+
+        private Instant canceledAt;
+
+        private Instant completedAt;
+
+        private Connector connector;
+
+        private Instant createdAt;
+
+        private long deletedDocumentCount;
+
+        private String error;
+
+        private String id;
+
+        private long indexedDocumentCount;
+
+        private long indexedDocumentVolume;
+
+        private ConnectorSyncJobType jobType;
+
+        private Instant lastSeen;
+
+        private Map<String, Object> metadata;
+
+        private Instant startedAt;
+
+        private ConnectorSyncStatus status;
+
+        private Long totalDocumentCount;
+
+        private ConnectorSyncJobTriggerMethod triggerMethod;
+
+        private String workerHostname;
+
+        public Builder setCancellationRequestedAt(Instant cancellationRequestedAt) {
+            this.cancellationRequestedAt = cancellationRequestedAt;
+            return this;
+        }
+
+        public Builder setCanceledAt(Instant canceledAt) {
+            this.canceledAt = canceledAt;
+            return this;
+        }
+
+        public Builder setCompletedAt(Instant completedAt) {
+            this.completedAt = completedAt;
+            return this;
+        }
+
+        public Builder setConnector(Connector connector) {
+            this.connector = connector;
+            return this;
+        }
+
+        public Builder setCreatedAt(Instant createdAt) {
+            this.createdAt = createdAt;
+            return this;
+        }
+
+        public Builder setDeletedDocumentCount(long deletedDocumentCount) {
+            this.deletedDocumentCount = deletedDocumentCount;
+            return this;
+        }
+
+        public Builder setError(String error) {
+            this.error = error;
+            return this;
+        }
+
+        public Builder setId(String id) {
+            this.id = id;
+            return this;
+        }
+
+        public Builder setIndexedDocumentCount(long indexedDocumentCount) {
+            this.indexedDocumentCount = indexedDocumentCount;
+            return this;
+        }
+
+        public Builder setIndexedDocumentVolume(long indexedDocumentVolume) {
+            this.indexedDocumentVolume = indexedDocumentVolume;
+            return this;
+        }
+
+        public Builder setJobType(ConnectorSyncJobType jobType) {
+            this.jobType = jobType;
+            return this;
+        }
+
+        public Builder setLastSeen(Instant lastSeen) {
+            this.lastSeen = lastSeen;
+            return this;
+        }
+
+        public Builder setMetadata(Map<String, Object> metadata) {
+            this.metadata = metadata;
+            return this;
+        }
+
+        public Builder setStartedAt(Instant startedAt) {
+            this.startedAt = startedAt;
+            return this;
+        }
+
+        public Builder setStatus(ConnectorSyncStatus status) {
+            this.status = status;
+            return this;
+        }
+
+        public Builder setTotalDocumentCount(long totalDocumentCount) {
+            this.totalDocumentCount = totalDocumentCount;
+            return this;
+        }
+
+        public Builder setTriggerMethod(ConnectorSyncJobTriggerMethod triggerMethod) {
+            this.triggerMethod = triggerMethod;
+            return this;
+        }
+
+        public Builder setWorkerHostname(String workerHostname) {
+            this.workerHostname = workerHostname;
+            return this;
+        }
+
+        public ConnectorSyncJob build() {
+            return new ConnectorSyncJob(
+                cancellationRequestedAt,
+                canceledAt,
+                completedAt,
+                connector,
+                createdAt,
+                deletedDocumentCount,
+                error,
+                id,
+                indexedDocumentCount,
+                indexedDocumentVolume,
+                jobType,
+                lastSeen,
+                metadata,
+                startedAt,
+                status,
+                totalDocumentCount,
+                triggerMethod,
+                workerHostname
+            );
+        }
+    }
+}
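Only `id` is hard-required by the private constructor; `jobType`, `triggerMethod` and `metadata` fall back to `FULL`, `ON_DEMAND` and an empty map via `requireNonNullElse`. A minimal construction sketch; the `Connector` argument is assumed to be fetched elsewhere, and the wrapper class name is illustrative:

```java
import java.time.Instant;

public final class SyncJobExample {
    static ConnectorSyncJob newPendingJob(String syncJobId, Connector connector) {
        return new ConnectorSyncJob.Builder()
            .setId(syncJobId)                       // the only requireNonNull field
            .setConnector(connector)                // connector snapshot, assumed to exist
            .setCreatedAt(Instant.now())
            .setStatus(ConnectorSyncStatus.PENDING) // matches DEFAULT_INITIAL_STATUS
            .build();                               // jobType -> FULL, triggerMethod -> ON_DEMAND, metadata -> {}
    }
}
```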
a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java new file mode 100644 index 0000000000000..f259cb1e0a8c0 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java @@ -0,0 +1,159 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector.syncjob; + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.OriginSettingClient; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xpack.application.connector.Connector; +import org.elasticsearch.xpack.application.connector.ConnectorFiltering; +import org.elasticsearch.xpack.application.connector.ConnectorIndexService; +import org.elasticsearch.xpack.application.connector.ConnectorIngestPipeline; +import org.elasticsearch.xpack.application.connector.ConnectorTemplateRegistry; +import org.elasticsearch.xpack.application.connector.syncjob.action.PostConnectorSyncJobAction; + +import java.io.IOException; +import java.time.Instant; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.ClientHelper.CONNECTORS_ORIGIN; + +/** + * A service that manages persistent {@link ConnectorSyncJob} configurations. + */ +public class ConnectorSyncJobIndexService { + + private static final Long ZERO = 0L; + + private final Client clientWithOrigin; + + public static final String CONNECTOR_SYNC_JOB_INDEX_NAME = ConnectorTemplateRegistry.CONNECTOR_SYNC_JOBS_INDEX_NAME_PATTERN; + + /** + * @param client A client for executing actions on the connectors sync jobs index. + */ + public ConnectorSyncJobIndexService(Client client) { + this.clientWithOrigin = new OriginSettingClient(client, CONNECTORS_ORIGIN); + } + + /** + * @param request Request for creating a connector sync job. + * @param listener Listener to respond to a successful response or an error. 
+ */ + public void createConnectorSyncJob( + PostConnectorSyncJobAction.Request request, + ActionListener<PostConnectorSyncJobAction.Response> listener + ) { + try { + getSyncJobConnectorInfo(request.getId(), listener.delegateFailure((l, connector) -> { + Instant now = Instant.now(); + ConnectorSyncJobType jobType = Objects.requireNonNullElse(request.getJobType(), ConnectorSyncJob.DEFAULT_JOB_TYPE); + ConnectorSyncJobTriggerMethod triggerMethod = Objects.requireNonNullElse( + request.getTriggerMethod(), + ConnectorSyncJob.DEFAULT_TRIGGER_METHOD + ); + + try { + String syncJobId = generateId(); + + final IndexRequest indexRequest = new IndexRequest(CONNECTOR_SYNC_JOB_INDEX_NAME).id(syncJobId) + .opType(DocWriteRequest.OpType.INDEX) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + + ConnectorSyncJob syncJob = new ConnectorSyncJob.Builder().setId(syncJobId) + .setJobType(jobType) + .setTriggerMethod(triggerMethod) + .setStatus(ConnectorSyncJob.DEFAULT_INITIAL_STATUS) + .setConnector(connector) + .setCreatedAt(now) + .setLastSeen(now) + .setTotalDocumentCount(ZERO) + .setIndexedDocumentCount(ZERO) + .setIndexedDocumentVolume(ZERO) + .setDeletedDocumentCount(ZERO) + .build(); + + indexRequest.source(syncJob.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS)); + + clientWithOrigin.index( + indexRequest, + ActionListener.wrap( + indexResponse -> listener.onResponse(new PostConnectorSyncJobAction.Response(indexResponse.getId())), + listener::onFailure + ) + ); + } catch (IOException e) { + listener.onFailure(e); + } + })); + } catch (Exception e) { + listener.onFailure(e); + } + } + + private String generateId() { + /* Workaround: only needed for generating an id upfront, autoGenerateId() has a side effect generating a timestamp, + * which would raise an error on the response layer later ("autoGeneratedTimestamp should not be set externally"). + * TODO: do we even need to copy the "_id" and set it as "id"?
+ */ + return UUIDs.base64UUID(); + } + + private void getSyncJobConnectorInfo(String connectorId, ActionListener<Connector> listener) { + try { + + final GetRequest request = new GetRequest(ConnectorIndexService.CONNECTOR_INDEX_NAME, connectorId); + + clientWithOrigin.get(request, new ActionListener<>() { + @Override + public void onResponse(GetResponse response) { + final boolean connectorDoesNotExist = response.isExists() == false; + + if (connectorDoesNotExist) { + onFailure(new ResourceNotFoundException("Connector with id '" + connectorId + "' does not exist.")); + return; + } + + Map<String, Object> source = response.getSource(); + + @SuppressWarnings("unchecked") + final Connector syncJobConnectorInfo = new Connector.Builder().setConnectorId( + (String) source.get(Connector.ID_FIELD.getPreferredName()) + ) + .setFiltering((List<ConnectorFiltering>) source.get(Connector.FILTERING_FIELD.getPreferredName())) + .setIndexName((String) source.get(Connector.INDEX_NAME_FIELD.getPreferredName())) + .setLanguage((String) source.get(Connector.LANGUAGE_FIELD.getPreferredName())) + .setPipeline((ConnectorIngestPipeline) source.get(Connector.PIPELINE_FIELD.getPreferredName())) + .setServiceType((String) source.get(Connector.SERVICE_TYPE_FIELD.getPreferredName())) + .setConfiguration((Map<String, Object>) source.get(Connector.CONFIGURATION_FIELD.getPreferredName())) + .build(); + + listener.onResponse(syncJobConnectorInfo); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } catch (Exception e) { + listener.onFailure(e); + } + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTriggerMethod.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTriggerMethod.java new file mode 100644 index 0000000000000..110748795fb77 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTriggerMethod.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector.syncjob; + +import java.util.Locale; + +public enum ConnectorSyncJobTriggerMethod { + ON_DEMAND, + SCHEDULED; + + public static ConnectorSyncJobTriggerMethod fromString(String triggerMethodString) { + for (ConnectorSyncJobTriggerMethod triggerMethod : ConnectorSyncJobTriggerMethod.values()) { + if (triggerMethod.name().equalsIgnoreCase(triggerMethodString)) { + return triggerMethod; + } + } + + throw new IllegalArgumentException("Unknown trigger method '" + triggerMethodString + "'."); + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobType.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobType.java new file mode 100644 index 0000000000000..2d0a18da6fec5 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobType.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements.
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector.syncjob; + +import java.util.Locale; + +public enum ConnectorSyncJobType { + FULL, + INCREMENTAL, + ACCESS_CONTROL; + + public static ConnectorSyncJobType fromString(String syncJobTypeString) { + for (ConnectorSyncJobType syncJobType : ConnectorSyncJobType.values()) { + if (syncJobType.name().equalsIgnoreCase(syncJobTypeString)) { + return syncJobType; + } + } + + throw new IllegalArgumentException("Unknown sync job type '" + syncJobTypeString + "'."); + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/PostConnectorSyncJobAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/PostConnectorSyncJobAction.java new file mode 100644 index 0000000000000..05da4dd798c83 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/PostConnectorSyncJobAction.java @@ -0,0 +1,206 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector.syncjob.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.application.connector.syncjob.ConnectorSyncJob; +import org.elasticsearch.xpack.application.connector.syncjob.ConnectorSyncJobTriggerMethod; +import org.elasticsearch.xpack.application.connector.syncjob.ConnectorSyncJobType; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.elasticsearch.xpack.application.EnterpriseSearch.CONNECTOR_API_ENDPOINT; + +public class PostConnectorSyncJobAction extends ActionType<PostConnectorSyncJobAction.Response> { + + public static final PostConnectorSyncJobAction INSTANCE = new PostConnectorSyncJobAction(); + + public static final String NAME = "cluster:admin/xpack/connector/sync_job/post"; + + public static final String CONNECTOR_SYNC_JOB_API_ENDPOINT = CONNECTOR_API_ENDPOINT + "/_sync_job"; + + private PostConnectorSyncJobAction() { + super(NAME, PostConnectorSyncJobAction.Response::new); + } +
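+    // Example body accepted at CONNECTOR_SYNC_JOB_API_ENDPOINT and parsed by Request.fromXContent
+    // below (a sketch; the id value is a placeholder, and job_type/trigger_method are optional, see
+    // the optionalConstructorArg declarations):
+    //
+    //     { "id": "my-connector-id", "job_type": "full", "trigger_method": "on_demand" }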
+ public static class Request extends ActionRequest implements ToXContentObject { + public static final String EMPTY_CONNECTOR_ID_ERROR_MESSAGE = "[id] of the connector cannot be null or empty"; + private final String id; + private final ConnectorSyncJobType jobType; + private final ConnectorSyncJobTriggerMethod triggerMethod; + + public Request(String id, ConnectorSyncJobType jobType, ConnectorSyncJobTriggerMethod triggerMethod) { + this.id = id; + this.jobType = jobType; + this.triggerMethod = triggerMethod; + } + + public Request(StreamInput in) throws IOException { + super(in); + this.id = in.readString(); + this.jobType = in.readOptionalEnum(ConnectorSyncJobType.class); + this.triggerMethod = in.readOptionalEnum(ConnectorSyncJobTriggerMethod.class); + } + + private static final ConstructingObjectParser<Request, Void> PARSER = new ConstructingObjectParser<>( + "connector_sync_job_post_request", + false, + ((args) -> { + String connectorId = (String) args[0]; + String syncJobTypeString = (String) args[1]; + String triggerMethodString = (String) args[2]; + + boolean syncJobTypeSpecified = syncJobTypeString != null; + boolean triggerMethodSpecified = triggerMethodString != null; + + return new Request( + connectorId, + syncJobTypeSpecified ? ConnectorSyncJobType.fromString(syncJobTypeString) : null, + triggerMethodSpecified ? ConnectorSyncJobTriggerMethod.fromString(triggerMethodString) : null + ); + }) + ); + + static { + PARSER.declareString(constructorArg(), ConnectorSyncJob.ID_FIELD); + PARSER.declareString(optionalConstructorArg(), ConnectorSyncJob.JOB_TYPE_FIELD); + PARSER.declareString(optionalConstructorArg(), ConnectorSyncJob.TRIGGER_METHOD_FIELD); + } + + public String getId() { + return id; + } + + public ConnectorSyncJobType getJobType() { + return jobType; + } + + public ConnectorSyncJobTriggerMethod getTriggerMethod() { + return triggerMethod; + } + + public static Request fromXContentBytes(BytesReference source, XContentType xContentType) { + try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { + return Request.fromXContent(parser); + } catch (IOException e) { + throw new ElasticsearchParseException("Failed to parse: " + source.utf8ToString(), e); + } + } + + public static Request fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field("id", id); + builder.field("job_type", jobType); + builder.field("trigger_method", triggerMethod); + } + builder.endObject(); + return builder; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + + if (Strings.isNullOrEmpty(getId())) { + validationException = addValidationError(EMPTY_CONNECTOR_ID_ERROR_MESSAGE, validationException); + } + + return validationException; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + out.writeOptionalEnum(jobType); + out.writeOptionalEnum(triggerMethod); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Objects.equals(id, request.id) && jobType == request.jobType && triggerMethod == request.triggerMethod; + } + + @Override + public int hashCode() { + return Objects.hash(id,
jobType, triggerMethod); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private final String id; + + public Response(StreamInput in) throws IOException { + super(in); + this.id = in.readString(); + } + + public Response(String id) { + this.id = id; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(id); + } + + public String getId() { + return id; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("id", id); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return Objects.equals(id, response.id); + } + + @Override + public int hashCode() { + return Objects.hash(id); + } + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestPostConnectorSyncJobAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestPostConnectorSyncJobAction.java new file mode 100644 index 0000000000000..2a1b9d15d2451 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestPostConnectorSyncJobAction.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector.syncjob.action; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.POST; + +public class RestPostConnectorSyncJobAction extends BaseRestHandler { + + @Override + public String getName() { + return "connector_sync_job_post_action"; + } + + @Override + public List<Route> routes() { + return List.of(new Route(POST, "/" + PostConnectorSyncJobAction.CONNECTOR_SYNC_JOB_API_ENDPOINT)); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + PostConnectorSyncJobAction.Request request = PostConnectorSyncJobAction.Request.fromXContentBytes( + restRequest.content(), + restRequest.getXContentType() + ); + + return channel -> client.execute( + PostConnectorSyncJobAction.INSTANCE, + request, + new RestToXContentListener<>(channel, r -> RestStatus.CREATED, r -> null) + ); + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/TransportPostConnectorSyncJobAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/TransportPostConnectorSyncJobAction.java new file mode 100644 index 0000000000000..73889195d0e08 --- /dev/null +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/TransportPostConnectorSyncJobAction.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V.
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector.syncjob.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.application.connector.syncjob.ConnectorSyncJobIndexService; + +public class TransportPostConnectorSyncJobAction extends HandledTransportAction< + PostConnectorSyncJobAction.Request, + PostConnectorSyncJobAction.Response> { + + protected final ConnectorSyncJobIndexService syncJobIndexService; + + @Inject + public TransportPostConnectorSyncJobAction( + TransportService transportService, + ClusterService clusterService, + ActionFilters actionFilters, + Client client + ) { + super( + PostConnectorSyncJobAction.NAME, + transportService, + actionFilters, + PostConnectorSyncJobAction.Request::new, + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + this.syncJobIndexService = new ConnectorSyncJobIndexService(client); + } + + @Override + protected void doExecute( + Task task, + PostConnectorSyncJobAction.Request request, + ActionListener<PostConnectorSyncJobAction.Response> listener + ) { + syncJobIndexService.createConnectorSyncJob(request, listener); + } +} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexService.java index a2ad6a59e54fe..4e001d38bf279 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexService.java @@ -279,6 +279,7 @@ public void onResponse(DeleteResponse deleteResponse) { public void onFailure(Exception e) { if (e instanceof IndexNotFoundException) { listener.onFailure(new ResourceNotFoundException(resourceName)); + return; } listener.onFailure(e); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java index 731408d3c6ef8..ebd78119ab7d5 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java @@ -8,10 +8,13 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; import
org.elasticsearch.common.ParsingException; @@ -19,6 +22,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.HeaderWarning; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; @@ -209,18 +213,35 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws queryRewriteContext.registerAsyncAction((client, listener) -> { Client clientWithOrigin = new OriginSettingClient(client, ENT_SEARCH_ORIGIN); - clientWithOrigin.get(getRequest, listener.delegateFailureAndWrap((l, getResponse) -> { - if (getResponse.isExists() == false) { - throw new ResourceNotFoundException("query ruleset " + rulesetId + " not found"); + clientWithOrigin.get(getRequest, new ActionListener<>() { + @Override + public void onResponse(GetResponse getResponse) { + if (getResponse.isExists() == false) { + throw new ResourceNotFoundException("query ruleset " + rulesetId + " not found"); + } + QueryRuleset queryRuleset = QueryRuleset.fromXContentBytes( + rulesetId, + getResponse.getSourceAsBytesRef(), + XContentType.JSON + ); + for (QueryRule rule : queryRuleset.rules()) { + rule.applyRule(appliedRules, matchCriteria); + } + pinnedIdsSetOnce.set(appliedRules.pinnedIds().stream().distinct().toList()); + pinnedDocsSetOnce.set(appliedRules.pinnedDocs().stream().distinct().toList()); + listener.onResponse(null); } - QueryRuleset queryRuleset = QueryRuleset.fromXContentBytes(rulesetId, getResponse.getSourceAsBytesRef(), XContentType.JSON); - for (QueryRule rule : queryRuleset.rules()) { - rule.applyRule(appliedRules, matchCriteria); + + @Override + public void onFailure(Exception e) { + Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof IndexNotFoundException) { + listener.onFailure(new ResourceNotFoundException("query ruleset " + rulesetId + " not found")); + } else { + listener.onFailure(e); + } } - pinnedIdsSetOnce.set(appliedRules.pinnedIds().stream().distinct().toList()); - pinnedDocsSetOnce.set(appliedRules.pinnedDocs().stream().distinct().toList()); - listener.onResponse(null); - })); + }); }); QueryBuilder newOrganicQuery = organicQuery.rewrite(queryRewriteContext); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/TransportQuerySearchApplicationAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/TransportQuerySearchApplicationAction.java index ac4b15fd43564..89e670f9b1de9 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/TransportQuerySearchApplicationAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/TransportQuerySearchApplicationAction.java @@ -8,9 +8,9 @@ package org.elasticsearch.xpack.application.search.action; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.internal.Client; @@ -68,7 +68,7 @@ protected void doExecute(Task task, 
SearchApplicationSearchRequest request, Acti SearchRequest searchRequest = new SearchRequest(searchApplication.name()).source(sourceBuilder); client.execute( - SearchAction.INSTANCE, + TransportSearchAction.TYPE, searchRequest, listener.delegateFailure((l2, searchResponse) -> l2.onResponse(searchResponse)) ); diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistryTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistryTests.java index d1e0e23ee3230..8bf06b8954080 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistryTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistryTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.ingest.IngestMetadata; import org.elasticsearch.ingest.PipelineConfiguration; import org.elasticsearch.test.ClusterServiceUtils; @@ -41,6 +42,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.application.EnterpriseSearchFeatures; import org.elasticsearch.xpack.core.ilm.IndexLifecycleMetadata; import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; import org.elasticsearch.xpack.core.ilm.LifecyclePolicyMetadata; @@ -75,7 +77,13 @@ public void createRegistryAndClient() { threadPool = new TestThreadPool(this.getClass().getName()); client = new VerifyingClient(threadPool); ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); - registry = new AnalyticsTemplateRegistry(clusterService, threadPool, client, NamedXContentRegistry.EMPTY); + registry = new AnalyticsTemplateRegistry( + clusterService, + new FeatureService(List.of(new EnterpriseSearchFeatures())), + threadPool, + client, + NamedXContentRegistry.EMPTY + ); } @After diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorCustomScheduleTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorCustomScheduleTests.java new file mode 100644 index 0000000000000..9a1125410f493 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorCustomScheduleTests.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentType; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; +import static org.hamcrest.CoreMatchers.equalTo; + +public class ConnectorCustomScheduleTests extends ESTestCase { + private NamedWriteableRegistry namedWriteableRegistry; + + @Before + public void registerNamedObjects() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList()); + + List<NamedWriteableRegistry.Entry> namedWriteables = searchModule.getNamedWriteables(); + namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables); + } + + public final void testRandomSerialization() throws IOException { + for (int runs = 0; runs < 10; runs++) { + ConnectorCustomSchedule testInstance = ConnectorTestUtils.getRandomConnectorCustomSchedule(); + assertTransportSerialization(testInstance); + } + } + + public void testToXContent() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "configuration_overrides": { + "domain_allowlist": [ + "https://example.com" + ], + "max_crawl_depth": 1, + "seed_urls": [ + "https://example.com/blog", + "https://example.com/info" + ], + "sitemap_discovery_disabled": true, + "sitemap_urls": [ + "https://example.com/sitemap.xml" + ] + }, + "enabled": true, + "interval": "0 0 12 * * ?", + "last_synced": null, + "name": "My Schedule" + } + """); + + ConnectorCustomSchedule customSchedule = ConnectorCustomSchedule.fromXContentBytes(new BytesArray(content), XContentType.JSON); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(customSchedule, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + ConnectorCustomSchedule parsed; + try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) { + parsed = ConnectorCustomSchedule.fromXContent(parser); + } + assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON); + } + + private void assertTransportSerialization(ConnectorCustomSchedule testInstance) throws IOException { + ConnectorCustomSchedule deserializedInstance = copyInstance(testInstance); + assertNotSame(testInstance, deserializedInstance); + assertThat(testInstance, equalTo(deserializedInstance)); + } + + private ConnectorCustomSchedule copyInstance(ConnectorCustomSchedule instance) throws IOException { + return copyWriteable(instance, namedWriteableRegistry, ConnectorCustomSchedule::new); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorFeaturesTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorFeaturesTests.java new file mode 100644 index 0000000000000..1563ff5fcf82c --- /dev/null +++
b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorFeaturesTests.java @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentType; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; +import static org.hamcrest.CoreMatchers.equalTo; + +public class ConnectorFeaturesTests extends ESTestCase { + + private NamedWriteableRegistry namedWriteableRegistry; + + @Before + public void registerNamedObjects() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList()); + + List<NamedWriteableRegistry.Entry> namedWriteables = searchModule.getNamedWriteables(); + namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables); + } + + public final void testRandomSerialization() throws IOException { + for (int runs = 0; runs < 10; runs++) { + ConnectorFeatures testInstance = ConnectorTestUtils.getRandomConnectorFeatures(); + assertTransportSerialization(testInstance); + } + } + + public void testToXContent() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "document_level_security": { + "enabled": true + }, + "filtering_advanced_config": true, + "sync_rules": { + "advanced": { + "enabled": false + }, + "basic": { + "enabled": true + } + } + } + """); + + testToXContentChecker(content); + } + + public void testToXContentMissingDocumentLevelSecurity() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "filtering_advanced_config": true, + "sync_rules": { + "advanced": { + "enabled": false + }, + "basic": { + "enabled": true + } + } + } + """); + + testToXContentChecker(content); + } + + public void testToXContentMissingSyncRules() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "filtering_advanced_config": true + } + """); + + testToXContentChecker(content); + } + + public void testToXContentMissingSyncRulesAdvanced() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "filtering_advanced_config": true, + "sync_rules": { + "basic": { + "enabled": true + } + } + } + """); + + testToXContentChecker(content); + } + + private void testToXContentChecker(String content) throws IOException { + ConnectorFeatures features = ConnectorFeatures.fromXContentBytes(new BytesArray(content), XContentType.JSON); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(features, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + ConnectorFeatures parsed; + try (XContentParser parser =
createParser(XContentType.JSON.xContent(), originalBytes)) { + parsed = ConnectorFeatures.fromXContent(parser); + } + assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON); + } + + private void assertTransportSerialization(ConnectorFeatures testInstance) throws IOException { + ConnectorFeatures deserializedInstance = copyInstance(testInstance); + assertNotSame(testInstance, deserializedInstance); + assertThat(testInstance, equalTo(deserializedInstance)); + } + + private ConnectorFeatures copyInstance(ConnectorFeatures instance) throws IOException { + return copyWriteable(instance, namedWriteableRegistry, ConnectorFeatures::new); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorFilteringTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorFilteringTests.java new file mode 100644 index 0000000000000..e65236e90d928 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorFilteringTests.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentType; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; +import static org.hamcrest.CoreMatchers.equalTo; + +public class ConnectorFilteringTests extends ESTestCase { + + private NamedWriteableRegistry namedWriteableRegistry; + + @Before + public void registerNamedObjects() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList()); + + List<NamedWriteableRegistry.Entry> namedWriteables = searchModule.getNamedWriteables(); + namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables); + } + + public final void testRandomSerialization() throws IOException { + for (int runs = 0; runs < 10; runs++) { + ConnectorFiltering testInstance = ConnectorTestUtils.getRandomConnectorFiltering(); + assertTransportSerialization(testInstance); + } + } + + public void testToXContent() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "active": { + "advanced_snippet": { + "created_at": "2023-11-09T15:13:08.231Z", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": {} + }, + "rules": [ + { + "created_at": "2023-11-09T15:13:08.231Z", + "field": "_", + "id": "DEFAULT", + "order": 0, + "policy": "include", + "rule": "regex", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": ".*" + } + ], + "validation": { + "errors": [], + "state": "valid" + } + }, + "domain": "DEFAULT",
"draft": { + "advanced_snippet": { + "created_at": "2023-11-09T15:13:08.231Z", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": {} + }, + "rules": [ + { + "created_at": "2023-11-09T15:13:08.231Z", + "field": "_", + "id": "DEFAULT", + "order": 0, + "policy": "include", + "rule": "regex", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": ".*" + } + ], + "validation": { + "errors": [], + "state": "valid" + } + } + } + """); + + ConnectorFiltering filtering = ConnectorFiltering.fromXContentBytes(new BytesArray(content), XContentType.JSON); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(filtering, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + ConnectorFiltering parsed; + try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) { + parsed = ConnectorFiltering.fromXContent(parser); + } + assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON); + + } + + private void assertTransportSerialization(ConnectorFiltering testInstance) throws IOException { + ConnectorFiltering deserializedInstance = copyInstance(testInstance); + assertNotSame(testInstance, deserializedInstance); + assertThat(testInstance, equalTo(deserializedInstance)); + } + + private ConnectorFiltering copyInstance(ConnectorFiltering instance) throws IOException { + return copyWriteable(instance, namedWriteableRegistry, ConnectorFiltering::new); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorIndexServiceTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorIndexServiceTests.java new file mode 100644 index 0000000000000..71076693c07f8 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorIndexServiceTests.java @@ -0,0 +1,163 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector; + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.CoreMatchers.anyOf; +import static org.hamcrest.CoreMatchers.equalTo; + +public class ConnectorIndexServiceTests extends ESSingleNodeTestCase { + + private static final int REQUEST_TIMEOUT_SECONDS = 10; + + private ConnectorIndexService connectorIndexService; + + @Before + public void setup() { + this.connectorIndexService = new ConnectorIndexService(client()); + } + + public void testPutConnector() throws Exception { + + Connector connector = ConnectorTestUtils.getRandomConnector(); + DocWriteResponse resp = awaitPutConnector(connector); + assertThat(resp.status(), anyOf(equalTo(RestStatus.CREATED), equalTo(RestStatus.OK))); + } + + public void testDeleteConnector() throws Exception { + int numConnectors = 5; + List<String> connectorIds = new ArrayList<>(); + for (int i = 0; i < numConnectors; i++) { + Connector connector = ConnectorTestUtils.getRandomConnector(); + connectorIds.add(connector.getConnectorId()); + DocWriteResponse resp = awaitPutConnector(connector); + assertThat(resp.status(), equalTo(RestStatus.CREATED)); + } + + String connectorIdToDelete = connectorIds.get(0); + DeleteResponse resp = awaitDeleteConnector(connectorIdToDelete); + assertThat(resp.status(), equalTo(RestStatus.OK)); + expectThrows(ResourceNotFoundException.class, () -> awaitGetConnector(connectorIdToDelete)); + + expectThrows(ResourceNotFoundException.class, () -> awaitDeleteConnector(connectorIdToDelete)); + } + + private DeleteResponse awaitDeleteConnector(String connectorId) throws Exception { + CountDownLatch latch = new CountDownLatch(1); + final AtomicReference<DeleteResponse> resp = new AtomicReference<>(null); + final AtomicReference<Exception> exc = new AtomicReference<>(null); + connectorIndexService.deleteConnector(connectorId, new ActionListener<>() { + @Override + public void onResponse(DeleteResponse deleteResponse) { + resp.set(deleteResponse); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + exc.set(e); + latch.countDown(); + } + }); + assertTrue("Timeout waiting for delete request", latch.await(REQUEST_TIMEOUT_SECONDS, TimeUnit.SECONDS)); + if (exc.get() != null) { + throw exc.get(); + } + assertNotNull("Received null response from delete request", resp.get()); + return resp.get(); + } + + private DocWriteResponse awaitPutConnector(Connector connector) throws Exception { + CountDownLatch latch = new CountDownLatch(1); + final AtomicReference<DocWriteResponse> resp = new AtomicReference<>(null); + final AtomicReference<Exception> exc = new AtomicReference<>(null); + connectorIndexService.putConnector(connector, new ActionListener<>() { + @Override + public void onResponse(DocWriteResponse indexResponse) { + resp.set(indexResponse); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + exc.set(e); + latch.countDown(); + } + }); + assertTrue("Timeout waiting for put request", latch.await(REQUEST_TIMEOUT_SECONDS, TimeUnit.SECONDS)); + if (exc.get() != null) { + throw exc.get(); + } +
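+        // No failure was captured before the latch was released, so the put listener must have set a response.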
assertNotNull("Received null response from put request", resp.get()); + return resp.get(); + } + + private Connector awaitGetConnector(String connectorId) throws Exception { + CountDownLatch latch = new CountDownLatch(1); + final AtomicReference<Connector> resp = new AtomicReference<>(null); + final AtomicReference<Exception> exc = new AtomicReference<>(null); + connectorIndexService.getConnector(connectorId, new ActionListener<>() { + @Override + public void onResponse(Connector connector) { + resp.set(connector); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + exc.set(e); + latch.countDown(); + } + }); + assertTrue("Timeout waiting for get request", latch.await(REQUEST_TIMEOUT_SECONDS, TimeUnit.SECONDS)); + if (exc.get() != null) { + throw exc.get(); + } + assertNotNull("Received null response from get request", resp.get()); + return resp.get(); + } + + private ConnectorIndexService.ConnectorResult awaitListConnector(int from, int size) throws Exception { + CountDownLatch latch = new CountDownLatch(1); + final AtomicReference<ConnectorIndexService.ConnectorResult> resp = new AtomicReference<>(null); + final AtomicReference<Exception> exc = new AtomicReference<>(null); + connectorIndexService.listConnectors(from, size, new ActionListener<>() { + @Override + public void onResponse(ConnectorIndexService.ConnectorResult result) { + resp.set(result); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + exc.set(e); + latch.countDown(); + } + }); + assertTrue("Timeout waiting for list request", latch.await(REQUEST_TIMEOUT_SECONDS, TimeUnit.SECONDS)); + if (exc.get() != null) { + throw exc.get(); + } + assertNotNull("Received null response from list request", resp.get()); + return resp.get(); + } + +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorIngestPipelineTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorIngestPipelineTests.java new file mode 100644 index 0000000000000..f4a92e51e8c6a --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorIngestPipelineTests.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.application.connector; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentType; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; +import static org.hamcrest.CoreMatchers.equalTo; + +public class ConnectorIngestPipelineTests extends ESTestCase { + + private NamedWriteableRegistry namedWriteableRegistry; + + @Before + public void registerNamedObjects() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList()); + + List<NamedWriteableRegistry.Entry> namedWriteables = searchModule.getNamedWriteables(); + namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables); + } + + public final void testRandomSerialization() throws IOException { + for (int runs = 0; runs < 10; runs++) { + ConnectorIngestPipeline testInstance = ConnectorTestUtils.getRandomConnectorIngestPipeline(); + assertTransportSerialization(testInstance); + } + } + + public void testToXContent() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "extract_binary_content": true, + "name": "ent-search-generic-ingestion", + "reduce_whitespace": true, + "run_ml_inference": false + } + """); + + ConnectorIngestPipeline pipeline = ConnectorIngestPipeline.fromXContentBytes(new BytesArray(content), XContentType.JSON); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(pipeline, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + ConnectorIngestPipeline parsed; + try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) { + parsed = ConnectorIngestPipeline.fromXContent(parser); + } + assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON); + } + + private void assertTransportSerialization(ConnectorIngestPipeline testInstance) throws IOException { + ConnectorIngestPipeline deserializedInstance = copyInstance(testInstance); + assertNotSame(testInstance, deserializedInstance); + assertThat(testInstance, equalTo(deserializedInstance)); + } + + private ConnectorIngestPipeline copyInstance(ConnectorIngestPipeline instance) throws IOException { + return copyWriteable(instance, namedWriteableRegistry, ConnectorIngestPipeline::new); + } + +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorSchedulingTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorSchedulingTests.java new file mode 100644 index 0000000000000..cb986ce9992e0 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorSchedulingTests.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements.
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentType; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; +import static org.hamcrest.CoreMatchers.equalTo; + +public class ConnectorSchedulingTests extends ESTestCase { + + private NamedWriteableRegistry namedWriteableRegistry; + + @Before + public void registerNamedObjects() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList()); + + List<NamedWriteableRegistry.Entry> namedWriteables = searchModule.getNamedWriteables(); + namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables); + } + + public final void testRandomSerialization() throws IOException { + for (int runs = 0; runs < 10; runs++) { + ConnectorScheduling testInstance = ConnectorTestUtils.getRandomConnectorScheduling(); + assertTransportSerialization(testInstance); + } + } + + public void testToXContent() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "access_control": { + "enabled": false, + "interval": "0 0 0 * * ?" + }, + "full": { + "enabled": false, + "interval": "0 0 0 * * ?" + }, + "incremental": { + "enabled": false, + "interval": "0 0 0 * * ?"
+ } + }"""); + + ConnectorScheduling scheduling = ConnectorScheduling.fromXContentBytes(new BytesArray(content), XContentType.JSON); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(scheduling, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + ConnectorScheduling parsed; + try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) { + parsed = ConnectorScheduling.fromXContent(parser); + } + assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON); + } + + private void assertTransportSerialization(ConnectorScheduling testInstance) throws IOException { + ConnectorScheduling deserializedInstance = copyInstance(testInstance); + assertNotSame(testInstance, deserializedInstance); + assertThat(testInstance, equalTo(deserializedInstance)); + } + + private ConnectorScheduling copyInstance(ConnectorScheduling instance) throws IOException { + return copyWriteable(instance, namedWriteableRegistry, ConnectorScheduling::new); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorSyncInfoTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorSyncInfoTests.java new file mode 100644 index 0000000000000..0e6a4792d2145 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorSyncInfoTests.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.emptyList; +import static org.hamcrest.CoreMatchers.equalTo; + +public class ConnectorSyncInfoTests extends ESTestCase { + + private NamedWriteableRegistry namedWriteableRegistry; + + @Before + public void registerNamedObjects() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList()); + + List<NamedWriteableRegistry.Entry> namedWriteables = searchModule.getNamedWriteables(); + namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables); + } + + public final void testRandomSerialization() throws IOException { + for (int runs = 0; runs < 10; runs++) { + ConnectorSyncInfo testInstance = ConnectorTestUtils.getRandomConnectorSyncInfo(); + assertTransportSerialization(testInstance); + } + } + + private void assertTransportSerialization(ConnectorSyncInfo testInstance) throws IOException { + ConnectorSyncInfo deserializedInstance = copyInstance(testInstance); + assertNotSame(testInstance, deserializedInstance); + assertThat(testInstance, equalTo(deserializedInstance)); + } + + private ConnectorSyncInfo copyInstance(ConnectorSyncInfo instance) throws IOException { + return copyWriteable(instance, namedWriteableRegistry, ConnectorSyncInfo::new); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java
b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java new file mode 100644 index 0000000000000..dd8550ea73da0 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java @@ -0,0 +1,254 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector; + +import org.elasticsearch.xpack.application.connector.action.PutConnectorAction; +import org.elasticsearch.xpack.application.connector.filtering.FilteringAdvancedSnippet; +import org.elasticsearch.xpack.application.connector.filtering.FilteringPolicy; +import org.elasticsearch.xpack.application.connector.filtering.FilteringRule; +import org.elasticsearch.xpack.application.connector.filtering.FilteringRuleCondition; +import org.elasticsearch.xpack.application.connector.filtering.FilteringRules; +import org.elasticsearch.xpack.application.connector.filtering.FilteringValidationInfo; +import org.elasticsearch.xpack.application.connector.filtering.FilteringValidationState; +import org.elasticsearch.xpack.core.scheduler.Cron; + +import java.time.Instant; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.elasticsearch.test.ESTestCase.randomAlphaOfLength; +import static org.elasticsearch.test.ESTestCase.randomAlphaOfLengthBetween; +import static org.elasticsearch.test.ESTestCase.randomBoolean; +import static org.elasticsearch.test.ESTestCase.randomFrom; +import static org.elasticsearch.test.ESTestCase.randomInt; +import static org.elasticsearch.test.ESTestCase.randomList; +import static org.elasticsearch.test.ESTestCase.randomLong; +import static org.elasticsearch.test.ESTestCase.randomLongBetween; + +public final class ConnectorTestUtils { + public static PutConnectorAction.Request getRandomPutConnectorActionRequest() { + return new PutConnectorAction.Request( + randomAlphaOfLengthBetween(5, 15), + randomFrom(randomAlphaOfLengthBetween(5, 15)), + randomFrom(randomAlphaOfLengthBetween(5, 15)), + randomFrom(randomBoolean()), + randomFrom(randomAlphaOfLengthBetween(5, 15)), + randomFrom(randomAlphaOfLengthBetween(5, 15)), + randomFrom(randomAlphaOfLengthBetween(5, 15)) + ); + } + + public static ConnectorScheduling getRandomConnectorScheduling() { + return new ConnectorScheduling.Builder().setAccessControl( + new ConnectorScheduling.ScheduleConfig.Builder().setEnabled(randomBoolean()).setInterval(getRandomCronExpression()).build() + ) + .setFull( + new ConnectorScheduling.ScheduleConfig.Builder().setEnabled(randomBoolean()).setInterval(getRandomCronExpression()).build() + ) + .setIncremental( + new ConnectorScheduling.ScheduleConfig.Builder().setEnabled(randomBoolean()).setInterval(getRandomCronExpression()).build() + ) + .build(); + } + + public static ConnectorIngestPipeline getRandomConnectorIngestPipeline() { + return new ConnectorIngestPipeline.Builder().setName(randomAlphaOfLengthBetween(5, 15)) + .setExtractBinaryContent(randomBoolean()) + .setReduceWhitespace(randomBoolean()) + .setRunMlInference(randomBoolean()) + .build(); + } + + public static ConnectorSyncInfo getRandomConnectorSyncInfo() { + return new ConnectorSyncInfo.Builder().setLastAccessControlSyncError(randomFrom(new String[] { null, 
randomAlphaOfLength(10) })) + .setLastAccessControlSyncScheduledAt(randomFrom(new Instant[] { null, Instant.ofEpochMilli(randomLong()) })) + .setLastAccessControlSyncStatus(randomFrom(new ConnectorSyncStatus[] { null, getRandomSyncStatus() })) + .setLastDeletedDocumentCount(randomFrom(new Long[] { null, randomLong() })) + .setLastIncrementalSyncScheduledAt(randomFrom(new Instant[] { null, Instant.ofEpochMilli(randomLong()) })) + .setLastIndexedDocumentCount(randomFrom(new Long[] { null, randomLong() })) + .setLastSeen(randomFrom(new Instant[] { null, Instant.ofEpochMilli(randomLong()) })) + .setLastSyncError(randomFrom(new String[] { null, randomAlphaOfLength(10) })) + .setLastSyncScheduledAt(randomFrom(new Instant[] { null, Instant.ofEpochMilli(randomLong()) })) + .setLastSyncStatus(randomFrom(new ConnectorSyncStatus[] { null, getRandomSyncStatus() })) + .setLastSynced(randomFrom(new Instant[] { null, Instant.ofEpochMilli(randomLong()) })) + .build(); + } + + public static ConnectorFeatures getRandomConnectorFeatures() { + return new ConnectorFeatures.Builder().setDocumentLevelSecurityEnabled(randomBoolean() ? randomConnectorFeatureEnabled() : null) + .setFilteringRules(randomFrom(new Boolean[] { null, randomBoolean() })) + .setFilteringAdvancedConfig(randomFrom(new Boolean[] { null, randomBoolean() })) + .setIncrementalSyncEnabled(randomBoolean() ? randomConnectorFeatureEnabled() : null) + .setSyncRulesFeatures(randomBoolean() ? randomSyncRulesFeatures() : null) + .build(); + } + + public static ConnectorCustomSchedule getRandomConnectorCustomSchedule() { + return new ConnectorCustomSchedule.Builder().setInterval(getRandomCronExpression()) + .setEnabled(randomBoolean()) + .setLastSynced(randomFrom(new Instant[] { null, Instant.ofEpochMilli(randomLongBetween(0, 10000)) })) + .setName(randomAlphaOfLength(10)) + .setConfigurationOverrides( + new ConnectorCustomSchedule.ConfigurationOverrides.Builder().setMaxCrawlDepth(randomInt()) + .setSitemapDiscoveryDisabled(randomBoolean()) + .setDomainAllowList(randomList(1, 5, () -> randomAlphaOfLength(5))) + .setSeedUrls(randomList(1, 5, () -> randomAlphaOfLength(5))) + .setSitemapUrls(randomList(1, 5, () -> randomAlphaOfLength(5))) + .build() + ) + .build(); + } + + public static ConnectorFiltering getRandomConnectorFiltering() { + + Instant currentTimestamp = Instant.now(); + + return new ConnectorFiltering.Builder().setActive( + new FilteringRules.Builder().setAdvancedSnippet( + new FilteringAdvancedSnippet.Builder().setAdvancedSnippetCreatedAt(currentTimestamp) + .setAdvancedSnippetUpdatedAt(currentTimestamp) + .setAdvancedSnippetValue(Collections.emptyMap()) + .build() + ) + .setRules( + List.of( + new FilteringRule.Builder().setCreatedAt(currentTimestamp) + .setField(randomAlphaOfLength(10)) + .setId(randomAlphaOfLength(10)) + .setOrder(randomInt()) + .setPolicy(getRandomFilteringPolicy()) + .setRule(getRandomFilteringRule()) + .setUpdatedAt(currentTimestamp) + .setValue(randomAlphaOfLength(10)) + .build() + ) + ) + .setFilteringValidationInfo( + new FilteringValidationInfo.Builder().setValidationErrors(Collections.emptyList()) + .setValidationState(getRandomFilteringValidationState()) + .build() + ) + .build() + ) + .setDomain(randomAlphaOfLength(10)) + .setDraft( + new FilteringRules.Builder().setAdvancedSnippet( + new FilteringAdvancedSnippet.Builder().setAdvancedSnippetCreatedAt(currentTimestamp) + .setAdvancedSnippetUpdatedAt(currentTimestamp) + .setAdvancedSnippetValue(Collections.emptyMap()) + .build() + ) + .setRules( + List.of( + new 
FilteringRule.Builder().setCreatedAt(currentTimestamp) + .setField(randomAlphaOfLength(10)) + .setId(randomAlphaOfLength(10)) + .setOrder(randomInt()) + .setPolicy(getRandomFilteringPolicy()) + .setRule(getRandomFilteringRule()) + .setUpdatedAt(currentTimestamp) + .setValue(randomAlphaOfLength(10)) + .build() + ) + ) + .setFilteringValidationInfo( + new FilteringValidationInfo.Builder().setValidationErrors(Collections.emptyList()) + .setValidationState(getRandomFilteringValidationState()) + .build() + ) + .build() + ) + .build(); + } + + public static Connector getRandomSyncJobConnectorInfo() { + return new Connector.Builder().setConnectorId(randomAlphaOfLength(10)) + .setFiltering(List.of(getRandomConnectorFiltering())) + .setIndexName(randomAlphaOfLength(10)) + .setLanguage(randomAlphaOfLength(10)) + .setServiceType(randomAlphaOfLength(10)) + .setConfiguration(Collections.emptyMap()) + .build(); + } + + public static Connector getRandomConnector() { + return new Connector.Builder().setConnectorId(randomAlphaOfLength(10)) + .setApiKeyId(randomFrom(new String[] { null, randomAlphaOfLength(10) })) + .setConfiguration(Collections.emptyMap()) + .setCustomScheduling(Map.of(randomAlphaOfLengthBetween(5, 10), getRandomConnectorCustomSchedule())) + .setDescription(randomFrom(new String[] { null, randomAlphaOfLength(10) })) + .setError(randomFrom(new String[] { null, randomAlphaOfLength(10) })) + .setFeatures(randomBoolean() ? getRandomConnectorFeatures() : null) + .setFiltering(randomBoolean() ? List.of(getRandomConnectorFiltering()) : null) + .setIndexName(randomFrom(new String[] { null, randomAlphaOfLength(10) })) + .setIsNative(randomBoolean()) + .setLanguage(randomFrom(new String[] { null, randomAlphaOfLength(10) })) + .setSyncInfo(getRandomConnectorSyncInfo()) + .setName(randomFrom(new String[] { null, randomAlphaOfLength(10) })) + .setPipeline(randomBoolean() ? getRandomConnectorIngestPipeline() : null) + .setScheduling(randomBoolean() ? getRandomConnectorScheduling() : null) + .setStatus(getRandomConnectorStatus()) + .setSyncCursor(randomBoolean() ? Map.of("foo", "bar") : null) + .setSyncNow(randomBoolean()) + .build(); + } + + private static ConnectorFeatures.FeatureEnabled randomConnectorFeatureEnabled() { + return new ConnectorFeatures.FeatureEnabled(randomBoolean()); + } + + private static ConnectorFeatures.SyncRulesFeatures randomSyncRulesFeatures() { + return new ConnectorFeatures.SyncRulesFeatures.Builder().setSyncRulesAdvancedEnabled( + randomBoolean() ? randomConnectorFeatureEnabled() : null + ).setSyncRulesBasicEnabled(randomBoolean() ? 
randomConnectorFeatureEnabled() : null).build(); + } + + /** + * Second (0 - 59) Minute (0 - 59) Hour (0 - 23) Day of month (1 - 31) Month (1 - 12) + */ + private static Cron getRandomCronExpression() { + return new Cron( + String.format( + Locale.ROOT, + "%d %d %d %d %d ?", + randomInt(59), + randomInt(59), + randomInt(23), + randomInt(30) + 1, + randomInt(11) + 1 + ) + ); + } + + public static ConnectorSyncStatus getRandomSyncStatus() { + ConnectorSyncStatus[] values = ConnectorSyncStatus.values(); + return values[randomInt(values.length - 1)]; + } + + private static ConnectorStatus getRandomConnectorStatus() { + ConnectorStatus[] values = ConnectorStatus.values(); + return values[randomInt(values.length - 1)]; + } + + private static FilteringPolicy getRandomFilteringPolicy() { + FilteringPolicy[] values = FilteringPolicy.values(); + return values[randomInt(values.length - 1)]; + } + + private static FilteringRuleCondition getRandomFilteringRule() { + FilteringRuleCondition[] values = FilteringRuleCondition.values(); + return values[randomInt(values.length - 1)]; + } + + private static FilteringValidationState getRandomFilteringValidationState() { + FilteringValidationState[] values = FilteringValidationState.values(); + return values[randomInt(values.length - 1)]; + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java new file mode 100644 index 0000000000000..a83537f32f413 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java @@ -0,0 +1,202 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + + package org.elasticsearch.xpack.application.connector; + + import org.elasticsearch.common.bytes.BytesArray; + import org.elasticsearch.common.bytes.BytesReference; + import org.elasticsearch.common.io.stream.NamedWriteableRegistry; + import org.elasticsearch.common.settings.Settings; + import org.elasticsearch.common.xcontent.XContentHelper; + import org.elasticsearch.search.SearchModule; + import org.elasticsearch.test.ESTestCase; + import org.elasticsearch.xcontent.ToXContent; + import org.elasticsearch.xcontent.XContentParser; + import org.elasticsearch.xcontent.XContentType; + import org.junit.Before; + + import java.io.IOException; + import java.util.List; + + import static java.util.Collections.emptyList; + import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; + import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; + import static org.hamcrest.CoreMatchers.equalTo; + + public class ConnectorTests extends ESTestCase { + + private NamedWriteableRegistry namedWriteableRegistry; + + @Before + public void registerNamedObjects() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList()); + + List<NamedWriteableRegistry.Entry> namedWriteables = searchModule.getNamedWriteables(); + namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables); + } + + public final void testRandomSerialization() throws IOException { + for (int runs = 0; runs < 10; runs++) { + Connector testInstance = ConnectorTestUtils.getRandomConnector(); + assertTransportSerialization(testInstance); + } + } + + public void testToXContent() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "api_key_id": "test", + "connector_id": "test-connector", + "custom_scheduling": { + "schedule-key": { + "configuration_overrides": { + "domain_allowlist": [ + "https://example.com" + ], + "max_crawl_depth": 1, + "seed_urls": [ + "https://example.com/blog", + "https://example.com/info" + ], + "sitemap_discovery_disabled": true, + "sitemap_urls": [ + "https://example.com/sitemap.xml" + ] + }, + "enabled": true, + "interval": "0 0 12 * * ?", + "last_synced": null, + "name": "My Schedule" + } + }, + "configuration": {}, + "description": "test-connector", + "features": { + "document_level_security": { + "enabled": true + }, + "filtering_advanced_config": true, + "sync_rules": { + "advanced": { + "enabled": false + }, + "basic": { + "enabled": true + } + } + }, + "filtering": [ + { + "active": { + "advanced_snippet": { + "created_at": "2023-11-09T15:13:08.231Z", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": {} + }, + "rules": [ + { + "created_at": "2023-11-09T15:13:08.231Z", + "field": "_", + "id": "DEFAULT", + "order": 0, + "policy": "include", + "rule": "regex", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": ".*" + } + ], + "validation": { + "errors": [], + "state": "valid" + } + }, + "domain": "DEFAULT", + "draft": { + "advanced_snippet": { + "created_at": "2023-11-09T15:13:08.231Z", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": {} + }, + "rules": [ + { + "created_at": "2023-11-09T15:13:08.231Z", + "field": "_", + "id": "DEFAULT", + "order": 0, + "policy": "include", + "rule": "regex", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": ".*" + } + ], + "validation": { + "errors": [], + "state": "valid" + } + } + } + ], + "index_name": "search-test", + "is_native": true, + "language": "polish", + "last_access_control_sync_error": "some error", + "last_access_control_sync_scheduled_at": "2023-11-09T15:13:08.231Z", +
"last_access_control_sync_status": "pending", + "last_deleted_document_count": 42, + "last_incremental_sync_scheduled_at": "2023-11-09T15:13:08.231Z", + "last_indexed_document_count": 42, + "last_seen": "2023-11-09T15:13:08.231Z", + "last_sync_error": "some error", + "last_sync_scheduled_at": "2024-11-09T15:13:08.231Z", + "last_sync_status": "completed", + "last_synced": "2024-11-09T15:13:08.231Z", + "name": "test-name", + "pipeline": { + "extract_binary_content": true, + "name": "ent-search-generic-ingestion", + "reduce_whitespace": true, + "run_ml_inference": false + }, + "scheduling": { + "access_control": { + "enabled": false, + "interval": "0 0 0 * * ?" + }, + "full": { + "enabled": false, + "interval": "0 0 0 * * ?" + }, + "incremental": { + "enabled": false, + "interval": "0 0 0 * * ?" + } + }, + "service_type": "google_drive", + "status": "needs_configuration", + "sync_now": false + }"""); + + Connector connector = Connector.fromXContentBytes(new BytesArray(content), XContentType.JSON); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(connector, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + Connector parsed; + try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) { + parsed = Connector.fromXContent(parser); + } + assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON); + } + + private void assertTransportSerialization(Connector testInstance) throws IOException { + Connector deserializedInstance = copyInstance(testInstance); + assertNotSame(testInstance, deserializedInstance); + assertThat(testInstance, equalTo(deserializedInstance)); + } + + private Connector copyInstance(Connector instance) throws IOException { + return copyWriteable(instance, namedWriteableRegistry, Connector::new); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/DeleteConnectorActionRequestBWCSerializingTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/DeleteConnectorActionRequestBWCSerializingTests.java new file mode 100644 index 0000000000000..7588db45f5f75 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/DeleteConnectorActionRequestBWCSerializingTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + + package org.elasticsearch.xpack.application.connector.action; + + import org.elasticsearch.TransportVersion; + import org.elasticsearch.common.io.stream.Writeable; + import org.elasticsearch.xcontent.XContentParser; + import org.elasticsearch.xpack.core.ml.AbstractBWCSerializationTestCase; + + import java.io.IOException; + + public class DeleteConnectorActionRequestBWCSerializingTests extends AbstractBWCSerializationTestCase<DeleteConnectorAction.Request> { + + @Override + protected Writeable.Reader<DeleteConnectorAction.Request> instanceReader() { + return DeleteConnectorAction.Request::new; + } + + @Override + protected DeleteConnectorAction.Request createTestInstance() { + return new DeleteConnectorAction.Request(randomAlphaOfLengthBetween(1, 10)); + } + + @Override + protected DeleteConnectorAction.Request mutateInstance(DeleteConnectorAction.Request instance) throws IOException { + return randomValueOtherThan(instance, this::createTestInstance); + } + + @Override + protected DeleteConnectorAction.Request doParseInstance(XContentParser parser) throws IOException { + return DeleteConnectorAction.Request.parse(parser); + } + + @Override + protected DeleteConnectorAction.Request mutateInstanceForVersion(DeleteConnectorAction.Request instance, TransportVersion version) { + return new DeleteConnectorAction.Request(instance.getConnectorId()); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/GetConnectorActionRequestBWCSerializingTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/GetConnectorActionRequestBWCSerializingTests.java new file mode 100644 index 0000000000000..124a068abce93 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/GetConnectorActionRequestBWCSerializingTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + + package org.elasticsearch.xpack.application.connector.action; + + import org.elasticsearch.TransportVersion; + import org.elasticsearch.common.io.stream.Writeable; + import org.elasticsearch.xcontent.XContentParser; + import org.elasticsearch.xpack.core.ml.AbstractBWCSerializationTestCase; + + import java.io.IOException; + + public class GetConnectorActionRequestBWCSerializingTests extends AbstractBWCSerializationTestCase<GetConnectorAction.Request> { + + @Override + protected Writeable.Reader<GetConnectorAction.Request> instanceReader() { + return GetConnectorAction.Request::new; + } + + @Override + protected GetConnectorAction.Request createTestInstance() { + return new GetConnectorAction.Request(randomAlphaOfLengthBetween(1, 10)); + } + + @Override + protected GetConnectorAction.Request mutateInstance(GetConnectorAction.Request instance) throws IOException { + return randomValueOtherThan(instance, this::createTestInstance); + } + + @Override + protected GetConnectorAction.Request doParseInstance(XContentParser parser) throws IOException { + return GetConnectorAction.Request.parse(parser); + } + + @Override + protected GetConnectorAction.Request mutateInstanceForVersion(GetConnectorAction.Request instance, TransportVersion version) { + return new GetConnectorAction.Request(instance.getConnectorId()); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/GetConnectorActionResponseBWCSerializingTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/GetConnectorActionResponseBWCSerializingTests.java new file mode 100644 index 0000000000000..bcb1bcc86402f --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/GetConnectorActionResponseBWCSerializingTests.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + + package org.elasticsearch.xpack.application.connector.action; + + import org.elasticsearch.TransportVersion; + import org.elasticsearch.common.io.stream.Writeable; + import org.elasticsearch.xcontent.XContentParser; + import org.elasticsearch.xpack.application.connector.Connector; + import org.elasticsearch.xpack.application.connector.ConnectorTestUtils; + import org.elasticsearch.xpack.core.ml.AbstractBWCSerializationTestCase; + + import java.io.IOException; + + public class GetConnectorActionResponseBWCSerializingTests extends AbstractBWCSerializationTestCase<GetConnectorAction.Response> { + + private Connector connector; + + @Override + protected Writeable.Reader<GetConnectorAction.Response> instanceReader() { + return GetConnectorAction.Response::new; + } + + @Override + protected GetConnectorAction.Response createTestInstance() { + this.connector = ConnectorTestUtils.getRandomConnector(); + return new GetConnectorAction.Response(this.connector); + } + + @Override + protected GetConnectorAction.Response mutateInstance(GetConnectorAction.Response instance) throws IOException { + return randomValueOtherThan(instance, this::createTestInstance); + } + + @Override + protected GetConnectorAction.Response doParseInstance(XContentParser parser) throws IOException { + return GetConnectorAction.Response.fromXContent(parser); + } + + @Override + protected GetConnectorAction.Response mutateInstanceForVersion(GetConnectorAction.Response instance, TransportVersion version) { + return instance; + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/ListConnectorActionRequestBWCSerializingTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/ListConnectorActionRequestBWCSerializingTests.java new file mode 100644 index 0000000000000..b31c3e90b7403 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/ListConnectorActionRequestBWCSerializingTests.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + + package org.elasticsearch.xpack.application.connector.action; + + import org.elasticsearch.TransportVersion; + import org.elasticsearch.common.io.stream.Writeable; + import org.elasticsearch.xcontent.XContentParser; + import org.elasticsearch.xpack.application.search.SearchApplicationTestUtils; + import org.elasticsearch.xpack.core.action.util.PageParams; + import org.elasticsearch.xpack.core.ml.AbstractBWCSerializationTestCase; + + import java.io.IOException; + + public class ListConnectorActionRequestBWCSerializingTests extends AbstractBWCSerializationTestCase<ListConnectorAction.Request> { + @Override + protected Writeable.Reader<ListConnectorAction.Request> instanceReader() { + return ListConnectorAction.Request::new; + } + + @Override + protected ListConnectorAction.Request createTestInstance() { + PageParams pageParams = SearchApplicationTestUtils.randomPageParams(); + return new ListConnectorAction.Request(pageParams); + } + + @Override + protected ListConnectorAction.Request mutateInstance(ListConnectorAction.Request instance) throws IOException { + return randomValueOtherThan(instance, this::createTestInstance); + } + + @Override + protected ListConnectorAction.Request doParseInstance(XContentParser parser) throws IOException { + return ListConnectorAction.Request.parse(parser); + } + + @Override + protected ListConnectorAction.Request mutateInstanceForVersion(ListConnectorAction.Request instance, TransportVersion version) { + return new ListConnectorAction.Request(instance.getPageParams()); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/ListConnectorActionResponseBWCSerializingTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/ListConnectorActionResponseBWCSerializingTests.java new file mode 100644 index 0000000000000..1e4ee0d086462 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/ListConnectorActionResponseBWCSerializingTests.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + + package org.elasticsearch.xpack.application.connector.action; + + import org.elasticsearch.TransportVersion; + import org.elasticsearch.common.io.stream.Writeable; + import org.elasticsearch.xpack.application.connector.ConnectorTestUtils; + import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; + + import java.io.IOException; + + public class ListConnectorActionResponseBWCSerializingTests extends AbstractBWCWireSerializationTestCase<ListConnectorAction.Response> { + @Override + protected Writeable.Reader<ListConnectorAction.Response> instanceReader() { + return ListConnectorAction.Response::new; + } + + @Override + protected ListConnectorAction.Response createTestInstance() { + return new ListConnectorAction.Response(randomList(10, ConnectorTestUtils::getRandomConnector), randomLongBetween(0, 100)); + } + + @Override + protected ListConnectorAction.Response mutateInstance(ListConnectorAction.Response instance) throws IOException { + return randomValueOtherThan(instance, this::createTestInstance); + } + + @Override + protected ListConnectorAction.Response mutateInstanceForVersion(ListConnectorAction.Response instance, TransportVersion version) { + return instance; + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/PutConnectorActionRequestBWCSerializingTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/PutConnectorActionRequestBWCSerializingTests.java new file mode 100644 index 0000000000000..f618b4562fdc9 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/PutConnectorActionRequestBWCSerializingTests.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + + package org.elasticsearch.xpack.application.connector.action; + + import org.elasticsearch.TransportVersion; + import org.elasticsearch.common.io.stream.Writeable; + import org.elasticsearch.xcontent.XContentParser; + import org.elasticsearch.xpack.application.connector.ConnectorTestUtils; + import org.elasticsearch.xpack.core.ml.AbstractBWCSerializationTestCase; + + import java.io.IOException; + + public class PutConnectorActionRequestBWCSerializingTests extends AbstractBWCSerializationTestCase<PutConnectorAction.Request> { + + private String connectorId; + + @Override + protected Writeable.Reader<PutConnectorAction.Request> instanceReader() { + return PutConnectorAction.Request::new; + } + + @Override + protected PutConnectorAction.Request createTestInstance() { + PutConnectorAction.Request testInstance = ConnectorTestUtils.getRandomPutConnectorActionRequest(); + this.connectorId = testInstance.getConnectorId(); + return testInstance; + } + + @Override + protected PutConnectorAction.Request mutateInstance(PutConnectorAction.Request instance) throws IOException { + return randomValueOtherThan(instance, this::createTestInstance); + } + + @Override + protected PutConnectorAction.Request doParseInstance(XContentParser parser) throws IOException { + return PutConnectorAction.Request.fromXContent(parser, this.connectorId); + } + + @Override + protected PutConnectorAction.Request mutateInstanceForVersion(PutConnectorAction.Request instance, TransportVersion version) { + return instance; + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/PutConnectorActionResponseBWCSerializingTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/PutConnectorActionResponseBWCSerializingTests.java new file mode 100644 index 0000000000000..94be7e9b6b9ca --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/PutConnectorActionResponseBWCSerializingTests.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + + package org.elasticsearch.xpack.application.connector.action; + + import org.elasticsearch.TransportVersion; + import org.elasticsearch.action.DocWriteResponse; + import org.elasticsearch.common.io.stream.Writeable; + import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; + + import java.io.IOException; + + public class PutConnectorActionResponseBWCSerializingTests extends AbstractBWCWireSerializationTestCase<PutConnectorAction.Response> { + @Override + protected Writeable.Reader<PutConnectorAction.Response> instanceReader() { + return PutConnectorAction.Response::new; + } + + @Override + protected PutConnectorAction.Response createTestInstance() { + return new PutConnectorAction.Response(randomFrom(DocWriteResponse.Result.values())); + } + + @Override + protected PutConnectorAction.Response mutateInstance(PutConnectorAction.Response instance) throws IOException { + return randomValueOtherThan(instance, this::createTestInstance); + } + + @Override + protected PutConnectorAction.Response mutateInstanceForVersion(PutConnectorAction.Response instance, TransportVersion version) { + return instance; + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexServiceTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexServiceTests.java new file mode 100644 index 0000000000000..309675490ad99 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexServiceTests.java @@ -0,0 +1,220 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + + package org.elasticsearch.xpack.application.connector.syncjob; + + import org.elasticsearch.action.ActionFuture; + import org.elasticsearch.action.ActionListener; + import org.elasticsearch.action.DocWriteRequest; + import org.elasticsearch.action.DocWriteResponse; + import org.elasticsearch.action.get.GetRequest; + import org.elasticsearch.action.get.GetResponse; + import org.elasticsearch.action.index.IndexRequest; + import org.elasticsearch.action.support.WriteRequest; + import org.elasticsearch.test.ESSingleNodeTestCase; + import org.elasticsearch.xcontent.ToXContent; + import org.elasticsearch.xpack.application.connector.Connector; + import org.elasticsearch.xpack.application.connector.ConnectorIndexService; + import org.elasticsearch.xpack.application.connector.ConnectorSyncStatus; + import org.elasticsearch.xpack.application.connector.ConnectorTestUtils; + import org.elasticsearch.xpack.application.connector.syncjob.action.PostConnectorSyncJobAction; + import org.junit.Before; + + import java.time.Instant; + import java.util.Map; + import java.util.concurrent.CountDownLatch; + import java.util.concurrent.ExecutionException; + import java.util.concurrent.TimeUnit; + import java.util.concurrent.TimeoutException; + import java.util.concurrent.atomic.AtomicReference; + + import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; + import static org.hamcrest.Matchers.containsString; + import static org.hamcrest.Matchers.equalTo; + import static org.hamcrest.Matchers.notNullValue; + + public class ConnectorSyncJobIndexServiceTests extends ESSingleNodeTestCase { + + private static final String NON_EXISTING_CONNECTOR_ID = "non-existing-connector-id"; + private static final int TIMEOUT_SECONDS = 10; + + private ConnectorSyncJobIndexService connectorSyncJobIndexService; + private Connector connector; + + @Before + public void setup() throws Exception { + connector = ConnectorTestUtils.getRandomSyncJobConnectorInfo(); + + final IndexRequest indexRequest = new IndexRequest(ConnectorIndexService.CONNECTOR_INDEX_NAME).opType(DocWriteRequest.OpType.INDEX) + .id(connector.getConnectorId()) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .source(connector.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS)); + ActionFuture<DocWriteResponse> index = client().index(indexRequest); + + // wait 10 seconds for connector creation + index.get(TIMEOUT_SECONDS, TimeUnit.SECONDS); + + this.connectorSyncJobIndexService = new ConnectorSyncJobIndexService(client()); + } + + public void testCreateConnectorSyncJob() throws Exception { + PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest( + connector.getConnectorId() + ); + PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest); + Map<String, Object> connectorSyncJobSource = getConnectorSyncJobSourceById(response.getId()); + + String id = (String) connectorSyncJobSource.get(ConnectorSyncJob.ID_FIELD.getPreferredName()); + + ConnectorSyncJobType requestJobType = syncJobRequest.getJobType(); + ConnectorSyncJobType jobType = ConnectorSyncJobType.fromString( + (String) connectorSyncJobSource.get(ConnectorSyncJob.JOB_TYPE_FIELD.getPreferredName()) + ); + + ConnectorSyncJobTriggerMethod requestTriggerMethod = syncJobRequest.getTriggerMethod(); + ConnectorSyncJobTriggerMethod triggerMethod = ConnectorSyncJobTriggerMethod.fromString( + (String) connectorSyncJobSource.get(ConnectorSyncJob.TRIGGER_METHOD_FIELD.getPreferredName()) + ); + + ConnectorSyncStatus initialStatus = ConnectorSyncStatus.fromString(
+ (String) connectorSyncJobSource.get(ConnectorSyncJob.STATUS_FIELD.getPreferredName()) + ); + + Instant createdNow = Instant.parse((String) connectorSyncJobSource.get(ConnectorSyncJob.CREATED_AT_FIELD.getPreferredName())); + Instant lastSeen = Instant.parse((String) connectorSyncJobSource.get(ConnectorSyncJob.LAST_SEEN_FIELD.getPreferredName())); + + Integer totalDocumentCount = (Integer) connectorSyncJobSource.get(ConnectorSyncJob.TOTAL_DOCUMENT_COUNT_FIELD.getPreferredName()); + Integer indexedDocumentCount = (Integer) connectorSyncJobSource.get( + ConnectorSyncJob.INDEXED_DOCUMENT_COUNT_FIELD.getPreferredName() + ); + Integer indexedDocumentVolume = (Integer) connectorSyncJobSource.get( + ConnectorSyncJob.INDEXED_DOCUMENT_VOLUME_FIELD.getPreferredName() + ); + Integer deletedDocumentCount = (Integer) connectorSyncJobSource.get(ConnectorSyncJob.DELETED_DOCUMENT_COUNT.getPreferredName()); + + assertThat(id, notNullValue()); + assertThat(jobType, equalTo(requestJobType)); + assertThat(triggerMethod, equalTo(requestTriggerMethod)); + assertThat(initialStatus, equalTo(ConnectorSyncJob.DEFAULT_INITIAL_STATUS)); + assertThat(createdNow, equalTo(lastSeen)); + assertThat(totalDocumentCount, equalTo(0)); + assertThat(indexedDocumentCount, equalTo(0)); + assertThat(indexedDocumentVolume, equalTo(0)); + assertThat(deletedDocumentCount, equalTo(0)); + } + + public void testCreateConnectorSyncJob_WithMissingJobType_ExpectDefaultJobTypeToBeSet() throws Exception { + PostConnectorSyncJobAction.Request syncJobRequest = new PostConnectorSyncJobAction.Request( + connector.getConnectorId(), + null, + ConnectorSyncJobTriggerMethod.ON_DEMAND + ); + PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest); + + Map<String, Object> connectorSyncJobSource = getConnectorSyncJobSourceById(response.getId()); + ConnectorSyncJobType jobType = ConnectorSyncJobType.fromString( + (String) connectorSyncJobSource.get(ConnectorSyncJob.JOB_TYPE_FIELD.getPreferredName()) + ); + + assertThat(jobType, equalTo(ConnectorSyncJob.DEFAULT_JOB_TYPE)); + } + + public void testCreateConnectorSyncJob_WithMissingTriggerMethod_ExpectDefaultTriggerMethodToBeSet() throws Exception { + PostConnectorSyncJobAction.Request syncJobRequest = new PostConnectorSyncJobAction.Request( + connector.getConnectorId(), + ConnectorSyncJobType.FULL, + null + ); + PostConnectorSyncJobAction.Response response = awaitPutConnectorSyncJob(syncJobRequest); + + Map<String, Object> connectorSyncJobSource = getConnectorSyncJobSourceById(response.getId()); + ConnectorSyncJobTriggerMethod triggerMethod = ConnectorSyncJobTriggerMethod.fromString( + (String) connectorSyncJobSource.get(ConnectorSyncJob.TRIGGER_METHOD_FIELD.getPreferredName()) + ); + + assertThat(triggerMethod, equalTo(ConnectorSyncJob.DEFAULT_TRIGGER_METHOD)); + } + + public void testCreateConnectorSyncJob_WithMissingConnectorId_ExpectException() throws Exception { + PostConnectorSyncJobAction.Request syncJobRequest = new PostConnectorSyncJobAction.Request( + NON_EXISTING_CONNECTOR_ID, + ConnectorSyncJobType.FULL, + ConnectorSyncJobTriggerMethod.ON_DEMAND + ); + awaitPutConnectorSyncJobExpectingException( + syncJobRequest, + ActionListener.wrap(response -> {}, exception -> assertThat(exception.getMessage(), containsString(NON_EXISTING_CONNECTOR_ID))) + ); + } + + private Map<String, Object> getConnectorSyncJobSourceById(String syncJobId) throws ExecutionException, InterruptedException, + TimeoutException { + GetRequest getRequest = new GetRequest(ConnectorSyncJobIndexService.CONNECTOR_SYNC_JOB_INDEX_NAME, syncJobId); +
ActionFuture<GetResponse> getResponseActionFuture = client().get(getRequest); + + return getResponseActionFuture.get(TIMEOUT_SECONDS, TimeUnit.SECONDS).getSource(); + } + + private void awaitPutConnectorSyncJobExpectingException( + PostConnectorSyncJobAction.Request syncJobRequest, + ActionListener<PostConnectorSyncJobAction.Response> listener + ) throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + + connectorSyncJobIndexService.createConnectorSyncJob(syncJobRequest, new ActionListener<>() { + @Override + public void onResponse(PostConnectorSyncJobAction.Response putConnectorSyncJobResponse) { + fail("Expected an exception and not a successful response"); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + latch.countDown(); + } + }); + + boolean requestCompleted = latch.await(TIMEOUT_SECONDS, TimeUnit.SECONDS); + assertTrue("Timeout waiting for put request", requestCompleted); + } + + private PostConnectorSyncJobAction.Response awaitPutConnectorSyncJob(PostConnectorSyncJobAction.Request syncJobRequest) + throws Exception { + CountDownLatch latch = new CountDownLatch(1); + + final AtomicReference<PostConnectorSyncJobAction.Response> responseRef = new AtomicReference<>(null); + final AtomicReference<Exception> exception = new AtomicReference<>(null); + + connectorSyncJobIndexService.createConnectorSyncJob(syncJobRequest, new ActionListener<>() { + @Override + public void onResponse(PostConnectorSyncJobAction.Response putConnectorSyncJobResponse) { + responseRef.set(putConnectorSyncJobResponse); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + exception.set(e); + latch.countDown(); + } + }); + + boolean requestCompleted = latch.await(TIMEOUT_SECONDS, TimeUnit.SECONDS); + assertTrue("Timeout waiting for post request", requestCompleted); + + if (exception.get() != null) { + throw exception.get(); + } + + PostConnectorSyncJobAction.Response response = responseRef.get(); + assertNotNull("Received null response from post request", response); + + return response; + } + +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTestUtils.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTestUtils.java new file mode 100644 index 0000000000000..5ce6925ae1cda --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTestUtils.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.application.connector.syncjob; + +import org.elasticsearch.core.Tuple; +import org.elasticsearch.xpack.application.connector.ConnectorTestUtils; +import org.elasticsearch.xpack.application.connector.syncjob.action.PostConnectorSyncJobAction; + +import java.time.Instant; + +import static org.elasticsearch.test.ESTestCase.randomAlphaOfLength; +import static org.elasticsearch.test.ESTestCase.randomAlphaOfLengthBetween; +import static org.elasticsearch.test.ESTestCase.randomFrom; +import static org.elasticsearch.test.ESTestCase.randomInstantBetween; +import static org.elasticsearch.test.ESTestCase.randomInt; +import static org.elasticsearch.test.ESTestCase.randomLong; +import static org.elasticsearch.test.ESTestCase.randomMap; + +public class ConnectorSyncJobTestUtils { + + public static ConnectorSyncJob getRandomConnectorSyncJob() { + Instant lowerBoundInstant = Instant.ofEpochSecond(0L); + Instant upperBoundInstant = Instant.ofEpochSecond(3000000000L); + + return new ConnectorSyncJob.Builder().setCancellationRequestedAt( + randomFrom(new Instant[] { null, randomInstantBetween(lowerBoundInstant, upperBoundInstant) }) + ) + .setCanceledAt(randomFrom(new Instant[] { null, randomInstantBetween(lowerBoundInstant, upperBoundInstant) })) + .setCompletedAt(randomFrom(new Instant[] { null, randomInstantBetween(lowerBoundInstant, upperBoundInstant) })) + .setConnector(ConnectorTestUtils.getRandomSyncJobConnectorInfo()) + .setCreatedAt(randomInstantBetween(lowerBoundInstant, upperBoundInstant)) + .setDeletedDocumentCount(randomLong()) + .setError(randomFrom(new String[] { null, randomAlphaOfLength(10) })) + .setId(randomAlphaOfLength(10)) + .setIndexedDocumentCount(randomLong()) + .setIndexedDocumentVolume(randomLong()) + .setJobType(getRandomConnectorJobType()) + .setLastSeen(randomFrom(new Instant[] { null, randomInstantBetween(lowerBoundInstant, upperBoundInstant) })) + .setMetadata( + randomMap( + 0, + 10, + () -> new Tuple<>(randomAlphaOfLength(10), randomFrom(new Object[] { null, randomAlphaOfLength(10), randomLong() })) + ) + ) + .setStartedAt(randomFrom(new Instant[] { null, randomInstantBetween(lowerBoundInstant, upperBoundInstant) })) + .setStatus(ConnectorTestUtils.getRandomSyncStatus()) + .setTotalDocumentCount(randomLong()) + .setTriggerMethod(getRandomConnectorSyncJobTriggerMethod()) + .setWorkerHostname(randomAlphaOfLength(10)) + .build(); + } + + public static ConnectorSyncJobTriggerMethod getRandomConnectorSyncJobTriggerMethod() { + ConnectorSyncJobTriggerMethod[] values = ConnectorSyncJobTriggerMethod.values(); + return values[randomInt(values.length - 1)]; + } + + public static ConnectorSyncJobType getRandomConnectorJobType() { + ConnectorSyncJobType[] values = ConnectorSyncJobType.values(); + return values[randomInt(values.length - 1)]; + } + + public static PostConnectorSyncJobAction.Request getRandomPostConnectorSyncJobActionRequest() { + return new PostConnectorSyncJobAction.Request( + randomAlphaOfLengthBetween(5, 15), + randomFrom(ConnectorSyncJobType.values()), + randomFrom(ConnectorSyncJobTriggerMethod.values()) + ); + } + + public static PostConnectorSyncJobAction.Request getRandomPostConnectorSyncJobActionRequest(String connectorId) { + return new PostConnectorSyncJobAction.Request( + connectorId, + randomFrom(ConnectorSyncJobType.values()), + randomFrom(ConnectorSyncJobTriggerMethod.values()) + ); + } + + public static PostConnectorSyncJobAction.Response getRandomPostConnectorSyncJobActionResponse() { + return new 
PostConnectorSyncJobAction.Response(randomAlphaOfLength(10)); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java new file mode 100644 index 0000000000000..aeecf582c9ec7 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector.syncjob; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.application.connector.Connector; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class ConnectorSyncJobTests extends ESTestCase { + + private NamedWriteableRegistry namedWriteableRegistry; + + @Before + public void registerNamedObjects() { + namedWriteableRegistry = new NamedWriteableRegistry( + List.of(new NamedWriteableRegistry.Entry(Connector.class, Connector.NAME, Connector::new)) + ); + } + + public final void testRandomSerialization() throws IOException { + for (int run = 0; run < 10; run++) { + ConnectorSyncJob syncJob = ConnectorSyncJobTestUtils.getRandomConnectorSyncJob(); + assertTransportSerialization(syncJob); + } + } + + private void assertTransportSerialization(ConnectorSyncJob testInstance) throws IOException { + ConnectorSyncJob deserializedInstance = copyInstance(testInstance); + assertNotSame(testInstance, deserializedInstance); + assertThat(testInstance, equalTo(deserializedInstance)); + } + + private ConnectorSyncJob copyInstance(ConnectorSyncJob instance) throws IOException { + return copyWriteable(instance, namedWriteableRegistry, ConnectorSyncJob::new); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTriggerMethodTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTriggerMethodTests.java new file mode 100644 index 0000000000000..34b0c2a9b281e --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTriggerMethodTests.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.syncjob; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class ConnectorSyncJobTriggerMethodTests extends ESTestCase { + + public void testFromString_WithValidTriggerMethodString() { + ConnectorSyncJobTriggerMethod triggerMethod = ConnectorSyncJobTestUtils.getRandomConnectorSyncJobTriggerMethod(); + + assertThat(ConnectorSyncJobTriggerMethod.fromString(triggerMethod.toString()), equalTo(triggerMethod)); + } + + public void testFromString_WithInvalidTriggerMethodString_ExpectException() { + expectThrows(IllegalArgumentException.class, () -> ConnectorSyncJobTriggerMethod.fromString("invalid string")); + } + +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTypeTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTypeTests.java new file mode 100644 index 0000000000000..f716563141edc --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTypeTests.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.application.connector.syncjob; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class ConnectorSyncJobTypeTests extends ESTestCase { + + public void testFromString_WithValidSyncJobTypeString() { + ConnectorSyncJobType syncJobType = ConnectorSyncJobTestUtils.getRandomConnectorJobType(); + + assertThat(ConnectorSyncJobType.fromString(syncJobType.toString()), equalTo(syncJobType)); + } + + public void testFromString_WithInvalidSyncJobTypeString_ExpectException() { + expectThrows(IllegalArgumentException.class, () -> ConnectorSyncJobType.fromString("invalid sync job type")); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/PostConnectorSyncJobActionRequestBWCSerializingTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/PostConnectorSyncJobActionRequestBWCSerializingTests.java new file mode 100644 index 0000000000000..73e6036dd5148 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/PostConnectorSyncJobActionRequestBWCSerializingTests.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + + package org.elasticsearch.xpack.application.connector.syncjob.action; + + import org.elasticsearch.TransportVersion; + import org.elasticsearch.common.io.stream.Writeable; + import org.elasticsearch.xcontent.XContentParser; + import org.elasticsearch.xpack.application.connector.syncjob.ConnectorSyncJobTestUtils; + import org.elasticsearch.xpack.core.ml.AbstractBWCSerializationTestCase; + + import java.io.IOException; + + public class PostConnectorSyncJobActionRequestBWCSerializingTests extends AbstractBWCSerializationTestCase< + PostConnectorSyncJobAction.Request> { + + @Override + protected Writeable.Reader<PostConnectorSyncJobAction.Request> instanceReader() { + return PostConnectorSyncJobAction.Request::new; + } + + @Override + protected PostConnectorSyncJobAction.Request createTestInstance() { + return ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(); + } + + @Override + protected PostConnectorSyncJobAction.Request mutateInstance(PostConnectorSyncJobAction.Request instance) throws IOException { + return randomValueOtherThan(instance, this::createTestInstance); + } + + @Override + protected PostConnectorSyncJobAction.Request doParseInstance(XContentParser parser) throws IOException { + return PostConnectorSyncJobAction.Request.fromXContent(parser); + } + + @Override + protected PostConnectorSyncJobAction.Request mutateInstanceForVersion( + PostConnectorSyncJobAction.Request instance, + TransportVersion version + ) { + return instance; + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/PostConnectorSyncJobActionResponseBWCSerializingTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/PostConnectorSyncJobActionResponseBWCSerializingTests.java new file mode 100644 index 0000000000000..2493781b7325d --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/PostConnectorSyncJobActionResponseBWCSerializingTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + + package org.elasticsearch.xpack.application.connector.syncjob.action; + + import org.elasticsearch.TransportVersion; + import org.elasticsearch.common.io.stream.Writeable; + import org.elasticsearch.xpack.application.connector.syncjob.ConnectorSyncJobTestUtils; + import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; + + import java.io.IOException; + + public class PostConnectorSyncJobActionResponseBWCSerializingTests extends AbstractBWCWireSerializationTestCase< + PostConnectorSyncJobAction.Response> { + + @Override + protected Writeable.Reader<PostConnectorSyncJobAction.Response> instanceReader() { + return PostConnectorSyncJobAction.Response::new; + } + + @Override + protected PostConnectorSyncJobAction.Response createTestInstance() { + return ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionResponse(); + } + + @Override + protected PostConnectorSyncJobAction.Response mutateInstance(PostConnectorSyncJobAction.Response instance) throws IOException { + return randomValueOtherThan(instance, this::createTestInstance); + } + + @Override + protected PostConnectorSyncJobAction.Response mutateInstanceForVersion( + PostConnectorSyncJobAction.Response instance, + TransportVersion version + ) { + return instance; + } + +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/PostConnectorSyncJobActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/PostConnectorSyncJobActionTests.java new file mode 100644 index 0000000000000..0a2f94a5f821e --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/PostConnectorSyncJobActionTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.application.connector.syncjob.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.application.connector.syncjob.ConnectorSyncJobTestUtils; +import org.elasticsearch.xpack.application.connector.syncjob.ConnectorSyncJobTriggerMethod; +import org.elasticsearch.xpack.application.connector.syncjob.ConnectorSyncJobType; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class PostConnectorSyncJobActionTests extends ESTestCase { + + public void testValidate_WhenConnectorIdIsPresent_ExpectNoValidationError() { + PostConnectorSyncJobAction.Request request = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(); + ActionRequestValidationException exception = request.validate(); + + assertThat(exception, nullValue()); + } + + public void testValidate_WhenConnectorIdIsNull_ExpectValidationError() { + PostConnectorSyncJobAction.Request requestWithMissingConnectorId = new PostConnectorSyncJobAction.Request( + null, + ConnectorSyncJobType.FULL, + ConnectorSyncJobTriggerMethod.ON_DEMAND + ); + ActionRequestValidationException exception = requestWithMissingConnectorId.validate(); + + assertThat(exception, notNullValue()); + assertThat(exception.getMessage(), containsString(PostConnectorSyncJobAction.Request.EMPTY_CONNECTOR_ID_ERROR_MESSAGE)); + } + +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/TransportPostConnectorSyncJobActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/TransportPostConnectorSyncJobActionTests.java new file mode 100644 index 0000000000000..2463637ada2dd --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/TransportPostConnectorSyncJobActionTests.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + + package org.elasticsearch.xpack.application.connector.syncjob.action; + + import org.elasticsearch.action.ActionListener; + import org.elasticsearch.action.support.ActionFilters; + import org.elasticsearch.cluster.service.ClusterService; + import org.elasticsearch.common.settings.Settings; + import org.elasticsearch.tasks.Task; + import org.elasticsearch.test.ESSingleNodeTestCase; + import org.elasticsearch.threadpool.TestThreadPool; + import org.elasticsearch.threadpool.ThreadPool; + import org.elasticsearch.transport.Transport; + import org.elasticsearch.transport.TransportService; + import org.elasticsearch.xpack.application.connector.syncjob.ConnectorSyncJobTestUtils; + import org.junit.Before; + + import java.util.Collections; + import java.util.concurrent.CountDownLatch; + import java.util.concurrent.TimeUnit; + + import static org.mockito.Mockito.mock; + + public class TransportPostConnectorSyncJobActionTests extends ESSingleNodeTestCase { + + private static final Long TIMEOUT_SECONDS = 10L; + + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + private TransportPostConnectorSyncJobAction action; + + @Before + public void setup() { + ClusterService clusterService = getInstanceFromNode(ClusterService.class); + + TransportService transportService = new TransportService( + Settings.EMPTY, + mock(Transport.class), + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, + null, + Collections.emptySet() + ); + + action = new TransportPostConnectorSyncJobAction(transportService, clusterService, mock(ActionFilters.class), client()); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + ThreadPool.terminate(threadPool, TIMEOUT_SECONDS, TimeUnit.SECONDS); + } + + public void testPostConnectorSyncJob_ExpectNoWarnings() throws InterruptedException { + PostConnectorSyncJobAction.Request request = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest(); + + executeRequest(request); + + ensureNoWarnings(); + } + + private void executeRequest(PostConnectorSyncJobAction.Request request) throws InterruptedException { + final CountDownLatch latch = new CountDownLatch(1); + action.doExecute(mock(Task.class), request, ActionListener.wrap(response -> latch.countDown(), exception -> latch.countDown())); + + boolean requestCompleted = latch.await(TIMEOUT_SECONDS, TimeUnit.SECONDS); + + assertTrue("Timeout waiting for post request", requestCompleted); + } +} diff --git a/x-pack/plugin/eql/build.gradle b/x-pack/plugin/eql/build.gradle index 0da3095cc7a95..fc11e04c4ede2 100644 --- a/x-pack/plugin/eql/build.gradle +++ b/x-pack/plugin/eql/build.gradle @@ -19,7 +19,6 @@ dependencies { testImplementation project(':test:framework') testImplementation(testArtifact(project(xpackModule('core')))) - testImplementation(testArtifact(project(xpackModule('security')))) testImplementation(testArtifact(project(xpackModule('ql')))) testImplementation project(path: ':modules:reindex') testImplementation project(path: ':modules:parent-join') diff --git a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AsyncEqlSearchActionIT.java b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AsyncEqlSearchActionIT.java index 3a631c7724d09..646a1e896c473 100644 --- a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AsyncEqlSearchActionIT.java +++ b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AsyncEqlSearchActionIT.java @@
-95,15 +95,14 @@ private void prepareIndex() throws Exception { for (int i = 0; i < numDocs; i++) { int fieldValue = randomIntBetween(0, 10); builders.add( - client().prepareIndex("test") - .setSource( - jsonBuilder().startObject() - .field("val", fieldValue) - .field("event_type", "my_event") - .field("@timestamp", "2020-04-09T12:35:48Z") - .field("i", i) - .endObject() - ) + prepareIndex("test").setSource( + jsonBuilder().startObject() + .field("val", fieldValue) + .field("event_type", "my_event") + .field("@timestamp", "2020-04-09T12:35:48Z") + .field("i", i) + .endObject() + ) ); } indexRandom(true, builders); diff --git a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/EqlCancellationIT.java b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/EqlCancellationIT.java index 3ec8d02befb54..f2d552fa6bbdb 100644 --- a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/EqlCancellationIT.java +++ b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/EqlCancellationIT.java @@ -51,14 +51,13 @@ public void testCancellation() throws Exception { for (int i = 0; i < numDocs; i++) { int fieldValue = randomIntBetween(0, 10); builders.add( - client().prepareIndex("test") - .setSource( - jsonBuilder().startObject() - .field("val", fieldValue) - .field("event_type", "my_event") - .field("@timestamp", "2020-04-09T12:35:48Z") - .endObject() - ) + prepareIndex("test").setSource( + jsonBuilder().startObject() + .field("val", fieldValue) + .field("event_type", "my_event") + .field("@timestamp", "2020-04-09T12:35:48Z") + .endObject() + ) ); } diff --git a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/RestEqlCancellationIT.java b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/RestEqlCancellationIT.java index 6ae49ea7416bb..2f18f16984f93 100644 --- a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/RestEqlCancellationIT.java +++ b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/RestEqlCancellationIT.java @@ -71,14 +71,13 @@ public void testRestCancellation() throws Exception { for (int i = 0; i < numDocs; i++) { int fieldValue = randomIntBetween(0, 10); builders.add( - client().prepareIndex("test") - .setSource( - jsonBuilder().startObject() - .field("val", fieldValue) - .field("event_type", "my_event") - .field("@timestamp", "2020-04-09T12:35:48Z") - .endObject() - ) + prepareIndex("test").setSource( + jsonBuilder().startObject() + .field("val", fieldValue) + .field("event_type", "my_event") + .field("@timestamp", "2020-04-09T12:35:48Z") + .endObject() + ) ); } @@ -101,7 +100,7 @@ public void testRestCancellation() throws Exception { request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader(Task.X_OPAQUE_ID_HTTP_HEADER, id)); logger.trace("Preparing search"); - final PlainActionFuture<Response> future = PlainActionFuture.newFuture(); + final PlainActionFuture<Response> future = new PlainActionFuture<>(); Cancellable cancellable = getRestClient().performRequestAsync(request, wrapAsRestResponseListener(future)); logger.trace("Waiting for block to be established"); diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestBuilder.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestBuilder.java deleted file mode 100644 index db7c45c07bfbe..0000000000000 ---
a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestBuilder.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.eql.action; - -import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.client.internal.ElasticsearchClient; -import org.elasticsearch.index.query.QueryBuilder; - -public class EqlSearchRequestBuilder extends ActionRequestBuilder<EqlSearchRequest, EqlSearchResponse> { - public EqlSearchRequestBuilder(ElasticsearchClient client, EqlSearchAction action) { - super(client, action, new EqlSearchRequest()); - } - - public EqlSearchRequestBuilder indices(String... indices) { - request.indices(indices); - return this; - } - - public EqlSearchRequestBuilder filter(QueryBuilder filter) { - request.filter(filter); - return this; - } - - public EqlSearchRequestBuilder timestampField(String timestampField) { - request.timestampField(timestampField); - return this; - } - - public EqlSearchRequestBuilder tiebreakerField(String tiebreakerField) { - request.tiebreakerField(tiebreakerField); - return this; - } - - public EqlSearchRequestBuilder eventCategoryField(String eventCategoryField) { - request.eventCategoryField(eventCategoryField); - return this; - } - - public EqlSearchRequestBuilder size(int size) { - request.size(size); - return this; - } - - public EqlSearchRequestBuilder fetchSize(int fetchSize) { - request.fetchSize(fetchSize); - return this; - } - - public EqlSearchRequestBuilder query(String query) { - request.query(query); - return this; - } - - public EqlSearchRequestBuilder maxSamplesPerKey(int maxSamplesPerKey) { - request.maxSamplesPerKey(maxSamplesPerKey); - return this; - } -} diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java index b5ac09cc39b9e..707964a93ab9e 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java @@ -8,15 +8,15 @@ package org.elasticsearch.xpack.eql.execution.search; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.ClosePointInTimeAction; import org.elasticsearch.action.search.ClosePointInTimeRequest; import org.elasticsearch.action.search.ClosePointInTimeResponse; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.MultiSearchResponse; -import org.elasticsearch.action.search.OpenPointInTimeAction; import org.elasticsearch.action.search.OpenPointInTimeRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportClosePointInTimeAction; +import org.elasticsearch.action.search.TransportOpenPointInTimeAction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.TimeValue; @@ -131,7 +131,7 @@ private ActionListener pitListener(Function void openPIT(ActionListener listener, Runnable runnable) { OpenPointInTimeRequest request = new
OpenPointInTimeRequest(indices).indicesOptions(IndexResolver.FIELD_CAPS_INDICES_OPTIONS) .keepAlive(keepAlive); - client.execute(OpenPointInTimeAction.INSTANCE, request, listener.delegateFailureAndWrap((l, r) -> { + client.execute(TransportOpenPointInTimeAction.TYPE, request, listener.delegateFailureAndWrap((l, r) -> { pitId = r.getPointInTimeId(); runnable.run(); })); @@ -142,7 +142,7 @@ public void close(ActionListener listener) { // the pitId could be null as a consequence of a failure on openPIT if (pitId != null) { client.execute( - ClosePointInTimeAction.INSTANCE, + TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId), map(listener, ClosePointInTimeResponse::isSucceeded) ); diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java index 8640378878f10..ecb8ce633d985 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java @@ -9,7 +9,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchRequest; @@ -54,7 +53,6 @@ public final class RuntimeUtils { static final Logger QUERY_LOG = LogManager.getLogger(QueryClient.class); - public static final Version SWITCH_TO_MULTI_VALUE_FIELDS_VERSION = Version.V_7_15_0; private RuntimeUtils() {} @@ -173,7 +171,7 @@ public static HitExtractor createExtractor(FieldExtraction ref, EqlConfiguration } public static SearchRequest prepareRequest(SearchSourceBuilder source, boolean includeFrozen, String... 
indices) { - SearchRequest searchRequest = new SearchRequest(SWITCH_TO_MULTI_VALUE_FIELDS_VERSION); + SearchRequest searchRequest = new SearchRequest(); searchRequest.indices(indices); searchRequest.source(source); searchRequest.allowPartialSearchResults(false); diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/parser/LogicalPlanBuilder.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/parser/LogicalPlanBuilder.java index 2f57bc021a1c0..194c2c7fde459 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/parser/LogicalPlanBuilder.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/parser/LogicalPlanBuilder.java @@ -65,6 +65,7 @@ import static org.elasticsearch.xpack.ql.parser.ParserUtils.source; import static org.elasticsearch.xpack.ql.parser.ParserUtils.text; import static org.elasticsearch.xpack.ql.tree.Source.synthetic; +import static org.elasticsearch.xpack.ql.type.DataTypes.INTEGER; public abstract class LogicalPlanBuilder extends ExpressionBuilder { @@ -102,7 +103,7 @@ public Object visitStatement(StatementContext ctx) { if (ctx.pipe().size() > 0) { throw new ParsingException(source(ctx.pipe().get(0)), "Samples do not support pipes yet"); } - return new LimitWithOffset(plan.source(), new Literal(Source.EMPTY, params.size(), DataTypes.INTEGER), 0, plan); + return new LimitWithOffset(plan.source(), new Literal(Source.EMPTY, params.size(), INTEGER), 0, plan); } // // Add implicit blocks @@ -125,7 +126,7 @@ public Object visitStatement(StatementContext ctx) { plan = new OrderBy(defaultOrderSource, plan, orders); // add the default limit only if specified - Literal defaultSize = new Literal(synthetic(""), params.size(), DataTypes.INTEGER); + Literal defaultSize = new Literal(synthetic(""), params.size(), INTEGER); Source defaultLimitSource = synthetic(""); LogicalPlan previous = plan; @@ -521,8 +522,16 @@ private Expression onlyOnePipeArgument(Source source, String pipeName, List<Expression> exps) { Expression expression = onlyOnePipeArgument(source, pipeName, exps); + boolean foldableInt = expression.foldable() && expression.dataType().isInteger(); + Number value = null; - if (expression.dataType().isInteger() == false || expression.foldable() == false || (int) expression.fold() < 0) { + if (foldableInt) { + try { + value = (Number) expression.fold(); + } catch (ArithmeticException ae) {} + } + + if (foldableInt == false || value == null || value.intValue() != value.longValue() || value.intValue() < 0) { throw new ParsingException( expression.source(), "Pipe [{}] expects a positive integer but found [{}]", @@ -531,6 +540,8 @@ private Expression pipeIntArgument(Source source, String pipeName, List<Expression> exps) { ArgumentCaptor<SearchRequest> searchRequestCaptor = ArgumentCaptor.forClass(SearchRequest.class); - when(client.prepareSearch(any())).thenReturn(new SearchRequestBuilder(client, SearchAction.INSTANCE).setIndices(indices)); + when(client.prepareSearch(any())).thenReturn(new SearchRequestBuilder(client, TransportSearchAction.TYPE).setIndices(indices)); doAnswer((Answer) invocation -> { @SuppressWarnings("unchecked") SearchRequest request = (SearchRequest) invocation.getArguments()[1]; diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/analysis/VerifierTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/analysis/VerifierTests.java index d13852cb3a016..bec71a9846562 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/analysis/VerifierTests.java +++ 
b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/analysis/VerifierTests.java @@ -6,14 +6,11 @@ */ package org.elasticsearch.xpack.eql.analysis; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.eql.parser.EqlParser; import org.elasticsearch.xpack.eql.parser.ParsingException; import org.elasticsearch.xpack.eql.plan.logical.KeyedFilter; import org.elasticsearch.xpack.eql.plan.logical.Sample; -import org.elasticsearch.xpack.eql.session.EqlConfiguration; import org.elasticsearch.xpack.ql.expression.EmptyAttribute; import org.elasticsearch.xpack.ql.index.EsIndex; import org.elasticsearch.xpack.ql.index.IndexResolution; @@ -21,11 +18,8 @@ import org.elasticsearch.xpack.ql.type.EsField; import org.elasticsearch.xpack.ql.type.TypesTests; -import java.util.Collection; import java.util.Map; -import java.util.function.Function; -import static java.util.Collections.emptyMap; import static org.elasticsearch.xpack.eql.analysis.AnalyzerTestUtils.analyzer; import static org.hamcrest.Matchers.startsWith; @@ -452,29 +446,6 @@ public void testKeysWithSimilarYetDifferentTypes() throws Exception { ); } - private LogicalPlan analyzeWithVerifierFunction(Function<String, Collection<String>> versionIncompatibleClusters) { - PreAnalyzer preAnalyzer = new PreAnalyzer(); - EqlConfiguration eqlConfiguration = new EqlConfiguration( - new String[] { "none" }, - org.elasticsearch.xpack.ql.util.DateUtils.UTC, - "nobody", - "cluster", - null, - emptyMap(), - null, - TimeValue.timeValueSeconds(30), - null, - 123, - 1, - "", - new TaskId("test", 123), - null - ); - Analyzer analyzer = analyzer(eqlConfiguration); - IndexResolution resolution = IndexResolution.valid(new EsIndex("irrelevant", loadEqlMapping("mapping-default.json"))); - return analyzer.analyze(preAnalyzer.preAnalyze(new EqlParser().createStatement("any where true"), resolution)); - } - public void testIgnoredTimestampAndTiebreakerInSamples() { LogicalPlan plan = accept("sample by hostname [any where true] [any where true]"); diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java index 663a0328a575b..bac694996526d 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.indices.breaker.CircuitBreakerMetrics; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.search.SearchHit; @@ -100,6 +101,7 @@ public void testMemoryCleared() { private void testMemoryCleared(boolean fail) { try ( CircuitBreakerService service = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, Settings.EMPTY, Collections.singletonList(EqlTestUtils.circuitBreakerSettings(Settings.EMPTY)), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java 
b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java index 08d21de6d048a..1c47ff17abfb8 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java @@ -37,6 +37,7 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.breaker.BreakerSettings; +import org.elasticsearch.indices.breaker.CircuitBreakerMetrics; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.search.DocValueFormat; @@ -209,6 +210,7 @@ private void assertMemoryCleared( final int searchRequestsExpectedCount = 2; try ( CircuitBreakerService service = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, Settings.EMPTY, breakerSettings(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) @@ -246,6 +248,7 @@ public void testEqlCBCleanedUp_on_ParentCBBreak() { try ( CircuitBreakerService service = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, settings, breakerSettings(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/parser/LogicalPlanTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/parser/LogicalPlanTests.java index 151de78d5929a..fefd56c3c6d12 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/parser/LogicalPlanTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/parser/LogicalPlanTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.xpack.ql.expression.Order.OrderDirection; import org.elasticsearch.xpack.ql.expression.UnresolvedAttribute; import org.elasticsearch.xpack.ql.expression.predicate.logical.And; +import org.elasticsearch.xpack.ql.expression.predicate.operator.arithmetic.Neg; import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.ql.plan.logical.Filter; import org.elasticsearch.xpack.ql.plan.logical.LogicalPlan; @@ -39,6 +40,8 @@ import static java.util.Collections.singletonList; import static org.elasticsearch.xpack.ql.type.DateUtils.UTC; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; public class LogicalPlanTests extends ESTestCase { @@ -227,6 +230,28 @@ public void testSamplePlan() { assertEquals("host", key.name()); } + public void testFoldedHeadTailValidValue() { + String limit = randomFrom(" head ", " tail "); + // explicitly set this as long to force a LONG literal as the limit + String longValue = String.valueOf((long) Integer.MAX_VALUE + 1L); + LogicalPlan plan = parser.createStatement("any where true |" + limit + longValue + "-1"); + + assertTrue(plan instanceof LimitWithOffset); + LimitWithOffset limitWithOffset = (LimitWithOffset) plan; + Literal limitValue; + if (limit.equals(" head ")) { + assertTrue(limitWithOffset.limit() instanceof Literal); + limitValue = (Literal) limitWithOffset.limit(); + } else { + assertTrue(limitWithOffset.limit() instanceof Neg); + assertTrue(((Neg) (limitWithOffset.limit())).field() instanceof Literal); + limitValue = (Literal) ((Neg) (limitWithOffset.limit())).field(); + } + assertThat(limitValue.value(), equalTo(Integer.MAX_VALUE)); + // check also that the data
type of the literal is correctly set to INTEGER + assertThat(limitValue.dataType(), is(DataTypes.INTEGER)); + } + private LogicalPlan wrapFilter(Expression exp) { LogicalPlan filter = new Filter(Source.EMPTY, relation(), exp); Order order = new Order(Source.EMPTY, timestamp(), OrderDirection.ASC, NullsPosition.FIRST); diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/planner/QueryTranslatorFailTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/planner/QueryTranslatorFailTests.java index f0bd615b35a23..e94f813a162f6 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/planner/QueryTranslatorFailTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/planner/QueryTranslatorFailTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.xpack.ql.ParsingException; import org.elasticsearch.xpack.ql.QlIllegalArgumentException; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.startsWith; public class QueryTranslatorFailTests extends AbstractQueryTranslatorTestCase { @@ -318,6 +319,38 @@ public void testSampleWithDuplicateKeys() { errorParsing("sample by host [success where true] by ?x [failure where true] by host"), startsWith("1:65: Join keys must be used only once, found duplicates: [host]") ); + } + + public void testNegativeHeadTail() { + String query = randomFrom("head -5", "tail -5"); + assertThat(errorParsing("any where true | " + query), endsWith("expects a positive integer but found [-5]")); + } + + public void testNegativeFoldedValueForHeadAndTail() { + String query = randomFrom(" head ", " tail "); + String value = "-10 + 5"; + assertThat(errorParsing("any where true |" + query + value), endsWith("expects a positive integer but found [-10 + 5]")); + } + + public void testLongValueForHeadAndTail() { + String query = randomFrom(" head ", " tail "); + Long value = randomLongBetween(Integer.MAX_VALUE + 1L, Long.MAX_VALUE); + assertThat(errorParsing("any where true |" + query + value), endsWith("expects a positive integer but found [" + value + "]")); + } + + public void testFoldedLongValueForHeadAndTail() { + String query = randomFrom(" head ", " tail "); + int validInt1 = Integer.MAX_VALUE - 5; + int validInt2 = 10; + assertThat( + errorParsing("any where true |" + query + validInt1 + " + " + validInt2), + endsWith("expects a positive integer but found [2147483642 + 10]") + ); + } + + public void testFloatingPointValueForHeadAndTail() { + String query = randomFrom(" head ", " tail "); + Double value = randomFrom(0.0d, 1.0d, .0d, randomDouble()); + assertThat(errorParsing("any where true |" + query + value), endsWith("expects a positive integer but found [" + value + "]")); } } diff --git a/x-pack/plugin/esql/build.gradle b/x-pack/plugin/esql/build.gradle index ce3413441d927..59edbadf0b514 100644 --- a/x-pack/plugin/esql/build.gradle +++ b/x-pack/plugin/esql/build.gradle @@ -26,10 +26,12 @@ dependencies { testImplementation project('qa:testFixtures') testImplementation project(':test:framework') testImplementation(testArtifact(project(xpackModule('core')))) - testImplementation(testArtifact(project(xpackModule('security')))) + testImplementation project(path: xpackModule('enrich')) + testImplementation project(path: ':modules:reindex') testImplementation project(path: ':modules:parent-join') testImplementation project(path: ':modules:analysis-common') + testImplementation project(path: ':modules:ingest-common') testImplementation('net.nextencia:rrdiagram:0.9.4') 
testImplementation('org.webjars.npm:fontsource__roboto-mono:4.5.7') diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java index 8c5646ab56011..6acddf6aa5cde 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java @@ -478,8 +478,9 @@ private MethodSpec evaluateIntermediate() { builder.addAnnotation(Override.class) .addModifiers(Modifier.PUBLIC) .addParameter(BLOCK_ARRAY, "blocks") - .addParameter(TypeName.INT, "offset"); - builder.addStatement("state.toIntermediate(blocks, offset)"); + .addParameter(TypeName.INT, "offset") + .addParameter(DRIVER_CONTEXT, "driverContext"); + builder.addStatement("state.toIntermediate(blocks, offset, driverContext)"); return builder.build(); } diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java index e6ea75af38494..1b44e0d274e32 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java @@ -35,7 +35,7 @@ import static org.elasticsearch.compute.gen.Methods.appendMethod; import static org.elasticsearch.compute.gen.Methods.buildFromFactory; import static org.elasticsearch.compute.gen.Methods.getMethod; -import static org.elasticsearch.compute.gen.Types.BLOCK_REF; +import static org.elasticsearch.compute.gen.Types.BLOCK; import static org.elasticsearch.compute.gen.Types.BYTES_REF; import static org.elasticsearch.compute.gen.Types.DRIVER_CONTEXT; import static org.elasticsearch.compute.gen.Types.EXPRESSION_EVALUATOR; @@ -121,7 +121,7 @@ private MethodSpec ctor() { private MethodSpec eval() { MethodSpec.Builder builder = MethodSpec.methodBuilder("eval").addAnnotation(Override.class); - builder.addModifiers(Modifier.PUBLIC).returns(BLOCK_REF).addParameter(PAGE, "page"); + builder.addModifiers(Modifier.PUBLIC).returns(BLOCK).addParameter(PAGE, "page"); processFunction.args.stream().forEach(a -> a.evalToBlock(builder)); String invokeBlockEval = invokeRealEval(true); @@ -132,7 +132,7 @@ private MethodSpec eval() { } private String invokeRealEval(boolean blockStyle) { - StringBuilder builder = new StringBuilder("return Block.Ref.floating(eval(page.getPositionCount()"); + StringBuilder builder = new StringBuilder("return eval(page.getPositionCount()"); String params = processFunction.args.stream() .map(a -> a.paramName(blockStyle)) .filter(a -> a != null) @@ -145,7 +145,6 @@ private String invokeRealEval(boolean blockStyle) { if (processFunction.resultDataType(blockStyle).simpleName().endsWith("Vector")) { builder.append(".asBlock()"); } - builder.append(")"); return builder.toString(); } @@ -346,7 +345,7 @@ private interface ProcessFunctionArg { String factoryInvocation(MethodSpec.Builder factoryMethodBuilder); /** - * Emits code to evaluate this parameter to a Block.Ref or array of Block.Refs + * Emits code to evaluate this parameter to a Block or array of Blocks * and begins a {@code try} block for those refs. Noop if the parameter is {@link Fixed}. 
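// For orientation, a minimal sketch of the evaluator shape this generator emits after the
// Block.Ref removal, assuming a unary int function with one field argument named "v" (the
// names here are illustrative, not taken from the generated sources): eval(page) now hands
// back an owned Block that the caller releases, so the generated code wraps the input in
// try-with-resources and takes a dense fast path when the block is backed by a vector.
//
//   @Override
//   public Block eval(Page page) {
//     try (IntBlock vBlock = (IntBlock) v.eval(page)) {
//       IntVector vVector = vBlock.asVector();
//       if (vVector == null) {
//         return eval(page.getPositionCount(), vBlock); // per-position, null/multivalue-aware path
//       }
//       return eval(page.getPositionCount(), vVector).asBlock(); // dense vector path
//     }
//   }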
*/ void evalToBlock(MethodSpec.Builder builder); @@ -440,8 +439,7 @@ public String factoryInvocation(MethodSpec.Builder factoryMethodBuilder) { @Override public void evalToBlock(MethodSpec.Builder builder) { TypeName blockType = blockType(type); - builder.beginControlFlow("try (Block.Ref $LRef = $L.eval(page))", name, name); - builder.addStatement("$T $LBlock = ($T) $LRef.block()", blockType, name, blockType, name); + builder.beginControlFlow("try ($T $LBlock = ($T) $L.eval(page))", blockType, name, blockType, name); } @Override @@ -561,13 +559,11 @@ public String factoryInvocation(MethodSpec.Builder factoryMethodBuilder) { @Override public void evalToBlock(MethodSpec.Builder builder) { TypeName blockType = blockType(componentType); - builder.addStatement("Block.Ref[] $LRefs = new Block.Ref[$L.length]", name, name); - builder.beginControlFlow("try ($T $LRelease = $T.wrap($LRefs))", RELEASABLE, name, RELEASABLES, name); builder.addStatement("$T[] $LBlocks = new $T[$L.length]", blockType, name, blockType, name); + builder.beginControlFlow("try ($T $LRelease = $T.wrap($LBlocks))", RELEASABLE, name, RELEASABLES, name); builder.beginControlFlow("for (int i = 0; i < $LBlocks.length; i++)", name); { - builder.addStatement("$LRefs[i] = $L[i].eval(page)", name, name); - builder.addStatement("$LBlocks[i] = ($T) $LRefs[i].block()", name, blockType, name); + builder.addStatement("$LBlocks[i] = ($T)$L[i].eval(page)", name, blockType, name); } builder.endControlFlow(); } diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/MvEvaluatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/MvEvaluatorImplementer.java index 797665d4f62bb..0e794d6fa533f 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/MvEvaluatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/MvEvaluatorImplementer.java @@ -31,7 +31,7 @@ import static org.elasticsearch.compute.gen.Methods.getMethod; import static org.elasticsearch.compute.gen.Types.ABSTRACT_MULTIVALUE_FUNCTION_EVALUATOR; import static org.elasticsearch.compute.gen.Types.ABSTRACT_NULLABLE_MULTIVALUE_FUNCTION_EVALUATOR; -import static org.elasticsearch.compute.gen.Types.BLOCK_REF; +import static org.elasticsearch.compute.gen.Types.BLOCK; import static org.elasticsearch.compute.gen.Types.BYTES_REF; import static org.elasticsearch.compute.gen.Types.DRIVER_CONTEXT; import static org.elasticsearch.compute.gen.Types.EXPRESSION_EVALUATOR; @@ -184,7 +184,7 @@ private MethodSpec evalShell( Consumer body ) { MethodSpec.Builder builder = MethodSpec.methodBuilder(name); - builder.returns(BLOCK_REF).addParameter(BLOCK_REF, "ref"); + builder.returns(BLOCK).addParameter(BLOCK, "fieldVal"); if (override) { builder.addAnnotation(Override.class).addModifiers(Modifier.PUBLIC); } else { @@ -194,9 +194,7 @@ private MethodSpec evalShell( TypeName blockType = blockType(fieldType); preflight.accept(builder); - - builder.beginControlFlow("try (ref)"); - builder.addStatement("$T v = ($T) ref.block()", blockType, blockType); + builder.addStatement("$T v = ($T) fieldVal", blockType, blockType); builder.addStatement("int positionCount = v.getPositionCount()"); TypeName builderType; if (nullable) { @@ -247,8 +245,7 @@ private MethodSpec evalShell( } builder.endControlFlow(); - builder.addStatement("return Block.Ref.floating(builder.build()$L)", nullable ? 
"" : ".asBlock()"); - builder.endControlFlow(); + builder.addStatement("return builder.build()$L", nullable ? "" : ".asBlock()"); builder.endControlFlow(); return builder.build(); } @@ -259,8 +256,8 @@ private MethodSpec eval(String name, boolean nullable) { if (ascendingFunction == null) { return; } - builder.beginControlFlow("if (ref.block().mvSortedAscending())"); - builder.addStatement("return $L(ref)", name.replace("eval", "evalAscending")); + builder.beginControlFlow("if (fieldVal.mvSortedAscending())"); + builder.addStatement("return $L(fieldVal)", name.replace("eval", "evalAscending")); builder.endControlFlow(); }, builder -> { builder.addStatement("int first = v.getFirstValueIndex(p)"); diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Types.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Types.java index 5c316a7c1bdc6..1a09160dae3cd 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Types.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Types.java @@ -32,7 +32,6 @@ public class Types { static final ClassName PAGE = ClassName.get(DATA_PACKAGE, "Page"); static final ClassName BLOCK = ClassName.get(DATA_PACKAGE, "Block"); static final TypeName BLOCK_ARRAY = ArrayTypeName.of(BLOCK); - static final ClassName BLOCK_REF = ClassName.get(DATA_PACKAGE, "Block", "Ref"); static final ClassName VECTOR = ClassName.get(DATA_PACKAGE, "Vector"); static final ClassName BIG_ARRAYS = ClassName.get("org.elasticsearch.common.util", "BigArrays"); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleState.java index 3536976d47373..f1c92c685bcab 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleState.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleState.java @@ -8,8 +8,7 @@ package org.elasticsearch.compute.aggregation; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.ConstantBooleanVector; -import org.elasticsearch.compute.data.ConstantDoubleVector; +import org.elasticsearch.compute.operator.DriverContext; /** * Aggregator state for a single double. @@ -45,10 +44,10 @@ void seen(boolean seen) { /** Extracts an intermediate view of the contents of this state. 
*/ @Override - public void toIntermediate(Block[] blocks, int offset) { + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { assert blocks.length >= offset + 2; - blocks[offset + 0] = new ConstantDoubleVector(value, 1).asBlock(); - blocks[offset + 1] = new ConstantBooleanVector(seen, 1).asBlock(); + blocks[offset + 0] = driverContext.blockFactory().newConstantDoubleBlockWith(value, 1); + blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntState.java index 8492f29f71a68..e7db40eccf9c8 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntState.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntState.java @@ -8,8 +8,7 @@ package org.elasticsearch.compute.aggregation; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.ConstantBooleanVector; -import org.elasticsearch.compute.data.ConstantIntVector; +import org.elasticsearch.compute.operator.DriverContext; /** * Aggregator state for a single int. @@ -45,10 +44,10 @@ void seen(boolean seen) { /** Extracts an intermediate view of the contents of this state. */ @Override - public void toIntermediate(Block[] blocks, int offset) { + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { assert blocks.length >= offset + 2; - blocks[offset + 0] = new ConstantIntVector(value, 1).asBlock(); - blocks[offset + 1] = new ConstantBooleanVector(seen, 1).asBlock(); + blocks[offset + 0] = driverContext.blockFactory().newConstantIntBlockWith(value, 1); + blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongState.java index bd4e8d0637077..da78b649782d5 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongState.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongState.java @@ -8,8 +8,7 @@ package org.elasticsearch.compute.aggregation; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.ConstantBooleanVector; -import org.elasticsearch.compute.data.ConstantLongVector; +import org.elasticsearch.compute.operator.DriverContext; /** * Aggregator state for a single long. @@ -45,10 +44,10 @@ void seen(boolean seen) { /** Extracts an intermediate view of the contents of this state. 
*/ @Override - public void toIntermediate(Block[] blocks, int offset) { + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { assert blocks.length >= offset + 2; - blocks[offset + 0] = new ConstantLongVector(value, 1).asBlock(); - blocks[offset + 1] = new ConstantBooleanVector(seen, 1).asBlock(); + blocks[offset + 0] = driverContext.blockFactory().newConstantLongBlockWith(value, 1); + blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java index eb9364c57e755..ed38d3139dd4a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java @@ -135,11 +135,7 @@ public String toString() { } @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released block [" + this + "]"); - } - released = true; + public void closeInternal() { blockFactory.adjustBreaker(-ramBytesUsed(), true); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java index 1a7a5b4aa6e7e..c5c3a24736c16 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java @@ -17,6 +17,9 @@ public final class BooleanVectorBlock extends AbstractVectorBlock implements Boo private final BooleanVector vector; + /** + * @param vector considered owned by the current block; must not be used in any other {@code Block} + */ BooleanVectorBlock(BooleanVector vector) { super(vector.getPositionCount(), vector.blockFactory()); this.vector = vector; @@ -72,15 +75,12 @@ public String toString() { @Override public boolean isReleased() { - return released || vector.isReleased(); + return super.isReleased() || vector.isReleased(); } @Override - public void close() { - if (released || vector.isReleased()) { - throw new IllegalStateException("can't release already released block [" + this + "]"); - } - released = true; + public void closeInternal() { + assert (vector.isReleased() == false) : "can't release block [" + this + "] containing already released vector"; Releasables.closeExpectNoException(vector); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java index b2729ed370b32..6aef8fa54b134 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java @@ -139,11 +139,7 @@ public String toString() { } @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released block [" + this + "]"); - } - released = true; + public void closeInternal() { blockFactory.adjustBreaker(-ramBytesUsed() + values.bigArraysRamBytesUsed(), true); 
Releasables.closeExpectNoException(values); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java index 5b0f2f2331fbe..d8c2c615a3dfb 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java @@ -18,6 +18,9 @@ public final class BytesRefVectorBlock extends AbstractVectorBlock implements By private final BytesRefVector vector; + /** + * @param vector considered owned by the current block; must not be used in any other {@code Block} + */ BytesRefVectorBlock(BytesRefVector vector) { super(vector.getPositionCount(), vector.blockFactory()); this.vector = vector; @@ -73,15 +76,12 @@ public String toString() { @Override public boolean isReleased() { - return released || vector.isReleased(); + return super.isReleased() || vector.isReleased(); } @Override - public void close() { - if (released || vector.isReleased()) { - throw new IllegalStateException("can't release already released block [" + this + "]"); - } - released = true; + public void closeInternal() { + assert (vector.isReleased() == false) : "can't release block [" + this + "] containing already released vector"; Releasables.closeExpectNoException(vector); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java index b6b1fae0ded03..6a5af2d7ca6de 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java @@ -135,11 +135,7 @@ public String toString() { } @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released block [" + this + "]"); - } - released = true; + public void closeInternal() { blockFactory.adjustBreaker(-ramBytesUsed(), true); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java index d05be62744bc8..ac4c826b5f2d2 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java @@ -17,6 +17,9 @@ public final class DoubleVectorBlock extends AbstractVectorBlock implements Doub private final DoubleVector vector; + /** + * @param vector considered owned by the current block; must not be used in any other {@code Block} + */ DoubleVectorBlock(DoubleVector vector) { super(vector.getPositionCount(), vector.blockFactory()); this.vector = vector; @@ -72,15 +75,12 @@ public String toString() { @Override public boolean isReleased() { - return released || vector.isReleased(); + return super.isReleased() || vector.isReleased(); } @Override - public void close() { - if (released || vector.isReleased()) { - throw new IllegalStateException("can't release already released block [" + this + "]"); - } - released = true; + public void closeInternal() { + assert (vector.isReleased() == false) 
: "can't release block [" + this + "] containing already released vector"; Releasables.closeExpectNoException(vector); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java index 31f71d292f95d..284520a5f3bd6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java @@ -135,11 +135,7 @@ public String toString() { } @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released block [" + this + "]"); - } - released = true; + public void closeInternal() { blockFactory.adjustBreaker(-ramBytesUsed(), true); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java index 472475d0662d7..60280ebb13064 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java @@ -17,6 +17,9 @@ public final class IntVectorBlock extends AbstractVectorBlock implements IntBloc private final IntVector vector; + /** + * @param vector considered owned by the current block; must not be used in any other {@code Block} + */ IntVectorBlock(IntVector vector) { super(vector.getPositionCount(), vector.blockFactory()); this.vector = vector; @@ -72,15 +75,12 @@ public String toString() { @Override public boolean isReleased() { - return released || vector.isReleased(); + return super.isReleased() || vector.isReleased(); } @Override - public void close() { - if (released || vector.isReleased()) { - throw new IllegalStateException("can't release already released block [" + this + "]"); - } - released = true; + public void closeInternal() { + assert (vector.isReleased() == false) : "can't release block [" + this + "] containing already released vector"; Releasables.closeExpectNoException(vector); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java index 8a71703441ebb..fccad0ec1f09b 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java @@ -135,11 +135,7 @@ public String toString() { } @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released block [" + this + "]"); - } - released = true; + public void closeInternal() { blockFactory.adjustBreaker(-ramBytesUsed(), true); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java index b94cd4e875dc3..c9b65ba3e9029 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java @@ -17,6 
+17,9 @@ public final class LongVectorBlock extends AbstractVectorBlock implements LongBl private final LongVector vector; + /** + * @param vector considered owned by the current block; must not be used in any other {@code Block} + */ LongVectorBlock(LongVector vector) { super(vector.getPositionCount(), vector.blockFactory()); this.vector = vector; @@ -72,15 +75,12 @@ public String toString() { @Override public boolean isReleased() { - return released || vector.isReleased(); + return super.isReleased() || vector.isReleased(); } @Override - public void close() { - if (released || vector.isReleased()) { - throw new IllegalStateException("can't release already released block [" + this + "]"); - } - released = true; + public void closeInternal() { + assert (vector.isReleased() == false) : "can't release block [" + this + "] containing already released vector"; Releasables.closeExpectNoException(vector); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeBytesRef.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeBytesRef.java index cc6a2d4e41104..1fd4c1ea3562d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeBytesRef.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeBytesRef.java @@ -30,14 +30,12 @@ public class MultivalueDedupeBytesRef { * The choice of number has been experimentally derived. */ private static final int ALWAYS_COPY_MISSING = 20; // TODO BytesRef should try adding to the hash *first* and then comparing. - private final Block.Ref ref; private final BytesRefBlock block; private BytesRef[] work = new BytesRef[ArrayUtil.oversize(2, org.apache.lucene.util.RamUsageEstimator.NUM_BYTES_OBJECT_REF)]; private int w; - public MultivalueDedupeBytesRef(Block.Ref ref) { - this.ref = ref; - this.block = (BytesRefBlock) ref.block(); + public MultivalueDedupeBytesRef(BytesRefBlock block) { + this.block = block; // TODO very large numbers might want a hash based implementation - and for BytesRef that might not be that big fillWork(0, work.length); } @@ -46,11 +44,12 @@ public MultivalueDedupeBytesRef(Block.Ref ref) { * Remove duplicate values from each position and write the results to a * {@link Block} using an adaptive algorithm based on the size of the input list. */ - public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { + public BytesRefBlock dedupeToBlockAdaptive(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -85,7 +84,7 @@ public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -95,11 +94,12 @@ public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { * case complexity for larger. Prefer {@link #dedupeToBlockAdaptive} * which picks based on the number of elements at each position. 
*/ - public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { + public BytesRefBlock dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -112,7 +112,7 @@ public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -124,11 +124,12 @@ public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { * performance is dominated by the {@code n*log n} sort. Prefer * {@link #dedupeToBlockAdaptive} unless you need the results sorted. */ - public Block.Ref dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { + public BytesRefBlock dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -141,7 +142,7 @@ public Block.Ref dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -149,8 +150,8 @@ public Block.Ref dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { * Dedupe values and build a {@link IntBlock} suitable for passing * as the grouping block to a {@link GroupingAggregatorFunction}. */ - public MultivalueDedupe.HashResult hash(BytesRefHash hash) { - try (IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount())) { + public MultivalueDedupe.HashResult hash(BlockFactory blockFactory, BytesRefHash hash) { + try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(block.getPositionCount())) { boolean sawNull = false; for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeDouble.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeDouble.java index d9de26a36d830..157b6670e95af 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeDouble.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeDouble.java @@ -29,25 +29,24 @@ public class MultivalueDedupeDouble { * The choice of number has been experimentally derived. 
*/ private static final int ALWAYS_COPY_MISSING = 110; - private final Block.Ref ref; private final DoubleBlock block; private double[] work = new double[ArrayUtil.oversize(2, Double.BYTES)]; private int w; - public MultivalueDedupeDouble(Block.Ref ref) { - this.ref = ref; - this.block = (DoubleBlock) ref.block(); + public MultivalueDedupeDouble(DoubleBlock block) { + this.block = block; } /** * Remove duplicate values from each position and write the results to a * {@link Block} using an adaptive algorithm based on the size of the input list. */ - public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { + public DoubleBlock dedupeToBlockAdaptive(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -82,7 +81,7 @@ public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -92,11 +91,12 @@ public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { * case complexity for larger. Prefer {@link #dedupeToBlockAdaptive} * which picks based on the number of elements at each position. */ - public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { + public DoubleBlock dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -109,7 +109,7 @@ public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -121,11 +121,12 @@ public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { * performance is dominated by the {@code n*log n} sort. Prefer * {@link #dedupeToBlockAdaptive} unless you need the results sorted. */ - public Block.Ref dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { + public DoubleBlock dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -138,7 +139,7 @@ public Block.Ref dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -146,8 +147,8 @@ public Block.Ref dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { * Dedupe values and build a {@link IntBlock} suitable for passing * as the grouping block to a {@link GroupingAggregatorFunction}. 
*/ - public MultivalueDedupe.HashResult hash(LongHash hash) { - try (IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount())) { + public MultivalueDedupe.HashResult hash(BlockFactory blockFactory, LongHash hash) { + try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(block.getPositionCount())) { boolean sawNull = false; for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeInt.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeInt.java index aad15dde6aec9..7bc9d77d3f877 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeInt.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeInt.java @@ -28,25 +28,24 @@ public class MultivalueDedupeInt { * The choice of number has been experimentally derived. */ private static final int ALWAYS_COPY_MISSING = 300; - private final Block.Ref ref; private final IntBlock block; private int[] work = new int[ArrayUtil.oversize(2, Integer.BYTES)]; private int w; - public MultivalueDedupeInt(Block.Ref ref) { - this.ref = ref; - this.block = (IntBlock) ref.block(); + public MultivalueDedupeInt(IntBlock block) { + this.block = block; } /** * Remove duplicate values from each position and write the results to a * {@link Block} using an adaptive algorithm based on the size of the input list. */ - public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { + public IntBlock dedupeToBlockAdaptive(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -81,7 +80,7 @@ public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -91,11 +90,12 @@ public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { * case complexity for larger. Prefer {@link #dedupeToBlockAdaptive} * which picks based on the number of elements at each position. */ - public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { + public IntBlock dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -108,7 +108,7 @@ public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -120,11 +120,12 @@ public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { * performance is dominated by the {@code n*log n} sort. Prefer * {@link #dedupeToBlockAdaptive} unless you need the results sorted. 
*/ - public Block.Ref dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { + public IntBlock dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -137,7 +138,7 @@ public Block.Ref dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -145,8 +146,8 @@ public Block.Ref dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { * Dedupe values and build a {@link IntBlock} suitable for passing * as the grouping block to a {@link GroupingAggregatorFunction}. */ - public MultivalueDedupe.HashResult hash(LongHash hash) { - try (IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount())) { + public MultivalueDedupe.HashResult hash(BlockFactory blockFactory, LongHash hash) { + try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(block.getPositionCount())) { boolean sawNull = false; for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeLong.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeLong.java index 98f79f3989c27..acbc9139a75c5 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeLong.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeLong.java @@ -30,25 +30,24 @@ public class MultivalueDedupeLong { */ private static final int ALWAYS_COPY_MISSING = 300; - private final Block.Ref ref; private final LongBlock block; private long[] work = new long[ArrayUtil.oversize(2, Long.BYTES)]; private int w; - public MultivalueDedupeLong(Block.Ref ref) { - this.ref = ref; - this.block = (LongBlock) ref.block(); + public MultivalueDedupeLong(LongBlock block) { + this.block = block; } /** * Remove duplicate values from each position and write the results to a * {@link Block} using an adaptive algorithm based on the size of the input list. */ - public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { + public LongBlock dedupeToBlockAdaptive(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; LongBlock.Builder builder = LongBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (LongBlock.Builder builder = LongBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -83,7 +82,7 @@ public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -93,11 +92,12 @@ public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { * case complexity for larger. Prefer {@link #dedupeToBlockAdaptive} * which picks based on the number of elements at each position. 
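For readers outside the generated code, the trade-off the adaptive method picks between can be shown on a plain long[]. This is a simplified sketch of the two strategies, not the generated per-position loops themselves:

```java
import java.util.Arrays;

final class DedupeStrategies {
    /** O(n log n); emits the surviving values in ascending order. */
    static long[] copyAndSort(long[] values) {
        if (values.length == 0) {
            return values;
        }
        long[] work = Arrays.copyOf(values, values.length);
        Arrays.sort(work);
        int w = 1;
        for (int i = 1; i < work.length; i++) {
            if (work[i] != work[w - 1]) {
                work[w++] = work[i];
            }
        }
        return Arrays.copyOf(work, w);
    }

    /**
     * O(n^2) worst case but close to linear for the small per-position
     * value counts that ALWAYS_COPY_MISSING targets; preserves first-seen order.
     */
    static long[] copyMissing(long[] values) {
        long[] work = new long[values.length];
        int w = 0;
        outer: for (long v : values) {
            for (int i = 0; i < w; i++) {
                if (work[i] == v) {
                    continue outer; // already copied, skip the duplicate
                }
            }
            work[w++] = v;
        }
        return Arrays.copyOf(work, w);
    }
}
```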
*/ - public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { + public LongBlock dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; LongBlock.Builder builder = LongBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (LongBlock.Builder builder = LongBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -110,7 +110,7 @@ public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -122,11 +122,12 @@ public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { * performance is dominated by the {@code n*log n} sort. Prefer * {@link #dedupeToBlockAdaptive} unless you need the results sorted. */ - public Block.Ref dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { + public LongBlock dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; LongBlock.Builder builder = LongBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (LongBlock.Builder builder = LongBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -139,7 +140,7 @@ public Block.Ref dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -147,8 +148,8 @@ public Block.Ref dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { * Dedupe values and build a {@link IntBlock} suitable for passing * as the grouping block to a {@link GroupingAggregatorFunction}. 
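The hash variant now threads the BlockFactory through so the ordinals block it builds is tracked by the circuit breaker. A hypothetical caller sketch; note the real block hashes keep the LongHash alive across pages, it is closed here only to keep the example self-contained:

```java
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.LongHash;
import org.elasticsearch.compute.data.BlockFactory;
import org.elasticsearch.compute.data.IntBlock;
import org.elasticsearch.compute.data.LongBlock;
import org.elasticsearch.compute.operator.MultivalueDedupe;
import org.elasticsearch.compute.operator.MultivalueDedupeLong;

final class GroupingOrdinals {
    /** Builds breaker-tracked ordinals for {@code keys}; the caller owns the returned block. */
    static IntBlock ordinals(LongBlock keys, BlockFactory blockFactory, BigArrays bigArrays) {
        try (LongHash hash = new LongHash(1, bigArrays)) {
            MultivalueDedupe.HashResult result = new MultivalueDedupeLong(keys).hash(blockFactory, hash);
            // result.sawNull() reports positions containing only nulls; the
            // block hashes in the hunks above fold it into their seenNull flag.
            return result.ords();
        }
    }
}
```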
*/ - public MultivalueDedupe.HashResult hash(LongHash hash) { - try (IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount())) { + public MultivalueDedupe.HashResult hash(BlockFactory blockFactory, LongHash hash) { + try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(block.getPositionCount())) { boolean sawNull = false; for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregatorFunction.java index f0344ec6b56ba..dd5450d3b460c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregatorFunction.java @@ -98,8 +98,8 @@ public void addIntermediateInput(Page page) { } @Override - public void evaluateIntermediate(Block[] blocks, int offset) { - state.toIntermediate(blocks, offset); + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregatorFunction.java index 15d3290cdb18f..fd770678d5943 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregatorFunction.java @@ -106,8 +106,8 @@ public void addIntermediateInput(Page page) { } @Override - public void evaluateIntermediate(Block[] blocks, int offset) { - state.toIntermediate(blocks, offset); + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregatorFunction.java index 77e47b543bd26..a8169b5a901e1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregatorFunction.java @@ -106,8 +106,8 @@ public void addIntermediateInput(Page page) { } @Override - public void evaluateIntermediate(Block[] blocks, int offset) { - state.toIntermediate(blocks, offset); + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntAggregatorFunction.java index 1f6f91d4d6adc..9f685f4672939 100644 --- 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntAggregatorFunction.java @@ -106,8 +106,8 @@ public void addIntermediateInput(Page page) { } @Override - public void evaluateIntermediate(Block[] blocks, int offset) { - state.toIntermediate(blocks, offset); + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongAggregatorFunction.java index cf51b3dcff5a1..55b396aa627d5 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongAggregatorFunction.java @@ -106,8 +106,8 @@ public void addIntermediateInput(Page page) { } @Override - public void evaluateIntermediate(Block[] blocks, int offset) { - state.toIntermediate(blocks, offset); + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunction.java index c613623230e7f..6929900c29ea1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunction.java @@ -105,8 +105,8 @@ public void addIntermediateInput(Page page) { } @Override - public void evaluateIntermediate(Block[] blocks, int offset) { - state.toIntermediate(blocks, offset); + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntAggregatorFunction.java index 9307522e515a8..1759442fbb12a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntAggregatorFunction.java @@ -105,8 +105,8 @@ public void addIntermediateInput(Page page) { } @Override - public void evaluateIntermediate(Block[] blocks, int offset) { - state.toIntermediate(blocks, offset); + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongAggregatorFunction.java index 6902539b8b2f0..fe7d797faf10a 100644 --- 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongAggregatorFunction.java @@ -105,8 +105,8 @@ public void addIntermediateInput(Page page) { } @Override - public void evaluateIntermediate(Block[] blocks, int offset) { - state.toIntermediate(blocks, offset); + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunction.java index d7b84492c4cbc..a2e8d8fbf592c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunction.java @@ -99,8 +99,8 @@ public void addIntermediateInput(Page page) { } @Override - public void evaluateIntermediate(Block[] blocks, int offset) { - state.toIntermediate(blocks, offset); + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunction.java index 211f0f622b728..21e99587a5d09 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunction.java @@ -99,8 +99,8 @@ public void addIntermediateInput(Page page) { } @Override - public void evaluateIntermediate(Block[] blocks, int offset) { - state.toIntermediate(blocks, offset); + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunction.java index 2311ce3c18315..8c3aa95864aff 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunction.java @@ -99,8 +99,8 @@ public void addIntermediateInput(Page page) { } @Override - public void evaluateIntermediate(Block[] blocks, int offset) { - state.toIntermediate(blocks, offset); + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); } @Override diff --git 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleAggregatorFunction.java index afcace2069ebf..1f9a8fb49fb2d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleAggregatorFunction.java @@ -105,8 +105,8 @@ public void addIntermediateInput(Page page) { } @Override - public void evaluateIntermediate(Block[] blocks, int offset) { - state.toIntermediate(blocks, offset); + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntAggregatorFunction.java index e8deaf9cf07fd..bbeba4c8374ab 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntAggregatorFunction.java @@ -105,8 +105,8 @@ public void addIntermediateInput(Page page) { } @Override - public void evaluateIntermediate(Block[] blocks, int offset) { - state.toIntermediate(blocks, offset); + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongAggregatorFunction.java index 61f41c9693725..5299b505e124c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongAggregatorFunction.java @@ -105,8 +105,8 @@ public void addIntermediateInput(Page page) { } @Override - public void evaluateIntermediate(Block[] blocks, int offset) { - state.toIntermediate(blocks, offset); + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleAggregatorFunction.java index 0757cb91b2747..f7560379e476d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleAggregatorFunction.java @@ -102,8 +102,8 @@ public void addIntermediateInput(Page page) { } @Override - public void evaluateIntermediate(Block[] blocks, int offset) { - state.toIntermediate(blocks, offset); + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); } @Override diff --git 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntAggregatorFunction.java index 142772592a9a8..d45ba7a1e350a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntAggregatorFunction.java @@ -102,8 +102,8 @@ public void addIntermediateInput(Page page) { } @Override - public void evaluateIntermediate(Block[] blocks, int offset) { - state.toIntermediate(blocks, offset); + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongAggregatorFunction.java index d360f14453ce5..dac045d814926 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongAggregatorFunction.java @@ -102,8 +102,8 @@ public void addIntermediateInput(Page page) { } @Override - public void evaluateIntermediate(Block[] blocks, int offset) { - state.toIntermediate(blocks, offset); + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleAggregatorFunction.java index 48c50b026f198..5520c587555b3 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleAggregatorFunction.java @@ -104,8 +104,8 @@ public void addIntermediateInput(Page page) { } @Override - public void evaluateIntermediate(Block[] blocks, int offset) { - state.toIntermediate(blocks, offset); + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntAggregatorFunction.java index f834c932b0a56..1225b90bf09f7 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntAggregatorFunction.java @@ -107,8 +107,8 @@ public void addIntermediateInput(Page page) { } @Override - public void evaluateIntermediate(Block[] blocks, int offset) { - state.toIntermediate(blocks, offset); + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); } @Override diff --git 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongAggregatorFunction.java index 058fdcbe507b4..720e7ca9f3bbf 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongAggregatorFunction.java @@ -105,8 +105,8 @@ public void addIntermediateInput(Page page) { } @Override - public void evaluateIntermediate(Block[] blocks, int offset) { - state.toIntermediate(blocks, offset); + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/OwningChannelActionListener.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/OwningChannelActionListener.java new file mode 100644 index 0000000000000..6512d80859163 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/OwningChannelActionListener.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ChannelActionListener; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportResponse; + +/** + * Wraps a {@link ChannelActionListener} and takes ownership of responses passed to + * {@link org.elasticsearch.action.ActionListener#onResponse(Object)}; the reference count will be decreased once sending is done. + * + * Deprecated: use {@link ChannelActionListener} instead and ensure responses sent to it are properly closed after. 
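Usage-wise, the new listener lets a transport handler hand a ref-counted response to the channel without tracking when serialization completes. A hypothetical handler sketch; `respond` is illustrative and the type parameters assume the generic declaration shown below:

```java
import org.elasticsearch.compute.OwningChannelActionListener;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportResponse;

final class TransportHandlerSketch {
    /**
     * Hands {@code response} to the channel; the listener decRefs it once
     * sending is done, so the handler does not need to track completion.
     */
    static <R extends TransportResponse> void respond(TransportChannel channel, R response) {
        new OwningChannelActionListener<R>(channel).onResponse(response);
    }
}
```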
+ */ +@Deprecated(forRemoval = true) +public final class OwningChannelActionListener<Response extends TransportResponse> implements ActionListener<Response> { + private final ChannelActionListener<Response> listener; + + public OwningChannelActionListener(TransportChannel channel) { + this.listener = new ChannelActionListener<>(channel); + } + + @Override + public void onResponse(Response response) { + try { + listener.onResponse(response); + } finally { + response.decRef(); + } + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + + @Override + public String toString() { + return "OwningChannelActionListener{" + listener + "}"; + } + +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/Aggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/Aggregator.java index f2fdbd951ffd0..1a58a27e3377f 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/Aggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/Aggregator.java @@ -45,7 +45,7 @@ public void processPage(Page page) { public void evaluate(Block[] blocks, int offset, DriverContext driverContext) { if (mode.isOutputPartial()) { - aggregatorFunction.evaluateIntermediate(blocks, offset); + aggregatorFunction.evaluateIntermediate(blocks, offset, driverContext); } else { aggregatorFunction.evaluateFinal(blocks, offset, driverContext); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AggregatorFunction.java index ae60f07056327..3d214ff3d2e0d 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AggregatorFunction.java @@ -18,7 +18,7 @@ public interface AggregatorFunction extends Releasable { void addIntermediateInput(Page page); - void evaluateIntermediate(Block[] blocks, int offset); + void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext); void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AggregatorState.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AggregatorState.java index d0a644215a759..c62dc0b546500 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AggregatorState.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AggregatorState.java @@ -8,10 +8,11 @@ package org.elasticsearch.compute.aggregation; import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.core.Releasable; public interface AggregatorState extends Releasable { /** Extracts an intermediate view of the contents of this state. 
*/ - void toIntermediate(Block[] blocks, int offset); + void toIntermediate(Block[] blocks, int offset, DriverContext driverContext); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountAggregatorFunction.java index 7ae173659f3f9..efc275ff6eb35 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountAggregatorFunction.java @@ -97,13 +97,13 @@ public void addIntermediateInput(Page page) { } @Override - public void evaluateIntermediate(Block[] blocks, int offset) { - state.toIntermediate(blocks, offset); + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); } @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { - blocks[offset] = LongBlock.newConstantBlockWith(state.longValue(), 1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantLongBlockWith(state.longValue(), 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregator.java index 0f524a712df59..d083a48fffb7a 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregator.java @@ -86,10 +86,10 @@ static class SingleState implements AggregatorState { /** Extracts an intermediate view of the contents of this state. */ @Override - public void toIntermediate(Block[] blocks, int offset) { + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { assert blocks.length >= offset + 2; - blocks[offset + 0] = BooleanBlock.newConstantBlockWith((bits & BIT_FALSE) != 0, 1); - blocks[offset + 1] = BooleanBlock.newConstantBlockWith((bits & BIT_TRUE) != 0, 1); + blocks[offset + 0] = driverContext.blockFactory().newConstantBooleanBlockWith((bits & BIT_FALSE) != 0, 1); + blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith((bits & BIT_TRUE) != 0, 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/HllStates.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/HllStates.java index 0c03d9d9c3698..66844f002111e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/HllStates.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/HllStates.java @@ -17,7 +17,6 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; -import org.elasticsearch.compute.data.ConstantBytesRefVector; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.core.Releasables; @@ -118,9 +117,9 @@ void merge(int groupId, BytesRef other, int otherGroup) { /** Extracts an intermediate view of the contents of this state. 
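Threading DriverContext into toIntermediate means every state allocates its intermediate blocks through the breaker-tracked BlockFactory rather than the raw Constant*Vector constructors being deleted here. A minimal sketch of the pattern for a hypothetical single-value long state (not part of this change; it mirrors the generated X-State template later in this diff):

```java
import org.elasticsearch.compute.aggregation.AggregatorState;
import org.elasticsearch.compute.data.Block;
import org.elasticsearch.compute.operator.DriverContext;

/** Hypothetical single-value state used only to illustrate the new signature. */
final class ExampleLongState implements AggregatorState {
    private long value;
    private boolean seen;

    void set(long v) {
        value = v;
        seen = true;
    }

    @Override
    public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) {
        assert blocks.length >= offset + 2;
        // Constant blocks come from the context's factory so the circuit
        // breaker accounts for every byte the intermediate output holds.
        blocks[offset + 0] = driverContext.blockFactory().newConstantLongBlockWith(value, 1);
        blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1);
    }

    @Override
    public void close() {
        // nothing allocated off-heap
    }
}
```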
*/ @Override - public void toIntermediate(Block[] blocks, int offset) { + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { assert blocks.length >= offset + 1; - blocks[offset] = new ConstantBytesRefVector(serializeHLL(SINGLE_BUCKET_ORD, hll), 1).asBlock(); + blocks[offset] = driverContext.blockFactory().newConstantBytesRefBlockWith(serializeHLL(SINGLE_BUCKET_ORD, hll), 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/QuantileStates.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/QuantileStates.java index 121e80871aaf0..0b5b89425ed46 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/QuantileStates.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/QuantileStates.java @@ -13,8 +13,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BytesRefBlock; -import org.elasticsearch.compute.data.ConstantBytesRefVector; +import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.operator.DriverContext; @@ -83,29 +82,28 @@ void add(BytesRef other) { /** Extracts an intermediate view of the contents of this state. */ @Override - public void toIntermediate(Block[] blocks, int offset) { + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { assert blocks.length >= offset + 1; - blocks[offset] = new ConstantBytesRefVector(serializeDigest(this.digest), 1).asBlock(); + blocks[offset] = driverContext.blockFactory().newConstantBytesRefBlockWith(serializeDigest(this.digest), 1); } Block evaluateMedianAbsoluteDeviation(DriverContext driverContext) { + BlockFactory blockFactory = driverContext.blockFactory(); assert percentile == MEDIAN : "Median must be 50th percentile [percentile = " + percentile + "]"; if (digest.size() == 0) { - return Block.constantNullBlock(1, driverContext.blockFactory()); + return blockFactory.newConstantNullBlock(1); } double result = InternalMedianAbsoluteDeviation.computeMedianAbsoluteDeviation(digest); - return DoubleBlock.newConstantBlockWith(result, 1, driverContext.blockFactory()); + return blockFactory.newConstantDoubleBlockWith(result, 1); } Block evaluatePercentile(DriverContext driverContext) { - if (percentile == null) { - return DoubleBlock.newBlockBuilder(1, driverContext.blockFactory()).appendNull().build(); - } - if (digest.size() == 0) { - return Block.constantNullBlock(1); + BlockFactory blockFactory = driverContext.blockFactory(); + if (percentile == null || digest.size() == 0) { + return blockFactory.newConstantNullBlock(1); } double result = digest.quantile(percentile / 100); - return DoubleBlock.newConstantBlockWith(result, 1); + return blockFactory.newConstantDoubleBlockWith(result, 1); } } @@ -161,7 +159,7 @@ TDigestState getOrNull(int position) { @Override public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { assert blocks.length >= offset + 1; - try (var builder = BytesRefBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (var builder = driverContext.blockFactory().newBytesRefBlockBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { 
int group = selected.getInt(i); TDigestState state; @@ -181,7 +179,7 @@ public void toIntermediate(Block[] blocks, int offset, IntVector selected, Drive Block evaluateMedianAbsoluteDeviation(IntVector selected, DriverContext driverContext) { assert percentile == MEDIAN : "Median must be 50th percentile [percentile = " + percentile + "]"; - try (DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int si = selected.getInt(i); if (si >= digests.size()) { @@ -200,7 +198,7 @@ Block evaluateMedianAbsoluteDeviation(IntVector selected, DriverContext driverCo } Block evaluatePercentile(IntVector selected, DriverContext driverContext) { - try (DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int si = selected.getInt(i); if (si >= digests.size()) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/SumDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/SumDoubleAggregator.java index c726eb0a50e59..4c2c38da28b75 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/SumDoubleAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/SumDoubleAggregator.java @@ -13,9 +13,8 @@ import org.elasticsearch.compute.ann.GroupingAggregator; import org.elasticsearch.compute.ann.IntermediateState; import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; -import org.elasticsearch.compute.data.ConstantBooleanVector; -import org.elasticsearch.compute.data.ConstantDoubleVector; import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.operator.DriverContext; @@ -54,11 +53,12 @@ public static void combineIntermediate(SumState state, double inValue, double in } } - public static void evaluateIntermediate(SumState state, Block[] blocks, int offset) { + public static void evaluateIntermediate(SumState state, DriverContext driverContext, Block[] blocks, int offset) { assert blocks.length >= offset + 3; - blocks[offset + 0] = new ConstantDoubleVector(state.value(), 1).asBlock(); - blocks[offset + 1] = new ConstantDoubleVector(state.delta(), 1).asBlock(); - blocks[offset + 2] = new ConstantBooleanVector(state.seen, 1).asBlock(); + BlockFactory blockFactory = driverContext.blockFactory(); + blocks[offset + 0] = blockFactory.newConstantDoubleBlockWith(state.value(), 1); + blocks[offset + 1] = blockFactory.newConstantDoubleBlockWith(state.delta(), 1); + blocks[offset + 2] = blockFactory.newConstantBooleanBlockWith(state.seen(), 1); } public static Block evaluateFinal(SumState state, DriverContext driverContext) { @@ -143,8 +143,8 @@ static class SumState extends CompensatedSum implements AggregatorState { } @Override - public void toIntermediate(Block[] blocks, int offset) { - SumDoubleAggregator.evaluateIntermediate(this, blocks, offset); + public void toIntermediate(Block[] blocks, int offset, DriverContext 
driverContext) { + SumDoubleAggregator.evaluateIntermediate(this, driverContext, blocks, offset); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-State.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-State.java.st index 2bcee35b48b4d..427d1a0c312cc 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-State.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-State.java.st @@ -8,8 +8,7 @@ package org.elasticsearch.compute.aggregation; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.ConstantBooleanVector; -import org.elasticsearch.compute.data.Constant$Type$Vector; +import org.elasticsearch.compute.operator.DriverContext; /** * Aggregator state for a single $type$. @@ -45,10 +44,10 @@ final class $Type$State implements AggregatorState { /** Extracts an intermediate view of the contents of this state. */ @Override - public void toIntermediate(Block[] blocks, int offset) { + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { assert blocks.length >= offset + 2; - blocks[offset + 0] = new Constant$Type$Vector(value, 1).asBlock(); - blocks[offset + 1] = new ConstantBooleanVector(seen, 1).asBlock(); + blocks[offset + 0] = driverContext.blockFactory().newConstant$Type$BlockWith(value, 1); + blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BooleanBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BooleanBlockHash.java index 6f041a6681659..684e6aec60b9e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BooleanBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BooleanBlockHash.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.BitArray; import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; -import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BooleanVector; import org.elasticsearch.compute.data.IntBlock; @@ -70,7 +69,7 @@ private IntVector add(BooleanVector vector) { } private IntBlock add(BooleanBlock block) { - return new MultivalueDedupeBoolean(Block.Ref.floating(block)).hash(everSeen); + return new MultivalueDedupeBoolean(block).hash(blockFactory, everSeen); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BytesRefBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BytesRefBlockHash.java index 2f1bb4f858ff4..fb9b680c62d1d 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BytesRefBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BytesRefBlockHash.java @@ -87,7 +87,7 @@ private IntVector add(BytesRefVector vector) { private IntBlock add(BytesRefBlock block) { // TODO: use block factory - MultivalueDedupe.HashResult result = new MultivalueDedupeBytesRef(Block.Ref.floating(block)).hash(bytesRefHash); + MultivalueDedupe.HashResult result = new 
MultivalueDedupeBytesRef(block).hash(blockFactory, bytesRefHash); seenNull |= result.sawNull(); return result.ords(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/DoubleBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/DoubleBlockHash.java index a8a67180775fb..c03ce2a0a4dce 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/DoubleBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/DoubleBlockHash.java @@ -80,7 +80,7 @@ private IntVector add(DoubleVector vector) { } private IntBlock add(DoubleBlock block) { - MultivalueDedupe.HashResult result = new MultivalueDedupeDouble(Block.Ref.floating(block)).hash(longHash); // TODO: block factory + MultivalueDedupe.HashResult result = new MultivalueDedupeDouble(block).hash(blockFactory, longHash); seenNull |= result.sawNull(); return result.ords(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/IntBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/IntBlockHash.java index 79e03e4dc0ed5..bd5438da153e4 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/IntBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/IntBlockHash.java @@ -77,7 +77,7 @@ private IntVector add(IntVector vector) { } private IntBlock add(IntBlock block) { - MultivalueDedupe.HashResult result = new MultivalueDedupeInt(Block.Ref.floating(block)).hash(longHash); // TODO: block factory + MultivalueDedupe.HashResult result = new MultivalueDedupeInt(block).hash(blockFactory, longHash); seenNull |= result.sawNull(); return result.ords(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/LongBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/LongBlockHash.java index c736cfae65ee7..d817edb9e059a 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/LongBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/LongBlockHash.java @@ -80,7 +80,7 @@ private IntVector add(LongVector vector) { } private IntBlock add(LongBlock block) { - MultivalueDedupe.HashResult result = new MultivalueDedupeLong(Block.Ref.floating(block)).hash(longHash); // TODO: block factory + MultivalueDedupe.HashResult result = new MultivalueDedupeLong(block).hash(blockFactory, longHash); seenNull |= result.sawNull(); return result.ords(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/PackedValuesBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/PackedValuesBlockHash.java index 06b833974a5db..b58c50b79311a 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/PackedValuesBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/PackedValuesBlockHash.java @@ -99,7 +99,7 @@ class AddWork extends LongLongBlockHash.AbstractAddBlock { AddWork(Page page, GroupingAggregatorFunction.AddInput addInput, int batchSize) { super(blockFactory, emitBatchSize, addInput); for (Group group : 
groups) { - group.encoder = MultivalueDedupe.batchEncoder(new Block.Ref(page.getBlock(group.spec.channel()), page), batchSize, true); + group.encoder = MultivalueDedupe.batchEncoder(page.getBlock(group.spec.channel()), batchSize, true); } bytes.grow(nullTrackingBytes); this.positionCount = page.getPositionCount(); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java index cbe74c814594d..39f17cfecab1a 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java @@ -12,7 +12,7 @@ import java.util.BitSet; abstract class AbstractBlock implements Block { - + private int references = 1; private final int positionCount; @Nullable @@ -23,8 +23,6 @@ abstract class AbstractBlock implements Block { protected final BlockFactory blockFactory; - protected boolean released = false; - /** * @param positionCount the number of values in this block */ @@ -99,6 +97,54 @@ public BlockFactory blockFactory() { @Override public boolean isReleased() { - return released; + return hasReferences() == false; + } + + @Override + public final void incRef() { + if (isReleased()) { + throw new IllegalStateException("can't increase refCount on already released block [" + this + "]"); + } + references++; + } + + @Override + public final boolean tryIncRef() { + if (isReleased()) { + return false; + } + references++; + return true; + } + + @Override + public final boolean decRef() { + if (isReleased()) { + throw new IllegalStateException("can't release already released block [" + this + "]"); + } + + references--; + + if (references <= 0) { + closeInternal(); + return true; + } + return false; + } + + @Override + public final boolean hasReferences() { + return references >= 1; } + + @Override + public final void close() { + decRef(); + } + + /** + * This is called when the number of references reaches zero. + * It must release any resources held by the block (adjusting circuit breakers if needed). + */ + protected abstract void closeInternal(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java index 75b02ff911df7..481a914dc89e9 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java @@ -10,7 +10,7 @@ import org.apache.lucene.util.Accountable; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.index.mapper.BlockLoader; @@ -23,12 +23,19 @@ * position. * *

<p> Blocks can represent various shapes of underlying data. A Block can represent either sparse - * or dense data. A Block can represent either single or multi valued data. A Block that represents + * or dense data. A Block can represent either single or multivalued data. A Block that represents * dense single-valued data can be viewed as a {@link Vector}. * - * <p> Block are immutable and can be passed between threads. + * <p> Blocks are reference counted; to make a shallow copy of a block (e.g. if a {@link Page} contains + * the same column twice), use {@link Block#incRef()}. Before a block is garbage collected, + * {@link Block#close()} must be called to release a block's resources; it must also be called one + * additional time for each time {@link Block#incRef()} was called. Calls to {@link Block#decRef()} and + * {@link Block#close()} are equivalent. + * + * <p>
Blocks are immutable and can be passed between threads as long as no two threads hold a reference to + * the same block at the same time. */ -public interface Block extends Accountable, BlockLoader.Block, NamedWriteable, Releasable { +public interface Block extends Accountable, BlockLoader.Block, NamedWriteable, RefCounted, Releasable { /** * {@return an efficient dense single-value view of this block}. @@ -57,14 +64,15 @@ public interface Block extends Accountable, BlockLoader.Block, NamedWriteable, R /** The block factory associated with this block. */ BlockFactory blockFactory(); - /** Tells if this block has been released. A block is released by calling its {@link Block#close()} method. */ + /** + * Tells if this block has been released. A block is released by calling its {@link Block#close()} or {@link Block#decRef()} methods. + * @return true iff the block's reference count is zero. + * */ boolean isReleased(); /** - * Returns true if the value stored at the given position is null, false otherwise. - * * @param position the position - * @return true or false + * @return true if the value stored at the given position is null, false otherwise */ boolean isNull(int position); @@ -91,6 +99,7 @@ public interface Block extends Accountable, BlockLoader.Block, NamedWriteable, R /** * Creates a new block that only exposes the positions provided. Materialization of the selected positions is avoided. + * The new block may hold a reference to this block, increasing this block's reference count. * @param positions the positions to retain * @return a filtered block */ @@ -137,6 +146,7 @@ default boolean mvSortedAscending() { * Expand multivalued fields into one row per value. Returns the * block if there aren't any multivalued fields to expand. */ + // TODO: We should use refcounting instead of either deep copies or returning the same identical block. Block expand(); /** @@ -229,48 +239,6 @@ static Block[] buildAll(Block.Builder... builders) { } } - /** - * A reference to a {@link Block}. This is {@link Releasable} and - * {@link Ref#close closing} it will {@link Block#close release} - * the underlying {@link Block} if it wasn't borrowed from a {@link Page}. - * - * The usual way to use this is: - *

<pre>{@code
-     *   try (Block.Ref ref = eval.eval(page)) {
-     *     return ref.block().doStuff;
-     *   }
-     * }</pre>
    - * - * The {@code try} block will return the memory used by the block to the - * breaker if it was "free floating", but if it was attached to a {@link Page} - * then it'll do nothing. - * - * @param block the block referenced - * @param containedIn the page containing it or null, if it is "free floating". - */ - record Ref(Block block, @Nullable Page containedIn) implements Releasable { - /** - * Create a "free floating" {@link Ref}. - */ - public static Ref floating(Block block) { - return new Ref(block, null); - } - - /** - * Is this block "free floating" or attached to a page? - */ - public boolean floating() { - return containedIn == null; - } - - @Override - public void close() { - if (floating()) { - block.close(); - } - } - } - static List getNamedWriteables() { return List.of( IntBlock.ENTRY, diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java index 89b40d6e46a14..405dd088bf3a5 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java @@ -151,7 +151,17 @@ public static Block[] fromList(BlockFactory blockFactory, List> lis wrappers[j].append.accept(values.get(j)); } } - return Arrays.stream(wrappers).map(b -> b.builder.build()).toArray(Block[]::new); + final Block[] blocks = new Block[wrappers.length]; + try { + for (int i = 0; i < blocks.length; i++) { + blocks[i] = wrappers[i].builder.build(); + } + return blocks; + } finally { + if (blocks[blocks.length - 1] == null) { + Releasables.closeExpectNoException(blocks); + } + } } finally { Releasables.closeExpectNoException(wrappers); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java index 9437bdd35e21f..5823a4b98d52c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java @@ -126,11 +126,7 @@ public String toString() { } @Override - public void close() { - if (isReleased()) { - throw new IllegalStateException("can't release already released block [" + this + "]"); - } - released = true; + public void closeInternal() { blockFactory.adjustBreaker(-ramBytesUsed(), true); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java index ed7e317bfc4c7..9dc27196bd128 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java @@ -71,11 +71,13 @@ public long ramBytesUsed() { } @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released block [" + this + "]"); - } - released = true; + public boolean isReleased() { + return super.isReleased() || vector.isReleased(); + } + + @Override + public void closeInternal() { + assert (vector.isReleased() == false) : "can't release block [" + this + "] containing already released vector"; Releasables.closeExpectNoException(vector); } diff --git 
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocVector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocVector.java index b6ba42f953609..24c656404e89f 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocVector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocVector.java @@ -225,6 +225,7 @@ public long ramBytesUsed() { @Override public void close() { + released = true; Releasables.closeExpectNoException(shards.asBlock(), segments.asBlock(), docs.asBlock()); // Ugh! we always close blocks } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Page.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Page.java index 451a0b540f308..de6b5385ab167 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Page.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Page.java @@ -14,8 +14,6 @@ import java.io.IOException; import java.util.Arrays; -import java.util.Collections; -import java.util.IdentityHashMap; import java.util.Objects; /** @@ -84,7 +82,7 @@ private Page(boolean copyBlocks, int positionCount, Block[] blocks) { private Page(Page prev, Block[] toAdd) { for (Block block : toAdd) { if (prev.positionCount != block.getPositionCount()) { - throw new IllegalArgumentException("Block does not have same position count"); + throw new IllegalArgumentException("Block [" + block + "] does not have same position count"); } } this.positionCount = prev.positionCount; @@ -235,39 +233,7 @@ public void releaseBlocks() { blocksReleased = true; - // blocks can be used as multiple columns - var map = new IdentityHashMap(mapSize(blocks.length)); - for (Block b : blocks) { - if (map.putIfAbsent(b, Boolean.TRUE) == null) { - Releasables.closeExpectNoException(b); - } - } - } - - /** - * Returns a Page from the given blocks and closes all blocks that are not included, from the current Page. - * That is, allows clean-up of the current page _after_ external manipulation of the blocks. - * The current page should no longer be used and be considered closed. - */ - public Page newPageAndRelease(Block... 
keep) { - if (blocksReleased) { - throw new IllegalStateException("can't create new page from already released page"); - } - - blocksReleased = true; - - var newPage = new Page(positionCount, keep); - var set = Collections.newSetFromMap(new IdentityHashMap(mapSize(keep.length))); - set.addAll(Arrays.asList(keep)); - - // close blocks that have been left out - for (Block b : blocks) { - if (set.contains(b) == false) { - Releasables.closeExpectNoException(b); - } - } - - return newPage; + Releasables.closeExpectNoException(blocks); } static int mapSize(int expectedSize) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st index 49a4c43709cde..86a8dfc78450d 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st @@ -168,11 +168,7 @@ $endif$ } @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released block [" + this + "]"); - } - released = true; + public void closeInternal() { $if(BytesRef)$ blockFactory.adjustBreaker(-ramBytesUsed() + values.bigArraysRamBytesUsed(), true); Releasables.closeExpectNoException(values); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st index 3ef4251f80684..89bc84d551b63 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st @@ -20,6 +20,9 @@ public final class $Type$VectorBlock extends AbstractVectorBlock implements $Typ private final $Type$Vector vector; + /** + * @param vector considered owned by the current block; must not be used in any other {@code Block} + */ $Type$VectorBlock($Type$Vector vector) { super(vector.getPositionCount(), vector.blockFactory()); this.vector = vector; @@ -80,15 +83,12 @@ $endif$ @Override public boolean isReleased() { - return released || vector.isReleased(); + return super.isReleased() || vector.isReleased(); } @Override - public void close() { - if (released || vector.isReleased()) { - throw new IllegalStateException("can't release already released block [" + this + "]"); - } - released = true; + public void closeInternal() { + assert (vector.isReleased() == false) : "can't release block [" + this + "] containing already released vector"; Releasables.closeExpectNoException(vector); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/BlockReaderFactories.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/BlockReaderFactories.java index a0d08bc798fbb..967111a09f564 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/BlockReaderFactories.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/BlockReaderFactories.java @@ -7,17 +7,13 @@ package org.elasticsearch.compute.lucene; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.SortedSetDocValues; import org.elasticsearch.common.logging.HeaderWarning; -import org.elasticsearch.index.mapper.BlockDocValuesReader; import org.elasticsearch.index.mapper.BlockLoader; import 
org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.lookup.SearchLookup; -import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Set; @@ -36,23 +32,19 @@ private BlockReaderFactories() {} * @param asUnsupportedSource should the field be loaded as "unsupported"? * These will always have {@code null} values */ - public static List factories( - List searchContexts, - String fieldName, - boolean asUnsupportedSource - ) { - List factories = new ArrayList<>(searchContexts.size()); + public static List loaders(List searchContexts, String fieldName, boolean asUnsupportedSource) { + List loaders = new ArrayList<>(searchContexts.size()); for (SearchContext searchContext : searchContexts) { SearchExecutionContext ctx = searchContext.getSearchExecutionContext(); if (asUnsupportedSource) { - factories.add(loaderToFactory(ctx.getIndexReader(), BlockDocValuesReader.nulls())); + loaders.add(BlockLoader.CONSTANT_NULLS); continue; } MappedFieldType fieldType = ctx.getFieldType(fieldName); if (fieldType == null) { // the field does not exist in this context - factories.add(loaderToFactory(ctx.getIndexReader(), BlockDocValuesReader.nulls())); + loaders.add(BlockLoader.CONSTANT_NULLS); continue; } BlockLoader loader = fieldType.blockLoader(new MappedFieldType.BlockLoaderContext() { @@ -70,39 +62,20 @@ public SearchLookup lookup() { public Set sourcePaths(String name) { return ctx.sourcePath(name); } + + @Override + public String parentField(String field) { + return ctx.parentPath(field); + } }); if (loader == null) { HeaderWarning.addWarning("Field [{}] cannot be retrieved, it is unsupported or not indexed; returning null", fieldName); - factories.add(loaderToFactory(ctx.getIndexReader(), BlockDocValuesReader.nulls())); + loaders.add(BlockLoader.CONSTANT_NULLS); continue; } - factories.add(loaderToFactory(ctx.getIndexReader(), loader)); + loaders.add(loader); } - return factories; - } - - /** - * Converts a {@link BlockLoader}, something defined in core elasticsearch at - * the field level, into a {@link BlockDocValuesReader.Factory} which can be - * used inside ESQL. 
- */ - public static BlockDocValuesReader.Factory loaderToFactory(IndexReader reader, BlockLoader loader) { - return new BlockDocValuesReader.Factory() { - @Override - public BlockDocValuesReader build(int segment) throws IOException { - return loader.reader(reader.leaves().get(segment)); - } - - @Override - public boolean supportsOrdinals() { - return loader.supportsOrdinals(); - } - - @Override - public SortedSetDocValues ordinals(int segment) throws IOException { - return loader.ordinals(reader.leaves().get(segment)); - } - }; + return loaders; } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java index 4ce0af3bd0ffe..9624fa48ef20d 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java @@ -219,7 +219,7 @@ private Page emit(boolean startEmitting) { page = new Page(size, new DocVector(shard.asVector(), segments, docs, null).asBlock()); } finally { if (page == null) { - Releasables.close(shard, segments, docs); + Releasables.closeExpectNoException(shard, segments, docs); } } pagesEmitted++; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java index 61c1bd9730e02..a9c6666ce6f94 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java @@ -7,13 +7,17 @@ package org.elasticsearch.compute.lucene; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.DocBlock; import org.elasticsearch.compute.data.DocVector; import org.elasticsearch.compute.data.ElementType; @@ -23,97 +27,118 @@ import org.elasticsearch.compute.operator.AbstractPageMappingOperator; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.Operator; -import org.elasticsearch.index.mapper.BlockDocValuesReader; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.index.fieldvisitor.StoredFieldLoader; import org.elasticsearch.index.mapper.BlockLoader; -import org.elasticsearch.search.aggregations.support.ValuesSource; +import org.elasticsearch.index.mapper.BlockLoaderStoredFieldsFromLeafLoader; +import org.elasticsearch.index.mapper.SourceLoader; +import org.elasticsearch.search.fetch.StoredFieldsSpec; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.io.UncheckedIOException; +import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Objects; import 
java.util.TreeMap; +import java.util.function.Supplier; +import java.util.stream.Collectors; /** * Operator that extracts doc_values from a Lucene index out of pages that have been produced by {@link LuceneSourceOperator} - * and outputs them to a new column. The operator leverages the {@link ValuesSource} infrastructure for extracting - * field values. This allows for a more uniform way of extracting data compared to deciding the correct doc_values - * loader for different field types. + * and outputs them to a new column. */ public class ValuesSourceReaderOperator extends AbstractPageMappingOperator { /** - * Creates a new extractor that uses ValuesSources load data - * @param sources the value source, type and index readers to use for extraction + * Minimum number of documents for which it is more efficient to use a + * sequential stored field reader when reading stored fields. + *
<p> + * The sequential stored field reader decompresses a whole block of docs + * at a time so for very short lists it won't be faster to use it. We use + * {@code 10} documents as the boundary for "very short" because it's what + * search does, not because we've done extensive testing on the number. + * </p>
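+ * For example (illustrative note, not part of this change): the ten consecutive doc ids {@code [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]} qualify for the sequential reader, while {@code [3, 4, 5]} (fewer than 10) or {@code [3, 7, 11]} (gaps) fall back to the regular stored field reader.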
    + */ + static final int SEQUENTIAL_BOUNDARY = 10; + + /** + * Creates a factory for {@link ValuesSourceReaderOperator}. + * @param fields fields to load + * @param shardContexts per-shard loading information * @param docChannel the channel containing the shard, leaf/segment and doc id - * @param field the lucene field being loaded */ - public record ValuesSourceReaderOperatorFactory(List sources, int docChannel, String field) - implements - OperatorFactory { + public record Factory(List fields, List shardContexts, int docChannel) implements OperatorFactory { @Override public Operator get(DriverContext driverContext) { - return new ValuesSourceReaderOperator(driverContext.blockFactory(), sources, docChannel, field); + return new ValuesSourceReaderOperator(driverContext.blockFactory(), fields, shardContexts, docChannel); } @Override public String describe() { - return "ValuesSourceReaderOperator[field = " + field + "]"; + return "ValuesSourceReaderOperator[field = " + fields.stream().map(f -> f.name).collect(Collectors.joining(", ")) + "]"; } } - /** - * A list, one entry per shard, of factories for {@link BlockDocValuesReader}s - * which perform the actual reading. - */ - private final List factories; - private final int docChannel; - private final String field; - private final ComputeBlockLoaderFactory blockFactory; + public record ShardContext(IndexReader reader, Supplier newSourceLoader) {} - private BlockDocValuesReader lastReader; - private int lastShard = -1; - private int lastSegment = -1; + private final List fields; + private final List shardContexts; + private final int docChannel; + private final BlockFactory blockFactory; private final Map readersBuilt = new TreeMap<>(); + /** + * Configuration for a field to load. + * + * {@code blockLoaders} is a list, one entry per shard, of + * {@link BlockLoader}s which load the actual blocks. 
+ */ + public record FieldInfo(String name, List blockLoaders) {} + /** * Creates a new extractor - * @param factories builds {@link BlockDocValuesReader} + * @param fields fields to load * @param docChannel the channel containing the shard, leaf/segment and doc id - * @param field the lucene field being loaded */ - public ValuesSourceReaderOperator( - BlockFactory blockFactory, - List factories, - int docChannel, - String field - ) { - this.factories = factories; + public ValuesSourceReaderOperator(BlockFactory blockFactory, List fields, List shardContexts, int docChannel) { + this.fields = fields.stream().map(f -> new FieldWork(f)).toList(); + this.shardContexts = shardContexts; this.docChannel = docChannel; - this.field = field; - this.blockFactory = new ComputeBlockLoaderFactory(blockFactory); + this.blockFactory = blockFactory; } @Override protected Page process(Page page) { DocVector docVector = page.getBlock(docChannel).asVector(); + Block[] blocks = new Block[fields.size()]; + boolean success = false; try { if (docVector.singleSegmentNonDecreasing()) { - return page.appendBlock(loadFromSingleLeaf(docVector)); + loadFromSingleLeaf(blocks, docVector); + } else { + loadFromManyLeaves(blocks, docVector); } - return page.appendBlock(loadFromManyLeaves(docVector)); + success = true; } catch (IOException e) { throw new UncheckedIOException(e); + } finally { + if (success == false) { + Releasables.closeExpectNoException(blocks); + } } + return page.appendBlocks(blocks); } - private Block loadFromSingleLeaf(DocVector docVector) throws IOException { - setupReader(docVector.shards().getInt(0), docVector.segments().getInt(0), docVector.docs().getInt(0)); - return ((Block) lastReader.readValues(blockFactory, new BlockLoader.Docs() { - private final IntVector docs = docVector.docs(); - + private void loadFromSingleLeaf(Block[] blocks, DocVector docVector) throws IOException { + int shard = docVector.shards().getInt(0); + int segment = docVector.segments().getInt(0); + int firstDoc = docVector.docs().getInt(0); + IntVector docs = docVector.docs(); + BlockLoader.Docs loaderDocs = new BlockLoader.Docs() { @Override public int count() { return docs.getPositionCount(); @@ -123,44 +148,236 @@ public int count() { public int get(int i) { return docs.getInt(i); } - })); + }; + StoredFieldsSpec storedFieldsSpec = StoredFieldsSpec.NO_REQUIREMENTS; + List rowStrideReaders = new ArrayList<>(fields.size()); + ComputeBlockLoaderFactory loaderBlockFactory = new ComputeBlockLoaderFactory(blockFactory, docs.getPositionCount()); + try { + for (int b = 0; b < fields.size(); b++) { + FieldWork field = fields.get(b); + BlockLoader.ColumnAtATimeReader columnAtATime = field.columnAtATime.reader(shard, segment, firstDoc); + if (columnAtATime != null) { + blocks[b] = (Block) columnAtATime.read(loaderBlockFactory, loaderDocs); + } else { + BlockLoader.RowStrideReader rowStride = field.rowStride.reader(shard, segment, firstDoc); + rowStrideReaders.add( + new RowStrideReaderWork( + rowStride, + (Block.Builder) field.info.blockLoaders.get(shard).builder(loaderBlockFactory, docs.getPositionCount()), + b + ) + ); + storedFieldsSpec = storedFieldsSpec.merge(field.info.blockLoaders.get(shard).rowStrideStoredFieldSpec()); + } + } + + if (rowStrideReaders.isEmpty()) { + return; + } + if (storedFieldsSpec.equals(StoredFieldsSpec.NO_REQUIREMENTS)) { + throw new IllegalStateException( + "found row stride readers [" + rowStrideReaders + "] without stored fields [" + storedFieldsSpec + "]" + ); + } + LeafReaderContext ctx = ctx(shard, 
segment); + StoredFieldLoader storedFieldLoader; + if (useSequentialStoredFieldsReader(docVector.docs())) { + storedFieldLoader = StoredFieldLoader.fromSpecSequential(storedFieldsSpec); + trackStoredFields(storedFieldsSpec, true); + } else { + storedFieldLoader = StoredFieldLoader.fromSpec(storedFieldsSpec); + trackStoredFields(storedFieldsSpec, false); + } + BlockLoaderStoredFieldsFromLeafLoader storedFields = new BlockLoaderStoredFieldsFromLeafLoader( + // TODO enable the optimization by passing non-null to docs if correct + storedFieldLoader.getLoader(ctx, null), + storedFieldsSpec.requiresSource() ? shardContexts.get(shard).newSourceLoader.get().leaf(ctx.reader(), null) : null + ); + for (int p = 0; p < docs.getPositionCount(); p++) { + int doc = docs.getInt(p); + if (storedFields != null) { + storedFields.advanceTo(doc); + } + for (int r = 0; r < rowStrideReaders.size(); r++) { + RowStrideReaderWork work = rowStrideReaders.get(r); + work.reader.read(doc, storedFields, work.builder); + } + } + for (int r = 0; r < rowStrideReaders.size(); r++) { + RowStrideReaderWork work = rowStrideReaders.get(r); + blocks[work.offset] = work.builder.build(); + } + } finally { + Releasables.close(rowStrideReaders); + } } - private Block loadFromManyLeaves(DocVector docVector) throws IOException { + private void loadFromManyLeaves(Block[] blocks, DocVector docVector) throws IOException { + IntVector shards = docVector.shards(); + IntVector segments = docVector.segments(); + IntVector docs = docVector.docs(); + Block.Builder[] builders = new Block.Builder[blocks.length]; int[] forwards = docVector.shardSegmentDocMapForwards(); - int doc = docVector.docs().getInt(forwards[0]); - setupReader(docVector.shards().getInt(forwards[0]), docVector.segments().getInt(forwards[0]), doc); - try (BlockLoader.Builder builder = lastReader.builder(blockFactory, forwards.length)) { - lastReader.readValuesFromSingleDoc(doc, builder); - for (int i = 1; i < forwards.length; i++) { - int shard = docVector.shards().getInt(forwards[i]); - int segment = docVector.segments().getInt(forwards[i]); - doc = docVector.docs().getInt(forwards[i]); - if (segment != lastSegment || shard != lastShard) { - setupReader(shard, segment, doc); + ComputeBlockLoaderFactory loaderBlockFactory = new ComputeBlockLoaderFactory(blockFactory, docs.getPositionCount()); + try { + for (int b = 0; b < fields.size(); b++) { + FieldWork field = fields.get(b); + builders[b] = builderFromFirstNonNull(loaderBlockFactory, field, docs.getPositionCount()); + } + int lastShard = -1; + int lastSegment = -1; + BlockLoaderStoredFieldsFromLeafLoader storedFields = null; + for (int i = 0; i < forwards.length; i++) { + int p = forwards[i]; + int shard = shards.getInt(p); + int segment = segments.getInt(p); + int doc = docs.getInt(p); + if (shard != lastShard || segment != lastSegment) { + lastShard = shard; + lastSegment = segment; + StoredFieldsSpec storedFieldsSpec = storedFieldsSpecForShard(shard); + LeafReaderContext ctx = ctx(shard, segment); + storedFields = new BlockLoaderStoredFieldsFromLeafLoader( + StoredFieldLoader.fromSpec(storedFieldsSpec).getLoader(ctx, null), + storedFieldsSpec.requiresSource() ? 
shardContexts.get(shard).newSourceLoader.get().leaf(ctx.reader(), null) : null + ); + if (false == storedFieldsSpec.equals(StoredFieldsSpec.NO_REQUIREMENTS)) { + trackStoredFields(storedFieldsSpec, false); + } + } + storedFields.advanceTo(doc); + for (int r = 0; r < blocks.length; r++) { + fields.get(r).rowStride.reader(shard, segment, doc).read(doc, storedFields, builders[r]); } - lastReader.readValuesFromSingleDoc(doc, builder); } - try (Block orig = ((Block.Builder) builder).build()) { - return orig.filter(docVector.shardSegmentDocMapBackwards()); + for (int r = 0; r < blocks.length; r++) { + try (Block orig = builders[r].build()) { + blocks[r] = orig.filter(docVector.shardSegmentDocMapBackwards()); + } } + } finally { + Releasables.closeExpectNoException(builders); } } - private void setupReader(int shard, int segment, int doc) throws IOException { - if (lastSegment == segment && lastShard == shard && BlockDocValuesReader.canReuse(lastReader, doc)) { - return; + /** + * Is it more efficient to use a sequential stored field reader + * when reading stored fields for the documents contained in {@code docIds}? + */ + private boolean useSequentialStoredFieldsReader(IntVector docIds) { + return docIds.getPositionCount() >= SEQUENTIAL_BOUNDARY + && docIds.getInt(docIds.getPositionCount() - 1) - docIds.getInt(0) == docIds.getPositionCount() - 1; + } + + private void trackStoredFields(StoredFieldsSpec spec, boolean sequential) { + readersBuilt.merge( + "stored_fields[" + + "requires_source:" + + spec.requiresSource() + + ", fields:" + + spec.requiredStoredFields().size() + + ", sequential: " + + sequential + + "]", + 1, + (prev, one) -> prev + one + ); + } + + /** + * Returns a builder from the first non - {@link BlockLoader#CONSTANT_NULLS} loader + * in the list. If they are all the null loader then returns a null builder. + */ + private Block.Builder builderFromFirstNonNull(BlockLoader.BlockFactory loaderBlockFactory, FieldWork field, int positionCount) { + for (BlockLoader loader : field.info.blockLoaders) { + if (loader != BlockLoader.CONSTANT_NULLS) { + return (Block.Builder) loader.builder(loaderBlockFactory, positionCount); + } } + // All null, just let the first one build the null block loader. + return (Block.Builder) field.info.blockLoaders.get(0).builder(loaderBlockFactory, positionCount); + } - lastReader = factories.get(shard).build(segment); - lastShard = shard; - lastSegment = segment; - readersBuilt.compute(lastReader.toString(), (k, v) -> v == null ? 
1 : v + 1); + private StoredFieldsSpec storedFieldsSpecForShard(int shard) { + StoredFieldsSpec storedFieldsSpec = StoredFieldsSpec.NO_REQUIREMENTS; + for (int b = 0; b < fields.size(); b++) { + FieldWork field = fields.get(b); + storedFieldsSpec = storedFieldsSpec.merge(field.info.blockLoaders.get(shard).rowStrideStoredFieldSpec()); + } + return storedFieldsSpec; + } + + private class FieldWork { + final FieldInfo info; + final GuardedReader columnAtATime = new GuardedReader<>() { + @Override + BlockLoader.ColumnAtATimeReader build(BlockLoader loader, LeafReaderContext ctx) throws IOException { + return loader.columnAtATimeReader(ctx); + } + + @Override + String type() { + return "column_at_a_time"; + } + }; + + final GuardedReader rowStride = new GuardedReader<>() { + @Override + BlockLoader.RowStrideReader build(BlockLoader loader, LeafReaderContext ctx) throws IOException { + return loader.rowStrideReader(ctx); + } + + @Override + String type() { + return "row_stride"; + } + }; + + FieldWork(FieldInfo info) { + this.info = info; + } + + private abstract class GuardedReader { + private int lastShard = -1; + private int lastSegment = -1; + V lastReader; + + V reader(int shard, int segment, int startingDocId) throws IOException { + if (lastShard == shard && lastSegment == segment) { + if (lastReader == null) { + return null; + } + if (lastReader.canReuse(startingDocId)) { + return lastReader; + } + } + lastShard = shard; + lastSegment = segment; + lastReader = build(info.blockLoaders.get(shard), ctx(shard, segment)); + readersBuilt.merge(info.name + ":" + type() + ":" + lastReader, 1, (prev, one) -> prev + one); + return lastReader; + } + + abstract V build(BlockLoader loader, LeafReaderContext ctx) throws IOException; + + abstract String type(); + } + } + + private record RowStrideReaderWork(BlockLoader.RowStrideReader reader, Block.Builder builder, int offset) implements Releasable { + @Override + public void close() { + builder.close(); + } + } + + private LeafReaderContext ctx(int shard, int segment) { + return shardContexts.get(shard).reader.leaves().get(segment); } @Override public String toString() { - return "ValuesSourceReaderOperator[field = " + field + "]"; + return "ValuesSourceReaderOperator[field = " + fields.stream().map(f -> f.info.name).collect(Collectors.joining(", ")) + "]"; } @Override @@ -233,11 +450,14 @@ public String toString() { } } - private static class ComputeBlockLoaderFactory implements BlockLoader.BuilderFactory { + private static class ComputeBlockLoaderFactory implements BlockLoader.BlockFactory { private final BlockFactory factory; + private final int pageSize; + private Block nullBlock; - private ComputeBlockLoaderFactory(BlockFactory factory) { + private ComputeBlockLoaderFactory(BlockFactory factory, int pageSize) { this.factory = factory; + this.pageSize = pageSize; } @Override @@ -295,9 +515,26 @@ public BlockLoader.Builder nulls(int expectedCount) { return ElementType.NULL.newBlockBuilder(expectedCount, factory); } + @Override + public Block constantNulls() { + if (nullBlock == null) { + nullBlock = factory.newConstantNullBlock(pageSize); + } else { + nullBlock.incRef(); + } + return nullBlock; + } + + @Override + public BytesRefBlock constantBytes(BytesRef value) { + return factory.newConstantBytesRefBlockWith(value, pageSize); + } + @Override public BlockLoader.SingletonOrdinalsBuilder singletonOrdinalsBuilder(SortedDocValues ordinals, int count) { return new SingletonOrdinalsBuilder(factory, ordinals, count); } } + + // TODO tests that mix source 
loaded fields and doc values in the same block } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AggregationOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AggregationOperator.java index 3e653b1a19750..07d1809262c9b 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AggregationOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AggregationOperator.java @@ -124,10 +124,11 @@ public boolean isFinished() { @Override public void close() { - if (output != null) { - Releasables.closeExpectNoException(() -> output.releaseBlocks()); - } - Releasables.close(aggregators); + Releasables.closeExpectNoException(() -> { + if (output != null) { + Releasables.closeExpectNoException(() -> output.releaseBlocks()); + } + }, Releasables.wrap(aggregators)); } private static void checkState(boolean condition, String msg) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java index 34fdd32aab98a..1835bea60de24 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.Releasables; import org.elasticsearch.index.seqno.LocalCheckpointTracker; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.tasks.TaskCancelledException; @@ -30,9 +31,11 @@ public abstract class AsyncOperator implements Operator { private final Map buffers = ConcurrentCollections.newConcurrentMap(); private final AtomicReference failure = new AtomicReference<>(); + private final DriverContext driverContext; private final int maxOutstandingRequests; private boolean finished = false; + private volatile boolean closed = false; /* * The checkpoint tracker is used to maintain the order of emitted pages after passing through this async operator. 
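* Annotation (illustrative, not part of this change): each input page is assigned a sequence number from this tracker, the result of {@code performAsync} is parked in the {@code buffers} map under that number, and pages are only handed out once every lower sequence number has completed, so output order matches input order.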
@@ -50,7 +53,8 @@ public abstract class AsyncOperator implements Operator { * * @param maxOutstandingRequests the maximum number of outstanding requests */ - public AsyncOperator(int maxOutstandingRequests) { + public AsyncOperator(DriverContext driverContext, int maxOutstandingRequests) { + this.driverContext = driverContext; this.maxOutstandingRequests = maxOutstandingRequests; } @@ -62,15 +66,29 @@ public boolean needsInput() { @Override public void addInput(Page input) { - checkFailure(); + if (failure.get() != null) { + input.releaseBlocks(); + return; + } final long seqNo = checkpoint.generateSeqNo(); - performAsync(input, ActionListener.wrap(output -> { - buffers.put(seqNo, output); - onSeqNoCompleted(seqNo); - }, e -> { - onFailure(e); - onSeqNoCompleted(seqNo); - })); + driverContext.addAsyncAction(); + boolean success = false; + try { + final ActionListener listener = ActionListener.wrap(output -> { + buffers.put(seqNo, output); + onSeqNoCompleted(seqNo); + }, e -> { + input.releaseBlocks(); + onFailure(e); + onSeqNoCompleted(seqNo); + }); + performAsync(input, ActionListener.runAfter(listener, driverContext::removeAsyncAction)); + success = true; + } finally { + if (success == false) { + driverContext.removeAsyncAction(); + } + } } /** @@ -81,6 +99,8 @@ public void addInput(Page input) { */ protected abstract void performAsync(Page inputPage, ActionListener listener); + protected abstract void doClose(); + private void onFailure(Exception e) { failure.getAndUpdate(first -> { if (first == null) { @@ -105,6 +125,9 @@ private void onSeqNoCompleted(long seqNo) { if (checkpoint.getPersistedCheckpoint() < checkpoint.getProcessedCheckpoint()) { notifyIfBlocked(); } + if (closed || failure.get() != null) { + discardPages(); + } } private void notifyIfBlocked() { @@ -123,10 +146,30 @@ private void notifyIfBlocked() { private void checkFailure() { Exception e = failure.get(); if (e != null) { + discardPages(); throw ExceptionsHelper.convertToElastic(e); } } + private void discardPages() { + long nextCheckpoint; + while ((nextCheckpoint = checkpoint.getPersistedCheckpoint() + 1) <= checkpoint.getProcessedCheckpoint()) { + Page page = buffers.remove(nextCheckpoint); + checkpoint.markSeqNoAsPersisted(nextCheckpoint); + if (page != null) { + Releasables.closeExpectNoException(page::releaseBlocks); + } + } + } + + @Override + public final void close() { + finish(); + closed = true; + discardPages(); + doClose(); + } + @Override public void finish() { finished = true; @@ -154,6 +197,7 @@ public Page getOutput() { @Override public SubscribableListener isBlocked() { + // TODO: Add an exchange service between async operation instead? 
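// Annotation (an assumption about the contract, not part of this change): when not finished, isBlocked() waits until notifyIfBlocked() signals that an in-order page is ready.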
if (finished) { return Operator.NOT_BLOCKED; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ColumnExtractOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ColumnExtractOperator.java index 58bf9e097bec3..e83a258957104 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ColumnExtractOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ColumnExtractOperator.java @@ -63,8 +63,7 @@ protected Page process(Page page) { blockBuilders[i] = types[i].newBlockBuilder(rowsCount, driverContext.blockFactory()); } - try (Block.Ref ref = inputEvaluator.eval(page)) { - BytesRefBlock input = (BytesRefBlock) ref.block(); + try (BytesRefBlock input = (BytesRefBlock) inputEvaluator.eval(page)) { BytesRef spare = new BytesRef(); for (int row = 0; row < rowsCount; row++) { if (input.isNull(row)) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java index bd06296309886..176b2bda31e3e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java @@ -316,7 +316,7 @@ private static void schedule( @Override protected void doRun() { if (driver.isFinished()) { - listener.onResponse(null); + onComplete(listener); return; } SubscribableListener fut = driver.run(maxTime, maxIterations); @@ -339,7 +339,11 @@ protected void doRun() { @Override public void onFailure(Exception e) { driver.drainAndCloseOperators(e); - listener.onFailure(e); + onComplete(ActionListener.running(() -> listener.onFailure(e))); + } + + void onComplete(ActionListener listener) { + driver.driverContext.waitForAsyncActions(ContextPreservingActionListener.wrapPreservingContext(listener, threadContext)); } }); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverContext.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverContext.java index 85860cf8766f1..d645a7cbe0185 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverContext.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverContext.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.operator; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.BlockFactory; @@ -17,6 +19,8 @@ import java.util.IdentityHashMap; import java.util.Objects; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; /** @@ -34,7 +38,11 @@ * This allows to "transfer ownership" of a shared resource across operators (and even across * Drivers), while ensuring that the resource can be correctly released when no longer needed. * - * Currently only supports releasables, but additional driver-local context can be added. + * DriverContext can also be used to track async actions. The driver may close an operator while + * some of its async actions are still running. 
To prevent the driver from finishing in this case, + * methods {@link #addAsyncAction()} and {@link #removeAsyncAction()} are provided for tracking + * such actions. Subsequently, the driver uses {@link #waitForAsyncActions(ActionListener)} to + * await the completion of all async actions before finalizing the Driver. */ public class DriverContext { @@ -47,6 +55,8 @@ public class DriverContext { private final BlockFactory blockFactory; + private final AsyncActions asyncActions = new AsyncActions(); + public DriverContext(BigArrays bigArrays, BlockFactory blockFactory) { Objects.requireNonNull(bigArrays); Objects.requireNonNull(blockFactory); @@ -119,6 +129,7 @@ public void finish() { } // must be called by the thread executing the driver. // no more updates to this context. + asyncActions.finish(); var itr = workingSet.iterator(); workingSet = null; Set releasableSet = Collections.newSetFromMap(new IdentityHashMap<>()); @@ -135,4 +146,45 @@ private void ensureFinished() { throw new IllegalStateException("not finished"); } } + + public void waitForAsyncActions(ActionListener listener) { + asyncActions.addListener(listener); + } + + public void addAsyncAction() { + asyncActions.addInstance(); + } + + public void removeAsyncAction() { + asyncActions.removeInstance(); + } + + private static class AsyncActions { + private final SubscribableListener completion = new SubscribableListener<>(); + private final AtomicBoolean finished = new AtomicBoolean(); + private final AtomicInteger instances = new AtomicInteger(1); + + void addInstance() { + if (finished.get()) { + throw new IllegalStateException("DriverContext was finished already"); + } + instances.incrementAndGet(); + } + + void removeInstance() { + if (instances.decrementAndGet() == 0) { + completion.onResponse(null); + } + } + + void addListener(ActionListener listener) { + completion.addListener(listener); + } + + void finish() { + if (finished.compareAndSet(false, true)) { + removeInstance(); + } + } + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverTaskRunner.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverTaskRunner.java index 38d879f8f7ad4..1293118680824 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverTaskRunner.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverTaskRunner.java @@ -11,9 +11,9 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; -import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.compute.OwningChannelActionListener; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -117,7 +117,7 @@ public Status getStatus() { private record DriverRequestHandler(TransportService transportService) implements TransportRequestHandler { @Override public void messageReceived(DriverRequest request, TransportChannel channel, Task task) { - var listener = new ChannelActionListener(channel); + var listener = new OwningChannelActionListener(channel); Driver.start( transportService.getThreadPool().getThreadContext(), request.executor, diff --git 
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/EvalOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/EvalOperator.java index 65efdc4266b28..2a6a3c9b6210b 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/EvalOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/EvalOperator.java @@ -9,7 +9,6 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; -import org.elasticsearch.compute.data.BlockUtils; import org.elasticsearch.compute.data.Page; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -43,8 +42,7 @@ public EvalOperator(BlockFactory blockFactory, ExpressionEvaluator evaluator) { @Override protected Page process(Page page) { - Block.Ref ref = evaluator.eval(page); - Block block = ref.floating() ? ref.block() : BlockUtils.deepCopyOf(ref.block(), blockFactory); + Block block = evaluator.eval(page); return page.appendBlock(block); } @@ -69,8 +67,9 @@ interface Factory { /** * Evaluate the expression. + * @return the returned Block has its own reference and the caller is responsible for releasing it. */ - Block.Ref eval(Page page); + Block eval(Page page); } public static final ExpressionEvaluator.Factory CONSTANT_NULL_FACTORY = new ExpressionEvaluator.Factory() { @@ -87,8 +86,8 @@ public String toString() { public static final ExpressionEvaluator CONSTANT_NULL = new ExpressionEvaluator() { @Override - public Block.Ref eval(Page page) { - return Block.Ref.floating(Block.constantNullBlock(page.getPositionCount())); + public Block eval(Page page) { + return Block.constantNullBlock(page.getPositionCount()); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FilterOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FilterOperator.java index be4996e129d7b..81d788611125b 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FilterOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FilterOperator.java @@ -41,13 +41,12 @@ protected Page process(Page page) { int rowCount = 0; int[] positions = new int[page.getPositionCount()]; - try (Block.Ref ref = evaluator.eval(page)) { - if (ref.block().areAllValuesNull()) { + try (BooleanBlock test = (BooleanBlock) evaluator.eval(page)) { + if (test.areAllValuesNull()) { // All results are null which is like false. No values selected. 
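// Annotation (not part of this change): releasing the incoming page here returns its blocks' memory to the circuit breaker before the null result is returned.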
page.releaseBlocks(); return null; } - BooleanBlock test = (BooleanBlock) ref.block(); // TODO we can detect constant true or false from the type // TODO or we could make a new method in bool-valued evaluators that returns a list of numbers for (int p = 0; p < page.getPositionCount(); p++) { @@ -78,7 +77,7 @@ protected Page process(Page page) { } success = true; } finally { - Releasables.closeExpectNoException(page::releaseBlocks); + page.releaseBlocks(); if (success == false) { Releasables.closeExpectNoException(filteredBlocks); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java index 4b26b74b42a1d..39068787f3c9e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java @@ -161,7 +161,7 @@ public void finish() { } finally { // selected should always be closed if (selected != null) { - Releasables.closeExpectNoException(selected.asBlock()); // we always close blocks, not vectors + selected.close(); } if (success == false && blocks != null) { Releasables.closeExpectNoException(blocks); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/LimitOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/LimitOperator.java index a41057386d365..bcd2ffa1f3855 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/LimitOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/LimitOperator.java @@ -100,11 +100,12 @@ public Page getOutput() { } success = true; } finally { - Releasables.closeExpectNoException(lastInput::releaseBlocks); - lastInput = null; if (success == false) { - Releasables.closeExpectNoException(blocks); + Releasables.closeExpectNoException(lastInput::releaseBlocks, Releasables.wrap(blocks)); + } else { + lastInput.releaseBlocks(); } + lastInput = null; } result = new Page(blocks); limitRemaining = 0; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupe.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupe.java index ea4f9dc1e05a6..36aa8621062a5 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupe.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupe.java @@ -28,13 +28,13 @@ public final class MultivalueDedupe { * Remove duplicate values from each position and write the results to a * {@link Block} using an adaptive algorithm based on the size of the input list. 
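* For example (illustrative, not part of this change): a position holding {@code [1, 2, 2, 3]} dedupes to {@code [1, 2, 3]}, while a block that is already deduplicated is returned as-is with an extra reference rather than copied.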
*/ - public static Block.Ref dedupeToBlockAdaptive(Block.Ref ref, BlockFactory blockFactory) { - return switch (ref.block().elementType()) { - case BOOLEAN -> new MultivalueDedupeBoolean(ref).dedupeToBlock(blockFactory); - case BYTES_REF -> new MultivalueDedupeBytesRef(ref).dedupeToBlockAdaptive(blockFactory); - case INT -> new MultivalueDedupeInt(ref).dedupeToBlockAdaptive(blockFactory); - case LONG -> new MultivalueDedupeLong(ref).dedupeToBlockAdaptive(blockFactory); - case DOUBLE -> new MultivalueDedupeDouble(ref).dedupeToBlockAdaptive(blockFactory); + public static Block dedupeToBlockAdaptive(Block block, BlockFactory blockFactory) { + return switch (block.elementType()) { + case BOOLEAN -> new MultivalueDedupeBoolean((BooleanBlock) block).dedupeToBlock(blockFactory); + case BYTES_REF -> new MultivalueDedupeBytesRef((BytesRefBlock) block).dedupeToBlockAdaptive(blockFactory); + case INT -> new MultivalueDedupeInt((IntBlock) block).dedupeToBlockAdaptive(blockFactory); + case LONG -> new MultivalueDedupeLong((LongBlock) block).dedupeToBlockAdaptive(blockFactory); + case DOUBLE -> new MultivalueDedupeDouble((DoubleBlock) block).dedupeToBlockAdaptive(blockFactory); default -> throw new IllegalArgumentException(); }; } @@ -45,13 +45,13 @@ public static Block.Ref dedupeToBlockAdaptive(Block.Ref ref, BlockFactory blockF * case complexity for larger. Prefer {@link #dedupeToBlockAdaptive} * which picks based on the number of elements at each position. */ - public static Block.Ref dedupeToBlockUsingCopyMissing(Block.Ref ref, BlockFactory blockFactory) { - return switch (ref.block().elementType()) { - case BOOLEAN -> new MultivalueDedupeBoolean(ref).dedupeToBlock(blockFactory); - case BYTES_REF -> new MultivalueDedupeBytesRef(ref).dedupeToBlockUsingCopyMissing(blockFactory); - case INT -> new MultivalueDedupeInt(ref).dedupeToBlockUsingCopyMissing(blockFactory); - case LONG -> new MultivalueDedupeLong(ref).dedupeToBlockUsingCopyMissing(blockFactory); - case DOUBLE -> new MultivalueDedupeDouble(ref).dedupeToBlockUsingCopyMissing(blockFactory); + public static Block dedupeToBlockUsingCopyMissing(Block block, BlockFactory blockFactory) { + return switch (block.elementType()) { + case BOOLEAN -> new MultivalueDedupeBoolean((BooleanBlock) block).dedupeToBlock(blockFactory); + case BYTES_REF -> new MultivalueDedupeBytesRef((BytesRefBlock) block).dedupeToBlockUsingCopyMissing(blockFactory); + case INT -> new MultivalueDedupeInt((IntBlock) block).dedupeToBlockUsingCopyMissing(blockFactory); + case LONG -> new MultivalueDedupeLong((LongBlock) block).dedupeToBlockUsingCopyMissing(blockFactory); + case DOUBLE -> new MultivalueDedupeDouble((DoubleBlock) block).dedupeToBlockUsingCopyMissing(blockFactory); default -> throw new IllegalArgumentException(); }; } @@ -64,13 +64,13 @@ public static Block.Ref dedupeToBlockUsingCopyMissing(Block.Ref ref, BlockFactor * performance is dominated by the {@code n*log n} sort. Prefer * {@link #dedupeToBlockAdaptive} unless you need the results sorted. 
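* Roughly (a sketch inferred from the name, not text from this change): a value is copied only when a linear scan of the values already copied for that position does not find it, which stays cheap while positions are short.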
*/ - public static Block.Ref dedupeToBlockUsingCopyAndSort(Block.Ref ref, BlockFactory blockFactory) { - return switch (ref.block().elementType()) { - case BOOLEAN -> new MultivalueDedupeBoolean(ref).dedupeToBlock(blockFactory); - case BYTES_REF -> new MultivalueDedupeBytesRef(ref).dedupeToBlockUsingCopyAndSort(blockFactory); - case INT -> new MultivalueDedupeInt(ref).dedupeToBlockUsingCopyAndSort(blockFactory); - case LONG -> new MultivalueDedupeLong(ref).dedupeToBlockUsingCopyAndSort(blockFactory); - case DOUBLE -> new MultivalueDedupeDouble(ref).dedupeToBlockUsingCopyAndSort(blockFactory); + public static Block dedupeToBlockUsingCopyAndSort(Block block, BlockFactory blockFactory) { + return switch (block.elementType()) { + case BOOLEAN -> new MultivalueDedupeBoolean((BooleanBlock) block).dedupeToBlock(blockFactory); + case BYTES_REF -> new MultivalueDedupeBytesRef((BytesRefBlock) block).dedupeToBlockUsingCopyAndSort(blockFactory); + case INT -> new MultivalueDedupeInt((IntBlock) block).dedupeToBlockUsingCopyAndSort(blockFactory); + case LONG -> new MultivalueDedupeLong((LongBlock) block).dedupeToBlockUsingCopyAndSort(blockFactory); + case DOUBLE -> new MultivalueDedupeDouble((DoubleBlock) block).dedupeToBlockUsingCopyAndSort(blockFactory); default -> throw new IllegalArgumentException(); }; } @@ -83,23 +83,23 @@ public static ExpressionEvaluator.Factory evaluator(ElementType elementType, Exp return switch (elementType) { case BOOLEAN -> new EvaluatorFactory( field, - (blockFactory, ref) -> new MultivalueDedupeBoolean(ref).dedupeToBlock(blockFactory) + (blockFactory, block) -> new MultivalueDedupeBoolean((BooleanBlock) block).dedupeToBlock(blockFactory) ); case BYTES_REF -> new EvaluatorFactory( field, - (blockFactory, ref) -> new MultivalueDedupeBytesRef(ref).dedupeToBlockAdaptive(blockFactory) + (blockFactory, block) -> new MultivalueDedupeBytesRef((BytesRefBlock) block).dedupeToBlockAdaptive(blockFactory) ); case INT -> new EvaluatorFactory( field, - (blockFactory, ref) -> new MultivalueDedupeInt(ref).dedupeToBlockAdaptive(blockFactory) + (blockFactory, block) -> new MultivalueDedupeInt((IntBlock) block).dedupeToBlockAdaptive(blockFactory) ); case LONG -> new EvaluatorFactory( field, - (blockFactory, ref) -> new MultivalueDedupeLong(ref).dedupeToBlockAdaptive(blockFactory) + (blockFactory, block) -> new MultivalueDedupeLong((LongBlock) block).dedupeToBlockAdaptive(blockFactory) ); case DOUBLE -> new EvaluatorFactory( field, - (blockFactory, ref) -> new MultivalueDedupeDouble(ref).dedupeToBlockAdaptive(blockFactory) + (blockFactory, block) -> new MultivalueDedupeDouble((DoubleBlock) block).dedupeToBlockAdaptive(blockFactory) ); case NULL -> field; // The page is all nulls and when you dedupe that it's still all nulls default -> throw new IllegalArgumentException("unsupported type [" + elementType + "]"); @@ -116,13 +116,12 @@ public record HashResult(IntBlock ords, boolean sawNull) {} * and then encodes the results into a {@link byte[]} which can be used for * things like hashing many fields together. 
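* Roughly (inferred, not text from this change): the values at each position are copied and sorted, then each value that differs from its predecessor is emitted, collapsing duplicates.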
*/ - public static BatchEncoder batchEncoder(Block.Ref ref, int batchSize, boolean allowDirectEncoder) { - if (ref.block().areAllValuesNull()) { - return new BatchEncoder.DirectNulls(ref.block()); + public static BatchEncoder batchEncoder(Block block, int batchSize, boolean allowDirectEncoder) { + if (block.areAllValuesNull()) { + return new BatchEncoder.DirectNulls(block); } - var elementType = ref.block().elementType(); - if (allowDirectEncoder && ref.block().mvDeduplicated()) { - var block = ref.block(); + var elementType = block.elementType(); + if (allowDirectEncoder && block.mvDeduplicated()) { return switch (elementType) { case BOOLEAN -> new BatchEncoder.DirectBooleans((BooleanBlock) block); case BYTES_REF -> new BatchEncoder.DirectBytesRefs((BytesRefBlock) block); @@ -133,17 +132,17 @@ public static BatchEncoder batchEncoder(Block.Ref ref, int batchSize, boolean al }; } else { return switch (elementType) { - case BOOLEAN -> new MultivalueDedupeBoolean(ref).batchEncoder(batchSize); - case BYTES_REF -> new MultivalueDedupeBytesRef(ref).batchEncoder(batchSize); - case INT -> new MultivalueDedupeInt(ref).batchEncoder(batchSize); - case LONG -> new MultivalueDedupeLong(ref).batchEncoder(batchSize); - case DOUBLE -> new MultivalueDedupeDouble(ref).batchEncoder(batchSize); + case BOOLEAN -> new MultivalueDedupeBoolean((BooleanBlock) block).batchEncoder(batchSize); + case BYTES_REF -> new MultivalueDedupeBytesRef((BytesRefBlock) block).batchEncoder(batchSize); + case INT -> new MultivalueDedupeInt((IntBlock) block).batchEncoder(batchSize); + case LONG -> new MultivalueDedupeLong((LongBlock) block).batchEncoder(batchSize); + case DOUBLE -> new MultivalueDedupeDouble((DoubleBlock) block).batchEncoder(batchSize); default -> throw new IllegalArgumentException(); }; } } - private record EvaluatorFactory(ExpressionEvaluator.Factory field, BiFunction dedupe) + private record EvaluatorFactory(ExpressionEvaluator.Factory field, BiFunction dedupe) implements ExpressionEvaluator.Factory { @Override @@ -160,17 +159,19 @@ public String toString() { private static class Evaluator implements ExpressionEvaluator { private final BlockFactory blockFactory; private final ExpressionEvaluator field; - private final BiFunction dedupe; + private final BiFunction dedupe; - protected Evaluator(BlockFactory blockFactory, ExpressionEvaluator field, BiFunction dedupe) { + protected Evaluator(BlockFactory blockFactory, ExpressionEvaluator field, BiFunction dedupe) { this.blockFactory = blockFactory; this.field = field; this.dedupe = dedupe; } @Override - public Block.Ref eval(Page page) { - return dedupe.apply(blockFactory, field.eval(page)); + public Block eval(Page page) { + try (Block block = field.eval(page)) { + return dedupe.apply(blockFactory, block); + } } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupeBoolean.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupeBoolean.java index 4170b4727df2c..f3570bf7b853b 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupeBoolean.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupeBoolean.java @@ -8,7 +8,6 @@ package org.elasticsearch.compute.operator; import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; -import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import 
org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.IntBlock; @@ -31,24 +30,23 @@ public class MultivalueDedupeBoolean { */ public static final int TRUE_ORD = 2; - private final Block.Ref ref; private final BooleanBlock block; private boolean seenTrue; private boolean seenFalse; - public MultivalueDedupeBoolean(Block.Ref ref) { - this.ref = ref; - this.block = (BooleanBlock) ref.block(); + public MultivalueDedupeBoolean(BooleanBlock block) { + this.block = block; } /** * Dedupe values using an adaptive algorithm based on the size of the input list. */ - public Block.Ref dedupeToBlock(BlockFactory blockFactory) { + public BooleanBlock dedupeToBlock(BlockFactory blockFactory) { if (false == block.mayHaveMultivaluedFields()) { - return ref; + block.incRef(); + return block; } - try (ref; BooleanBlock.Builder builder = BooleanBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (BooleanBlock.Builder builder = BooleanBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -61,7 +59,7 @@ public Block.Ref dedupeToBlock(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -70,8 +68,8 @@ public Block.Ref dedupeToBlock(BlockFactory blockFactory) { * as the grouping block to a {@link GroupingAggregatorFunction}. * @param everSeen array tracking if the values {@code false} and {@code true} are ever seen */ - public IntBlock hash(boolean[] everSeen) { - try (IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount())) { + public IntBlock hash(BlockFactory blockFactory, boolean[] everSeen) { + try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java index 07494f97cfd6d..4fb90ddb57e25 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java @@ -32,7 +32,7 @@ import org.elasticsearch.compute.operator.HashAggregationOperator.GroupSpec; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; -import org.elasticsearch.index.mapper.BlockDocValuesReader; +import org.elasticsearch.index.mapper.BlockLoader; import java.io.IOException; import java.io.UncheckedIOException; @@ -52,7 +52,8 @@ */ public class OrdinalsGroupingOperator implements Operator { public record OrdinalsGroupingOperatorFactory( - List readerFactories, + List blockLoaders, + List shardContexts, ElementType groupingElementType, int docChannel, String groupingField, @@ -64,7 +65,8 @@ public record OrdinalsGroupingOperatorFactory( @Override public Operator get(DriverContext driverContext) { return new OrdinalsGroupingOperator( - readerFactories, + blockLoaders, + shardContexts, groupingElementType, docChannel, groupingField, @@ -81,7 +83,8 @@ public String describe() { } } - private final List readerFactories; + private final List blockLoaders; + private final List shardContexts; private final int 
docChannel; private final String groupingField; @@ -99,7 +102,8 @@ public String describe() { private ValuesAggregator valuesAggregator; public OrdinalsGroupingOperator( - List readerFactories, + List blockLoaders, + List shardContexts, ElementType groupingElementType, int docChannel, String groupingField, @@ -109,7 +113,8 @@ public OrdinalsGroupingOperator( DriverContext driverContext ) { Objects.requireNonNull(aggregatorFactories); - this.readerFactories = readerFactories; + this.blockLoaders = blockLoaders; + this.shardContexts = shardContexts; this.groupingElementType = groupingElementType; this.docChannel = docChannel; this.groupingField = groupingField; @@ -131,10 +136,10 @@ public void addInput(Page page) { requireNonNull(page, "page is null"); DocVector docVector = page.getBlock(docChannel).asVector(); final int shardIndex = docVector.shards().getInt(0); - final var readerFactory = readerFactories.get(shardIndex); + final var blockLoader = blockLoaders.get(shardIndex); boolean pagePassed = false; try { - if (docVector.singleSegmentNonDecreasing() && readerFactory.supportsOrdinals()) { + if (docVector.singleSegmentNonDecreasing() && blockLoader.supportsOrdinals()) { final IntVector segmentIndexVector = docVector.segments(); assert segmentIndexVector.isConstant(); final OrdinalSegmentAggregator ordinalAggregator = this.ordinalAggregators.computeIfAbsent( @@ -144,7 +149,7 @@ public void addInput(Page page) { return new OrdinalSegmentAggregator( driverContext.blockFactory(), this::createGroupingAggregators, - () -> readerFactory.ordinals(k.segmentIndex), + () -> blockLoader.ordinals(shardContexts.get(k.shardIndex).reader().leaves().get(k.segmentIndex)), bigArrays ); } catch (IOException e) { @@ -158,7 +163,8 @@ public void addInput(Page page) { if (valuesAggregator == null) { int channelIndex = page.getBlockCount(); // extractor will append a new block at the end valuesAggregator = new ValuesAggregator( - readerFactories, + blockLoaders, + shardContexts, groupingElementType, docChannel, groupingField, @@ -458,7 +464,8 @@ private static class ValuesAggregator implements Releasable { private final HashAggregationOperator aggregator; ValuesAggregator( - List factories, + List blockLoaders, + List shardContexts, ElementType groupingElementType, int docChannel, String groupingField, @@ -467,7 +474,12 @@ private static class ValuesAggregator implements Releasable { int maxPageSize, DriverContext driverContext ) { - this.extractor = new ValuesSourceReaderOperator(BlockFactory.getNonBreakingInstance(), factories, docChannel, groupingField); + this.extractor = new ValuesSourceReaderOperator( + BlockFactory.getNonBreakingInstance(), + List.of(new ValuesSourceReaderOperator.FieldInfo(groupingField, blockLoaders)), + shardContexts, + docChannel + ); this.aggregator = new HashAggregationOperator( aggregatorFactories, () -> BlockHash.build(List.of(new GroupSpec(channelIndex, groupingElementType)), driverContext, maxPageSize, false), diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OutputOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OutputOperator.java index 47ee5bb1b6a15..6d5f914b74eb3 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OutputOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OutputOperator.java @@ -13,8 +13,6 @@ import java.util.function.Consumer; import java.util.function.Function; -import static 
java.util.stream.Collectors.joining; - /** * Sink operator that calls a given listener for each page received. The listener receives both the page as well as schema information, * i.e. the names of the rows that are outputted. @@ -36,7 +34,7 @@ public SinkOperator get(DriverContext driverContext) { @Override public String describe() { - return "OutputOperator[columns = " + columns.stream().collect(joining(", ")) + "]"; + return OutputOperator.describe(columns); } } @@ -75,10 +73,18 @@ public void close() { @Override public String toString() { + return describe(columns); + } + + private static String describe(List columns) { StringBuilder sb = new StringBuilder(); - sb.append(this.getClass().getSimpleName()).append("["); - sb.append("columns=").append(columns).append(", "); - sb.append("pageConsumer=").append(pageConsumer); + sb.append("OutputOperator").append("["); + sb.append("columns = "); + if (columns.size() <= 10) { + sb.append(columns); + } else { + sb.append('[').append(columns.size()).append(" columns").append(']'); + } sb.append("]"); return sb.toString(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ProjectOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ProjectOperator.java index 6e52a5351de58..4f2790d1d1e53 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ProjectOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ProjectOperator.java @@ -67,8 +67,13 @@ protected Page process(Page page) { } var block = page.getBlock(source); blocks[b++] = block; + block.incRef(); } - return page.newPageAndRelease(blocks); + int positionCount = page.getPositionCount(); + page.releaseBlocks(); + // Use positionCount explicitly to avoid re-computing - also, if the projection is empty, there may be + // no more blocks left to determine the positionCount from. + return new Page(positionCount, blocks); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/SinkOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/SinkOperator.java index f469906379595..93c170cbcfc8a 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/SinkOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/SinkOperator.java @@ -26,7 +26,7 @@ public final Page getOutput() { /** * A factory for creating sink operators. */ - public interface SinkOperatorFactory extends Describable { + public interface SinkOperatorFactory extends OperatorFactory, Describable { /** Creates a new sink operator. 
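The ProjectOperator rewrite above makes page ownership explicit: every projected slot takes its own reference before the source page releases its blocks, and the position count is captured first because an empty projection leaves no block to derive it from. The extra references matter most when one source block feeds several output columns; a sketch of that case (hypothetical projection, simplified from the hunk):

    // Project channel 0 into two output columns. Each slot needs its own
    // reference so releaseBlocks() below cannot free the block while the
    // new page still points at it.
    Block shared = page.getBlock(0);
    Block[] blocks = new Block[] { shared, shared };
    shared.incRef(); // one reference per output slot...
    shared.incRef(); // ...on top of the page's original reference
    int positionCount = page.getPositionCount(); // capture before release
    page.releaseBlocks(); // drops the page's reference; ours keep the block alive
    Page projected = new Page(positionCount, blocks);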
*/ SinkOperator get(DriverContext driverContext); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/StringExtractOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/StringExtractOperator.java index b3b41a542e465..4ffa530bc5d3a 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/StringExtractOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/StringExtractOperator.java @@ -68,8 +68,7 @@ protected Page process(Page page) { blockBuilders[i] = BytesRefBlock.newBlockBuilder(rowsCount, driverContext.blockFactory()); } - try (Block.Ref ref = inputEvaluator.eval(page)) { - BytesRefBlock input = (BytesRefBlock) ref.block(); + try (BytesRefBlock input = (BytesRefBlock) inputEvaluator.eval(page)) { BytesRef spare = new BytesRef(); for (int row = 0; row < rowsCount; row++) { if (input.isNull(row)) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/X-MultivalueDedupe.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/X-MultivalueDedupe.java.st index 21fdb257845d5..169e7aa427717 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/X-MultivalueDedupe.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/X-MultivalueDedupe.java.st @@ -56,14 +56,12 @@ $elseif(long)$ private static final int ALWAYS_COPY_MISSING = 300; $endif$ - private final Block.Ref ref; private final $Type$Block block; private $type$[] work = new $type$[ArrayUtil.oversize(2, $BYTES$)]; private int w; - public MultivalueDedupe$Type$(Block.Ref ref) { - this.ref = ref; - this.block = ($Type$Block) ref.block(); + public MultivalueDedupe$Type$($Type$Block block) { + this.block = block; $if(BytesRef)$ // TODO very large numbers might want a hash based implementation - and for BytesRef that might not be that big fillWork(0, work.length); @@ -74,11 +72,12 @@ $endif$ * Remove duplicate values from each position and write the results to a * {@link Block} using an adaptive algorithm based on the size of the input list. */ - public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { + public $Type$Block dedupeToBlockAdaptive(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; $Type$Block.Builder builder = $Type$Block.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try ($Type$Block.Builder builder = $Type$Block.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -117,7 +116,7 @@ $endif$ } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -127,11 +126,12 @@ $endif$ * case complexity for larger. Prefer {@link #dedupeToBlockAdaptive} * which picks based on the number of elements at each position. 
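The StringExtractOperator hunk above shows the consumer side of dropping Block.Ref: an expression evaluator now hands back a block the caller owns outright, so plain try-with-resources replaces the wrapper. A sketch under that assumption, with the extraction itself elided:

    // Assumes eval(Page) returns a caller-owned Block, as in this diff.
    try (BytesRefBlock input = (BytesRefBlock) inputEvaluator.eval(page)) {
        BytesRef spare = new BytesRef();
        for (int row = 0; row < input.getPositionCount(); row++) {
            if (input.isNull(row)) {
                continue; // no value to extract at this row
            }
            BytesRef value = input.getBytesRef(input.getFirstValueIndex(row), spare);
            // ... run the extraction pattern over `value` ...
        }
    } // closing releases the reference whether the block was shared or fresh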
*/ - public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { + public $Type$Block dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; $Type$Block.Builder builder = $Type$Block.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try ($Type$Block.Builder builder = $Type$Block.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -148,7 +148,7 @@ $endif$ } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -160,11 +160,12 @@ $endif$ * performance is dominated by the {@code n*log n} sort. Prefer * {@link #dedupeToBlockAdaptive} unless you need the results sorted. */ - public Block.Ref dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { + public $Type$Block dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; $Type$Block.Builder builder = $Type$Block.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try ($Type$Block.Builder builder = $Type$Block.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -181,7 +182,7 @@ $endif$ } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -190,11 +191,11 @@ $endif$ * as the grouping block to a {@link GroupingAggregatorFunction}. */ $if(BytesRef)$ - public MultivalueDedupe.HashResult hash(BytesRefHash hash) { + public MultivalueDedupe.HashResult hash(BlockFactory blockFactory, BytesRefHash hash) { $else$ - public MultivalueDedupe.HashResult hash(LongHash hash) { + public MultivalueDedupe.HashResult hash(BlockFactory blockFactory, LongHash hash) { $endif$ - try (IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount())) { + try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(block.getPositionCount())) { boolean sawNull = false; for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeResponse.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeResponse.java index 5904c03a01e44..3509b41b2f4c4 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeResponse.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeResponse.java @@ -20,7 +20,7 @@ import java.util.Objects; public final class ExchangeResponse extends TransportResponse implements Releasable { - private final RefCounted counted = AbstractRefCounted.of(this::close); + private final RefCounted counted = AbstractRefCounted.of(this::closeInternal); private final Page page; private final boolean finished; private boolean pageTaken; @@ -98,6 +98,10 @@ public boolean hasReferences() { @Override public void close() { + counted.decRef(); + } + + private void closeInternal() { if (pageTaken == false && page != null) { page.releaseBlocks(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java index ab9582b20d4aa..8fb38ccf907d6 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java @@ -13,7 +13,6 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; -import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.component.Lifecycle; @@ -22,6 +21,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractAsyncTask; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.compute.OwningChannelActionListener; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockStreamInput; import org.elasticsearch.core.TimeValue; @@ -193,7 +193,7 @@ private class ExchangeTransportAction implements TransportRequestHandler listener = new ChannelActionListener<>(channel); + ActionListener listener = new OwningChannelActionListener<>(channel); final ExchangeSinkHandler sinkHandler = sinks.get(exchangeId); if (sinkHandler == null) { listener.onResponse(new ExchangeResponse(null, true)); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/ResultBuilderForDoc.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/ResultBuilderForDoc.java index 7fb507ffdbead..779e1dece2b33 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/ResultBuilderForDoc.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/ResultBuilderForDoc.java @@ -11,6 +11,8 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DocVector; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.core.Releasables; class ResultBuilderForDoc implements ResultBuilder { private final BlockFactory blockFactory; @@ -42,12 +44,21 @@ public void decodeValue(BytesRef values) { @Override public Block build() { - return new DocVector( - blockFactory.newIntArrayVector(shards, position), - blockFactory.newIntArrayVector(segments, position), - blockFactory.newIntArrayVector(docs, position), - null - ).asBlock(); + boolean success = false; + IntVector shardsVector = null; + IntVector segmentsVector = null; + try { + shardsVector = blockFactory.newIntArrayVector(shards, position); + segmentsVector = blockFactory.newIntArrayVector(segments, position); + var docsVector = blockFactory.newIntArrayVector(docs, position); + var docsBlock = new DocVector(shardsVector, segmentsVector, docsVector, null).asBlock(); + success = true; + return docsBlock; + } finally { + if (success == false) { + Releasables.closeExpectNoException(shardsVector, segmentsVector); + } + } } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java index b45f597553e1b..bfa252ded0420 100644 --- 
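ResultBuilderForDoc#build above adopts a build-or-release shape: each vector allocation can trip the circuit breaker, so whatever was allocated before the failure must be closed. The pattern generalizes to any multi-allocation builder; a sketch mirroring the hunk (array names hypothetical):

    boolean success = false;
    IntVector shards = null;
    IntVector segments = null;
    try {
        shards = blockFactory.newIntArrayVector(shardIds, count);
        segments = blockFactory.newIntArrayVector(segmentIds, count);
        IntVector docs = blockFactory.newIntArrayVector(docIds, count);
        Block result = new DocVector(shards, segments, docs, null).asBlock(); // takes ownership
        success = true;
        return result;
    } finally {
        if (success == false) {
            // Only allocations made before the failure are non-null; as in the
            // hunk, the docs vector is assumed to be the last fallible step.
            Releasables.closeExpectNoException(shards, segments);
        }
    }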
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java @@ -45,10 +45,10 @@ import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; -import org.elasticsearch.compute.lucene.BlockReaderFactories; import org.elasticsearch.compute.lucene.DataPartitioning; import org.elasticsearch.compute.lucene.LuceneOperator; import org.elasticsearch.compute.lucene.LuceneSourceOperator; +import org.elasticsearch.compute.lucene.ValuesSourceReaderOperator; import org.elasticsearch.compute.operator.AbstractPageMappingOperator; import org.elasticsearch.compute.operator.Driver; import org.elasticsearch.compute.operator.DriverContext; @@ -63,6 +63,7 @@ import org.elasticsearch.core.Releasables; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MapperServiceTestCase; +import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.search.internal.SearchContext; @@ -230,9 +231,8 @@ public String toString() { } }, new OrdinalsGroupingOperator( - List.of( - BlockReaderFactories.loaderToFactory(reader, new KeywordFieldMapper.KeywordFieldType("g").blockLoader(null)) - ), + List.of(new KeywordFieldMapper.KeywordFieldType("g").blockLoader(null)), + List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), ElementType.BYTES_REF, 0, gField, diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java index 0a36617f35b18..2a49feeab9a30 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java @@ -39,7 +39,6 @@ import static org.mockito.Mockito.when; public class BasicBlockTests extends ESTestCase { - final CircuitBreaker breaker = new MockBigArrays.LimitedBreaker("esql-test-breaker", ByteSizeValue.ofGb(1)); final BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, mockBreakerService(breaker)); final BlockFactory blockFactory = BlockFactory.getInstance(breaker, bigArrays); @@ -1004,6 +1003,12 @@ static void assertCannotDoubleRelease(Block block) { assertThat(ex.getMessage(), containsString("can't release already released block")); } + static void assertCannotReleaseIfVectorAlreadyReleased(Block block) { + var ex = expectThrows(IllegalStateException.class, () -> block.close()); + assertThat(ex.getMessage(), containsString("can't release block")); + assertThat(ex.getMessage(), containsString("containing already released vector")); + } + static void assertCannotReadFromPage(Page page) { var e = expectThrows(IllegalStateException.class, () -> page.getBlock(0)); assertThat(e.getMessage(), containsString("can't read released block")); @@ -1028,4 +1033,156 @@ static CircuitBreakerService mockBreakerService(CircuitBreaker breaker) { when(breakerService.getBreaker(CircuitBreaker.REQUEST)).thenReturn(breaker); return breakerService; } + + public void testRefCountingArrayBlock() { + Block block = randomArrayBlock(); + assertThat(breaker.getUsed(), greaterThan(0L)); + assertRefCountingBehavior(block); + 
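These new ref-counting tests pin down the RefCounted contract for blocks: close() and decRef() are interchangeable, references are symmetric with incRef()/tryIncRef(), and breaker-tracked memory returns only when the last reference drops. The contract in miniature, assuming a breaker-tracked factory:

    Block b = blockFactory.newConstantIntVector(1, 10).asBlock();
    b.incRef();                      // two references now
    boolean freed = b.decRef();      // false: one reference remains
    assert freed == false;
    b.close();                       // same as decRef(); last reference frees the memory
    assert b.hasReferences() == false;
    assert b.tryIncRef() == false;   // a released block cannot be revived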
assertThat(breaker.getUsed(), is(0L)); + } + + public void testRefCountingConstantNullBlock() { + Block block = blockFactory.newConstantNullBlock(10); + assertThat(breaker.getUsed(), greaterThan(0L)); + assertRefCountingBehavior(block); + assertThat(breaker.getUsed(), is(0L)); + } + + public void testRefCountingDocBlock() { + int positionCount = randomIntBetween(0, 100); + DocBlock block = new DocVector(intVector(positionCount), intVector(positionCount), intVector(positionCount), true).asBlock(); + assertThat(breaker.getUsed(), greaterThan(0L)); + assertRefCountingBehavior(block); + assertThat(breaker.getUsed(), is(0L)); + } + + public void testRefCountingVectorBlock() { + Block block = randomNonDocVector().asBlock(); + assertThat(breaker.getUsed(), greaterThan(0L)); + assertRefCountingBehavior(block); + assertThat(breaker.getUsed(), is(0L)); + } + + // Take a block with exactly 1 reference and assert that ref counting works fine. + static void assertRefCountingBehavior(Block b) { + assertTrue(b.hasReferences()); + int numShallowCopies = randomIntBetween(0, 15); + for (int i = 0; i < numShallowCopies; i++) { + if (randomBoolean()) { + b.incRef(); + } else { + assertTrue(b.tryIncRef()); + } + } + + for (int i = 0; i < numShallowCopies; i++) { + if (randomBoolean()) { + b.close(); + } else { + // closing and decRef'ing must be equivalent + assertFalse(b.decRef()); + } + assertTrue(b.hasReferences()); + } + + if (randomBoolean()) { + b.close(); + } else { + assertTrue(b.decRef()); + } + + assertFalse(b.hasReferences()); + assertFalse(b.tryIncRef()); + + expectThrows(IllegalStateException.class, b::close); + expectThrows(IllegalStateException.class, b::incRef); + } + + public void testReleasedVectorInvalidatesBlockState() { + Vector vector = randomNonDocVector(); + Block block = vector.asBlock(); + + int numRefs = randomIntBetween(1, 10); + for (int i = 0; i < numRefs - 1; i++) { + block.incRef(); + } + + vector.close(); + assertEquals(false, block.tryIncRef()); + expectThrows(IllegalStateException.class, block::close); + expectThrows(IllegalStateException.class, block::incRef); + } + + public void testReleasedDocVectorInvalidatesBlockState() { + int positionCount = randomIntBetween(0, 100); + DocVector vector = new DocVector(intVector(positionCount), intVector(positionCount), intVector(positionCount), true); + DocBlock block = vector.asBlock(); + + int numRefs = randomIntBetween(1, 10); + for (int i = 0; i < numRefs - 1; i++) { + block.incRef(); + } + + vector.close(); + assertEquals(false, block.tryIncRef()); + expectThrows(IllegalStateException.class, block::close); + expectThrows(IllegalStateException.class, block::incRef); + } + + private IntVector intVector(int positionCount) { + return blockFactory.newIntArrayVector(IntStream.range(0, positionCount).toArray(), positionCount); + } + + private Vector randomNonDocVector() { + int positionCount = randomIntBetween(0, 100); + int vectorType = randomIntBetween(0, 4); + + return switch (vectorType) { + case 0 -> blockFactory.newConstantBooleanVector(true, positionCount); + case 1 -> blockFactory.newConstantBytesRefVector(new BytesRef(), positionCount); + case 2 -> blockFactory.newConstantDoubleVector(1.0, positionCount); + case 3 -> blockFactory.newConstantIntVector(1, positionCount); + default -> blockFactory.newConstantLongVector(1L, positionCount); + }; + } + + private Block randomArrayBlock() { + int positionCount = randomIntBetween(0, 100); + int arrayType = randomIntBetween(0, 4); + + return switch (arrayType) { + case 0 -> { + boolean[] 
values = new boolean[positionCount]; + Arrays.fill(values, true); + + yield blockFactory.newBooleanArrayBlock(values, positionCount, new int[] {}, new BitSet(), randomOrdering()); + } + case 1 -> { + BytesRefArray values = new BytesRefArray(positionCount, BigArrays.NON_RECYCLING_INSTANCE); + for (int i = 0; i < positionCount; i++) { + values.append(new BytesRef(randomByteArrayOfLength(between(1, 20)))); + } + + yield blockFactory.newBytesRefArrayBlock(values, positionCount, new int[] {}, new BitSet(), randomOrdering()); + } + case 2 -> { + double[] values = new double[positionCount]; + Arrays.fill(values, 1.0); + + yield blockFactory.newDoubleArrayBlock(values, positionCount, new int[] {}, new BitSet(), randomOrdering()); + } + case 3 -> { + int[] values = new int[positionCount]; + Arrays.fill(values, 1); + + yield blockFactory.newIntArrayBlock(values, positionCount, new int[] {}, new BitSet(), randomOrdering()); + } + default -> { + long[] values = new long[positionCount]; + Arrays.fill(values, 1L); + + yield blockFactory.newLongArrayBlock(values, positionCount, new int[] {}, new BitSet(), randomOrdering()); + } + }; + } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java index 23a257e7afbbe..25cd9ed5b9fe5 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java @@ -205,16 +205,6 @@ public void testPageMultiRelease() { page.releaseBlocks(); } - public void testNewPageAndRelease() { - int positions = randomInt(1024); - var blockA = new IntArrayVector(IntStream.range(0, positions).toArray(), positions).asBlock(); - var blockB = new IntArrayVector(IntStream.range(0, positions).toArray(), positions).asBlock(); - Page page = new Page(blockA, blockB); - Page newPage = page.newPageAndRelease(blockA); - assertThat(blockA.isReleased(), is(false)); - assertThat(blockB.isReleased(), is(true)); - } - BytesRefArray bytesRefArrayOf(String... 
values) { var array = new BytesRefArray(values.length, bigArrays); Arrays.stream(values).map(BytesRef::new).forEach(array::append); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockSerializationTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockSerializationTests.java index 8b958f7bafb8f..e44697ab8534c 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockSerializationTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockSerializationTests.java @@ -159,7 +159,7 @@ public void testSimulateAggs() { function.addRawInput(page); Block[] blocks = new Block[function.intermediateBlockCount()]; try { - function.evaluateIntermediate(blocks, 0); + function.evaluateIntermediate(blocks, 0, driverCtx); Block[] deserBlocks = Arrays.stream(blocks).map(this::uncheckedSerializeDeserializeBlock).toArray(Block[]::new); try { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/SingletonOrdinalsBuilderTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/SingletonOrdinalsBuilderTests.java index ff231a0cc20e0..016d74aa6c299 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/SingletonOrdinalsBuilderTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/SingletonOrdinalsBuilderTests.java @@ -122,6 +122,37 @@ protected DriverContext breakingDriverContext() { // TODO move this to driverCon return new DriverContext(bigArrays, factory); } + public void testAllNull() throws IOException { + BlockFactory factory = breakingDriverContext().blockFactory(); + int count = 1000; + try (Directory directory = newDirectory(); RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + for (int i = 0; i < count; i++) { + for (BytesRef v : new BytesRef[] { new BytesRef("a"), new BytesRef("b"), new BytesRef("c"), new BytesRef("d") }) { + indexWriter.addDocument(List.of(new SortedDocValuesField("f", v))); + } + } + try (IndexReader reader = indexWriter.getReader()) { + for (LeafReaderContext ctx : reader.leaves()) { + SortedDocValues docValues = ctx.reader().getSortedDocValues("f"); + try (SingletonOrdinalsBuilder builder = new SingletonOrdinalsBuilder(factory, docValues, ctx.reader().numDocs())) { + for (int i = 0; i < ctx.reader().maxDoc(); i++) { + if (ctx.reader().getLiveDocs() == null || ctx.reader().getLiveDocs().get(i)) { + assertThat(docValues.advanceExact(i), equalTo(true)); + builder.appendNull(); + } + } + try (BytesRefBlock built = builder.build()) { + for (int p = 0; p < built.getPositionCount(); p++) { + assertThat(built.isNull(p), equalTo(true)); + } + assertThat(built.areAllValuesNull(), equalTo(true)); + } + } + } + } + } + } + @After public void allBreakersEmpty() throws Exception { // first check that all big arrays are released, which can affect breakers diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java index 269a478560bac..424f88413af8f 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java @@ -9,10 +9,16 @@ import 
org.apache.lucene.document.Document; import org.apache.lucene.document.DoubleDocValuesField; +import org.apache.lucene.document.FieldType; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.document.StoredField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.search.MatchAllDocsQuery; @@ -23,6 +29,8 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; @@ -31,6 +39,7 @@ import org.elasticsearch.compute.data.BooleanVector; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DocBlock; import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.DoubleVector; import org.elasticsearch.compute.data.IntBlock; @@ -46,20 +55,44 @@ import org.elasticsearch.compute.operator.PageConsumerOperator; import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.mapper.BlockLoader; import org.elasticsearch.index.mapper.BooleanFieldMapper; +import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.ProvidedIdFieldMapper; +import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.index.mapper.SourceLoader; +import org.elasticsearch.index.mapper.TextFieldMapper; +import org.elasticsearch.index.mapper.TextSearchInfo; +import org.elasticsearch.index.mapper.TsidExtractingIdFieldMapper; +import org.elasticsearch.search.lookup.SearchLookup; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.hamcrest.Matcher; import org.junit.After; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import java.util.Map; +import java.util.Set; import java.util.stream.IntStream; import static org.elasticsearch.compute.lucene.LuceneSourceOperatorTests.mockSearchContext; +import static org.elasticsearch.test.MapMatcher.assertMap; +import static org.elasticsearch.test.MapMatcher.matchesMap; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; /** * Tests for {@link ValuesSourceReaderOperator}. 
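The reworked operator factory used throughout the updated tests below separates what to load from where to load it: a FieldInfo names the field and carries one BlockLoader per shard, while a ShardContext wraps the shard's reader and a source-loader supplier. Constructing one, with `loader` and `reader` assumed in scope:

    var factory = new ValuesSourceReaderOperator.Factory(
        List.of(new ValuesSourceReaderOperator.FieldInfo("long", List.of(loader))),
        List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)),
        0 // channel carrying the DocVector
    );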
Turns off {@link HandleLimitFS} @@ -86,64 +119,124 @@ public void closeIndex() throws IOException { @Override protected Operator.OperatorFactory simple(BigArrays bigArrays) { - return factory(reader, new NumberFieldMapper.NumberFieldType("long", NumberFieldMapper.NumberType.LONG)); + if (reader == null) { + // Init a reader if one hasn't been built, so things don't blow up + try { + initIndex(100, 10); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + return factory(reader, docValuesNumberField("long", NumberFieldMapper.NumberType.LONG)); } static Operator.OperatorFactory factory(IndexReader reader, MappedFieldType ft) { - return new ValuesSourceReaderOperator.ValuesSourceReaderOperatorFactory( - List.of(BlockReaderFactories.loaderToFactory(reader, ft.blockLoader(null))), - 0, - ft.name() + return factory(reader, ft.name(), ft.blockLoader(null)); + } + + static Operator.OperatorFactory factory(IndexReader reader, String name, BlockLoader loader) { + return new ValuesSourceReaderOperator.Factory( + List.of(new ValuesSourceReaderOperator.FieldInfo(name, List.of(loader))), + List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), + 0 ); } @Override protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { - // The test wants more than one segment. We shoot for about 10. - int commitEvery = Math.max(1, size / 10); + // The test wants more than one segment. We shoot for 10. + int commitEvery = Math.max(1, (int) Math.ceil((double) size / 10)); + return simpleInput(driverContext(), size, commitEvery); + } + + private SourceOperator simpleInput(DriverContext context, int size, int commitEvery) { + try { + initIndex(size, commitEvery); + } catch (IOException e) { + throw new RuntimeException(e); + } + var luceneFactory = new LuceneSourceOperator.Factory( + List.of(mockSearchContext(reader)), + ctx -> new MatchAllDocsQuery(), + DataPartitioning.SHARD, + randomIntBetween(1, 10), + size, + LuceneOperator.NO_LIMIT + ); + return luceneFactory.get(context); + } + + private void initIndex(int size, int commitEvery) throws IOException { try ( - RandomIndexWriter writer = new RandomIndexWriter( - random(), + IndexWriter writer = new IndexWriter( directory, - newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE) + newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE).setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH) ) ) { for (int d = 0; d < size; d++) { List doc = new ArrayList<>(); + doc.add(IdFieldMapper.standardIdField("id")); doc.add(new SortedNumericDocValuesField("key", d)); + doc.add(new SortedNumericDocValuesField("int", d)); + doc.add(new SortedNumericDocValuesField("short", (short) d)); + doc.add(new SortedNumericDocValuesField("byte", (byte) d)); doc.add(new SortedNumericDocValuesField("long", d)); doc.add( new KeywordFieldMapper.KeywordField("kwd", new BytesRef(Integer.toString(d)), KeywordFieldMapper.Defaults.FIELD_TYPE) ); + doc.add(new StoredField("stored_kwd", new BytesRef(Integer.toString(d)))); + doc.add(new StoredField("stored_text", Integer.toString(d))); doc.add(new SortedNumericDocValuesField("bool", d % 2 == 0 ? 1 : 0)); doc.add(new SortedNumericDocValuesField("double", NumericUtils.doubleToSortableLong(d / 123_456d))); for (int v = 0; v <= d % 3; v++) { - doc.add( - new KeywordFieldMapper.KeywordField("mv_kwd", new BytesRef(PREFIX[v] + d), KeywordFieldMapper.Defaults.FIELD_TYPE) - ); doc.add(new SortedNumericDocValuesField("mv_bool", v % 2 == 0 ? 
1 : 0)); - doc.add(new SortedNumericDocValuesField("mv_key", 1_000 * d + v)); + doc.add(new SortedNumericDocValuesField("mv_int", 1_000 * d + v)); + doc.add(new SortedNumericDocValuesField("mv_short", (short) (2_000 * d + v))); + doc.add(new SortedNumericDocValuesField("mv_byte", (byte) (3_000 * d + v))); doc.add(new SortedNumericDocValuesField("mv_long", -1_000 * d + v)); doc.add(new SortedNumericDocValuesField("mv_double", NumericUtils.doubleToSortableLong(d / 123_456d + v))); + doc.add( + new KeywordFieldMapper.KeywordField("mv_kwd", new BytesRef(PREFIX[v] + d), KeywordFieldMapper.Defaults.FIELD_TYPE) + ); + doc.add(new StoredField("mv_stored_kwd", new BytesRef(PREFIX[v] + d))); + doc.add(new StoredField("mv_stored_text", PREFIX[v] + d)); + } + XContentBuilder source = JsonXContent.contentBuilder(); + source.startObject(); + source.field("source_kwd", Integer.toString(d)); + source.startArray("mv_source_kwd"); + for (int v = 0; v <= d % 3; v++) { + source.value(PREFIX[v] + d); } + source.endArray(); + source.field("source_text", Integer.toString(d)); + source.startArray("mv_source_text"); + for (int v = 0; v <= d % 3; v++) { + source.value(PREFIX[v] + d); + } + source.endArray(); + source.field("source_long", (long) d); + source.startArray("mv_source_long"); + for (int v = 0; v <= d % 3; v++) { + source.value((long) (-1_000 * d + v)); + } + source.endArray(); + source.field("source_int", d); + source.startArray("mv_source_int"); + for (int v = 0; v <= d % 3; v++) { + source.value(1_000 * d + v); + } + source.endArray(); + + source.endObject(); + doc.add(new StoredField(SourceFieldMapper.NAME, BytesReference.bytes(source).toBytesRef())); writer.addDocument(doc); - if (d % commitEvery == 0) { + if (d % commitEvery == commitEvery - 1) { writer.commit(); } } - reader = writer.getReader(); - } catch (IOException e) { - throw new RuntimeException(e); } - var luceneFactory = new LuceneSourceOperator.Factory( - List.of(mockSearchContext(reader)), - ctx -> new MatchAllDocsQuery(), - randomFrom(DataPartitioning.values()), - randomIntBetween(1, 10), - randomPageSize(), - LuceneOperator.NO_LIMIT - ); - return luceneFactory.get(driverContext()); + reader = DirectoryReader.open(directory); } @Override @@ -184,7 +277,8 @@ public void testLoadAll() { DriverContext driverContext = driverContext(); loadSimpleAndAssert( driverContext, - CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), between(100, 5000))) + CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), between(100, 5000))), + Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING ); } @@ -196,13 +290,18 @@ public void testLoadAllInOnePage() { CannedSourceOperator.mergePages( CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), between(100, 5000))) ) - ) + ), + Block.MvOrdering.UNORDERED ); } public void testEmpty() { DriverContext driverContext = driverContext(); - loadSimpleAndAssert(driverContext, CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), 0))); + loadSimpleAndAssert( + driverContext, + CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), 0)), + Block.MvOrdering.UNORDERED + ); } public void testLoadAllInOnePageShuffled() { @@ -219,99 +318,769 @@ public void testLoadAllInOnePageShuffled() { shuffledBlocks[b] = source.getBlock(b).filter(shuffleArray); } source = new Page(shuffledBlocks); - loadSimpleAndAssert(driverContext, List.of(source)); - } - - private void loadSimpleAndAssert(DriverContext driverContext, List input) { - 
List operators = List.of( - factory(reader, new NumberFieldMapper.NumberFieldType("key", NumberFieldMapper.NumberType.INTEGER)).get(driverContext), - factory(reader, new NumberFieldMapper.NumberFieldType("long", NumberFieldMapper.NumberType.LONG)).get(driverContext), - factory(reader, new KeywordFieldMapper.KeywordFieldType("kwd")).get(driverContext), - factory(reader, new KeywordFieldMapper.KeywordFieldType("mv_kwd")).get(driverContext), - factory(reader, new BooleanFieldMapper.BooleanFieldType("bool")).get(driverContext), - factory(reader, new BooleanFieldMapper.BooleanFieldType("mv_bool")).get(driverContext), - factory(reader, new NumberFieldMapper.NumberFieldType("mv_key", NumberFieldMapper.NumberType.INTEGER)).get(driverContext), - factory(reader, new NumberFieldMapper.NumberFieldType("mv_long", NumberFieldMapper.NumberType.LONG)).get(driverContext), - factory(reader, new NumberFieldMapper.NumberFieldType("double", NumberFieldMapper.NumberType.DOUBLE)).get(driverContext), - factory(reader, new NumberFieldMapper.NumberFieldType("mv_double", NumberFieldMapper.NumberType.DOUBLE)).get(driverContext) + loadSimpleAndAssert(driverContext, List.of(source), Block.MvOrdering.UNORDERED); + } + + private static ValuesSourceReaderOperator.FieldInfo fieldInfo(MappedFieldType ft) { + return new ValuesSourceReaderOperator.FieldInfo(ft.name(), List.of(ft.blockLoader(new MappedFieldType.BlockLoaderContext() { + @Override + public String indexName() { + return "test_index"; + } + + @Override + public SearchLookup lookup() { + throw new UnsupportedOperationException(); + } + + @Override + public Set sourcePaths(String name) { + return Set.of(name); + } + + @Override + public String parentField(String field) { + return null; + } + }))); + } + + private void loadSimpleAndAssert(DriverContext driverContext, List input, Block.MvOrdering docValuesMvOrdering) { + List cases = infoAndChecksForEachType(docValuesMvOrdering); + + List operators = new ArrayList<>(); + operators.add( + new ValuesSourceReaderOperator.Factory( + List.of(fieldInfo(docValuesNumberField("key", NumberFieldMapper.NumberType.INTEGER))), + List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), + 0 + ).get(driverContext) ); + List tests = new ArrayList<>(); + while (cases.isEmpty() == false) { + List b = randomNonEmptySubsetOf(cases); + cases.removeAll(b); + tests.addAll(b); + operators.add( + new ValuesSourceReaderOperator.Factory( + b.stream().map(i -> i.info).toList(), + List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), + 0 + ).get(driverContext) + ); + } List results = drive(operators, input.iterator(), driverContext); assertThat(results, hasSize(input.size())); - for (Page p : results) { - assertThat(p.getBlockCount(), equalTo(11)); - IntVector keys = p.getBlock(1).asVector(); - LongVector longs = p.getBlock(2).asVector(); - BytesRefVector keywords = p.getBlock(3).asVector(); - BytesRefBlock mvKeywords = p.getBlock(4); - BooleanVector bools = p.getBlock(5).asVector(); - BooleanBlock mvBools = p.getBlock(6); - IntBlock mvInts = p.getBlock(7); - LongBlock mvLongs = p.getBlock(8); - DoubleVector doubles = p.getBlock(9).asVector(); - DoubleBlock mvDoubles = p.getBlock(10); - - for (int i = 0; i < p.getPositionCount(); i++) { - int key = keys.getInt(i); - assertThat(longs.getLong(i), equalTo((long) key)); - assertThat(keywords.getBytesRef(i, new BytesRef()).utf8ToString(), equalTo(Integer.toString(key))); - - 
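The rewritten loadSimpleAndAssert above no longer builds one operator per field: it carves the field cases into random non-empty batches, so every run exercises operators that load several fields at once. The partition loop, isolated with its generics restored for readability (factoryFor is a hypothetical shorthand for the Factory construction shown in the hunk):

    while (cases.isEmpty() == false) {
        List<FieldCase> batch = randomNonEmptySubsetOf(cases); // ESTestCase helper
        cases.removeAll(batch);
        tests.addAll(batch);                                   // expected column order
        operators.add(factoryFor(batch).get(driverContext));
    }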
assertThat(mvKeywords.getValueCount(i), equalTo(key % 3 + 1)); - int offset = mvKeywords.getFirstValueIndex(i); - for (int v = 0; v <= key % 3; v++) { - assertThat(mvKeywords.getBytesRef(offset + v, new BytesRef()).utf8ToString(), equalTo(PREFIX[v] + key)); - } - if (key % 3 > 0) { - assertThat(mvKeywords.mvOrdering(), equalTo(Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING)); + for (Page page : results) { + assertThat(page.getBlockCount(), equalTo(tests.size() + 2 /* one for doc and one for keys */)); + IntVector keys = page.getBlock(1).asVector(); + for (int p = 0; p < page.getPositionCount(); p++) { + int key = keys.getInt(p); + for (int i = 0; i < tests.size(); i++) { + try { + tests.get(i).checkResults.check(page.getBlock(2 + i), p, key); + } catch (AssertionError e) { + throw new AssertionError("error checking " + tests.get(i).info.name() + "[" + p + "]: " + e.getMessage(), e); + } } + } + } + for (Operator op : operators) { + assertThat(((ValuesSourceReaderOperator) op).status().pagesProcessed(), equalTo(input.size())); + } + assertDriverContext(driverContext); + } - assertThat(bools.getBoolean(i), equalTo(key % 2 == 0)); - assertThat(mvBools.getValueCount(i), equalTo(key % 3 + 1)); - offset = mvBools.getFirstValueIndex(i); - for (int v = 0; v <= key % 3; v++) { - assertThat(mvBools.getBoolean(offset + v), equalTo(BOOLEANS[key % 3][v])); - } - if (key % 3 > 0) { - assertThat(mvBools.mvOrdering(), equalTo(Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING)); - } + interface CheckResults { + void check(Block block, int position, int key); + } - assertThat(mvInts.getValueCount(i), equalTo(key % 3 + 1)); - offset = mvInts.getFirstValueIndex(i); - for (int v = 0; v <= key % 3; v++) { - assertThat(mvInts.getInt(offset + v), equalTo(1_000 * key + v)); - } - if (key % 3 > 0) { - assertThat(mvInts.mvOrdering(), equalTo(Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING)); - } + interface CheckReaders { + void check(boolean forcedRowByRow, int pageCount, int segmentCount, Map readersBuilt); + } - assertThat(mvLongs.getValueCount(i), equalTo(key % 3 + 1)); - offset = mvLongs.getFirstValueIndex(i); - for (int v = 0; v <= key % 3; v++) { - assertThat(mvLongs.getLong(offset + v), equalTo(-1_000L * key + v)); - } - if (key % 3 > 0) { - assertThat(mvLongs.mvOrdering(), equalTo(Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING)); - } + interface CheckReadersWithName { + void check(String name, boolean forcedRowByRow, int pageCount, int segmentCount, Map readersBuilt); + } + + record FieldCase(ValuesSourceReaderOperator.FieldInfo info, CheckResults checkResults, CheckReadersWithName checkReaders) { + FieldCase(MappedFieldType ft, CheckResults checkResults, CheckReadersWithName checkReaders) { + this(fieldInfo(ft), checkResults, checkReaders); + } + + FieldCase(MappedFieldType ft, CheckResults checkResults, CheckReaders checkReaders) { + this( + ft, + checkResults, + (name, forcedRowByRow, pageCount, segmentCount, readersBuilt) -> checkReaders.check( + forcedRowByRow, + pageCount, + segmentCount, + readersBuilt + ) + ); + } + } + + /** + * Asserts that {@link ValuesSourceReaderOperator#status} claims that only + * the expected readers are built after loading singleton pages. + */ + public void testLoadAllStatus() { + testLoadAllStatus(false); + } + + /** + * Asserts that {@link ValuesSourceReaderOperator#status} claims that only + * the expected readers are built after loading non-singleton pages. 
+ */ + public void testLoadAllStatusAllInOnePage() { + testLoadAllStatus(true); + } + + private void testLoadAllStatus(boolean allInOnePage) { + DriverContext driverContext = driverContext(); + List input = CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), between(100, 5000))); + assertThat(reader.leaves(), hasSize(10)); + assertThat(input, hasSize(10)); + List cases = infoAndChecksForEachType(Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING); + // Build one operator for each field, so we get a unique map to assert on + List operators = cases.stream() + .map( + i -> new ValuesSourceReaderOperator.Factory( + List.of(i.info), + List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), + 0 + ).get(driverContext) + ) + .toList(); + if (allInOnePage) { + input = List.of(CannedSourceOperator.mergePages(input)); + } + drive(operators, input.iterator(), driverContext); + for (int i = 0; i < cases.size(); i++) { + ValuesSourceReaderOperator.Status status = (ValuesSourceReaderOperator.Status) operators.get(i).status(); + assertThat(status.pagesProcessed(), equalTo(input.size())); + FieldCase fc = cases.get(i); + fc.checkReaders.check(fc.info.name(), allInOnePage, input.size(), reader.leaves().size(), status.readersBuilt()); + } + } + + private List infoAndChecksForEachType(Block.MvOrdering docValuesMvOrdering) { + Checks checks = new Checks(docValuesMvOrdering); + List r = new ArrayList<>(); + r.add( + new FieldCase(docValuesNumberField("long", NumberFieldMapper.NumberType.LONG), checks::longs, StatusChecks::longsFromDocValues) + ); + r.add( + new FieldCase( + docValuesNumberField("mv_long", NumberFieldMapper.NumberType.LONG), + checks::mvLongsFromDocValues, + StatusChecks::mvLongsFromDocValues + ) + ); + r.add( + new FieldCase( + docValuesNumberField("missing_long", NumberFieldMapper.NumberType.LONG), + checks::constantNulls, + StatusChecks::constantNulls + ) + ); + r.add( + new FieldCase(sourceNumberField("source_long", NumberFieldMapper.NumberType.LONG), checks::longs, StatusChecks::longsFromSource) + ); + r.add( + new FieldCase( + sourceNumberField("mv_source_long", NumberFieldMapper.NumberType.LONG), + checks::mvLongsUnordered, + StatusChecks::mvLongsFromSource + ) + ); + r.add( + new FieldCase(docValuesNumberField("int", NumberFieldMapper.NumberType.INTEGER), checks::ints, StatusChecks::intsFromDocValues) + ); + r.add( + new FieldCase( + docValuesNumberField("mv_int", NumberFieldMapper.NumberType.INTEGER), + checks::mvIntsFromDocValues, + StatusChecks::mvIntsFromDocValues + ) + ); + r.add( + new FieldCase( + docValuesNumberField("missing_int", NumberFieldMapper.NumberType.INTEGER), + checks::constantNulls, + StatusChecks::constantNulls + ) + ); + r.add( + new FieldCase(sourceNumberField("source_int", NumberFieldMapper.NumberType.INTEGER), checks::ints, StatusChecks::intsFromSource) + ); + r.add( + new FieldCase( + sourceNumberField("mv_source_int", NumberFieldMapper.NumberType.INTEGER), + checks::mvIntsUnordered, + StatusChecks::mvIntsFromSource + ) + ); + r.add( + new FieldCase( + docValuesNumberField("short", NumberFieldMapper.NumberType.SHORT), + checks::shorts, + StatusChecks::shortsFromDocValues + ) + ); + r.add( + new FieldCase( + docValuesNumberField("mv_short", NumberFieldMapper.NumberType.SHORT), + checks::mvShorts, + StatusChecks::mvShortsFromDocValues + ) + ); + r.add( + new FieldCase( + docValuesNumberField("missing_short", NumberFieldMapper.NumberType.SHORT), + checks::constantNulls, + 
StatusChecks::constantNulls + ) + ); + r.add( + new FieldCase(docValuesNumberField("byte", NumberFieldMapper.NumberType.BYTE), checks::bytes, StatusChecks::bytesFromDocValues) + ); + r.add( + new FieldCase( + docValuesNumberField("mv_byte", NumberFieldMapper.NumberType.BYTE), + checks::mvBytes, + StatusChecks::mvBytesFromDocValues + ) + ); + r.add( + new FieldCase( + docValuesNumberField("missing_byte", NumberFieldMapper.NumberType.BYTE), + checks::constantNulls, + StatusChecks::constantNulls + ) + ); + r.add( + new FieldCase( + docValuesNumberField("double", NumberFieldMapper.NumberType.DOUBLE), + checks::doubles, + StatusChecks::doublesFromDocValues + ) + ); + r.add( + new FieldCase( + docValuesNumberField("mv_double", NumberFieldMapper.NumberType.DOUBLE), + checks::mvDoubles, + StatusChecks::mvDoublesFromDocValues + ) + ); + r.add( + new FieldCase( + docValuesNumberField("missing_double", NumberFieldMapper.NumberType.DOUBLE), + checks::constantNulls, + StatusChecks::constantNulls + ) + ); + r.add(new FieldCase(new BooleanFieldMapper.BooleanFieldType("bool"), checks::bools, StatusChecks::boolFromDocValues)); + r.add(new FieldCase(new BooleanFieldMapper.BooleanFieldType("mv_bool"), checks::mvBools, StatusChecks::mvBoolFromDocValues)); + r.add(new FieldCase(new BooleanFieldMapper.BooleanFieldType("missing_bool"), checks::constantNulls, StatusChecks::constantNulls)); + r.add(new FieldCase(new KeywordFieldMapper.KeywordFieldType("kwd"), checks::strings, StatusChecks::keywordsFromDocValues)); + r.add( + new FieldCase( + new KeywordFieldMapper.KeywordFieldType("mv_kwd"), + checks::mvStringsFromDocValues, + StatusChecks::mvKeywordsFromDocValues + ) + ); + r.add(new FieldCase(new KeywordFieldMapper.KeywordFieldType("missing_kwd"), checks::constantNulls, StatusChecks::constantNulls)); + r.add(new FieldCase(storedKeywordField("stored_kwd"), checks::strings, StatusChecks::keywordsFromStored)); + r.add(new FieldCase(storedKeywordField("mv_stored_kwd"), checks::mvStringsUnordered, StatusChecks::mvKeywordsFromStored)); + r.add(new FieldCase(sourceKeywordField("source_kwd"), checks::strings, StatusChecks::keywordsFromSource)); + r.add(new FieldCase(sourceKeywordField("mv_source_kwd"), checks::mvStringsUnordered, StatusChecks::mvKeywordsFromSource)); + r.add(new FieldCase(new TextFieldMapper.TextFieldType("source_text", false), checks::strings, StatusChecks::textFromSource)); + r.add( + new FieldCase( + new TextFieldMapper.TextFieldType("mv_source_text", false), + checks::mvStringsUnordered, + StatusChecks::mvTextFromSource + ) + ); + r.add(new FieldCase(storedTextField("stored_text"), checks::strings, StatusChecks::textFromStored)); + r.add(new FieldCase(storedTextField("mv_stored_text"), checks::mvStringsUnordered, StatusChecks::mvTextFromStored)); + r.add( + new FieldCase( + textFieldWithDelegate("text_with_delegate", new KeywordFieldMapper.KeywordFieldType("kwd")), + checks::strings, + StatusChecks::textWithDelegate + ) + ); + r.add( + new FieldCase( + textFieldWithDelegate("mv_text_with_delegate", new KeywordFieldMapper.KeywordFieldType("mv_kwd")), + checks::mvStringsFromDocValues, + StatusChecks::mvTextWithDelegate + ) + ); + r.add( + new FieldCase( + textFieldWithDelegate("missing_text_with_delegate", new KeywordFieldMapper.KeywordFieldType("missing_kwd")), + checks::constantNulls, + StatusChecks::constantNullTextWithDelegate + ) + ); + r.add(new FieldCase(new ProvidedIdFieldMapper(() -> false).fieldType(), checks::ids, StatusChecks::id)); + r.add(new 
FieldCase(TsidExtractingIdFieldMapper.INSTANCE.fieldType(), checks::ids, StatusChecks::id)); + r.add( + new FieldCase( + new ValuesSourceReaderOperator.FieldInfo("constant_bytes", List.of(BlockLoader.constantBytes(new BytesRef("foo")))), + checks::constantBytes, + StatusChecks::constantBytes + ) + ); + r.add( + new FieldCase( + new ValuesSourceReaderOperator.FieldInfo("null", List.of(BlockLoader.CONSTANT_NULLS)), + checks::constantNulls, + StatusChecks::constantNulls + ) + ); + Collections.shuffle(r, random()); + return r; + } + + record Checks(Block.MvOrdering docValuesMvOrdering) { + void longs(Block block, int position, int key) { + LongVector longs = ((LongBlock) block).asVector(); + assertThat(longs.getLong(position), equalTo((long) key)); + } + + void ints(Block block, int position, int key) { + IntVector ints = ((IntBlock) block).asVector(); + assertThat(ints.getInt(position), equalTo(key)); + } + + void shorts(Block block, int position, int key) { + IntVector ints = ((IntBlock) block).asVector(); + assertThat(ints.getInt(position), equalTo((int) (short) key)); + } + + void bytes(Block block, int position, int key) { + IntVector ints = ((IntBlock) block).asVector(); + assertThat(ints.getInt(position), equalTo((int) (byte) key)); + } + + void doubles(Block block, int position, int key) { + DoubleVector doubles = ((DoubleBlock) block).asVector(); + assertThat(doubles.getDouble(position), equalTo(key / 123_456d)); + } + + void strings(Block block, int position, int key) { + BytesRefVector keywords = ((BytesRefBlock) block).asVector(); + assertThat(keywords.getBytesRef(position, new BytesRef()).utf8ToString(), equalTo(Integer.toString(key))); + } + + void bools(Block block, int position, int key) { + BooleanVector bools = ((BooleanBlock) block).asVector(); + assertThat(bools.getBoolean(position), equalTo(key % 2 == 0)); + } + + void ids(Block block, int position, int key) { + BytesRefVector ids = ((BytesRefBlock) block).asVector(); + assertThat(ids.getBytesRef(position, new BytesRef()).utf8ToString(), equalTo("id")); + } + + void constantBytes(Block block, int position, int key) { + BytesRefVector keywords = ((BytesRefBlock) block).asVector(); + assertThat(keywords.getBytesRef(position, new BytesRef()).utf8ToString(), equalTo("foo")); + } + + void constantNulls(Block block, int position, int key) { + assertTrue(block.areAllValuesNull()); + assertTrue(block.isNull(position)); + } + + void mvLongsFromDocValues(Block block, int position, int key) { + mvLongs(block, position, key, docValuesMvOrdering); + } + + void mvLongsUnordered(Block block, int position, int key) { + mvLongs(block, position, key, Block.MvOrdering.UNORDERED); + } + + private void mvLongs(Block block, int position, int key, Block.MvOrdering expectedMv) { + LongBlock longs = (LongBlock) block; + assertThat(longs.getValueCount(position), equalTo(key % 3 + 1)); + int offset = longs.getFirstValueIndex(position); + for (int v = 0; v <= key % 3; v++) { + assertThat(longs.getLong(offset + v), equalTo(-1_000L * key + v)); + } + if (key % 3 > 0) { + assertThat(longs.mvOrdering(), equalTo(expectedMv)); + } + } + + void mvIntsFromDocValues(Block block, int position, int key) { + mvInts(block, position, key, docValuesMvOrdering); + } - assertThat(doubles.getDouble(i), equalTo(key / 123_456d)); - offset = mvDoubles.getFirstValueIndex(i); - for (int v = 0; v <= key % 3; v++) { - assertThat(mvDoubles.getDouble(offset + v), equalTo(key / 123_456d + v)); + void mvIntsUnordered(Block block, int position, int key) { + mvInts(block, 
position, key, Block.MvOrdering.UNORDERED); + } + + private void mvInts(Block block, int position, int key, Block.MvOrdering expectedMv) { + IntBlock ints = (IntBlock) block; + assertThat(ints.getValueCount(position), equalTo(key % 3 + 1)); + int offset = ints.getFirstValueIndex(position); + for (int v = 0; v <= key % 3; v++) { + assertThat(ints.getInt(offset + v), equalTo(1_000 * key + v)); + } + if (key % 3 > 0) { + assertThat(ints.mvOrdering(), equalTo(expectedMv)); + } + } + + void mvShorts(Block block, int position, int key) { + IntBlock ints = (IntBlock) block; + assertThat(ints.getValueCount(position), equalTo(key % 3 + 1)); + int offset = ints.getFirstValueIndex(position); + for (int v = 0; v <= key % 3; v++) { + assertThat(ints.getInt(offset + v), equalTo((int) (short) (2_000 * key + v))); + } + if (key % 3 > 0) { + assertThat(ints.mvOrdering(), equalTo(docValuesMvOrdering)); + } + } + + void mvBytes(Block block, int position, int key) { + IntBlock ints = (IntBlock) block; + assertThat(ints.getValueCount(position), equalTo(key % 3 + 1)); + int offset = ints.getFirstValueIndex(position); + for (int v = 0; v <= key % 3; v++) { + assertThat(ints.getInt(offset + v), equalTo((int) (byte) (3_000 * key + v))); + } + if (key % 3 > 0) { + assertThat(ints.mvOrdering(), equalTo(docValuesMvOrdering)); + } + } + + void mvDoubles(Block block, int position, int key) { + DoubleBlock doubles = (DoubleBlock) block; + int offset = doubles.getFirstValueIndex(position); + for (int v = 0; v <= key % 3; v++) { + assertThat(doubles.getDouble(offset + v), equalTo(key / 123_456d + v)); + } + if (key % 3 > 0) { + assertThat(doubles.mvOrdering(), equalTo(docValuesMvOrdering)); + } + } + + void mvStringsFromDocValues(Block block, int position, int key) { + mvStrings(block, position, key, docValuesMvOrdering); + } + + void mvStringsUnordered(Block block, int position, int key) { + mvStrings(block, position, key, Block.MvOrdering.UNORDERED); + } + + void mvStrings(Block block, int position, int key, Block.MvOrdering expectedMv) { + BytesRefBlock text = (BytesRefBlock) block; + assertThat(text.getValueCount(position), equalTo(key % 3 + 1)); + int offset = text.getFirstValueIndex(position); + for (int v = 0; v <= key % 3; v++) { + assertThat(text.getBytesRef(offset + v, new BytesRef()).utf8ToString(), equalTo(PREFIX[v] + key)); + } + if (key % 3 > 0) { + assertThat(text.mvOrdering(), equalTo(expectedMv)); + } + } + + void mvBools(Block block, int position, int key) { + BooleanBlock bools = (BooleanBlock) block; + assertThat(bools.getValueCount(position), equalTo(key % 3 + 1)); + int offset = bools.getFirstValueIndex(position); + for (int v = 0; v <= key % 3; v++) { + assertThat(bools.getBoolean(offset + v), equalTo(BOOLEANS[key % 3][v])); + } + if (key % 3 > 0) { + assertThat(bools.mvOrdering(), equalTo(docValuesMvOrdering)); + } + } + } + + class StatusChecks { + static void longsFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + docValues("long", "Longs", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void longsFromSource(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + source("source_long", "Longs", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void intsFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + docValues("int", "Ints", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void intsFromSource(boolean forcedRowByRow, int pageCount, int segmentCount, Map 
readers) { + source("source_int", "Ints", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void shortsFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + docValues("short", "Ints", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void bytesFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + docValues("byte", "Ints", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void doublesFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + docValues("double", "Doubles", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void boolFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + docValues("bool", "Booleans", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void keywordsFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + docValues("kwd", "Ordinals", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void keywordsFromStored(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + stored("stored_kwd", "Bytes", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void keywordsFromSource(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + source("source_kwd", "Bytes", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void textFromSource(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + source("source_text", "Bytes", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void textFromStored(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + stored("stored_text", "Bytes", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvLongsFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + mvDocValues("mv_long", "Longs", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvLongsFromSource(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + source("mv_source_long", "Longs", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvIntsFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + mvDocValues("mv_int", "Ints", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvIntsFromSource(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + source("mv_source_int", "Ints", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvShortsFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + mvDocValues("mv_short", "Ints", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvBytesFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + mvDocValues("mv_byte", "Ints", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvDoublesFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + mvDocValues("mv_double", "Doubles", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvBoolFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + mvDocValues("mv_bool", "Booleans", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvKeywordsFromDocValues(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + mvDocValues("mv_kwd", 
"Ordinals", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvKeywordsFromStored(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + stored("mv_stored_kwd", "Bytes", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvKeywordsFromSource(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + source("mv_source_kwd", "Bytes", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvTextFromStored(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + stored("mv_stored_text", "Bytes", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void mvTextFromSource(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + source("mv_source_text", "Bytes", forcedRowByRow, pageCount, segmentCount, readers); + } + + static void textWithDelegate(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + if (forcedRowByRow) { + assertMap( + readers, + matchesMap().entry( + "text_with_delegate:row_stride:Delegating[to=kwd, impl=BlockDocValuesReader.SingletonOrdinals]", + segmentCount + ) + ); + } else { + assertMap( + readers, + matchesMap().entry( + "text_with_delegate:column_at_a_time:Delegating[to=kwd, impl=BlockDocValuesReader.SingletonOrdinals]", + lessThanOrEqualTo(pageCount) + ) + ); + } + } + + static void mvTextWithDelegate(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + if (forcedRowByRow) { + assertMap( + readers, + matchesMap().entry( + "mv_text_with_delegate:row_stride:Delegating[to=mv_kwd, impl=BlockDocValuesReader.Ordinals]", + equalTo(segmentCount) + ) + ); + } else { + assertMap( + readers, + matchesMap().entry( + "mv_text_with_delegate:column_at_a_time:Delegating[to=mv_kwd, impl=BlockDocValuesReader.Ordinals]", + lessThanOrEqualTo(pageCount) + ) + ); + } + } + + static void constantNullTextWithDelegate(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + if (forcedRowByRow) { + assertMap( + readers, + matchesMap().entry( + "missing_text_with_delegate:row_stride:Delegating[to=missing_kwd, impl=constant_nulls]", + segmentCount + ) + ); + } else { + assertMap( + readers, + matchesMap().entry( + "missing_text_with_delegate:column_at_a_time:Delegating[to=missing_kwd, impl=constant_nulls]", + lessThanOrEqualTo(pageCount) + ) + ); + } + } + + private static void docValues( + String name, + String type, + boolean forcedRowByRow, + int pageCount, + int segmentCount, + Map readers + ) { + if (forcedRowByRow) { + assertMap( + readers, + matchesMap().entry(name + ":row_stride:BlockDocValuesReader.Singleton" + type, lessThanOrEqualTo(segmentCount)) + ); + } else { + assertMap( + readers, + matchesMap().entry(name + ":column_at_a_time:BlockDocValuesReader.Singleton" + type, lessThanOrEqualTo(pageCount)) + ); + } + } + + private static void mvDocValues( + String name, + String type, + boolean forcedRowByRow, + int pageCount, + int segmentCount, + Map readers + ) { + if (forcedRowByRow) { + Integer singletons = (Integer) readers.remove(name + ":row_stride:BlockDocValuesReader.Singleton" + type); + if (singletons != null) { + segmentCount -= singletons; } - if (key % 3 > 0) { - assertThat(mvDoubles.mvOrdering(), equalTo(Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING)); + assertMap(readers, matchesMap().entry(name + ":row_stride:BlockDocValuesReader." 
+ type, segmentCount)); + } else { + Integer singletons = (Integer) readers.remove(name + ":column_at_a_time:BlockDocValuesReader.Singleton" + type); + if (singletons != null) { + pageCount -= singletons; } + assertMap( + readers, + matchesMap().entry(name + ":column_at_a_time:BlockDocValuesReader." + type, lessThanOrEqualTo(pageCount)) + ); } } - for (Operator op : operators) { - assertThat(((ValuesSourceReaderOperator) op).status().pagesProcessed(), equalTo(input.size())); + + static void id(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + stored("_id", "Id", forcedRowByRow, pageCount, segmentCount, readers); + } + + private static void source(String name, String type, boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + Matcher count; + if (forcedRowByRow) { + count = equalTo(segmentCount); + } else { + count = lessThanOrEqualTo(pageCount); + Integer columnAttempts = (Integer) readers.remove(name + ":column_at_a_time:null"); + assertThat(columnAttempts, not(nullValue())); + } + + Integer sequentialCount = (Integer) readers.remove("stored_fields[requires_source:true, fields:0, sequential: true]"); + Integer nonSequentialCount = (Integer) readers.remove("stored_fields[requires_source:true, fields:0, sequential: false]"); + int totalReaders = (sequentialCount == null ? 0 : sequentialCount) + (nonSequentialCount == null ? 0 : nonSequentialCount); + assertThat(totalReaders, count); + + assertMap(readers, matchesMap().entry(name + ":row_stride:BlockSourceReader." + type, count)); + } + + private static void stored(String name, String type, boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + Matcher count; + if (forcedRowByRow) { + count = equalTo(segmentCount); + } else { + count = lessThanOrEqualTo(pageCount); + Integer columnAttempts = (Integer) readers.remove(name + ":column_at_a_time:null"); + assertThat(columnAttempts, not(nullValue())); + } + + Integer sequentialCount = (Integer) readers.remove("stored_fields[requires_source:false, fields:1, sequential: true]"); + Integer nonSequentialCount = (Integer) readers.remove("stored_fields[requires_source:false, fields:1, sequential: false]"); + int totalReaders = (sequentialCount == null ? 0 : sequentialCount) + (nonSequentialCount == null ? 0 : nonSequentialCount); + assertThat(totalReaders, count); + + assertMap(readers, matchesMap().entry(name + ":row_stride:BlockStoredFieldsReader." 
+ type, count)); + } + + static void constantBytes(String name, boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + if (forcedRowByRow) { + assertMap(readers, matchesMap().entry(name + ":row_stride:constant[[66 6f 6f]]", segmentCount)); + } else { + assertMap(readers, matchesMap().entry(name + ":column_at_a_time:constant[[66 6f 6f]]", lessThanOrEqualTo(pageCount))); + } + } + + static void constantNulls(String name, boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + if (forcedRowByRow) { + assertMap(readers, matchesMap().entry(name + ":row_stride:constant_nulls", segmentCount)); + } else { + assertMap(readers, matchesMap().entry(name + ":column_at_a_time:constant_nulls", lessThanOrEqualTo(pageCount))); + } } - assertDriverContext(driverContext); } - public void testValuesSourceReaderOperatorWithNulls() throws IOException { - MappedFieldType intFt = new NumberFieldMapper.NumberFieldType("i", NumberFieldMapper.NumberType.INTEGER); - MappedFieldType longFt = new NumberFieldMapper.NumberFieldType("j", NumberFieldMapper.NumberType.LONG); - MappedFieldType doubleFt = new NumberFieldMapper.NumberFieldType("d", NumberFieldMapper.NumberType.DOUBLE); + public void testWithNulls() throws IOException { + MappedFieldType intFt = docValuesNumberField("i", NumberFieldMapper.NumberType.INTEGER); + MappedFieldType longFt = docValuesNumberField("j", NumberFieldMapper.NumberType.LONG); + MappedFieldType doubleFt = docValuesNumberField("d", NumberFieldMapper.NumberType.DOUBLE); MappedFieldType kwFt = new KeywordFieldMapper.KeywordFieldType("kw"); NumericDocValuesField intField = new NumericDocValuesField(intFt.name(), 0); @@ -384,4 +1153,164 @@ public void testValuesSourceReaderOperatorWithNulls() throws IOException { } assertDriverContext(driverContext); } + + private NumberFieldMapper.NumberFieldType docValuesNumberField(String name, NumberFieldMapper.NumberType type) { + return new NumberFieldMapper.NumberFieldType(name, type); + } + + private NumberFieldMapper.NumberFieldType sourceNumberField(String name, NumberFieldMapper.NumberType type) { + return new NumberFieldMapper.NumberFieldType( + name, + type, + randomBoolean(), + false, + false, + randomBoolean(), + null, + Map.of(), + null, + false, + null, + randomFrom(IndexMode.values()) + ); + } + + private KeywordFieldMapper.KeywordFieldType storedKeywordField(String name) { + FieldType ft = new FieldType(KeywordFieldMapper.Defaults.FIELD_TYPE); + ft.setDocValuesType(DocValuesType.NONE); + ft.setStored(true); + ft.freeze(); + return new KeywordFieldMapper.KeywordFieldType( + name, + ft, + Lucene.KEYWORD_ANALYZER, + Lucene.KEYWORD_ANALYZER, + Lucene.KEYWORD_ANALYZER, + new KeywordFieldMapper.Builder(name, IndexVersion.current()).docValues(false), + true // TODO randomize - load from stored keyword fields if stored even in synthetic source + ); + } + + private KeywordFieldMapper.KeywordFieldType sourceKeywordField(String name) { + FieldType ft = new FieldType(KeywordFieldMapper.Defaults.FIELD_TYPE); + ft.setDocValuesType(DocValuesType.NONE); + ft.setStored(false); + ft.freeze(); + return new KeywordFieldMapper.KeywordFieldType( + name, + ft, + Lucene.KEYWORD_ANALYZER, + Lucene.KEYWORD_ANALYZER, + Lucene.KEYWORD_ANALYZER, + new KeywordFieldMapper.Builder(name, IndexVersion.current()).docValues(false), + false + ); + } + + private TextFieldMapper.TextFieldType storedTextField(String name) { + return new TextFieldMapper.TextFieldType( + name, + false, + true, + new 
TextSearchInfo(TextFieldMapper.Defaults.FIELD_TYPE, null, Lucene.STANDARD_ANALYZER, Lucene.STANDARD_ANALYZER), + true, // TODO randomize - if the field is stored we should load from the stored field even if there is source + null, + Map.of(), + false, + false + ); + } + + private TextFieldMapper.TextFieldType textFieldWithDelegate(String name, KeywordFieldMapper.KeywordFieldType delegate) { + return new TextFieldMapper.TextFieldType( + name, + false, + false, + new TextSearchInfo(TextFieldMapper.Defaults.FIELD_TYPE, null, Lucene.STANDARD_ANALYZER, Lucene.STANDARD_ANALYZER), + randomBoolean(), + delegate, + Map.of(), + false, + false + ); + } + + public void testNullsShared() { + DriverContext driverContext = driverContext(); + int[] pages = new int[] { 0 }; + try ( + Driver d = new Driver( + driverContext, + simpleInput(driverContext.blockFactory(), 10), + List.of( + new ValuesSourceReaderOperator.Factory( + List.of( + new ValuesSourceReaderOperator.FieldInfo("null1", List.of(BlockLoader.CONSTANT_NULLS)), + new ValuesSourceReaderOperator.FieldInfo("null2", List.of(BlockLoader.CONSTANT_NULLS)) + ), + List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), + 0 + ).get(driverContext) + ), + new PageConsumerOperator(page -> { + try { + assertThat(page.getBlockCount(), equalTo(3)); + assertThat(page.getBlock(1).areAllValuesNull(), equalTo(true)); + assertThat(page.getBlock(2).areAllValuesNull(), equalTo(true)); + assertThat(page.getBlock(1), sameInstance(page.getBlock(2))); + pages[0]++; + } finally { + page.releaseBlocks(); + } + }), + () -> {} + ) + ) { + runDriver(d); + } + assertThat(pages[0], greaterThan(0)); + assertDriverContext(driverContext); + } + + public void testSequentialStoredFieldsTooSmall() { + testSequentialStoredFields(false, between(1, ValuesSourceReaderOperator.SEQUENTIAL_BOUNDARY - 1)); + } + + public void testSequentialStoredFieldsBigEnough() { + testSequentialStoredFields( + true, + between(ValuesSourceReaderOperator.SEQUENTIAL_BOUNDARY, ValuesSourceReaderOperator.SEQUENTIAL_BOUNDARY * 2) + ); + } + + private void testSequentialStoredFields(boolean sequential, int docCount) { + DriverContext driverContext = driverContext(); + List source = CannedSourceOperator.collectPages(simpleInput(driverContext, docCount, docCount)); + assertThat(source, hasSize(1)); // We want one page for simpler assertions, and we want them all in one segment + assertTrue(source.get(0).getBlock(0).asVector().singleSegmentNonDecreasing()); + Operator op = new ValuesSourceReaderOperator.Factory( + List.of( + fieldInfo(docValuesNumberField("key", NumberFieldMapper.NumberType.INTEGER)), + fieldInfo(storedTextField("stored_text")) + ), + List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), + 0 + ).get(driverContext); + List results = drive(op, source.iterator(), driverContext); + Checks checks = new Checks(Block.MvOrdering.UNORDERED); + IntVector keys = results.get(0).getBlock(1).asVector(); + for (int p = 0; p < results.get(0).getPositionCount(); p++) { + int key = keys.getInt(p); + checks.strings(results.get(0).getBlock(2), p, key); + } + ValuesSourceReaderOperator.Status status = (ValuesSourceReaderOperator.Status) op.status(); + assertMap( + status.readersBuilt(), + matchesMap().entry("key:column_at_a_time:BlockDocValuesReader.SingletonInts", 1) + .entry("stored_text:column_at_a_time:null", 1) + .entry("stored_text:row_stride:BlockStoredFieldsReader.Bytes", 1) + .entry("stored_fields[requires_source:false, 
fields:1, sequential: " + sequential + "]", 1) + ); + assertDriverContext(driverContext); + } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java index 4c808907cda91..290a16f83ed38 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java @@ -8,22 +8,27 @@ package org.elasticsearch.compute.operator; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.MockBlockFactory; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.TestThreadPool; @@ -36,8 +41,11 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.LongStream; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class AsyncOperatorTests extends ESTestCase { @@ -80,16 +88,17 @@ protected int remaining() { @Override protected Page createPage(int positionOffset, int length) { - LongVector.Builder builder = LongVector.newVectorBuilder(length); - for (int i = 0; i < length; i++) { - builder.appendLong(ids.get(currentPosition++)); + try (LongVector.Builder builder = blockFactory.newLongVectorBuilder(length)) { + for (int i = 0; i < length; i++) { + builder.appendLong(ids.get(currentPosition++)); + } + return new Page(builder.build().asBlock()); } - return new Page(builder.build().asBlock()); } }; int maxConcurrentRequests = randomIntBetween(1, 10); - AsyncOperator asyncOperator = new AsyncOperator(maxConcurrentRequests) { - final LookupService lookupService = new LookupService(threadPool, dict, maxConcurrentRequests); + AsyncOperator asyncOperator = new AsyncOperator(driverContext, maxConcurrentRequests) { + final LookupService lookupService = new LookupService(threadPool, driverContext.blockFactory(), dict, maxConcurrentRequests); @Override protected void performAsync(Page inputPage, ActionListener listener) { @@ -97,84 +106,159 @@ protected void performAsync(Page inputPage, ActionListener 
listener) {
             }

             @Override
-            public void close() {
+            public void doClose() {

             }
         };
-        Iterator<Long> it = ids.iterator();
+        List<Operator> intermediateOperators = new ArrayList<>();
+        intermediateOperators.add(asyncOperator);
+        final Iterator<Long> it;
+        if (randomBoolean()) {
+            int limit = between(1, ids.size());
+            it = ids.subList(0, limit).iterator();
+            intermediateOperators.add(new LimitOperator(limit));
+        } else {
+            it = ids.iterator();
+        }
         SinkOperator outputOperator = new PageConsumerOperator(page -> {
-            assertThat(page.getBlockCount(), equalTo(2));
-            LongBlock b1 = page.getBlock(0);
-            BytesRefBlock b2 = page.getBlock(1);
-            BytesRef scratch = new BytesRef();
-            for (int i = 0; i < page.getPositionCount(); i++) {
-                assertTrue(it.hasNext());
-                long key = b1.getLong(i);
-                assertThat(key, equalTo(it.next()));
-                String v = dict.get(key);
-                if (v == null) {
-                    assertTrue(b2.isNull(i));
-                } else {
-                    assertThat(b2.getBytesRef(i, scratch), equalTo(new BytesRef(v)));
+            try (Releasable ignored = page::releaseBlocks) {
+                assertThat(page.getBlockCount(), equalTo(2));
+                LongBlock b1 = page.getBlock(0);
+                BytesRefBlock b2 = page.getBlock(1);
+                BytesRef scratch = new BytesRef();
+                for (int i = 0; i < page.getPositionCount(); i++) {
+                    assertTrue(it.hasNext());
+                    long key = b1.getLong(i);
+                    assertThat(key, equalTo(it.next()));
+                    String v = dict.get(key);
+                    if (v == null) {
+                        assertTrue(b2.isNull(i));
+                    } else {
+                        assertThat(b2.getBytesRef(i, scratch), equalTo(new BytesRef(v)));
+                    }
                 }
             }
         });
         PlainActionFuture<Void> future = new PlainActionFuture<>();
-        Driver driver = new Driver(driverContext, sourceOperator, List.of(asyncOperator), outputOperator, () -> assertFalse(it.hasNext()));
+        Driver driver = new Driver(driverContext, sourceOperator, intermediateOperators, outputOperator, () -> assertFalse(it.hasNext()));
         Driver.start(threadPool.getThreadContext(), threadPool.executor(ESQL_TEST_EXECUTOR), driver, between(1, 10000), future);
         future.actionGet();
     }

     public void testStatus() {
+        DriverContext driverContext = driverContext();
         Map<Page, ActionListener<Page>> handlers = new HashMap<>();
-        AsyncOperator operator = new AsyncOperator(2) {
+        AsyncOperator operator = new AsyncOperator(driverContext, 2) {
             @Override
             protected void performAsync(Page inputPage, ActionListener<Page> listener) {
                 handlers.put(inputPage, listener);
             }

             @Override
-            public void close() {
+            protected void doClose() {

             }
         };
         assertTrue(operator.isBlocked().isDone());
         assertTrue(operator.needsInput());
-        Page page1 = new Page(Block.constantNullBlock(1));
+        Page page1 = new Page(driverContext.blockFactory().newConstantNullBlock(1));
         operator.addInput(page1);
         assertFalse(operator.isBlocked().isDone());
         SubscribableListener<Void> blocked1 = operator.isBlocked();
         assertTrue(operator.needsInput());
-        Page page2 = new Page(Block.constantNullBlock(2));
+        Page page2 = new Page(driverContext.blockFactory().newConstantNullBlock(2));
         operator.addInput(page2);
         assertFalse(operator.needsInput()); // reached the max outstanding requests
         assertFalse(operator.isBlocked().isDone());
         assertThat(operator.isBlocked(), equalTo(blocked1));
-        Page page3 = new Page(Block.constantNullBlock(3));
+        Page page3 = new Page(driverContext.blockFactory().newConstantNullBlock(3));
         handlers.remove(page1).onResponse(page3);
+        page1.releaseBlocks();
         assertFalse(operator.needsInput()); // still have 2 outstanding requests
         assertTrue(operator.isBlocked().isDone());
         assertTrue(blocked1.isDone());
-        assertThat(operator.getOutput(), equalTo(page3));
+        page3.releaseBlocks();
+        assertTrue(operator.needsInput());
         assertFalse(operator.isBlocked().isDone());
+
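The testStatus sequence above pins down AsyncOperator's backpressure contract: pages are accepted until maxOutstanding requests are in flight, isBlocked() stays pending while nothing is ready to emit, and a completed response unblocks the driver until its page is drained. A pure-JDK sketch of that contract follows; the AsyncGate class, its field names, and the CompletableFuture plumbing are illustrative assumptions, not the ESQL implementation.

import java.util.concurrent.CompletableFuture;

// Illustrative sketch of the backpressure contract testStatus exercises;
// not the ESQL AsyncOperator class.
final class AsyncGate {
    private final int maxOutstanding;
    private int inFlight;   // requests sent, no response yet
    private int buffered;   // responses ready but not yet drained
    private CompletableFuture<Void> blocked = CompletableFuture.completedFuture(null);

    AsyncGate(int maxOutstanding) {
        this.maxOutstanding = maxOutstanding;
    }

    /** Mirrors needsInput(): capacity counts both in-flight requests and undrained responses. */
    synchronized boolean needsInput() {
        return inFlight + buffered < maxOutstanding;
    }

    /** Mirrors addInput(): registers one async request; callers must check needsInput() first. */
    synchronized void addInput() {
        if (needsInput() == false) {
            throw new IllegalStateException("check needsInput() before adding a page");
        }
        inFlight++;
    }

    /** Mirrors isBlocked(): pending while requests are in flight and no response is buffered. */
    synchronized CompletableFuture<Void> isBlocked() {
        if (buffered > 0 || inFlight == 0) {
            return CompletableFuture.completedFuture(null);
        }
        if (blocked.isDone()) {
            blocked = new CompletableFuture<>(); // fresh barrier; reused until it completes
        }
        return blocked;
    }

    /** A response arrived: it is now ready for getOutput(), so any waiter wakes up. */
    synchronized void onResponse() {
        inFlight--;
        buffered++;
        blocked.complete(null);
    }

    /** Mirrors getOutput() draining one buffered response, restoring input capacity. */
    synchronized void drainOutput() {
        if (buffered > 0) {
            buffered--;
        }
    }
}

With maxOutstanding = 2, this sketch reproduces the assertions above: two addInput() calls saturate the gate and isBlocked() returns the same pending future both times, onResponse() completes it while needsInput() stays false, and drainOutput() restores input capacity.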
Page page4 = new Page(driverContext.blockFactory().newConstantNullBlock(3)); + handlers.remove(page2).onResponse(page4); + page2.releaseBlocks(); + assertThat(operator.getOutput(), equalTo(page4)); + page4.releaseBlocks(); operator.close(); } + public void testFailure() throws Exception { + DriverContext driverContext = driverContext(); + final SequenceLongBlockSourceOperator sourceOperator = new SequenceLongBlockSourceOperator( + driverContext.blockFactory(), + LongStream.range(0, 100 * 1024) + ); + int maxConcurrentRequests = randomIntBetween(1, 10); + AtomicBoolean failed = new AtomicBoolean(); + AsyncOperator asyncOperator = new AsyncOperator(driverContext, maxConcurrentRequests) { + @Override + protected void performAsync(Page inputPage, ActionListener listener) { + ActionRunnable command = new ActionRunnable<>(listener) { + @Override + protected void doRun() { + if (randomInt(100) < 10) { + failed.set(true); + throw new ElasticsearchException("simulated"); + } + int positionCount = inputPage.getBlock(0).getPositionCount(); + IntBlock block = driverContext.blockFactory().newConstantIntBlockWith(between(1, 100), positionCount); + listener.onResponse(inputPage.appendPage(new Page(block))); + } + }; + if (randomBoolean()) { + command.run(); + } else { + TimeValue delay = TimeValue.timeValueMillis(randomIntBetween(0, 50)); + threadPool.schedule(command, delay, threadPool.executor(ESQL_TEST_EXECUTOR)); + } + } + + @Override + protected void doClose() { + + } + }; + SinkOperator outputOperator = new PageConsumerOperator(Page::releaseBlocks); + PlainActionFuture future = new PlainActionFuture<>(); + Driver driver = new Driver(driverContext, sourceOperator, List.of(asyncOperator), outputOperator, () -> {}); + Driver.start(threadPool.getThreadContext(), threadPool.executor(ESQL_TEST_EXECUTOR), driver, between(1, 1000), future); + assertBusy(() -> assertTrue(future.isDone())); + if (failed.get()) { + ElasticsearchException error = expectThrows(ElasticsearchException.class, future::actionGet); + assertThat(error.getMessage(), containsString("simulated")); + error = expectThrows(ElasticsearchException.class, asyncOperator::isFinished); + assertThat(error.getMessage(), containsString("simulated")); + error = expectThrows(ElasticsearchException.class, asyncOperator::getOutput); + assertThat(error.getMessage(), containsString("simulated")); + } else { + assertTrue(asyncOperator.isFinished()); + assertNull(asyncOperator.getOutput()); + } + } + static class LookupService { private final ThreadPool threadPool; private final Map dict; private final int maxConcurrentRequests; private final AtomicInteger pendingRequests = new AtomicInteger(); + private final BlockFactory blockFactory; - LookupService(ThreadPool threadPool, Map dict, int maxConcurrentRequests) { + LookupService(ThreadPool threadPool, BlockFactory blockFactory, Map dict, int maxConcurrentRequests) { this.threadPool = threadPool; this.dict = dict; + this.blockFactory = blockFactory; this.maxConcurrentRequests = maxConcurrentRequests; } @@ -184,20 +268,21 @@ public void lookupAsync(Page input, ActionListener listener) { ActionRunnable command = new ActionRunnable<>(listener) { @Override protected void doRun() { + int current = pendingRequests.decrementAndGet(); + assert current >= 0 : "pending requests must be non-negative"; LongBlock ids = input.getBlock(0); - BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(ids.getPositionCount()); - for (int i = 0; i < ids.getPositionCount(); i++) { - String v = dict.get(ids.getLong(i)); - if 
(v != null) { - builder.appendBytesRef(new BytesRef(v)); - } else { - builder.appendNull(); + try (BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(ids.getPositionCount())) { + for (int i = 0; i < ids.getPositionCount(); i++) { + String v = dict.get(ids.getLong(i)); + if (v != null) { + builder.appendBytesRef(new BytesRef(v)); + } else { + builder.appendNull(); + } } + Page result = input.appendPage(new Page(builder.build())); + listener.onResponse(result); } - int current = pendingRequests.decrementAndGet(); - assert current >= 0 : "pending requests must be non-negative"; - Page result = input.appendBlock(builder.build()); - listener.onResponse(result); } }; TimeValue delay = TimeValue.timeValueMillis(randomIntBetween(0, 50)); @@ -205,13 +290,30 @@ protected void doRun() { } } - /** - * A {@link DriverContext} with a nonBreakingBigArrays. - */ - DriverContext driverContext() { - return new DriverContext( - new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService()).withCircuitBreaking(), - BlockFactory.getNonBreakingInstance() - ); + protected DriverContext driverContext() { + BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, ByteSizeValue.ofGb(1)).withCircuitBreaking(); + CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); + breakers.add(breaker); + BlockFactory factory = new MockBlockFactory(breaker, bigArrays); + blockFactories.add(factory); + return new DriverContext(bigArrays, factory); + } + + private final List breakers = new ArrayList<>(); + private final List blockFactories = new ArrayList<>(); + + @After + public void allBreakersEmpty() throws Exception { + // first check that all big arrays are released, which can affect breakers + MockBigArrays.ensureAllArraysAreReleased(); + + for (CircuitBreaker breaker : breakers) { + for (var factory : blockFactories) { + if (factory instanceof MockBlockFactory mockBlockFactory) { + mockBlockFactory.ensureAllBlocksAreReleased(); + } + } + assertThat("Unexpected used in breaker: " + breaker, breaker.getUsed(), equalTo(0L)); + } } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnExtractOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnExtractOperatorTests.java index 6906e5f3adda8..485610f5842bb 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnExtractOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnExtractOperatorTests.java @@ -54,14 +54,15 @@ protected Operator.OperatorFactory simple(BigArrays bigArrays) { new ElementType[] { ElementType.BYTES_REF }, dvrCtx -> new EvalOperator.ExpressionEvaluator() { @Override - public Block.Ref eval(Page page) { + public Block eval(Page page) { BytesRefBlock input = page.getBlock(0); for (int i = 0; i < input.getPositionCount(); i++) { if (input.getBytesRef(i, new BytesRef()).utf8ToString().startsWith("no_")) { - return Block.Ref.floating(Block.constantNullBlock(input.getPositionCount(), input.blockFactory())); + return Block.constantNullBlock(input.getPositionCount(), input.blockFactory()); } } - return new Block.Ref(input, page); + input.incRef(); + return input; } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverContextTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverContextTests.java 
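The eval rewrites just above (and the matching changes in the Eval, Filter, and StringExtract operator tests later in this diff) all follow one refactor: Block.Ref is gone, so an evaluator that forwards an input block calls block.incRef() and returns the block itself, leaving the receiver responsible for releasing it. A minimal pure-JDK sketch of that reference-counting discipline follows; CountedBlock and its methods are illustrative assumptions, not Elasticsearch's RefCounted API.

import java.util.concurrent.atomic.AtomicInteger;

// Illustrative sketch of the incRef()/close() discipline the tests follow;
// not Elasticsearch's actual ref-counting implementation.
final class CountedBlock implements AutoCloseable {
    private final AtomicInteger refs = new AtomicInteger(1); // the creator holds one reference

    void incRef() {
        if (refs.getAndIncrement() <= 0) {
            throw new IllegalStateException("block already released");
        }
    }

    @Override
    public void close() { // decRef: free the payload when the last reference drops
        int remaining = refs.decrementAndGet();
        if (remaining == 0) {
            releaseResources();
        } else if (remaining < 0) {
            throw new IllegalStateException("double release");
        }
    }

    private void releaseResources() {
        // in the real code this returns memory to the circuit breaker
    }

    // An evaluator that passes an input block through takes its own reference,
    // mirroring `block.incRef(); return block;` in the diffs above.
    static CountedBlock passThrough(CountedBlock input) {
        input.incRef();
        return input;
    }
}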
index 99d7a0eb01748..27076c2adf2d2 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverContextTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverContextTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.operator; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; @@ -14,6 +15,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.FixedExecutorBuilder; @@ -136,6 +138,24 @@ public void testMultiThreaded() throws Exception { finishedReleasables.stream().flatMap(Set::stream).forEach(Releasable::close); } + public void testWaitForAsyncActions() { + DriverContext driverContext = new AssertingDriverContext(); + driverContext.addAsyncAction(); + driverContext.addAsyncAction(); + PlainActionFuture future = new PlainActionFuture<>(); + driverContext.waitForAsyncActions(future); + assertFalse(future.isDone()); + driverContext.finish(); + assertFalse(future.isDone()); + IllegalStateException error = expectThrows(IllegalStateException.class, driverContext::addAsyncAction); + assertThat(error.getMessage(), equalTo("DriverContext was finished already")); + driverContext.removeAsyncAction(); + assertFalse(future.isDone()); + driverContext.removeAsyncAction(); + assertTrue(future.isDone()); + Releasables.closeExpectNoException(driverContext.getSnapshot()); + } + static TestDriver newTestDriver(int unused) { var driverContext = new AssertingDriverContext(); return new TestDriver(driverContext, randomInt(128), driverContext.bigArrays()); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverTests.java index 38ba64f78523e..ba45db3c48299 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverTests.java @@ -33,6 +33,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.equalTo; @@ -41,24 +42,36 @@ public class DriverTests extends ESTestCase { public void testThreadContext() throws Exception { DriverContext driverContext = driverContext(); + int asyncActions = randomIntBetween(0, 5); + for (int i = 0; i < asyncActions; i++) { + driverContext.addAsyncAction(); + } ThreadPool threadPool = threadPool(); try { List inPages = randomList(1, 100, DriverTests::randomPage); List outPages = new ArrayList<>(); WarningsOperator warning1 = new WarningsOperator(threadPool); WarningsOperator warning2 = new WarningsOperator(threadPool); + CyclicBarrier allPagesProcessed = new CyclicBarrier(2); Driver driver = new Driver(driverContext, new CannedSourceOperator(inPages.iterator()) { @Override public Page getOutput() { assertRunningWithRegularUser(threadPool); return super.getOutput(); } - }, List.of(warning1, new SwitchContextOperator(threadPool), warning2), 
new PageConsumerOperator(page -> { + }, List.of(warning1, new SwitchContextOperator(driverContext, threadPool), warning2), new PageConsumerOperator(page -> { assertRunningWithRegularUser(threadPool); outPages.add(page); + if (outPages.size() == inPages.size()) { + try { + allPagesProcessed.await(30, TimeUnit.SECONDS); + } catch (Exception e) { + throw new AssertionError(e); + } + } }), () -> {}); ThreadContext threadContext = threadPool.getThreadContext(); - CountDownLatch latch = new CountDownLatch(1); + CountDownLatch driverCompleted = new CountDownLatch(1); try (ThreadContext.StoredContext ignored = threadContext.stashContext()) { threadContext.putHeader("user", "user1"); Driver.start(threadContext, threadPool.executor("esql"), driver, between(1, 1000), ActionListener.running(() -> { @@ -75,11 +88,19 @@ public Page getOutput() { } assertThat(actualResponseHeaders, equalTo(expectedResponseHeaders)); } finally { - latch.countDown(); + driverCompleted.countDown(); } })); } - assertTrue(latch.await(30, TimeUnit.SECONDS)); + allPagesProcessed.await(30, TimeUnit.SECONDS); + // race with the Driver to notify the listener + for (int i = 0; i < asyncActions; i++) { + try (ThreadContext.StoredContext ignored = threadContext.stashContext()) { + threadContext.putHeader("user", "system"); + driverContext.removeAsyncAction(); + } + } + assertTrue(driverCompleted.await(30, TimeUnit.SECONDS)); } finally { terminate(threadPool); } @@ -106,8 +127,8 @@ private static Page randomPage() { static class SwitchContextOperator extends AsyncOperator { private final ThreadPool threadPool; - SwitchContextOperator(ThreadPool threadPool) { - super(between(1, 3)); + SwitchContextOperator(DriverContext driverContext, ThreadPool threadPool) { + super(driverContext, between(1, 3)); this.threadPool = threadPool; } @@ -123,11 +144,11 @@ protected void performAsync(Page page, ActionListener listener) { threadPool.getThreadContext().putHeader("user", "system"); innerListener.onResponse(page); } - }), TimeValue.timeValueNanos(100), threadPool.executor("esql")); + }), TimeValue.timeValueNanos(between(1, 1_000_000)), threadPool.executor("esql")); } @Override - public void close() { + protected void doClose() { } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/EvalOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/EvalOperatorTests.java index e7f5db7579869..c755c5eafe08d 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/EvalOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/EvalOperatorTests.java @@ -34,14 +34,14 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int end) { record Addition(DriverContext driverContext, int lhs, int rhs) implements EvalOperator.ExpressionEvaluator { @Override - public Block.Ref eval(Page page) { + public Block eval(Page page) { LongVector lhsVector = page.getBlock(0).asVector(); LongVector rhsVector = page.getBlock(1).asVector(); try (LongVector.FixedBuilder result = LongVector.newVectorFixedBuilder(page.getPositionCount(), driverContext.blockFactory())) { for (int p = 0; p < page.getPositionCount(); p++) { result.appendLong(lhsVector.getLong(p) + rhsVector.getLong(p)); } - return Block.Ref.floating(result.build().asBlock()); + return result.build().asBlock(); } } @@ -56,8 +56,10 @@ public void close() {} record LoadFromPage(int channel) implements EvalOperator.ExpressionEvaluator { @Override - public 
Block.Ref eval(Page page) { - return new Block.Ref(page.getBlock(channel), page); + public Block eval(Page page) { + Block block = page.getBlock(channel); + block.incRef(); + return block; } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FilterOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FilterOperatorTests.java index e16f643e1ca4d..d067435ba9aaa 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FilterOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FilterOperatorTests.java @@ -33,14 +33,14 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int end) { record SameLastDigit(DriverContext context, int lhs, int rhs) implements EvalOperator.ExpressionEvaluator { @Override - public Block.Ref eval(Page page) { + public Block eval(Page page) { LongVector lhsVector = page.getBlock(0).asVector(); LongVector rhsVector = page.getBlock(1).asVector(); BooleanVector.FixedBuilder result = BooleanVector.newVectorFixedBuilder(page.getPositionCount(), context.blockFactory()); for (int p = 0; p < page.getPositionCount(); p++) { result.appendBoolean(lhsVector.getLong(p) % 10 == rhsVector.getLong(p) % 10); } - return Block.Ref.floating(result.build().asBlock()); + return result.build().asBlock(); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MultivalueDedupeTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MultivalueDedupeTests.java index 517936478ea22..50b20a2ffdcff 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MultivalueDedupeTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MultivalueDedupeTests.java @@ -22,8 +22,13 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockTestUtils; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.core.Releasables; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matcher; import org.junit.After; @@ -104,19 +109,19 @@ public MultivalueDedupeTests( public void testDedupeAdaptive() { BlockFactory blockFactory = blockFactory(); BasicBlockTests.RandomBlock b = randomBlock(); - assertDeduped(blockFactory, b, MultivalueDedupe.dedupeToBlockAdaptive(Block.Ref.floating(b.block()), blockFactory)); + assertDeduped(blockFactory, b, MultivalueDedupe.dedupeToBlockAdaptive(b.block(), blockFactory)); } public void testDedupeViaCopyAndSort() { BlockFactory blockFactory = blockFactory(); BasicBlockTests.RandomBlock b = randomBlock(); - assertDeduped(blockFactory, b, MultivalueDedupe.dedupeToBlockUsingCopyAndSort(Block.Ref.floating(b.block()), blockFactory)); + assertDeduped(blockFactory, b, MultivalueDedupe.dedupeToBlockUsingCopyAndSort(b.block(), blockFactory)); } public void testDedupeViaCopyMissing() { BlockFactory blockFactory = blockFactory(); BasicBlockTests.RandomBlock b = randomBlock(); - assertDeduped(blockFactory, b, MultivalueDedupe.dedupeToBlockUsingCopyMissing(Block.Ref.floating(b.block()), blockFactory)); + 
assertDeduped(blockFactory, b, MultivalueDedupe.dedupeToBlockUsingCopyMissing(b.block(), blockFactory)); } private BasicBlockTests.RandomBlock randomBlock() { @@ -131,8 +136,8 @@ private BasicBlockTests.RandomBlock randomBlock() { ); } - private void assertDeduped(BlockFactory blockFactory, BasicBlockTests.RandomBlock b, Block.Ref deduped) { - try (Block dedupedBlock = deduped.block()) { + private void assertDeduped(BlockFactory blockFactory, BasicBlockTests.RandomBlock b, Block dedupedBlock) { + try { if (dedupedBlock != b.block()) { assertThat(dedupedBlock.blockFactory(), sameInstance(blockFactory)); } @@ -143,6 +148,8 @@ private void assertDeduped(BlockFactory blockFactory, BasicBlockTests.RandomBloc : containsInAnyOrder(v.stream().collect(Collectors.toSet()).stream().sorted().toArray()); BlockTestUtils.assertPositionValues(dedupedBlock, p, matcher); } + } finally { + Releasables.closeExpectNoException(dedupedBlock); } } @@ -211,7 +218,7 @@ public void testHashWithPreviousValues() { public void testBatchEncodeAll() { int initCapacity = Math.toIntExact(ByteSizeValue.ofKb(10).getBytes()); BasicBlockTests.RandomBlock b = randomBlock(); - var encoder = (BatchEncoder.MVEncoder) MultivalueDedupe.batchEncoder(Block.Ref.floating(b.block()), initCapacity, false); + var encoder = (BatchEncoder.MVEncoder) MultivalueDedupe.batchEncoder(b.block(), initCapacity, false); int valueOffset = 0; for (int p = 0, positionOffset = Integer.MAX_VALUE; p < b.block().getPositionCount(); p++, positionOffset++) { @@ -228,7 +235,7 @@ public void testBatchEncodeAll() { public void testBatchEncoderStartSmall() { assumeFalse("Booleans don't grow in the same way", elementType == ElementType.BOOLEAN); BasicBlockTests.RandomBlock b = randomBlock(); - var encoder = (BatchEncoder.MVEncoder) MultivalueDedupe.batchEncoder(Block.Ref.floating(b.block()), 0, false); + var encoder = (BatchEncoder.MVEncoder) MultivalueDedupe.batchEncoder(b.block(), 0, false); /* * We run can't fit the first non-null position into our 0 bytes. 
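The assert*Hash helpers in the next hunk all verify the same scheme: each position's values are deduplicated, looked up in a shared hash that may be pre-seeded with previousValues, and rewritten as integer ordinals, with nulls reported separately via sawNull(). A rough pure-JDK sketch of that dedupe-and-ordinal idea follows; DedupeHashSketch and its names are illustrative assumptions, not the MultivalueDedupe API.

import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;

// Illustrative sketch (not the MultivalueDedupe API): dedupe each position's
// values, assign stable ordinals from a shared hash, and track nulls out of band.
final class DedupeHashSketch {
    private final Map<Long, Integer> hash = new LinkedHashMap<>(); // value -> ordinal
    private boolean sawNull;

    /** One row of a multi-valued column in; its deduplicated ordinals out. */
    int[] hashPosition(Long[] values) {
        Set<Integer> ords = new LinkedHashSet<>();
        for (Long v : values) {
            if (v == null) {
                sawNull = true; // reported separately, like HashResult.sawNull()
            } else {
                ords.add(hash.computeIfAbsent(v, ignored -> hash.size()));
            }
        }
        return ords.stream().mapToInt(Integer::intValue).toArray();
    }

    boolean sawNull() {
        return sawNull;
    }
}

Pre-seeding the map before hashing, as testHashWithPreviousValues does, simply reserves the low ordinals for previously seen values.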
@@ -263,47 +270,56 @@ private void assertBooleanHash(Set previousValues, BasicBlockTests.Rand if (previousValues.contains(true)) { everSeen[2] = true; } - IntBlock hashes = new MultivalueDedupeBoolean(Block.Ref.floating(b.block())).hash(everSeen); - List hashedValues = new ArrayList<>(); - if (everSeen[1]) { - hashedValues.add(false); - } - if (everSeen[2]) { - hashedValues.add(true); + try (IntBlock hashes = new MultivalueDedupeBoolean((BooleanBlock) b.block()).hash(blockFactory(), everSeen)) { + List hashedValues = new ArrayList<>(); + if (everSeen[1]) { + hashedValues.add(false); + } + if (everSeen[2]) { + hashedValues.add(true); + } + assertHash(b, hashes, hashedValues.size(), previousValues, i -> hashedValues.get((int) i)); } - assertHash(b, hashes, hashedValues.size(), previousValues, i -> hashedValues.get((int) i)); } private void assertBytesRefHash(Set previousValues, BasicBlockTests.RandomBlock b) { BytesRefHash hash = new BytesRefHash(1, BigArrays.NON_RECYCLING_INSTANCE); previousValues.stream().forEach(hash::add); - MultivalueDedupe.HashResult hashes = new MultivalueDedupeBytesRef(Block.Ref.floating(b.block())).hash(hash); - assertThat(hashes.sawNull(), equalTo(b.values().stream().anyMatch(v -> v == null))); - assertHash(b, hashes.ords(), hash.size(), previousValues, i -> hash.get(i, new BytesRef())); + MultivalueDedupe.HashResult hashes = new MultivalueDedupeBytesRef((BytesRefBlock) b.block()).hash(blockFactory(), hash); + try (IntBlock ords = hashes.ords()) { + assertThat(hashes.sawNull(), equalTo(b.values().stream().anyMatch(v -> v == null))); + assertHash(b, ords, hash.size(), previousValues, i -> hash.get(i, new BytesRef())); + } } private void assertIntHash(Set previousValues, BasicBlockTests.RandomBlock b) { LongHash hash = new LongHash(1, BigArrays.NON_RECYCLING_INSTANCE); previousValues.stream().forEach(hash::add); - MultivalueDedupe.HashResult hashes = new MultivalueDedupeInt(Block.Ref.floating(b.block())).hash(hash); - assertThat(hashes.sawNull(), equalTo(b.values().stream().anyMatch(v -> v == null))); - assertHash(b, hashes.ords(), hash.size(), previousValues, i -> (int) hash.get(i)); + MultivalueDedupe.HashResult hashes = new MultivalueDedupeInt((IntBlock) b.block()).hash(blockFactory(), hash); + try (IntBlock ords = hashes.ords()) { + assertThat(hashes.sawNull(), equalTo(b.values().stream().anyMatch(v -> v == null))); + assertHash(b, ords, hash.size(), previousValues, i -> (int) hash.get(i)); + } } private void assertLongHash(Set previousValues, BasicBlockTests.RandomBlock b) { LongHash hash = new LongHash(1, BigArrays.NON_RECYCLING_INSTANCE); previousValues.stream().forEach(hash::add); - MultivalueDedupe.HashResult hashes = new MultivalueDedupeLong(Block.Ref.floating(b.block())).hash(hash); - assertThat(hashes.sawNull(), equalTo(b.values().stream().anyMatch(v -> v == null))); - assertHash(b, hashes.ords(), hash.size(), previousValues, i -> hash.get(i)); + MultivalueDedupe.HashResult hashes = new MultivalueDedupeLong((LongBlock) b.block()).hash(blockFactory(), hash); + try (IntBlock ords = hashes.ords()) { + assertThat(hashes.sawNull(), equalTo(b.values().stream().anyMatch(v -> v == null))); + assertHash(b, ords, hash.size(), previousValues, i -> hash.get(i)); + } } private void assertDoubleHash(Set previousValues, BasicBlockTests.RandomBlock b) { LongHash hash = new LongHash(1, BigArrays.NON_RECYCLING_INSTANCE); previousValues.stream().forEach(d -> hash.add(Double.doubleToLongBits(d))); - MultivalueDedupe.HashResult hashes = new 
MultivalueDedupeDouble(Block.Ref.floating(b.block())).hash(hash); - assertThat(hashes.sawNull(), equalTo(b.values().stream().anyMatch(v -> v == null))); - assertHash(b, hashes.ords(), hash.size(), previousValues, i -> Double.longBitsToDouble(hash.get(i))); + MultivalueDedupe.HashResult hashes = new MultivalueDedupeDouble((DoubleBlock) b.block()).hash(blockFactory(), hash); + try (IntBlock ords = hashes.ords()) { + assertThat(hashes.sawNull(), equalTo(b.values().stream().anyMatch(v -> v == null))); + assertHash(b, ords, hash.size(), previousValues, i -> Double.longBitsToDouble(hash.get(i))); + } } private void assertHash( diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OutputOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OutputOperatorTests.java new file mode 100644 index 0000000000000..bccd5c1b57d81 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OutputOperatorTests.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.common.util.BigArrays; + +import java.util.List; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.equalTo; + +public class OutputOperatorTests extends AnyOperatorTestCase { + @Override + protected Operator.OperatorFactory simple(BigArrays bigArrays) { + return new OutputOperator.OutputOperatorFactory(List.of("a"), p -> p, p -> {}); + } + + @Override + protected String expectedDescriptionOfSimple() { + return "OutputOperator[columns = [a]]"; + } + + @Override + protected String expectedToStringOfSimple() { + return expectedDescriptionOfSimple(); + } + + private Operator.OperatorFactory big() { + return new OutputOperator.OutputOperatorFactory(IntStream.range(0, 20).mapToObj(i -> "a" + i).toList(), p -> p, p -> {}); + } + + private String expectedDescriptionOfBig() { + return "OutputOperator[columns = [20 columns]]"; + } + + public void testBigToString() { + try (Operator operator = big().get(driverContext())) { + assertThat(operator.toString(), equalTo(expectedDescriptionOfBig())); + } + } + + public void testBigDescription() { + assertThat(big().describe(), equalTo(expectedDescriptionOfBig())); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/StringExtractOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/StringExtractOperatorTests.java index c55fbeb29a25e..70ef2118fcef0 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/StringExtractOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/StringExtractOperatorTests.java @@ -47,8 +47,10 @@ protected Operator.OperatorFactory simple(BigArrays bigArrays) { new String[] { "test" }, dvrCtx -> new EvalOperator.ExpressionEvaluator() { @Override - public Block.Ref eval(Page page) { - return new Block.Ref(page.getBlock(0), page); + public Block eval(Page page) { + Block block = page.getBlock(0); + block.incRef(); + return block; } @Override @@ -91,8 +93,10 @@ public void testMultivalueDissectInput() { StringExtractOperator operator = new StringExtractOperator(new String[] { "test" }, new 
EvalOperator.ExpressionEvaluator() {
             @Override
-            public Block.Ref eval(Page page) {
-                return new Block.Ref(page.getBlock(0), page);
+            public Block eval(Page page) {
+                Block block = page.getBlock(0);
+                block.incRef();
+                return block;
             }

             @Override
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java
index 1b98d69c313ca..f44131c006b94 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java
@@ -412,10 +412,6 @@ public void sendResponse(TransportResponse transportResponse) throws IOException
                 }
                 ExchangeResponse newResp = new ExchangeResponse(page, origResp.finished());
                 origResp.decRef();
-                while (origResp.hasReferences()) {
-                    newResp.incRef();
-                    origResp.decRef();
-                }
                 super.sendResponse(newResp);
             }
         };
diff --git a/x-pack/plugin/esql/qa/server/build.gradle b/x-pack/plugin/esql/qa/server/build.gradle
index f8a43c52f5ca7..12c3a9d951383 100644
--- a/x-pack/plugin/esql/qa/server/build.gradle
+++ b/x-pack/plugin/esql/qa/server/build.gradle
@@ -19,7 +19,7 @@ subprojects {
   }

-  if (project.name != 'security') {
+  if (project.name != 'security' && project.name != 'mixed-cluster') {
     // The security project just configures its subprojects
     apply plugin: 'elasticsearch.legacy-java-rest-test'

diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle b/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle
new file mode 100644
index 0000000000000..01955adb3af0c
--- /dev/null
+++ b/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle
@@ -0,0 +1,53 @@
+
+import org.elasticsearch.gradle.Version
+import org.elasticsearch.gradle.VersionProperties
+import org.elasticsearch.gradle.internal.info.BuildParams
+import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask
+
+apply plugin: 'elasticsearch.internal-testclusters'
+apply plugin: 'elasticsearch.standalone-rest-test'
+apply plugin: 'elasticsearch.bwc-test'
+apply plugin: 'elasticsearch.rest-resources'
+
+dependencies {
+  testImplementation project(xpackModule('esql:qa:testFixtures'))
+  testImplementation project(xpackModule('esql:qa:server'))
+}
+
+restResources {
+  restApi {
+    include '_common', 'bulk', 'get', 'indices', 'esql', 'xpack', 'enrich', 'cluster'
+  }
+  restTests {
+    includeXpack 'esql'
+  }
+}
+
+BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
+
+  if (bwcVersion != VersionProperties.getElasticsearchVersion() && bwcVersion.onOrAfter(Version.fromString("8.11.0"))) {
+    /* This project runs the ESQL spec tests against a 4-node cluster where two of the nodes are on a different minor version.
*/ + def baseCluster = testClusters.register(baseName) { + versions = [bwcVersion.toString(), bwcVersion.toString(), project.version, project.version] + numberOfNodes = 4 + testDistribution = 'DEFAULT' + setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.enabled', 'false' + } + + tasks.register("${baseName}#mixedClusterTest", StandaloneRestIntegTestTask) { + useCluster baseCluster + mustRunAfter("precommit") + nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.clustername', baseName) + systemProperty 'tests.bwc_nodes_version', bwcVersion.toString().replace('-SNAPSHOT', '') + systemProperty 'tests.new_nodes_version', project.version.toString().replace('-SNAPSHOT', '') + onlyIf("BWC tests disabled") { project.bwc_tests_enabled } + } + + tasks.register(bwcTaskName(bwcVersion)) { + dependsOn "${baseName}#mixedClusterTest" + } + } +} + diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/src/test/java/org/elasticsearch/xpack/esql/qa/mixed/EsqlClientYamlIT.java b/x-pack/plugin/esql/qa/server/mixed-cluster/src/test/java/org/elasticsearch/xpack/esql/qa/mixed/EsqlClientYamlIT.java new file mode 100644 index 0000000000000..0965c5506c6a1 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/src/test/java/org/elasticsearch/xpack/esql/qa/mixed/EsqlClientYamlIT.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.qa.mixed; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase; +import org.junit.After; +import org.junit.Before; + +public class EsqlClientYamlIT extends ESClientYamlSuiteTestCase { + + public EsqlClientYamlIT(final ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return createParameters(); + } + + @Before + @After + public void assertRequestBreakerEmpty() throws Exception { + EsqlSpecTestCase.assertRequestBreakerEmpty(); + } +} diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/src/test/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java b/x-pack/plugin/esql/qa/server/mixed-cluster/src/test/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java new file mode 100644 index 0000000000000..b8dab3641c2a0 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/src/test/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.esql.qa.mixed;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase;
+import org.elasticsearch.xpack.ql.CsvSpecReader.CsvTestCase;
+
+import static org.elasticsearch.xpack.esql.CsvTestUtils.isEnabled;
+
+public class MixedClusterEsqlSpecIT extends EsqlSpecTestCase {
+
+    static final Version bwcVersion = Version.fromString(System.getProperty("tests.bwc_nodes_version"));
+    static final Version newVersion = Version.fromString(System.getProperty("tests.new_nodes_version"));
+
+    public MixedClusterEsqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) {
+        super(fileName, groupName, testName, lineNumber, testCase);
+    }
+
+    @Override
+    protected void shouldSkipTest(String testName) {
+        assumeTrue("Test " + testName + " is skipped on " + bwcVersion, isEnabled(testName, bwcVersion));
+        assumeTrue("Test " + testName + " is skipped on " + newVersion, isEnabled(testName, newVersion));
+    }
+}
diff --git a/x-pack/plugin/esql/qa/server/single-node/build.gradle b/x-pack/plugin/esql/qa/server/single-node/build.gradle
index 3131b4176ee25..2d430965efb21 100644
--- a/x-pack/plugin/esql/qa/server/single-node/build.gradle
+++ b/x-pack/plugin/esql/qa/server/single-node/build.gradle
@@ -9,10 +9,9 @@ restResources {
   restApi {
     include '_common', 'bulk', 'get', 'indices', 'esql', 'xpack', 'enrich', 'cluster'
   }
-}
-
-artifacts {
-  restXpackTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test"))
+  restTests {
+    includeXpack 'esql'
+  }
 }

 testClusters.configureEach {
diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java
index 776a2e732e5e9..5397681e231fd 100644
--- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java
+++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java
@@ -9,6 +9,7 @@
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 import org.apache.http.HttpEntity;
+import org.elasticsearch.Version;
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.ResponseException;
 import org.elasticsearch.common.xcontent.XContentHelper;
@@ -90,13 +91,17 @@ public boolean logResults() {
     public final void test() throws Throwable {
         try {
-            assumeTrue("Test " + testName + " is not enabled", isEnabled(testName));
+            shouldSkipTest(testName);
             doTest();
         } catch (Exception e) {
             throw reworkException(e);
         }
     }

+    protected void shouldSkipTest(String testName) {
+        assumeTrue("Test " + testName + " is not enabled", isEnabled(testName, Version.CURRENT));
+    }
+
     protected final void doTest() throws Throwable {
         RequestObjectBuilder builder = new RequestObjectBuilder(randomFrom(XContentType.values()));
         Map<String, Object> answer = runEsql(builder.query(testCase.query).build(), testCase.expectedWarnings);
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java
index 988d77a11beef..3ccf61b3a15ed 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java
@@ -9,6 +9,7 @@
 import org.apache.lucene.sandbox.document.HalfFloatPoint;
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.Version;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.time.DateFormatters;
 import org.elasticsearch.compute.data.Block;
@@ -22,9 +23,9 @@
 import org.elasticsearch.core.Releasables;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.logging.Logger;
+import org.elasticsearch.test.VersionUtils;
 import org.elasticsearch.xpack.esql.action.EsqlQueryResponse;
 import org.elasticsearch.xpack.ql.util.StringUtils;
-import org.elasticsearch.xpack.versionfield.Version;
 import org.supercsv.io.CsvListReader;
 import org.supercsv.prefs.CsvPreference;
@@ -42,6 +43,8 @@
 import java.util.Locale;
 import java.util.Map;
 import java.util.function.Function;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;

 import static org.elasticsearch.common.Strings.delimitedListToStringArray;
 import static org.elasticsearch.common.logging.LoggerMessageFormat.format;
@@ -57,8 +60,51 @@ public final class CsvTestUtils {

     private CsvTestUtils() {}

-    public static boolean isEnabled(String testName) {
-        return testName.endsWith("-Ignore") == false;
+    public static boolean isEnabled(String testName, Version version) {
+        if (testName.endsWith("-Ignore")) {
+            return false;
+        }
+        Tuple<Version, Version> skipRange = skipVersionRange(testName);
+        if (skipRange != null && version.onOrAfter(skipRange.v1()) && version.onOrBefore(skipRange.v2())) {
+            return false;
+        }
+        return true;
+    }
+
+    private static final Pattern INSTRUCTION_PATTERN = Pattern.compile("#\\[(.*?)]");
+
+    public static Map<String, String> extractInstructions(String testName) {
+        Matcher matcher = INSTRUCTION_PATTERN.matcher(testName);
+        Map<String, String> pairs = new HashMap<>();
+        if (matcher.find()) {
+            String[] groups = matcher.group(1).split(",");
+            for (String group : groups) {
+                String[] kv = group.split(":");
+                if (kv.length != 2) {
+                    throw new IllegalArgumentException("expected instruction in [k1:v1,k2:v2] format; got " + matcher.group(1));
+                }
+                pairs.put(kv[0].trim(), kv[1].trim());
+            }
+        }
+        return pairs;
+    }
+
+    public static Tuple<Version, Version> skipVersionRange(String testName) {
+        Map<String, String> pairs = extractInstructions(testName);
+        String versionRange = pairs.get("skip");
+        if (versionRange != null) {
+            String[] skipVersions = versionRange.split("-");
+            if (skipVersions.length != 2) {
+                throw new IllegalArgumentException("malformed version range : " + versionRange);
+            }
+            String lower = skipVersions[0].trim();
+            String upper = skipVersions[1].trim();
+            return Tuple.tuple(
+                lower.isEmpty() ? VersionUtils.getFirstVersion() : Version.fromString(lower),
+                upper.isEmpty() ? Version.CURRENT : Version.fromString(upper)
+            );
+        }
+        return null;
     }

     public static Tuple<Page, List<String>> loadPageFromCsv(URL source) throws Exception {
@@ -333,7 +379,7 @@ public enum Type {
             : ((BytesRef) l).compareTo((BytesRef) r),
             BytesRef.class
         ),
-        VERSION(v -> new Version(v).toBytesRef(), BytesRef.class),
+        VERSION(v -> new org.elasticsearch.xpack.versionfield.Version(v).toBytesRef(), BytesRef.class),
         NULL(s -> null, Void.class),
         DATETIME(
             x -> x == null ?
null : DateFormatters.from(UTC_DATE_TIME_FORMATTER.parse(x)).toInstant().toEpochMilli(), diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec index f85dbeda7f6bc..8b94c022aaf6a 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec @@ -182,7 +182,7 @@ string:keyword |datetime:date // end::to_datetime-str-result[] ; -convertFromUnsignedLong +convertFromUnsignedLong#[skip:-8.11.99, reason:ql exceptions were updated in 8.12] row ul = [9223372036854775808, 520128000000] | eval dt = to_datetime(ul); warning:Line 1:58: evaluation of [to_datetime(ul)] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:58: org.elasticsearch.xpack.ql.InvalidArgumentException: [9223372036854775808] out of [long] range diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec index 8091f7a18463e..4c9c3a2681f50 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec @@ -158,3 +158,33 @@ emp_no:integer | a:keyword | b:keyword | c:keyword 10005 | null | null | null 10006 | [Principal, Senior] | [Support, Team] | [Engineer, Lead] ; + +emptyPattern#[skip:-8.11.99] +ROW a="b c d"| DISSECT a "%{b} %{} %{d}"; + +a:keyword | b:keyword | d:keyword +b c d | b | d +; + + +multipleEmptyPatterns#[skip:-8.11.99] +ROW a="b c d e"| DISSECT a "%{b} %{} %{} %{e}"; + +a:keyword | b:keyword | e:keyword +b c d e | b | e +; + +firstEmptyPattern#[skip:-8.11.99] +ROW a="x b c d"| DISSECT a "%{} %{b} %{} %{d}"; + +a:keyword | b:keyword | d:keyword +x b c d | b | d +; + + +lastEmptyPattern#[skip:-8.11.99] +ROW a="b c d x"| DISSECT a "%{b} %{} %{d} %{}"; + +a:keyword | b:keyword | d:keyword +b c d x | b | d +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec index f2052462f4d8b..bbbfa287ea695 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec @@ -554,4 +554,100 @@ ROW a = "2023-01-23T12:15:00.000Z - some text - 127.0.0.1" msg:keyword | ip:keyword | date:date some text | 127.0.0.1 | 2023-01-23T12:15:00.000Z // end::dissectWithToDatetime-result[] +; + +dissectRightPaddingModifier +// tag::dissectRightPaddingModifier[] +ROW message="1998-08-10T17:15:42 WARN" +| DISSECT message "%{ts->} %{level}" +// end::dissectRightPaddingModifier[] +; + +// tag::dissectRightPaddingModifier-result[] +message:keyword | ts:keyword | level:keyword +1998-08-10T17:15:42 WARN|1998-08-10T17:15:42|WARN +// end::dissectRightPaddingModifier-result[] +; + +dissectEmptyRightPaddingModifier +// tag::dissectEmptyRightPaddingModifier[] +ROW message="[1998-08-10T17:15:42] [WARN]" +| DISSECT message "[%{ts}]%{->}[%{level}]" +// end::dissectEmptyRightPaddingModifier[] +| KEEP message, ts, level +; + +// tag::dissectEmptyRightPaddingModifier-result[] +message:keyword | ts:keyword | level:keyword +[1998-08-10T17:15:42] [WARN]|1998-08-10T17:15:42 |WARN +// end::dissectEmptyRightPaddingModifier-result[] +; + +dissectAppendModifier +// tag::dissectAppendModifier[] +ROW message="john jacob jingleheimer schmidt" +| DISSECT message "%{+name} %{+name} %{+name} 
%{+name}" APPEND_SEPARATOR=" " +// end::dissectAppendModifier[] +; + +// tag::dissectAppendModifier-result[] +message:keyword | name:keyword +john jacob jingleheimer schmidt|john jacob jingleheimer schmidt +// end::dissectAppendModifier-result[] +; + +dissectAppendWithOrderModifier +// tag::dissectAppendWithOrderModifier[] +ROW message="john jacob jingleheimer schmidt" +| DISSECT message "%{+name/2} %{+name/4} %{+name/3} %{+name/1}" APPEND_SEPARATOR="," +// end::dissectAppendWithOrderModifier[] +; + +// tag::dissectAppendWithOrderModifier-result[] +message:keyword | name:keyword +john jacob jingleheimer schmidt|schmidt,john,jingleheimer,jacob +// end::dissectAppendWithOrderModifier-result[] +; + +dissectNamedSkipKey +// tag::dissectNamedSkipKey[] +ROW message="1.2.3.4 - - 30/Apr/1998:22:00:52 +0000" +| DISSECT message "%{clientip} %{?ident} %{?auth} %{@timestamp}" +// end::dissectNamedSkipKey[] +; + +// tag::dissectNamedSkipKey-result[] +message:keyword | clientip:keyword | @timestamp:keyword +1.2.3.4 - - 30/Apr/1998:22:00:52 +0000|1.2.3.4 |30/Apr/1998:22:00:52 +0000 +// end::dissectNamedSkipKey-result[] +; + +docsLike +// tag::like[] +FROM employees +| WHERE first_name LIKE "?b*" +| KEEP first_name, last_name +// end::like[] +| SORT first_name +; + +// tag::like-result[] +first_name:keyword | last_name:keyword +Ebbe |Callaway +Eberhardt |Terkki +// end::like-result[] +; + +docsRlike +// tag::rlike[] +FROM employees +| WHERE first_name RLIKE ".leja.*" +| KEEP first_name, last_name +// end::rlike[] +; + +// tag::rlike-result[] +first_name:keyword | last_name:keyword +Alejandro |McAlpine +// end::rlike-result[] ; \ No newline at end of file diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/drop.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/drop.csv-spec index cd3afa25fc0a6..601b4f329f9d7 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/drop.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/drop.csv-spec @@ -54,3 +54,36 @@ c:l|mi:i|s:l 0 |null|null ; + +// see https://github.com/elastic/elasticsearch/issues/102121 +dropGrouping#[skip:-8.11.99, reason:planning bug fixed in v8.12] +row a = 1 | rename a AS foo | stats bar = count(*) by foo | drop foo; + +bar:long +1 +; + +dropGroupingMulti#[skip:-8.11.99] +row a = 1, b = 2 | rename a AS foo, b as bar | stats baz = count(*) by foo, bar | drop foo; + +baz:long | bar:integer +1 | 2 +; + +dropGroupingMulti2#[skip:-8.11.99] +row a = 1, b = 2 | rename a AS foo, b as bar | stats baz = count(*) by foo, bar | drop foo, bar; + +baz:long +1 +; + + +dropGroupingMultirow#[skip:-8.11.99] +from employees | rename gender AS foo | stats bar = count(*) by foo | drop foo | sort bar; + +bar:long +10 +33 +57 +; + diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec index 9485bf800dd18..0f6fc42860750 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec @@ -63,7 +63,7 @@ long:long |ul:ul [501379200000, 520128000000] |[501379200000, 520128000000] ; -convertDoubleToUL +convertDoubleToUL#[skip:-8.11.99, reason:ql exceptions updated in 8.12] row d = 123.4 | eval ul = to_ul(d), overflow = to_ul(1e20); warning:Line 1:48: evaluation of [to_ul(1e20)] failed, treating result as null. Only first 20 failures recorded. 
warning:Line 1:48: org.elasticsearch.xpack.ql.InvalidArgumentException: [1.0E20] out of [unsigned_long] range @@ -120,7 +120,7 @@ int:integer |long:long [5013792, 520128] |[5013792, 520128] ; -convertULToLong +convertULToLong#[skip:-8.11.99, reason:ql exceptions were updated in 8.12] row ul = [9223372036854775807, 9223372036854775808] | eval long = to_long(ul); warning:Line 1:67: evaluation of [to_long(ul)] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:67: org.elasticsearch.xpack.ql.InvalidArgumentException: [9223372036854775808] out of [long] range @@ -161,7 +161,7 @@ str1:keyword |str2:keyword |str3:keyword |long1:long |long2:long |long3:long // end::to_long-str-result[] ; -convertDoubleToLong +convertDoubleToLong#[skip:-8.11.99, reason:ql exceptions were updated in 8.12] row d = 123.4 | eval d2l = to_long(d), overflow = to_long(1e19); warning:Line 1:51: evaluation of [to_long(1e19)] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:51: org.elasticsearch.xpack.ql.InvalidArgumentException: [1.0E19] out of [long] range @@ -179,7 +179,7 @@ int:integer |ii:integer [5013792, 520128] |[5013792, 520128] ; -convertLongToInt +convertLongToInt#[skip:-8.11.99, reason:ql exceptions were updated in 8.12] // tag::to_int-long[] ROW long = [5013792, 2147483647, 501379200000] | EVAL int = TO_INTEGER(long) @@ -194,7 +194,7 @@ long:long |int:integer // end::to_int-long-result[] ; -convertULToInt +convertULToInt#[skip:-8.11.99, reason:ql exceptions were updated in 8.12] row ul = [2147483647, 9223372036854775808] | eval int = to_int(ul); warning:Line 1:57: evaluation of [to_int(ul)] failed, treating result as null. Only first 20 failures recorded. // UL conversion to int dips into long; not the most efficient, but it's how SQL does it too. @@ -229,7 +229,7 @@ int_str:keyword |int_dbl_str:keyword |is2i:integer|ids2i:integer |overflow:in 2147483647 |2147483647.2 |2147483647 |2147483647 |null |null ; -convertDoubleToInt +convertDoubleToInt#[skip:-8.11.99, reason:ql exceptions were updated in 8.12] row d = 123.4 | eval d2i = to_integer(d), overflow = to_integer(1e19); warning:Line 1:54: evaluation of [to_integer(1e19)] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:54: org.elasticsearch.xpack.ql.InvalidArgumentException: [1.0E19] out of [long] range @@ -473,7 +473,7 @@ ROW deg = [90, 180, 270] [90, 180, 270] | [1.5707963267948966, 3.141592653589793, 4.71238898038469] ; -warningWithFromSource +warningWithFromSource-Ignore from employees | sort emp_no | limit 1 | eval x = to_long(emp_no) * 10000000 | eval y = to_int(x) > 1 | keep y; warning:Line 1:89: evaluation of [to_int(x)] failed, treating result as null. Only first 20 failures recorded. 
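NOTE: Renaming `warningWithFromSource` to `warningWithFromSource-Ignore` just above (its second expected warning continues below) uses the other disabling mechanism: `isEnabled` checks the `-Ignore` suffix before it ever looks for a skip range, so the test is off on every version. In miniature, with made-up test names:

    // From CsvTestUtils.isEnabled: the suffix check short-circuits first.
    assert isEnabled("someTest-Ignore", Version.CURRENT) == false;
    // Even a skip instruction cannot re-enable an -Ignore test.
    assert isEnabled("someTest#[skip:-8.11.99]-Ignore", Version.fromString("8.12.0")) == false;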
warning:Line 1:89: org.elasticsearch.xpack.ql.InvalidArgumentException: [100010000000] out of [integer] range diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec index 3637081c3c4b6..facf06eb6a960 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec @@ -533,8 +533,7 @@ emp_no:integer | salary_change:double |salary_change.int:integer|salary_chan projectAllButConstant -from employees | eval c = 1 | keep c | limit 2 -; +from employees | eval c = 1 | keep c | limit 2; c:i 1 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec index 70b416a8a9c02..a6e24e9d45289 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec @@ -200,10 +200,10 @@ height:double | s:double 1.53 | 0.34 ; -powSalarySquared +powSalarySquared#[skip:-8.11.99,reason:return type changed in 8.12] from employees | eval s = pow(to_long(salary) - 75000, 2) + 10000 | keep salary, s | sort salary desc | limit 4; -salary:integer | s:long +salary:integer | s:double 74999 | 10001 74970 | 10900 74572 | 193184 @@ -328,14 +328,14 @@ base:integer | exponent:double | s:double // end::powID-sqrt-result[] ; -powSqrtNeg +powSqrtNeg#[skip:-8.11.99,reason:return type changed in 8.12] // tag::powNeg-sqrt[] ROW base = -4, exponent = 0.5 | EVAL s = POW(base, exponent) // end::powNeg-sqrt[] ; warning:Line 2:12: evaluation of [POW(base, exponent)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 2:12: java.lang.ArithmeticException: invalid result: pow(-4.0, 0.5) +warning:Line 2:12: java.lang.ArithmeticException: invalid result when computing pow // tag::powNeg-sqrt-result[] base:integer | exponent:double | s:double @@ -356,23 +356,19 @@ base:double | exponent:integer | result:double // end::powDI-result[] ; -powIntInt -// tag::powII[] +powIntInt#[skip:-8.11.99,reason:return type changed in 8.12] ROW base = 2, exponent = 2 | EVAL s = POW(base, exponent) -// end::powII[] ; -// tag::powII-result[] -base:integer | exponent:integer | s:integer -2 | 2 | 4 -// end::powII-result[] +base:integer | exponent:integer | s:double +2 | 2 | 4.0 ; -powIntIntPlusInt +powIntIntPlusInt#[skip:-8.11.99,reason:return type changed in 8.12] row s = 1 + pow(2, 2); -s:integer +s:double 5 ; @@ -383,24 +379,24 @@ s:double 5 ; -powIntUL +powIntUL#[skip:-8.11.99,reason:return type changed in 8.12] row x = pow(1, 9223372036854775808); -x:long +x:double 1 ; -powLongUL +powLongUL#[skip:-8.11.99,reason:return type changed in 8.12] row x = to_long(1) | eval x = pow(x, 9223372036854775808); -x:long +x:double 1 ; -powUnsignedLongUL +powUnsignedLongUL#[skip:-8.11.99,reason:return type changed in 8.12] row x = to_ul(1) | eval x = pow(x, 9223372036854775808); -x:long +x:double 1 ; @@ -411,36 +407,28 @@ x:double 1.0 ; -powIntULOverrun +powIntULOverrun#[skip:-8.11.99,reason:return type changed in 8.12] row x = pow(2, 9223372036854775808); warning:Line 1:9: evaluation of [pow(2, 9223372036854775808)] failed, treating result as null. Only first 20 failures recorded. 
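NOTE: The math.csv-spec edits that follow all track one change: `POW` now evaluates in, and returns, `double`, and a NaN or infinite result produces the `invalid result when computing pow` warning with a null value instead of `long overflow` (the rewritten expectation continues just below). A sketch of semantics consistent with these expected outputs, assuming the engine checks finiteness; this is not the actual evaluator code:

    // Hypothetical helper mirroring the updated expectations in this file.
    static double pow(double base, double exponent) {
        double result = Math.pow(base, exponent);
        if (Double.isNaN(result) || Double.isInfinite(result)) {
            // Shows up in the specs as a warning plus a null value.
            throw new ArithmeticException("invalid result when computing pow");
        }
        return result;
    }

    // pow(-4, 0.5)                -> NaN      -> warning + null (powSqrtNeg)
    // pow(2, 9223372036854775808) -> Infinity -> warning + null (powIntULOverrun)
    // pow(10, 100)                -> 1.0E100  -> now simply returned (powULLongOverrun)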
-warning:Line 1:9: java.lang.ArithmeticException: long overflow +warning:Line 1:9: java.lang.ArithmeticException: invalid result when computing pow -x:long +x:double null ; -powULInt +powULInt#[skip:-8.11.99,reason:return type changed in 8.12] row x = pow(to_unsigned_long(9223372036854775807), 1); -x:long +x:double 9223372036854775807 ; -powULIntOverrun -// tag::powULOverrun[] +powULIntOverrun#[skip:-8.11.99,reason:return type changed in 8.12] ROW x = POW(9223372036854775808, 2) -// end::powULOverrun[] ; -// tag::powULOverrun-warning[] -warning:Line 1:9: evaluation of [POW(9223372036854775808, 2)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:9: java.lang.ArithmeticException: long overflow -// end::powULOverrun-warning[] -// tag::powULOverrun-result[] -x:long -null -// end::powULOverrun-result[] +x:double +8.507059173023462E37 ; powULInt_2d @@ -455,20 +443,18 @@ x:double // end::pow2d-result[] ; -powULLong +powULLong#[skip:-8.11.99,reason:return type changed in 8.12] row x = to_long(10) | eval x = pow(to_unsigned_long(10), x); -x:long +x:double 10000000000 ; -powULLongOverrun +powULLongOverrun#[skip:-8.11.99,reason:return type changed in 8.12] row x = to_long(100) | eval x = pow(to_unsigned_long(10), x); -warning:Line 1:33: evaluation of [pow(to_unsigned_long(10), x)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:33: java.lang.ArithmeticException: long overflow -x:long -null +x:double +1.0E100 ; powULDouble diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_expand.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_expand.csv-spec index 7553cea0e26d5..c681a1a7e977c 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_expand.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_expand.csv-spec @@ -252,3 +252,57 @@ emp_no:integer | first_name:keyword | generate_mv:keyword 10001 | Georgi | foo 10002 | Bezalel | foo ; + + +// see https://github.com/elastic/elasticsearch/issues/102120 +expandAfterDuplicateAggs#[skip:-8.11.99] +row a = 1 | stats a = count(*), b = count(*) | mv_expand b; + +a:long | b:long +1 | 1 +; + +expandAfterDuplicateAggs2#[skip:-8.11.99] +row a = 1 | stats a = count(*), b = count(*) | mv_expand a; + +a:long | b:long +1 | 1 +; + + +expandAfterDuplicateAggsAndEval#[skip:-8.11.99] +row a = 1 | stats a = count(*), b = count(*) | eval c = 2 | mv_expand b; + +a:long | b:long | c:integer +1 | 1 | 2 +; + +expandAfterDuplicateAggsComplex#[skip:-8.11.99] +row x = [1, 2, 3] +| mv_expand x +| stats a = count(*), b = count(*), c = count(*) +| eval x = a + c + b +| mv_expand a +| rename a AS a | drop a; + +b:long | c:long | x:long +3 | 3 | 9 +; + + +expandAfterDuplicateAggsMultirow#[skip:-8.11.99] +from employees +| stats a = count(gender), b = count(*), c = count(*) by gender +| eval str = concat(to_string(b), ",", gender) +| mv_expand b +| eval x = split(str,",") +| mv_expand x +| sort x; + +a:long | b:long | c:long | gender:keyword | str:keyword | x:keyword +33 |33 |33 |F |"33,F" |33 +57 |57 |57 |M |"57,M" |57 +33 |33 |33 |F |"33,F" |F +57 |57 |57 |M |"57,M" |M +0 |10 |10 |null |null |null +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec index 0b45f9ac5aea4..8fe3f7c9eccf3 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec @@ -5,7 +5,7 @@ 
v:long 1 ; -showFunctions +showFunctions#[skip:-8.11.99] show functions; name:keyword | synopsis:keyword | argNames:keyword | argTypes:keyword | argDescriptions:keyword |returnType:keyword | description:keyword | optionalArgs:boolean | variadic:boolean @@ -40,7 +40,7 @@ least |"? least(first:integer|long|double|boolean|keyword|tex left |"? left(string:keyword, length:integer)" |[string, length] |["keyword", "integer"] |["", ""] |? | "" | [false, false] | false length |? length(arg1:?) |arg1 |? | "" |? | "" | false | false log10 |"? log10(n:integer|long|double|unsigned_long)" |n |"integer|long|double|unsigned_long" | "" |? | "" | false | false -ltrim |? ltrim(arg1:?) |arg1 |? | "" |? | "" | false | false +ltrim |"keyword|text ltrim(str:keyword|text)" |str |"keyword|text" | "" |"keyword|text" |Removes leading whitespaces from a string.| false | false max |? max(arg1:?) |arg1 |? | "" |? | "" | false | false median |? median(arg1:?) |arg1 |? | "" |? | "" | false | false median_absolute_deviation|? median_absolute_deviation(arg1:?) |arg1 |? | "" |? | "" | false | false @@ -56,11 +56,11 @@ mv_sum |? mv_sum(arg1:?) now |? now() | null |null | null |? | "" | null | false percentile |? percentile(arg1:?, arg2:?) |[arg1, arg2] |[?, ?] |["", ""] |? | "" | [false, false] | false pi |? pi() | null | null | null |? | "" | null | false -pow |"? pow(base:integer|long|double, exponent:integer|double)" |[base, exponent] |["integer|long|double", "integer|double"] |["", ""] |? | "" | [false, false] | false +pow |"? pow(base:integer|unsigned_long|long|double, exponent:integer|unsigned_long|long|double)" |[base, exponent] |["integer|unsigned_long|long|double", "integer|unsigned_long|long|double"] |["", ""] |? | "" | [false, false] | false replace |"? replace(arg1:?, arg2:?, arg3:?)" | [arg1, arg2, arg3] | [?, ?, ?] |["", "", ""] |? | "" | [false, false, false]| false right |"? right(string:keyword, length:integer)" |[string, length] |["keyword", "integer"] |["", ""] |? | "" | [false, false] | false round |? round(arg1:?, arg2:?) |[arg1, arg2] |[?, ?] |["", ""] |? | "" | [false, false] | false -rtrim |? rtrim(arg1:?) |arg1 |? | "" |? | "" | false | false +rtrim |"keyword|text rtrim(str:keyword|text)" |str |"keyword|text" | "" |"keyword|text" |Removes trailing whitespaces from a string.| false | false sin |"double sin(n:integer|long|double|unsigned_long)" |n |"integer|long|double|unsigned_long" |An angle, in radians |double |Returns the trigonometric sine of an angle | false | false sinh |"double sinh(n:integer|long|double|unsigned_long)"|n |"integer|long|double|unsigned_long" | "" |double | "" | false | false split |? split(arg1:?, arg2:?) |[arg1, arg2] |[?, ?] |["", ""] |? | "" | [false, false] | false @@ -88,13 +88,13 @@ to_string |"? to_string(v:unsigned_long|date|boolean|double|ip|te to_ul |? to_ul(arg1:?) |arg1 |? | "" |? | "" | false | false to_ulong |? to_ulong(arg1:?) |arg1 |? | "" |? | "" | false | false to_unsigned_long |? to_unsigned_long(arg1:?) |arg1 |? | "" |? | "" | false | false -to_ver |"? to_ver(v:keyword|text|version)" |v |"keyword|text|version"| "" |? | "" | false | false -to_version |"? to_version(v:keyword|text|version)" |v |"keyword|text|version"| "" |? | "" | false | false -trim |? trim(arg1:?) |arg1 |? | "" |? | "" | false | false +to_ver |"? to_ver(v:keyword|text|version)" |v |"keyword|text|version"| "" |? | "" | false | false +to_version |"? to_version(v:keyword|text|version)" |v |"keyword|text|version"| "" |? 
| "" | false | false +trim |"keyword|text trim(str:keyword|text)" |str |"keyword|text" | "" |"keyword|text" |Removes leading and trailing whitespaces from a string.| false | false ; -showFunctionsSynopsis +showFunctionsSynopsis#[skip:-8.11.99] show functions | keep synopsis; synopsis:keyword @@ -129,7 +129,7 @@ synopsis:keyword "? left(string:keyword, length:integer)" ? length(arg1:?) "? log10(n:integer|long|double|unsigned_long)" -? ltrim(arg1:?) +"keyword|text ltrim(str:keyword|text)" ? max(arg1:?) ? median(arg1:?) ? median_absolute_deviation(arg1:?) @@ -145,11 +145,11 @@ synopsis:keyword ? now() ? percentile(arg1:?, arg2:?) ? pi() -"? pow(base:integer|long|double, exponent:integer|double)" +"? pow(base:integer|unsigned_long|long|double, exponent:integer|unsigned_long|long|double)" "? replace(arg1:?, arg2:?, arg3:?)" "? right(string:keyword, length:integer)" ? round(arg1:?, arg2:?) -? rtrim(arg1:?) +"keyword|text rtrim(str:keyword|text)" "double sin(n:integer|long|double|unsigned_long)" "double sinh(n:integer|long|double|unsigned_long)" ? split(arg1:?, arg2:?) @@ -179,7 +179,7 @@ synopsis:keyword ? to_unsigned_long(arg1:?) "? to_ver(v:keyword|text|version)" "? to_version(v:keyword|text|version)" -? trim(arg1:?) +"keyword|text trim(str:keyword|text)" ; @@ -197,3 +197,12 @@ is_infinite |? is_infinite(arg1:?) is_nan |? is_nan(arg1:?) |arg1 |? | "" |? | "" | false | false // end::showFunctionsFiltered-result[] ; + + +// see https://github.com/elastic/elasticsearch/issues/102120 +countFunctions#[skip:-8.11.99] +show functions | stats a = count(*), b = count(*), c = count(*) | mv_expand c; + +a:long | b:long | c:long +82 | 82 | 82 +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index acf42d908ed66..6050dba6acf3b 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -532,10 +532,14 @@ c:l 10 ; +countStar +from employees | stats count=count(*) | sort count desc | limit 0; +count:l +; countAllGrouped -from employees | stats c = count(*) by languages | rename languages as l | sort l DESC ; +from employees | stats c = count(*) by languages | rename languages as l | sort l DESC; c:l | l:i 10 |null @@ -682,14 +686,14 @@ c:l | job_positions:s 4 |Tech Lead ; -duplicateAggregationsWithoutGrouping +duplicateAggregationsWithoutGrouping#[skip:-8.11.99] from employees | eval x = salary | stats c = count(), m = min(x), m1 = min(salary), c1 = count(1); c:l | m:i | m1:i | c1:l 100 | 25324 | 25324 | 100 ; -duplicateAggregationsWithGrouping +duplicateAggregationsWithGrouping#[skip:-8.11.99] from employees | eval x = salary | stats c = count(), m = min(x), m1 = min(salary), c1 = count(1) by gender | sort gender; c:l| m:i | m1:i | c1:l| gender:s @@ -697,3 +701,52 @@ c:l| m:i | m1:i | c1:l| gender:s 57 | 25945 | 25945 | 57 | M 10 | 25324 | 25324 | 10 | null ; + + +twoCountStarInStats#[skip:-8.11.99] +row x = 1 | stats a = count(*), b = count(*) | stats c = count(*); + +c:long +1 +; + + +twoCountStarInStatsOnRealData-Ignore +from employees | stats a = count(*), b = count(*) | stats c = count(*); + +c:long +1 +; + + +twoStatsSameExp#[skip:-8.11.99] +row x = 1 | stats a = max(x), b = max(x) | stats c = max(a); + +c:integer +1 +; + + +twoCountStarByXInStats#[skip:-8.11.99] +row x = 1, y = 2, z = 3 | stats a = count(*), b = count(*) by x | stats c = count(*); + +c:long +1 +; + + 
+twoCountStarPlusStatsBy#[skip:-8.11.99] +row x = 1, y = 2, z = 3 | stats a = count(*), b = count(*) | stats c = count(*) by a; + +c:long | a:long +1 | 1 +; + + +twoCountStarByPlusStatsBy#[skip:-8.11.99] +row x = 1, y = 2, z = 3 | stats a = count(*), b = count(*) by x | stats c = count(*) by a; + +c:long | a:long +1 | 1 +; + diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java index 5134e05b4cc3d..768353a1c8d35 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java @@ -12,11 +12,13 @@ import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.compute.operator.exchange.ExchangeService; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -29,6 +31,7 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; @TestLogging(value = "org.elasticsearch.xpack.esql.session:DEBUG", reason = "to better understand planning") @@ -76,6 +79,24 @@ protected Collection> nodePlugins() { return CollectionUtils.appendToCopy(super.nodePlugins(), EsqlPlugin.class); } + protected void setRequestCircuitBreakerLimit(ByteSizeValue limit) { + if (limit != null) { + assertAcked( + clusterAdmin().prepareUpdateSettings() + .setPersistentSettings( + Settings.builder().put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), limit).build() + ) + ); + } else { + assertAcked( + clusterAdmin().prepareUpdateSettings() + .setPersistentSettings( + Settings.builder().putNull(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()).build() + ) + ); + } + } + protected EsqlQueryResponse run(String esqlCommands) { return run(esqlCommands, randomPragmas()); } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java new file mode 100644 index 0000000000000..a24b643a299c2 --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java @@ -0,0 +1,117 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
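NOTE: The new `CrossClustersQueryIT` whose body follows asserts two behaviors: any `FROM` pattern that addresses a remote cluster fails with `ES|QL does not yet support querying remote indices [...]`, unless the query is `LIMIT 0`, which only resolves columns and returns no rows. A simplified sketch of recognizing such patterns; the real check lives in the planner, and the method name here is made up:

    import java.util.Arrays;

    // A remote pattern has the shape "<clusterAlias>:<indexPattern>", e.g. "*:events*".
    static boolean referencesRemoteIndices(String fromPatterns) {
        return Arrays.stream(fromPatterns.split(","))
            .map(String::trim)
            .anyMatch(p -> p.contains(":"));
    }

    // referencesRemoteIndices("events*")           -> false: runs locally
    // referencesRemoteIndices("events, *:events*") -> true:  rejected when LIMIT > 0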
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.compute.operator.exchange.ExchangeService; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.AbstractMultiClustersTestCase; +import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class CrossClustersQueryIT extends AbstractMultiClustersTestCase { + private static final String REMOTE_CLUSTER = "cluster-a"; + + @Override + protected Collection remoteClusterAlias() { + return List.of(REMOTE_CLUSTER); + } + + @Override + protected Collection> nodePlugins(String clusterAlias) { + List> plugins = new ArrayList<>(); + plugins.addAll(super.nodePlugins(clusterAlias)); + plugins.add(EsqlPlugin.class); + plugins.add(InternalExchangePlugin.class); + return CollectionUtils.appendToCopy(super.nodePlugins(clusterAlias), EsqlPlugin.class); + } + + public static class InternalExchangePlugin extends Plugin { + @Override + public List> getSettings() { + return List.of( + Setting.timeSetting( + ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, + TimeValue.timeValueSeconds(30), + Setting.Property.NodeScope + ) + ); + } + } + + public void testUnsupported() { + int numDocs = between(1, 10); + for (String cluster : List.of(LOCAL_CLUSTER, REMOTE_CLUSTER)) { + Client client = client(cluster); + assertAcked( + client.admin() + .indices() + .prepareCreate("events") + .setSettings(Settings.builder().put("index.number_of_shards", randomIntBetween(1, 5))) + .setMapping("tag", "type=keyword", "v", "type=long") + ); + for (int i = 0; i < numDocs; i++) { + client.prepareIndex("events").setSource("tag", cluster, "v", i).get(); + } + client.admin().indices().prepareRefresh("events").get(); + } + var emptyQueries = List.of( + "from *:* | LIMIT 0", + "from *,*:* | LIMIT 0", + "from *:events* | LIMIT 0", + "from events,*:events* | LIMIT 0" + ); + for (String q : emptyQueries) { + try (EsqlQueryResponse resp = runQuery(q)) { + assertThat(resp.columns(), hasSize(2)); + assertFalse(resp.values().hasNext()); + } + } + var remotePatterns = List.of("*:*", "*, *:*", "*:events*", "events, *:events*"); + for (String pattern : remotePatterns) { + var query = "FROM " + pattern + " | LIMIT " + between(1, 100); + IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () -> runQuery(query).close()); + assertThat(error.getMessage(), equalTo("ES|QL does not yet support querying remote indices [" + pattern + "]")); + } + int limit = between(1, numDocs); + var localQueries = List.of("from events* | LIMIT " + limit, "from * | LIMIT " + limit); + for (String q : localQueries) { + try (EsqlQueryResponse resp = runQuery(q)) { + assertThat(resp.columns(), hasSize(2)); + int rows = 0; + Iterator> values = resp.values(); + while (values.hasNext()) { + values.next(); + ++rows; + } + assertThat(rows, equalTo(limit)); + } + } + } + + protected EsqlQueryResponse runQuery(String query) { + logger.info("--> query [{}]", query); + EsqlQueryRequest request 
= new EsqlQueryRequest(); + request.query(query); + request.pragmas(AbstractEsqlIntegTestCase.randomPragmas()); + return client(LOCAL_CLUSTER).execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS); + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java new file mode 100644 index 0000000000000..46aaa6fab16a5 --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java @@ -0,0 +1,349 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.operator.exchange.ExchangeService; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; +import org.elasticsearch.ingest.common.IngestCommonPlugin; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.protocol.xpack.XPackInfoRequest; +import org.elasticsearch.protocol.xpack.XPackInfoResponse; +import org.elasticsearch.reindex.ReindexPlugin; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.action.TransportXPackInfoAction; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; +import org.elasticsearch.xpack.core.enrich.EnrichPolicy; +import org.elasticsearch.xpack.core.enrich.action.DeleteEnrichPolicyAction; +import org.elasticsearch.xpack.core.enrich.action.ExecuteEnrichPolicyAction; +import org.elasticsearch.xpack.core.enrich.action.PutEnrichPolicyAction; +import org.elasticsearch.xpack.enrich.EnrichPlugin; +import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; +import org.junit.After; +import org.junit.Before; + +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.transport.AbstractSimpleTransportTestCase.IGNORE_DESERIALIZATION_ERRORS_SETTING; +import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class EnrichIT extends AbstractEsqlIntegTestCase { + + @Override + protected 
Collection> nodePlugins() { + List> plugins = new ArrayList<>(super.nodePlugins()); + plugins.add(EsqlPlugin.class); + plugins.add(InternalExchangePlugin.class); + plugins.add(LocalStateEnrich.class); + plugins.add(IngestCommonPlugin.class); + plugins.add(ReindexPlugin.class); + plugins.add(InternalTransportSettingPlugin.class); + return plugins; + } + + public static class InternalTransportSettingPlugin extends Plugin { + @Override + public List> getSettings() { + return List.of(IGNORE_DESERIALIZATION_ERRORS_SETTING); + } + } + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal, otherSettings)) + .put(XPackSettings.SECURITY_ENABLED.getKey(), false) + .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "128mb") + /* + * Force standard settings for the request breaker or we may not break at all. + * Without this we can randomly decide to use the `noop` breaker for request + * and it won't break..... + */ + .put( + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.getDefault(Settings.EMPTY) + ) + .put( + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.getDefault(Settings.EMPTY) + ) + .put(ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMillis(between(500, 2000))) + // allow reading pages from network can trip the circuit breaker + .put(IGNORE_DESERIALIZATION_ERRORS_SETTING.getKey(), true) + .build(); + } + + @Override + protected EsqlQueryResponse run(EsqlQueryRequest request) { + final Client client; + if (randomBoolean()) { + client = client(randomFrom(clusterService().state().nodes().getCoordinatingOnlyNodes().values()).getName()); + } else { + client = client(); + } + if (randomBoolean()) { + setRequestCircuitBreakerLimit(ByteSizeValue.ofBytes(between(256, 4096))); + try { + return client.execute(EsqlQueryAction.INSTANCE, request).actionGet(2, TimeUnit.MINUTES); + } catch (Exception e) { + logger.info("request failed", e); + ensureBlocksReleased(); + } finally { + setRequestCircuitBreakerLimit(null); + } + } + return client.execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS); + } + + @Before + public void setupEnrichPolicies() { + client().admin() + .indices() + .prepareCreate("songs") + .setMapping("song_id", "type=keyword", "title", "type=keyword", "artist", "type=keyword", "length", "type=double") + .get(); + record Song(String id, String title, String artist, double length) { + + } + var songs = List.of( + new Song("s1", "Hotel California", "Eagles", 7.12), + new Song("s2", "In The End", "Linkin Park", 3.36), + new Song("s3", "Numb", "Linkin Park", 3.05), + new Song("s4", "The Sound Of Silence", "Disturbed", 4.08) + ); + for (var s : songs) { + client().prepareIndex("songs").setSource("song_id", s.id, "title", s.title, "artist", s.artist, "length", s.length).get(); + } + client().admin().indices().prepareRefresh("songs").get(); + EnrichPolicy policy = new EnrichPolicy("match", null, List.of("songs"), "song_id", List.of("title", "artist", "length")); + client().execute(PutEnrichPolicyAction.INSTANCE, new PutEnrichPolicyAction.Request("songs", policy)).actionGet(); + client().execute(ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request("songs")).actionGet(); + 
assertAcked(client().admin().indices().prepareDelete("songs")); + } + + @After + public void cleanEnrichPolicies() { + cluster().wipe(Set.of()); + client().execute(DeleteEnrichPolicyAction.INSTANCE, new DeleteEnrichPolicyAction.Request("songs")); + } + + @Before + public void setupMainIndex() { + var localListens = List.of( + new Listen(1, "s3", 1.5), + new Listen(2, "s2", 2.0), + new Listen(3, "s1", 0.5), + new Listen(4, "s3", 1.0), + new Listen(5, "s1", 2.5), + new Listen(6, "s1", 0.25), + new Listen(7, "s2", 3.0) + ); + client().admin() + .indices() + .prepareCreate("listens") + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)) + .setMapping("timestamp", "type=long", "song_id", "type=keyword", "duration", "type=double") + .get(); + for (Listen listen : localListens) { + client().prepareIndex("listens") + .setSource("timestamp", listen.timestamp, "song_id", listen.songId, "duration", listen.duration) + .get(); + } + client().admin().indices().prepareRefresh("listens").get(); + } + + @Before + public void ensureAtLeastOneCoordinatingNodeOnly() { + if (clusterService().state().nodes().getCoordinatingOnlyNodes().isEmpty()) { + internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); + } + } + + record Listen(long timestamp, String songId, double duration) { + + } + + private static String enrichSongCommand() { + String command = " ENRICH songs "; + if (randomBoolean()) { + command += " ON song_id "; + } + if (randomBoolean()) { + command += " WITH artist, title, length "; + } + return command; + } + + public void testSumDurationByArtist() { + Function> extractStats = resp -> { + List columns = resp.columns(); + assertThat(columns, hasSize(2)); + assertThat(columns.get(0).name(), equalTo("sum(duration)")); + assertThat(columns.get(0).type(), equalTo("double")); + assertThat(columns.get(1).name(), equalTo("artist")); + assertThat(columns.get(1).type(), equalTo("keyword")); + Iterator> rows = resp.values(); + Map actualValues = new HashMap<>(); + while (rows.hasNext()) { + Iterator row = rows.next(); + Object v = row.next(); + Object k = row.next(); + actualValues.put((String) k, (Double) v); + } + return actualValues; + }; + + var statsCommands = List.of( + enrichSongCommand() + " | STATS sum(duration) by artist", + "STATS duration = sum(duration) by song_id | " + enrichSongCommand() + " | STATS sum(duration) by artist" + ); + for (String statsCommand : statsCommands) { + try (var resp = run("from listens* | " + statsCommand)) { + assertThat(extractStats.apply(resp), equalTo(Map.of("Eagles", 3.25, "Linkin Park", 7.5))); + } + } + } + + public void testAvgDurationByArtist() { + Function> extractStats = resp -> { + List columns = resp.columns(); + assertThat(columns, hasSize(2)); + assertThat(columns.get(0).name(), equalTo("avg(duration)")); + assertThat(columns.get(0).type(), equalTo("double")); + assertThat(columns.get(1).name(), equalTo("artist")); + assertThat(columns.get(1).type(), equalTo("keyword")); + Iterator> rows = resp.values(); + Map actualValues = new HashMap<>(); + while (rows.hasNext()) { + Iterator row = rows.next(); + Object v = row.next(); + Object k = row.next(); + actualValues.put((String) k, (Double) v); + } + return actualValues; + }; + try (var resp = run("from listens* | " + enrichSongCommand() + " | STATS avg(duration) by artist")) { + Map stats = extractStats.apply(resp); + assertThat(stats.keySet(), containsInAnyOrder("Eagles", "Linkin Park")); + assertThat(stats.get("Eagles"), closeTo(1.08333, 0.1)); + assertThat(stats.get("Linkin 
Park"), closeTo(1.875, 0.1)); + } + } + + public void testListeningRatio() { + Function> extractStats = resp -> { + List columns = resp.columns(); + assertThat(columns, hasSize(2)); + assertThat(columns.get(0).name(), equalTo("ratio")); + assertThat(columns.get(0).type(), equalTo("double")); + assertThat(columns.get(1).name(), equalTo("artist")); + assertThat(columns.get(1).type(), equalTo("keyword")); + Iterator> rows = resp.values(); + Map actualValues = new HashMap<>(); + while (rows.hasNext()) { + Iterator row = rows.next(); + Object v = row.next(); + Object k = row.next(); + actualValues.put((String) k, (Double) v); + } + return actualValues; + }; + + var statsCommand = "STATS d = sum(duration), l = sum(length) by artist | EVAL ratio=d /l | KEEP ratio, artist"; + try (var resp = run("from listens* | " + enrichSongCommand() + "|" + statsCommand)) { + Map stats = extractStats.apply(resp); + assertThat(stats.keySet(), containsInAnyOrder("Eagles", "Linkin Park")); + assertThat(stats.get("Eagles"), closeTo(0.1521, 0.05)); + assertThat(stats.get("Linkin Park"), closeTo(0.585, 0.05)); + } + } + + public void testFilterAfterEnrich() { + try (var resp = run("from listens* | " + enrichSongCommand() + " | WHERE length < 3.2 | limit 10 | KEEP artist,title")) { + Iterator row = resp.values().next(); + assertThat(row.next(), equalTo("Linkin Park")); + assertThat(row.next(), equalTo("Numb")); + } + } + + public void testTopN() { + try (var resp = run("from listens* | sort timestamp DESC | limit 1 |" + enrichSongCommand() + " | KEEP timestamp, artist")) { + Iterator row = resp.values().next(); + assertThat(row.next(), equalTo(7L)); + assertThat(row.next(), equalTo("Linkin Park")); + } + try (var resp = run("from listens* | " + enrichSongCommand() + " | sort timestamp DESC | limit 1 | KEEP timestamp, artist")) { + Iterator row = resp.values().next(); + assertThat(row.next(), equalTo(7L)); + assertThat(row.next(), equalTo("Linkin Park")); + } + } + + public static class LocalStateEnrich extends LocalStateCompositeXPackPlugin { + + public LocalStateEnrich(final Settings settings, final Path configPath) throws Exception { + super(settings, configPath); + + plugins.add(new EnrichPlugin(settings) { + @Override + protected XPackLicenseState getLicenseState() { + return this.getLicenseState(); + } + }); + } + + public static class EnrichTransportXPackInfoAction extends TransportXPackInfoAction { + @Inject + public EnrichTransportXPackInfoAction( + TransportService transportService, + ActionFilters actionFilters, + LicenseService licenseService, + NodeClient client + ) { + super(transportService, actionFilters, licenseService, client); + } + + @Override + protected List infoActions() { + return Collections.singletonList(XPackInfoFeatureAction.ENRICH); + } + } + + @Override + protected Class> getInfoAction() { + return EnrichTransportXPackInfoAction.class; + } + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionBreakerIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionBreakerIT.java index 342df5209ec95..3e8ac6fc3d5fb 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionBreakerIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionBreakerIT.java @@ -26,7 +26,6 @@ import java.util.List; import java.util.concurrent.TimeUnit; -import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.transport.AbstractSimpleTransportTestCase.IGNORE_DESERIALIZATION_ERRORS_SETTING; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -74,24 +73,6 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { .build(); } - private void setRequestCircuitBreakerLimit(ByteSizeValue limit) { - if (limit != null) { - assertAcked( - clusterAdmin().prepareUpdateSettings() - .setPersistentSettings( - Settings.builder().put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), limit).build() - ) - ); - } else { - assertAcked( - clusterAdmin().prepareUpdateSettings() - .setPersistentSettings( - Settings.builder().putNull(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()).build() - ) - ); - } - } - @Override protected EsqlQueryResponse run(EsqlQueryRequest request) { setRequestCircuitBreakerLimit(ByteSizeValue.ofBytes(between(256, 2048))); @@ -119,8 +100,7 @@ public void testBreaker() { .get(); int numDocs = between(1000, 5000); for (int i = 0; i < numDocs; i++) { - DocWriteResponse response = client().prepareIndex("test_breaker") - .setId(Integer.toString(i)) + DocWriteResponse response = prepareIndex("test_breaker").setId(Integer.toString(i)) .setSource("foo", "foo-" + i, "bar", "bar-" + (i * 2)) .get(); assertThat(Strings.toString(response), response.getResult(), equalTo(DocWriteResponse.Result.CREATED)); diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java index 06fd9bd469b84..2211512144f99 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.internal.ClusterAdminClient; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; @@ -781,7 +782,7 @@ public void testESFilter() throws Exception { String id = "id-" + i; long value = randomLongBetween(-100_000, 100_000); docs.put(id, value); - indexRequests.add(client().prepareIndex().setIndex(indexName).setId(id).setSource(Map.of("val", value))); + indexRequests.add(prepareIndex(indexName).setId(id).setSource(Map.of("val", value))); } indexRandom(true, randomBoolean(), indexRequests); String command = "from test_filter | stats avg = avg(val)"; @@ -822,9 +823,7 @@ record Doc(long val, String tag) { for (int i = 0; i < numDocs; i++) { Doc d = new Doc(i, "tag-" + randomIntBetween(1, 100)); allDocs.add(d); - indexRequests.add( - client().prepareIndex().setIndex(indexName).setId(Integer.toString(i)).setSource(Map.of("val", d.val, "tag", d.tag)) - ); + indexRequests.add(prepareIndex(indexName).setId(Integer.toString(i)).setSource(Map.of("val", d.val, "tag", d.tag))); } indexRandom(true, randomBoolean(), indexRequests); int limit = randomIntBetween(1, 10); @@ -1174,7 +1173,7 @@ public void testGroupingMultiValueByOrdinals() { if (values.isEmpty() == false) { source.put("v", values); } - 
client().prepareIndex(indexName).setSource(source).get(); + prepareIndex(indexName).setSource(source).get(); if (randomInt(100) < 20) { client().admin().indices().prepareRefresh(indexName).get(); } @@ -1212,8 +1211,8 @@ public void testUnsupportedTypesOrdinalGrouping() { long v = randomIntBetween(1, 10); groups.merge(k, v, Long::sum); groups.merge(null, v, Long::sum); // null group - client().prepareIndex("index-1").setSource("f1", k, "v", v).get(); - client().prepareIndex("index-2").setSource("f2", k, "v", v).get(); + prepareIndex("index-1").setSource("f1", k, "v", v).get(); + prepareIndex("index-2").setSource("f2", k, "v", v).get(); } client().admin().indices().prepareRefresh("index-1", "index-2").get(); for (String field : List.of("f1", "f2")) { @@ -1242,8 +1241,15 @@ public void testFilterNestedFields() { } public void testStatsNestFields() { - String node1 = internalCluster().startDataOnlyNode(); - String node2 = internalCluster().startDataOnlyNode(); + final String node1, node2; + if (randomBoolean()) { + internalCluster().ensureAtLeastNumDataNodes(2); + node1 = randomDataNode().getName(); + node2 = randomValueOtherThan(node1, () -> randomDataNode().getName()); + } else { + node1 = randomDataNode().getName(); + node2 = randomDataNode().getName(); + } assertAcked( client().admin() .indices() @@ -1276,8 +1282,15 @@ public void testStatsNestFields() { } public void testStatsMissingFields() { - String node1 = internalCluster().startDataOnlyNode(); - String node2 = internalCluster().startDataOnlyNode(); + final String node1, node2; + if (randomBoolean()) { + internalCluster().ensureAtLeastNumDataNodes(2); + node1 = randomDataNode().getName(); + node2 = randomValueOtherThan(node1, () -> randomDataNode().getName()); + } else { + node1 = randomDataNode().getName(); + node2 = randomDataNode().getName(); + } assertAcked( client().admin() .indices() @@ -1292,7 +1305,6 @@ public void testStatsMissingFields() { .setSettings(Settings.builder().put("index.routing.allocation.require._name", node2)) .setMapping("bar_int", "type=integer", "bar_long", "type=long", "bar_float", "type=float", "bar_double", "type=double") ); - var fields = List.of("foo_int", "foo_long", "foo_float", "foo_double"); var functions = List.of("sum", "count", "avg", "count_distinct"); for (String field : fields) { @@ -1510,4 +1522,8 @@ private void clearPersistentSettings(Setting... 
settings) { var clearSettingsRequest = new ClusterUpdateSettingsRequest().persistentSettings(clearedSettings.build()); admin().cluster().updateSettings(clearSettingsRequest).actionGet(); } + + private DiscoveryNode randomDataNode() { + return randomFrom(clusterService().state().nodes().getDataNodes().values()); + } } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionRuntimeFieldIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionRuntimeFieldIT.java index be661b51d41d5..a362609876ea0 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionRuntimeFieldIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionRuntimeFieldIT.java @@ -113,7 +113,7 @@ private void createIndexWithConstRuntimeField(String type) throws InterruptedExc BulkRequestBuilder bulk = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); for (int i = 0; i < SIZE; i++) { - bulk.add(client().prepareIndex("test").setId(Integer.toString(i)).setSource("foo", i)); + bulk.add(prepareIndex("test").setId(Integer.toString(i)).setSource("foo", i)); } bulk.get(); } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java index edaf9d91e9771..d85d600b4a259 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java @@ -99,7 +99,7 @@ public void setupIndex() throws IOException { \\_AggregationOperator[mode = FINAL, aggs = sum of longs] \\_ProjectOperator[projection = [0]] \\_LimitOperator[limit = 500] - \\_OutputOperator[columns = sum(pause_me)]"""; + \\_OutputOperator[columns = [sum(pause_me)]]"""; XContentBuilder mapping = JsonXContent.contentBuilder().startObject(); mapping.startObject("runtime"); @@ -121,7 +121,7 @@ public void setupIndex() throws IOException { BulkRequestBuilder bulk = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); for (int i = 0; i < NUM_DOCS; i++) { - bulk.add(client().prepareIndex("test").setId(Integer.toString(i)).setSource("foo", i)); + bulk.add(prepareIndex("test").setId(Integer.toString(i)).setSource("foo", i)); } bulk.get(); /* @@ -177,7 +177,10 @@ public void testTaskContents() throws Exception { } if (o.operator().equals("ValuesSourceReaderOperator[field = pause_me]")) { ValuesSourceReaderOperator.Status oStatus = (ValuesSourceReaderOperator.Status) o.status(); - assertMap(oStatus.readersBuilt(), matchesMap().entry("ScriptLongs", greaterThanOrEqualTo(1))); + assertMap( + oStatus.readersBuilt(), + matchesMap().entry("pause_me:column_at_a_time:ScriptLongs", greaterThanOrEqualTo(1)) + ); assertThat(oStatus.pagesProcessed(), greaterThanOrEqualTo(1)); valuesSourceReaders++; continue; diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/SyntheticSourceIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/SyntheticSourceIT.java index 2585b5325df18..b924ad492c0c6 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/SyntheticSourceIT.java +++ 
diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/SyntheticSourceIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/SyntheticSourceIT.java
index 2585b5325df18..b924ad492c0c6 100644
--- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/SyntheticSourceIT.java
+++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/SyntheticSourceIT.java
@@ -36,7 +36,7 @@ public void testMatchOnlyText() throws Exception {

         int numDocs = between(10, 1000);
         for (int i = 0; i < numDocs; i++) {
-            IndexRequestBuilder indexRequest = client().prepareIndex("test").setSource("id", "i" + i, "field", "n" + i);
+            IndexRequestBuilder indexRequest = prepareIndex("test").setSource("id", "i" + i, "field", "n" + i);
             if (randomInt(100) < 5) {
                 indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
             }
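All of the regenerated evaluator files further down apply the same mechanical change: eval(Page) now returns a Block directly rather than a Block.Ref, input blocks are released with try-with-resources, and the vector fast path is preserved. A minimal sketch of the new shape (the field name `v` and the single-argument boolean evaluator are illustrative, not taken from one specific generated file):

    @Override
    public Block eval(Page page) {
      try (BooleanBlock vBlock = (BooleanBlock) v.eval(page)) {  // input block released when done
        BooleanVector vVector = vBlock.asVector();
        if (vVector == null) {
          // Slow path: the block may hold nulls or multi-values, so evaluate per position.
          return eval(page.getPositionCount(), vBlock);
        }
        // Fast path: a dense vector needs no per-position null checks.
        return eval(page.getPositionCount(), vVector).asBlock();
      }
    }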
diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/lookup/EnrichLookupIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/lookup/EnrichLookupIT.java
deleted file mode 100644
index 56ea27e360c1d..0000000000000
--- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/lookup/EnrichLookupIT.java
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-package org.elasticsearch.xpack.esql.lookup;
-
-import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.support.PlainActionFuture;
-import org.elasticsearch.cluster.metadata.IndexMetadata;
-import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.time.DateFormatter;
-import org.elasticsearch.common.util.MockBigArrays;
-import org.elasticsearch.common.util.PageCacheRecycler;
-import org.elasticsearch.compute.data.Block;
-import org.elasticsearch.compute.data.BlockFactory;
-import org.elasticsearch.compute.data.BytesRefBlock;
-import org.elasticsearch.compute.data.ElementType;
-import org.elasticsearch.compute.data.LongBlock;
-import org.elasticsearch.compute.data.Page;
-import org.elasticsearch.compute.operator.Driver;
-import org.elasticsearch.compute.operator.DriverContext;
-import org.elasticsearch.compute.operator.DriverRunner;
-import org.elasticsearch.compute.operator.OutputOperator;
-import org.elasticsearch.compute.operator.SourceOperator;
-import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
-import org.elasticsearch.tasks.CancellableTask;
-import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
-import org.elasticsearch.transport.TransportService;
-import org.elasticsearch.xpack.esql.action.AbstractEsqlIntegTestCase;
-import org.elasticsearch.xpack.esql.action.EsqlQueryRequest;
-import org.elasticsearch.xpack.esql.enrich.EnrichLookupOperator;
-import org.elasticsearch.xpack.esql.plugin.EsqlPlugin;
-import org.elasticsearch.xpack.esql.plugin.TransportEsqlQueryAction;
-import org.elasticsearch.xpack.ql.expression.FieldAttribute;
-import org.elasticsearch.xpack.ql.expression.NamedExpression;
-import org.elasticsearch.xpack.ql.tree.Source;
-import org.elasticsearch.xpack.ql.type.DataTypes;
-import org.elasticsearch.xpack.ql.type.EsField;
-
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Executor;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.Function;
-
-import static org.hamcrest.Matchers.equalTo;
-
-public class EnrichLookupIT extends AbstractEsqlIntegTestCase {
-
-    public void testSimple() {
-        ElasticsearchAssertions.assertAcked(
-            client().admin()
-                .indices()
-                .prepareCreate("users")
-                .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1))
-                .setMapping(
-                    "uid",
-                    "type=keyword,doc_values=false",
-                    "name",
-                    "type=keyword,index=false",
-                    "city",
-                    "type=keyword,index=false",
-                    "joined",
-                    "type=date,index=false,format=yyyy-MM-dd"
-                )
-        );
-        List<Map<String, String>> users = List.of(
-            Map.of("uid", "j1", "name", "John", "city", "New York/NY", "joined", "2020-03-01"),
-            Map.of("uid", "m4", "name", "Mike", "city", "Boston/MA", "joined", "2010-06-20"),
-            Map.of("uid", "j2", "name", "Jack", "city", "Austin/TX", "joined", "1999-11-03")
-        );
-        for (Map<String, String> user : users) {
-            client().prepareIndex("users").setSource(user).get();
-            if (randomBoolean()) {
-                client().admin().indices().prepareRefresh("users").get();
-            }
-        }
-        if (randomBoolean()) {
-            client().admin().indices().prepareForceMerge("users").setMaxNumSegments(1).get();
-        }
-        client().admin().indices().prepareRefresh("users").get();
-        List<NamedExpression> enrichAttributes = List.of(
-            new FieldAttribute(Source.EMPTY, "name", new EsField("name", DataTypes.KEYWORD, Map.of(), true)),
-            new FieldAttribute(Source.EMPTY, "city", new EsField("city", DataTypes.KEYWORD, Map.of(), true)),
-            new FieldAttribute(Source.EMPTY, "joined", new EsField("joined", DataTypes.DATETIME, Map.of(), true))
-        );
-
-        DiscoveryNode clientNode = randomFrom(clusterService().state().nodes().stream().toList());
-        TransportEsqlQueryAction queryAction = internalCluster().getInstance(TransportEsqlQueryAction.class, clientNode.getName());
-        TransportService transportService = internalCluster().getInstance(TransportService.class, clientNode.getName());
-
-        EsqlQueryRequest parentRequest = new EsqlQueryRequest();
-        parentRequest.query("FROM index");
-        CancellableTask parentTask = (CancellableTask) transportService.getTaskManager().register("test", "test-action", parentRequest);
-        EnrichLookupOperator enrichOperator = new EnrichLookupOperator(
-            "test-session",
-            parentTask,
-            randomIntBetween(1, 3),
-            0,
-            queryAction.enrichLookupService(),
-            "users",
-            "match",
-            "uid",
-            enrichAttributes
-        );
-        BytesRefBlock userBlock = BytesRefBlock.newBlockBuilder(5)
-            .appendBytesRef(new BytesRef("j1"))
-            .appendNull()
-            .appendBytesRef(new BytesRef("j2"))
-            .appendBytesRef(new BytesRef("j1"))
-            .appendBytesRef(new BytesRef("m3"))
-            .build();
-        SourceOperator sourceOperator = sourceOperator(userBlock);
-
-        AtomicReference<Page> outputPage = new AtomicReference<>();
-        OutputOperator outputOperator = new OutputOperator(List.of(), Function.identity(), page -> {
-            outputPage.getAndUpdate(current -> {
-                if (current == null) {
-                    return page;
-                }
-                Block.Builder[] builders = new Block.Builder[current.getBlockCount()];
-                for (int i = 0; i < current.getBlockCount(); i++) {
-                    ElementType elementType = current.getBlock(i).elementType();
-                    if (elementType == ElementType.NULL) {
-                        elementType = page.getBlock(i).elementType();
-                    }
-                    builders[i] = elementType.newBlockBuilder(1);
-                    builders[i].copyFrom(current.getBlock(i), 0, current.getPositionCount());
-                    builders[i].copyFrom(page.getBlock(i), 0, page.getPositionCount());
-                }
-                return new Page(Arrays.stream(builders).map(Block.Builder::build).toArray(Block[]::new));
-            });
-        });
-
-        DateFormatter dateFmt = DateFormatter.forPattern("yyyy-MM-dd");
-
-        var runner = new DriverRunner(transportService.getThreadPool().getThreadContext()) {
-            final Executor executor = transportService.getThreadPool().executor(EsqlPlugin.ESQL_THREAD_POOL_NAME);
-
-            @Override
-            protected void start(Driver driver, ActionListener<Void> listener) {
-                Driver.start(transportService.getThreadPool().getThreadContext(), executor, driver, between(1, 1000), listener);
-            }
-        };
-        Driver driver = new Driver(driverContext(), sourceOperator, List.of(enrichOperator), outputOperator, () -> {});
-        PlainActionFuture<Void> future = new PlainActionFuture<>();
-        runner.runToCompletion(List.of(driver), future);
-        future.actionGet(TimeValue.timeValueSeconds(30));
-        transportService.getTaskManager().unregister(parentTask);
-        Page output = outputPage.get();
-        assertThat(output.getBlockCount(), equalTo(4));
-        assertThat(output.getPositionCount(), equalTo(5));
-        BytesRef scratch = new BytesRef();
-        BytesRefBlock names = output.getBlock(1);
-        BytesRefBlock cities = output.getBlock(2);
-        LongBlock dates = output.getBlock(3);
-
-        assertThat(names.getBytesRef(0, scratch), equalTo(new BytesRef("John")));
-        assertThat(cities.getBytesRef(0, scratch), equalTo(new BytesRef("New York/NY")));
-        assertThat(dateFmt.formatMillis(dates.getLong(0)), equalTo("2020-03-01"));
-
-        assertTrue(names.isNull(1));
-        assertTrue(cities.isNull(1));
-        assertTrue(dates.isNull(1));
-
-        assertThat(names.getBytesRef(2, scratch), equalTo(new BytesRef("Jack")));
-        assertThat(cities.getBytesRef(2, scratch), equalTo(new BytesRef("Austin/TX")));
-        assertThat(dateFmt.formatMillis(dates.getLong(2)), equalTo("1999-11-03"));
-
-        assertThat(names.getBytesRef(3, scratch), equalTo(new BytesRef("John")));
-        assertThat(cities.getBytesRef(3, scratch), equalTo(new BytesRef("New York/NY")));
-        assertThat(dateFmt.formatMillis(dates.getLong(3)), equalTo("2020-03-01"));
-
-        assertTrue(names.isNull(4));
-        assertTrue(cities.isNull(4));
-        assertTrue(dates.isNull(4));
-    }
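The OutputOperator in the deleted test above concatenates successive result pages by copying each block into a fresh builder. The same idiom in isolation, sketched for a single BytesRefBlock (illustrative only; it relies on the static newBlockBuilder factory that this file's vintage of the compute API still exposes):

    BytesRefBlock a = BytesRefBlock.newBlockBuilder(2).appendBytesRef(new BytesRef("x")).appendNull().build();
    BytesRefBlock b = BytesRefBlock.newBlockBuilder(1).appendBytesRef(new BytesRef("y")).build();
    BytesRefBlock.Builder out = BytesRefBlock.newBlockBuilder(a.getPositionCount() + b.getPositionCount());
    out.copyFrom(a, 0, a.getPositionCount());  // copy every position of the first block
    out.copyFrom(b, 0, b.getPositionCount());  // then append every position of the second
    BytesRefBlock merged = out.build();        // positions: "x", null, "y"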
-
-    private static SourceOperator sourceOperator(BytesRefBlock input) {
-        return new SourceOperator() {
-            int position = 0;
-
-            @Override
-            public void finish() {
-
-            }
-
-            @Override
-            public boolean isFinished() {
-                return position >= input.getPositionCount();
-            }
-
-            @Override
-            public Page getOutput() {
-                if (isFinished()) {
-                    return null;
-                }
-                int remaining = input.getPositionCount() - position;
-                int size = between(1, remaining);
-                BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(size);
-                builder.copyFrom(input, position, position + size);
-                position += size;
-                Block block = builder.build();
-                if (block.areAllValuesNull() && randomBoolean()) {
-                    block = Block.constantNullBlock(block.getPositionCount());
-                }
-                return new Page(block);
-            }
-
-            @Override
-            public void close() {
-
-            }
-        };
-    }
-
-    public void testRandom() {
-
-    }
-
-    public void testMultipleMatches() {
-
-    }
-
-    static DriverContext driverContext() {
-        return new DriverContext(
-            new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService()).withCircuitBreaking(),
-            BlockFactory.getNonBreakingInstance()
-        );
-    }
-}
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsBoolsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsBoolsEvaluator.java
index 5b971a6dc2f11..b5b05d6d395fa 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsBoolsEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsBoolsEvaluator.java
@@ -33,20 +33,18 @@ public EqualsBoolsEvaluator(EvalOperator.ExpressionEvaluator lhs,
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref lhsRef = lhs.eval(page)) {
-      BooleanBlock lhsBlock = (BooleanBlock) lhsRef.block();
-      try (Block.Ref rhsRef = rhs.eval(page)) {
-        BooleanBlock rhsBlock = (BooleanBlock) rhsRef.block();
+  public Block eval(Page page) {
+    try (BooleanBlock lhsBlock = (BooleanBlock) lhs.eval(page)) {
+      try (BooleanBlock rhsBlock = (BooleanBlock) rhs.eval(page)) {
         BooleanVector lhsVector = lhsBlock.asVector();
         if (lhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
         BooleanVector rhsVector = rhsBlock.asVector();
         if (rhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock());
+        return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock();
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsDoublesEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsDoublesEvaluator.java
index d20b493ca2502..b4a0f127c8fa1 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsDoublesEvaluator.java
+++
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsIntsEvaluator.java @@ -35,20 +35,18 @@ public EqualsIntsEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - IntBlock lhsBlock = (IntBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - IntBlock rhsBlock = (IntBlock) rhsRef.block(); + public Block eval(Page page) { + try (IntBlock lhsBlock = (IntBlock) lhs.eval(page)) { + try (IntBlock rhsBlock = (IntBlock) rhs.eval(page)) { IntVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } IntVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsKeywordsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsKeywordsEvaluator.java index 43ce60ccd085c..0fe04c80a66f1 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsKeywordsEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsKeywordsEvaluator.java @@ -36,20 +36,18 @@ public EqualsKeywordsEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - BytesRefBlock lhsBlock = (BytesRefBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - BytesRefBlock rhsBlock = (BytesRefBlock) rhsRef.block(); + public Block eval(Page page) { + try (BytesRefBlock lhsBlock = (BytesRefBlock) lhs.eval(page)) { + try (BytesRefBlock rhsBlock = (BytesRefBlock) rhs.eval(page)) { BytesRefVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } BytesRefVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsLongsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsLongsEvaluator.java index 80dd80145d91b..9e656111ee074 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsLongsEvaluator.java +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsLongsEvaluator.java @@ -35,20 +35,18 @@ public EqualsLongsEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - LongBlock lhsBlock = (LongBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - LongBlock rhsBlock = (LongBlock) rhsRef.block(); + public Block eval(Page page) { + try (LongBlock lhsBlock = (LongBlock) lhs.eval(page)) { + try (LongBlock rhsBlock = (LongBlock) rhs.eval(page)) { LongVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } LongVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanDoublesEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanDoublesEvaluator.java index 940baa0c45cc8..64ab3a28df39c 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanDoublesEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanDoublesEvaluator.java @@ -35,20 +35,18 @@ public GreaterThanDoublesEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - DoubleBlock lhsBlock = (DoubleBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - DoubleBlock rhsBlock = (DoubleBlock) rhsRef.block(); + public Block eval(Page page) { + try (DoubleBlock lhsBlock = (DoubleBlock) lhs.eval(page)) { + try (DoubleBlock rhsBlock = (DoubleBlock) rhs.eval(page)) { DoubleVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } DoubleVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanIntsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanIntsEvaluator.java index e82addad8ecbf..7795e9b5f1b4a 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanIntsEvaluator.java +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanIntsEvaluator.java @@ -35,20 +35,18 @@ public GreaterThanIntsEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - IntBlock lhsBlock = (IntBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - IntBlock rhsBlock = (IntBlock) rhsRef.block(); + public Block eval(Page page) { + try (IntBlock lhsBlock = (IntBlock) lhs.eval(page)) { + try (IntBlock rhsBlock = (IntBlock) rhs.eval(page)) { IntVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } IntVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanKeywordsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanKeywordsEvaluator.java index 587e379d8f6b5..21ae9b1464d2a 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanKeywordsEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanKeywordsEvaluator.java @@ -36,20 +36,18 @@ public GreaterThanKeywordsEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - BytesRefBlock lhsBlock = (BytesRefBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - BytesRefBlock rhsBlock = (BytesRefBlock) rhsRef.block(); + public Block eval(Page page) { + try (BytesRefBlock lhsBlock = (BytesRefBlock) lhs.eval(page)) { + try (BytesRefBlock rhsBlock = (BytesRefBlock) rhs.eval(page)) { BytesRefVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } BytesRefVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanLongsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanLongsEvaluator.java index acf5e20afb8fa..b2b559c715126 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanLongsEvaluator.java +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanLongsEvaluator.java @@ -35,20 +35,18 @@ public GreaterThanLongsEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - LongBlock lhsBlock = (LongBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - LongBlock rhsBlock = (LongBlock) rhsRef.block(); + public Block eval(Page page) { + try (LongBlock lhsBlock = (LongBlock) lhs.eval(page)) { + try (LongBlock rhsBlock = (LongBlock) rhs.eval(page)) { LongVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } LongVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualDoublesEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualDoublesEvaluator.java index d7a2d3daae5eb..b73c6e359afd2 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualDoublesEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualDoublesEvaluator.java @@ -35,20 +35,18 @@ public GreaterThanOrEqualDoublesEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - DoubleBlock lhsBlock = (DoubleBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - DoubleBlock rhsBlock = (DoubleBlock) rhsRef.block(); + public Block eval(Page page) { + try (DoubleBlock lhsBlock = (DoubleBlock) lhs.eval(page)) { + try (DoubleBlock rhsBlock = (DoubleBlock) rhs.eval(page)) { DoubleVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } DoubleVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualIntsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualIntsEvaluator.java index b0c49c422fa3a..2a77ee8f068e2 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualIntsEvaluator.java +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualIntsEvaluator.java @@ -35,20 +35,18 @@ public GreaterThanOrEqualIntsEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - IntBlock lhsBlock = (IntBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - IntBlock rhsBlock = (IntBlock) rhsRef.block(); + public Block eval(Page page) { + try (IntBlock lhsBlock = (IntBlock) lhs.eval(page)) { + try (IntBlock rhsBlock = (IntBlock) rhs.eval(page)) { IntVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } IntVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualKeywordsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualKeywordsEvaluator.java index 9c28bd4b5781d..6909a3b761dd3 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualKeywordsEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualKeywordsEvaluator.java @@ -36,20 +36,18 @@ public GreaterThanOrEqualKeywordsEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - BytesRefBlock lhsBlock = (BytesRefBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - BytesRefBlock rhsBlock = (BytesRefBlock) rhsRef.block(); + public Block eval(Page page) { + try (BytesRefBlock lhsBlock = (BytesRefBlock) lhs.eval(page)) { + try (BytesRefBlock rhsBlock = (BytesRefBlock) rhs.eval(page)) { BytesRefVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } BytesRefVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualLongsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualLongsEvaluator.java index 390986a332523..71a68b0bb95e6 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualLongsEvaluator.java +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualLongsEvaluator.java @@ -35,20 +35,18 @@ public GreaterThanOrEqualLongsEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - LongBlock lhsBlock = (LongBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - LongBlock rhsBlock = (LongBlock) rhsRef.block(); + public Block eval(Page page) { + try (LongBlock lhsBlock = (LongBlock) lhs.eval(page)) { + try (LongBlock rhsBlock = (LongBlock) rhs.eval(page)) { LongVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } LongVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanDoublesEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanDoublesEvaluator.java index 09ead847ba02d..f4990fe06f6cb 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanDoublesEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanDoublesEvaluator.java @@ -35,20 +35,18 @@ public LessThanDoublesEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - DoubleBlock lhsBlock = (DoubleBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - DoubleBlock rhsBlock = (DoubleBlock) rhsRef.block(); + public Block eval(Page page) { + try (DoubleBlock lhsBlock = (DoubleBlock) lhs.eval(page)) { + try (DoubleBlock rhsBlock = (DoubleBlock) rhs.eval(page)) { DoubleVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } DoubleVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanIntsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanIntsEvaluator.java index 86c4db7a185dc..db623747a5e61 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanIntsEvaluator.java +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanIntsEvaluator.java @@ -35,20 +35,18 @@ public LessThanIntsEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - IntBlock lhsBlock = (IntBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - IntBlock rhsBlock = (IntBlock) rhsRef.block(); + public Block eval(Page page) { + try (IntBlock lhsBlock = (IntBlock) lhs.eval(page)) { + try (IntBlock rhsBlock = (IntBlock) rhs.eval(page)) { IntVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } IntVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanKeywordsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanKeywordsEvaluator.java index 9ac90d754315a..be658c3da46ec 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanKeywordsEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanKeywordsEvaluator.java @@ -36,20 +36,18 @@ public LessThanKeywordsEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - BytesRefBlock lhsBlock = (BytesRefBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - BytesRefBlock rhsBlock = (BytesRefBlock) rhsRef.block(); + public Block eval(Page page) { + try (BytesRefBlock lhsBlock = (BytesRefBlock) lhs.eval(page)) { + try (BytesRefBlock rhsBlock = (BytesRefBlock) rhs.eval(page)) { BytesRefVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } BytesRefVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanLongsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanLongsEvaluator.java index 991c61705fdec..444c715c753cd 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanLongsEvaluator.java +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanLongsEvaluator.java @@ -35,20 +35,18 @@ public LessThanLongsEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - LongBlock lhsBlock = (LongBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - LongBlock rhsBlock = (LongBlock) rhsRef.block(); + public Block eval(Page page) { + try (LongBlock lhsBlock = (LongBlock) lhs.eval(page)) { + try (LongBlock rhsBlock = (LongBlock) rhs.eval(page)) { LongVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } LongVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualDoublesEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualDoublesEvaluator.java index 9d87cd2d2a83d..bffdf4a80649c 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualDoublesEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualDoublesEvaluator.java @@ -35,20 +35,18 @@ public LessThanOrEqualDoublesEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - DoubleBlock lhsBlock = (DoubleBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - DoubleBlock rhsBlock = (DoubleBlock) rhsRef.block(); + public Block eval(Page page) { + try (DoubleBlock lhsBlock = (DoubleBlock) lhs.eval(page)) { + try (DoubleBlock rhsBlock = (DoubleBlock) rhs.eval(page)) { DoubleVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } DoubleVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualIntsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualIntsEvaluator.java index fe50109a776b4..dd47aab76f21c 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualIntsEvaluator.java +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualIntsEvaluator.java @@ -35,20 +35,18 @@ public LessThanOrEqualIntsEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - IntBlock lhsBlock = (IntBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - IntBlock rhsBlock = (IntBlock) rhsRef.block(); + public Block eval(Page page) { + try (IntBlock lhsBlock = (IntBlock) lhs.eval(page)) { + try (IntBlock rhsBlock = (IntBlock) rhs.eval(page)) { IntVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } IntVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualKeywordsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualKeywordsEvaluator.java index d3626a390ca5a..e7a37b3f0fc41 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualKeywordsEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualKeywordsEvaluator.java @@ -36,20 +36,18 @@ public LessThanOrEqualKeywordsEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - BytesRefBlock lhsBlock = (BytesRefBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - BytesRefBlock rhsBlock = (BytesRefBlock) rhsRef.block(); + public Block eval(Page page) { + try (BytesRefBlock lhsBlock = (BytesRefBlock) lhs.eval(page)) { + try (BytesRefBlock rhsBlock = (BytesRefBlock) rhs.eval(page)) { BytesRefVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } BytesRefVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualLongsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualLongsEvaluator.java index b8d215d06e5d1..fec54d164ac3b 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualLongsEvaluator.java +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualLongsEvaluator.java @@ -35,20 +35,18 @@ public LessThanOrEqualLongsEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - LongBlock lhsBlock = (LongBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - LongBlock rhsBlock = (LongBlock) rhsRef.block(); + public Block eval(Page page) { + try (LongBlock lhsBlock = (LongBlock) lhs.eval(page)) { + try (LongBlock rhsBlock = (LongBlock) rhs.eval(page)) { LongVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } LongVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsBoolsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsBoolsEvaluator.java index fd95c698600fa..a8a8882bf54a4 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsBoolsEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsBoolsEvaluator.java @@ -33,20 +33,18 @@ public NotEqualsBoolsEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - BooleanBlock lhsBlock = (BooleanBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - BooleanBlock rhsBlock = (BooleanBlock) rhsRef.block(); + public Block eval(Page page) { + try (BooleanBlock lhsBlock = (BooleanBlock) lhs.eval(page)) { + try (BooleanBlock rhsBlock = (BooleanBlock) rhs.eval(page)) { BooleanVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } BooleanVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsDoublesEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsDoublesEvaluator.java index 582cdd4690c4a..cf5d7a5717600 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsDoublesEvaluator.java +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsDoublesEvaluator.java @@ -35,20 +35,18 @@ public NotEqualsDoublesEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - DoubleBlock lhsBlock = (DoubleBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - DoubleBlock rhsBlock = (DoubleBlock) rhsRef.block(); + public Block eval(Page page) { + try (DoubleBlock lhsBlock = (DoubleBlock) lhs.eval(page)) { + try (DoubleBlock rhsBlock = (DoubleBlock) rhs.eval(page)) { DoubleVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } DoubleVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsIntsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsIntsEvaluator.java index d8d794a0d065c..128118d957222 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsIntsEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsIntsEvaluator.java @@ -35,20 +35,18 @@ public NotEqualsIntsEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - IntBlock lhsBlock = (IntBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - IntBlock rhsBlock = (IntBlock) rhsRef.block(); + public Block eval(Page page) { + try (IntBlock lhsBlock = (IntBlock) lhs.eval(page)) { + try (IntBlock rhsBlock = (IntBlock) rhs.eval(page)) { IntVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } IntVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsKeywordsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsKeywordsEvaluator.java index 31db6c75c4b55..c2d12fe5840ab 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsKeywordsEvaluator.java +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsKeywordsEvaluator.java
@@ -36,20 +36,18 @@ public NotEqualsKeywordsEvaluator(EvalOperator.ExpressionEvaluator lhs,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref lhsRef = lhs.eval(page)) {
-      BytesRefBlock lhsBlock = (BytesRefBlock) lhsRef.block();
-      try (Block.Ref rhsRef = rhs.eval(page)) {
-        BytesRefBlock rhsBlock = (BytesRefBlock) rhsRef.block();
+  public Block eval(Page page) {
+    try (BytesRefBlock lhsBlock = (BytesRefBlock) lhs.eval(page)) {
+      try (BytesRefBlock rhsBlock = (BytesRefBlock) rhs.eval(page)) {
         BytesRefVector lhsVector = lhsBlock.asVector();
         if (lhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
         BytesRefVector rhsVector = rhsBlock.asVector();
         if (rhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock());
+        return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock();
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsLongsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsLongsEvaluator.java
index 44f8de9f7bbbf..57e40c2857449 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsLongsEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsLongsEvaluator.java
@@ -35,20 +35,18 @@ public NotEqualsLongsEvaluator(EvalOperator.ExpressionEvaluator lhs,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref lhsRef = lhs.eval(page)) {
-      LongBlock lhsBlock = (LongBlock) lhsRef.block();
-      try (Block.Ref rhsRef = rhs.eval(page)) {
-        LongBlock rhsBlock = (LongBlock) rhsRef.block();
+  public Block eval(Page page) {
+    try (LongBlock lhsBlock = (LongBlock) lhs.eval(page)) {
+      try (LongBlock rhsBlock = (LongBlock) rhs.eval(page)) {
         LongVector lhsVector = lhsBlock.asVector();
         if (lhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
         LongVector rhsVector = rhsBlock.asVector();
         if (rhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock());
+        return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock();
      }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/logical/NotEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/logical/NotEvaluator.java
index f7785bdc0605e..de3f57d54d8e4 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/logical/NotEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/logical/NotEvaluator.java
@@ -29,14 +29,13 @@ public NotEvaluator(EvalOperator.ExpressionEvaluator v, DriverContext driverCont
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref vRef = v.eval(page)) {
-      BooleanBlock vBlock = (BooleanBlock) vRef.block();
+  public Block eval(Page page) {
+    try (BooleanBlock vBlock = (BooleanBlock) v.eval(page)) {
       BooleanVector vVector = vBlock.asVector();
       if (vVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), vBlock));
+        return eval(page.getPositionCount(), vBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), vVector).asBlock());
+      return eval(page.getPositionCount(), vVector).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/regex/RegexMatchEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/regex/RegexMatchEvaluator.java
index f3e09aec53a8e..83860fc328543 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/regex/RegexMatchEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/regex/RegexMatchEvaluator.java
@@ -37,14 +37,13 @@ public RegexMatchEvaluator(EvalOperator.ExpressionEvaluator input, CharacterRunA
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref inputRef = input.eval(page)) {
-      BytesRefBlock inputBlock = (BytesRefBlock) inputRef.block();
+  public Block eval(Page page) {
+    try (BytesRefBlock inputBlock = (BytesRefBlock) input.eval(page)) {
       BytesRefVector inputVector = inputBlock.asVector();
       if (inputVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), inputBlock));
+        return eval(page.getPositionCount(), inputBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), inputVector).asBlock());
+      return eval(page.getPositionCount(), inputVector).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestBooleanEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestBooleanEvaluator.java
index bfb80ec0727ce..e335a2cc50add 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestBooleanEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestBooleanEvaluator.java
@@ -32,22 +32,20 @@ public GreatestBooleanEvaluator(EvalOperator.ExpressionEvaluator[] values,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    Block.Ref[] valuesRefs = new Block.Ref[values.length];
-    try (Releasable valuesRelease = Releasables.wrap(valuesRefs)) {
-      BooleanBlock[] valuesBlocks = new BooleanBlock[values.length];
+  public Block eval(Page page) {
+    BooleanBlock[] valuesBlocks = new BooleanBlock[values.length];
+    try (Releasable valuesRelease = Releasables.wrap(valuesBlocks)) {
       for (int i = 0; i < valuesBlocks.length; i++) {
-        valuesRefs[i] = values[i].eval(page);
-        valuesBlocks[i] = (BooleanBlock) valuesRefs[i].block();
+        valuesBlocks[i] = (BooleanBlock)values[i].eval(page);
       }
       BooleanVector[] valuesVectors = new BooleanVector[values.length];
       for (int i = 0; i < valuesBlocks.length; i++) {
         valuesVectors[i] = valuesBlocks[i].asVector();
         if (valuesVectors[i] == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), valuesBlocks));
+          return eval(page.getPositionCount(), valuesBlocks);
         }
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valuesVectors).asBlock());
+      return eval(page.getPositionCount(), valuesVectors).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestBytesRefEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestBytesRefEvaluator.java
index acc9e74910878..0919b6c624572 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestBytesRefEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestBytesRefEvaluator.java
@@ -33,22 +33,20 @@ public GreatestBytesRefEvaluator(EvalOperator.ExpressionEvaluator[] values,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    Block.Ref[] valuesRefs = new Block.Ref[values.length];
-    try (Releasable valuesRelease = Releasables.wrap(valuesRefs)) {
-      BytesRefBlock[] valuesBlocks = new BytesRefBlock[values.length];
+  public Block eval(Page page) {
+    BytesRefBlock[] valuesBlocks = new BytesRefBlock[values.length];
+    try (Releasable valuesRelease = Releasables.wrap(valuesBlocks)) {
       for (int i = 0; i < valuesBlocks.length; i++) {
-        valuesRefs[i] = values[i].eval(page);
-        valuesBlocks[i] = (BytesRefBlock) valuesRefs[i].block();
+        valuesBlocks[i] = (BytesRefBlock)values[i].eval(page);
       }
       BytesRefVector[] valuesVectors = new BytesRefVector[values.length];
       for (int i = 0; i < valuesBlocks.length; i++) {
         valuesVectors[i] = valuesBlocks[i].asVector();
         if (valuesVectors[i] == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), valuesBlocks));
+          return eval(page.getPositionCount(), valuesBlocks);
         }
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valuesVectors).asBlock());
+      return eval(page.getPositionCount(), valuesVectors).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestDoubleEvaluator.java
index 7287e9a7a20ec..acabb839e0543 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestDoubleEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestDoubleEvaluator.java
@@ -32,22 +32,20 @@ public GreatestDoubleEvaluator(EvalOperator.ExpressionEvaluator[] values,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    Block.Ref[] valuesRefs = new Block.Ref[values.length];
-    try (Releasable valuesRelease = Releasables.wrap(valuesRefs)) {
-      DoubleBlock[] valuesBlocks = new DoubleBlock[values.length];
+  public Block eval(Page page) {
+    DoubleBlock[] valuesBlocks = new DoubleBlock[values.length];
+    try (Releasable valuesRelease = Releasables.wrap(valuesBlocks)) {
       for (int i = 0; i < valuesBlocks.length; i++) {
-        valuesRefs[i] = values[i].eval(page);
-        valuesBlocks[i] = (DoubleBlock) valuesRefs[i].block();
+        valuesBlocks[i] = (DoubleBlock)values[i].eval(page);
       }
       DoubleVector[] valuesVectors = new DoubleVector[values.length];
       for (int i = 0; i < valuesBlocks.length; i++) {
         valuesVectors[i] = valuesBlocks[i].asVector();
         if (valuesVectors[i] == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), valuesBlocks));
+          return eval(page.getPositionCount(), valuesBlocks);
         }
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valuesVectors).asBlock());
+      return eval(page.getPositionCount(), valuesVectors).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestIntEvaluator.java
index e2afd0e010091..e2fc35c829b5f 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestIntEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestIntEvaluator.java
@@ -32,22 +32,20 @@ public GreatestIntEvaluator(EvalOperator.ExpressionEvaluator[] values,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    Block.Ref[] valuesRefs = new Block.Ref[values.length];
-    try (Releasable valuesRelease = Releasables.wrap(valuesRefs)) {
-      IntBlock[] valuesBlocks = new IntBlock[values.length];
+  public Block eval(Page page) {
+    IntBlock[] valuesBlocks = new IntBlock[values.length];
+    try (Releasable valuesRelease = Releasables.wrap(valuesBlocks)) {
       for (int i = 0; i < valuesBlocks.length; i++) {
-        valuesRefs[i] = values[i].eval(page);
-        valuesBlocks[i] = (IntBlock) valuesRefs[i].block();
+        valuesBlocks[i] = (IntBlock)values[i].eval(page);
       }
       IntVector[] valuesVectors = new IntVector[values.length];
       for (int i = 0; i < valuesBlocks.length; i++) {
         valuesVectors[i] = valuesBlocks[i].asVector();
         if (valuesVectors[i] == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), valuesBlocks));
+          return eval(page.getPositionCount(), valuesBlocks);
         }
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valuesVectors).asBlock());
+      return eval(page.getPositionCount(), valuesVectors).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestLongEvaluator.java
index 5bb0e6185dca2..8f10c02c53c00 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestLongEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestLongEvaluator.java
@@ -32,22 +32,20 @@ public GreatestLongEvaluator(EvalOperator.ExpressionEvaluator[] values,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    Block.Ref[] valuesRefs = new Block.Ref[values.length];
-    try (Releasable valuesRelease = Releasables.wrap(valuesRefs)) {
-      LongBlock[] valuesBlocks = new LongBlock[values.length];
+  public Block eval(Page page) {
+    LongBlock[] valuesBlocks = new LongBlock[values.length];
+    try (Releasable valuesRelease = Releasables.wrap(valuesBlocks)) {
       for (int i = 0; i < valuesBlocks.length; i++) {
-        valuesRefs[i] = values[i].eval(page);
-        valuesBlocks[i] = (LongBlock) valuesRefs[i].block();
+        valuesBlocks[i] = (LongBlock)values[i].eval(page);
       }
       LongVector[] valuesVectors = new LongVector[values.length];
       for (int i = 0; i < valuesBlocks.length; i++) {
         valuesVectors[i] = valuesBlocks[i].asVector();
         if (valuesVectors[i] == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), valuesBlocks));
+          return eval(page.getPositionCount(), valuesBlocks);
         }
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valuesVectors).asBlock());
+      return eval(page.getPositionCount(), valuesVectors).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastBooleanEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastBooleanEvaluator.java
index 89f91035ae8b0..ce337ae405cba 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastBooleanEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastBooleanEvaluator.java
@@ -32,22 +32,20 @@ public LeastBooleanEvaluator(EvalOperator.ExpressionEvaluator[] values,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    Block.Ref[] valuesRefs = new Block.Ref[values.length];
-    try (Releasable valuesRelease = Releasables.wrap(valuesRefs)) {
-      BooleanBlock[] valuesBlocks = new BooleanBlock[values.length];
+  public Block eval(Page page) {
+    BooleanBlock[] valuesBlocks = new BooleanBlock[values.length];
+    try (Releasable valuesRelease = Releasables.wrap(valuesBlocks)) {
       for (int i = 0; i < valuesBlocks.length; i++) {
-        valuesRefs[i] = values[i].eval(page);
-        valuesBlocks[i] = (BooleanBlock) valuesRefs[i].block();
+        valuesBlocks[i] = (BooleanBlock)values[i].eval(page);
       }
       BooleanVector[] valuesVectors = new BooleanVector[values.length];
       for (int i = 0; i < valuesBlocks.length; i++) {
         valuesVectors[i] = valuesBlocks[i].asVector();
         if (valuesVectors[i] == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), valuesBlocks));
+          return eval(page.getPositionCount(), valuesBlocks);
         }
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valuesVectors).asBlock());
+      return eval(page.getPositionCount(), valuesVectors).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastBytesRefEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastBytesRefEvaluator.java
index 5e1d8e13926a5..621d21e13f691 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastBytesRefEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastBytesRefEvaluator.java
@@ -33,22 +33,20 @@ public LeastBytesRefEvaluator(EvalOperator.ExpressionEvaluator[] values,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    Block.Ref[] valuesRefs = new Block.Ref[values.length];
-    try (Releasable valuesRelease = Releasables.wrap(valuesRefs)) {
-      BytesRefBlock[] valuesBlocks = new BytesRefBlock[values.length];
+  public Block eval(Page page) {
+    BytesRefBlock[] valuesBlocks = new BytesRefBlock[values.length];
+    try (Releasable valuesRelease = Releasables.wrap(valuesBlocks)) {
       for (int i = 0; i < valuesBlocks.length; i++) {
-        valuesRefs[i] = values[i].eval(page);
-        valuesBlocks[i] = (BytesRefBlock) valuesRefs[i].block();
+        valuesBlocks[i] = (BytesRefBlock)values[i].eval(page);
       }
       BytesRefVector[] valuesVectors = new BytesRefVector[values.length];
       for (int i = 0; i < valuesBlocks.length; i++) {
        valuesVectors[i] = valuesBlocks[i].asVector();
         if (valuesVectors[i] == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), valuesBlocks));
+          return eval(page.getPositionCount(), valuesBlocks);
         }
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valuesVectors).asBlock());
+      return eval(page.getPositionCount(), valuesVectors).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastDoubleEvaluator.java
index 0f871cff8f80c..42255e56c6527 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastDoubleEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastDoubleEvaluator.java
@@ -32,22 +32,20 @@ public LeastDoubleEvaluator(EvalOperator.ExpressionEvaluator[] values,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    Block.Ref[] valuesRefs = new Block.Ref[values.length];
-    try (Releasable valuesRelease = Releasables.wrap(valuesRefs)) {
-      DoubleBlock[] valuesBlocks = new DoubleBlock[values.length];
+  public Block eval(Page page) {
+    DoubleBlock[] valuesBlocks = new DoubleBlock[values.length];
+    try (Releasable valuesRelease = Releasables.wrap(valuesBlocks)) {
       for (int i = 0; i < valuesBlocks.length; i++) {
-        valuesRefs[i] = values[i].eval(page);
-        valuesBlocks[i] = (DoubleBlock) valuesRefs[i].block();
+        valuesBlocks[i] = (DoubleBlock)values[i].eval(page);
       }
       DoubleVector[] valuesVectors = new DoubleVector[values.length];
       for (int i = 0; i < valuesBlocks.length; i++) {
         valuesVectors[i] = valuesBlocks[i].asVector();
         if (valuesVectors[i] == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), valuesBlocks));
+          return eval(page.getPositionCount(), valuesBlocks);
         }
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valuesVectors).asBlock());
+      return eval(page.getPositionCount(), valuesVectors).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastIntEvaluator.java
index 39a80b09d97e8..ca95f0096166e 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastIntEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastIntEvaluator.java
@@ -31,22 +31,20 @@ public LeastIntEvaluator(EvalOperator.ExpressionEvaluator[] values, DriverContex
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    Block.Ref[] valuesRefs = new Block.Ref[values.length];
-    try (Releasable valuesRelease = Releasables.wrap(valuesRefs)) {
-      IntBlock[] valuesBlocks = new IntBlock[values.length];
+  public Block eval(Page page) {
+    IntBlock[] valuesBlocks = new IntBlock[values.length];
+    try (Releasable valuesRelease = Releasables.wrap(valuesBlocks)) {
       for (int i = 0; i < valuesBlocks.length; i++) {
-        valuesRefs[i] = values[i].eval(page);
-        valuesBlocks[i] = (IntBlock) valuesRefs[i].block();
+        valuesBlocks[i] = (IntBlock)values[i].eval(page);
       }
       IntVector[] valuesVectors = new IntVector[values.length];
       for (int i = 0; i < valuesBlocks.length; i++) {
        valuesVectors[i] = valuesBlocks[i].asVector();
         if (valuesVectors[i] == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), valuesBlocks));
+          return eval(page.getPositionCount(), valuesBlocks);
         }
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valuesVectors).asBlock());
+      return eval(page.getPositionCount(), valuesVectors).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastLongEvaluator.java
index bab58f9b42c24..263972b414dd4 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastLongEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastLongEvaluator.java
@@ -32,22 +32,20 @@ public LeastLongEvaluator(EvalOperator.ExpressionEvaluator[] values,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    Block.Ref[] valuesRefs = new Block.Ref[values.length];
-    try (Releasable valuesRelease = Releasables.wrap(valuesRefs)) {
-      LongBlock[] valuesBlocks = new LongBlock[values.length];
+  public Block eval(Page page) {
+    LongBlock[] valuesBlocks = new LongBlock[values.length];
+    try (Releasable valuesRelease = Releasables.wrap(valuesBlocks)) {
       for (int i = 0; i < valuesBlocks.length; i++) {
-        valuesRefs[i] = values[i].eval(page);
-        valuesBlocks[i] = (LongBlock) valuesRefs[i].block();
+        valuesBlocks[i] = (LongBlock)values[i].eval(page);
       }
       LongVector[] valuesVectors = new LongVector[values.length];
       for (int i = 0; i < valuesBlocks.length; i++) {
         valuesVectors[i] = valuesBlocks[i].asVector();
         if (valuesVectors[i] == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), valuesBlocks));
+          return eval(page.getPositionCount(), valuesBlocks);
         }
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valuesVectors).asBlock());
+      return eval(page.getPositionCount(), valuesVectors).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractConstantEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractConstantEvaluator.java
index 5de2f3cf371f2..f4109947c7406 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractConstantEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractConstantEvaluator.java
@@ -38,14 +38,13 @@ public DateExtractConstantEvaluator(EvalOperator.ExpressionEvaluator value,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref valueRef = value.eval(page)) {
-      LongBlock valueBlock = (LongBlock) valueRef.block();
+  public Block eval(Page page) {
+    try (LongBlock valueBlock = (LongBlock) value.eval(page)) {
       LongVector valueVector = valueBlock.asVector();
       if (valueVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), valueBlock));
+        return eval(page.getPositionCount(), valueBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valueVector).asBlock());
+      return eval(page.getPositionCount(), valueVector).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractEvaluator.java
index 7f024bab34c88..37af410e1d49d 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractEvaluator.java
@@ -46,20 +46,18 @@ public DateExtractEvaluator(Source source, EvalOperator.ExpressionEvaluator valu
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref valueRef = value.eval(page)) {
-      LongBlock valueBlock = (LongBlock) valueRef.block();
-      try (Block.Ref chronoFieldRef = chronoField.eval(page)) {
-        BytesRefBlock chronoFieldBlock = (BytesRefBlock) chronoFieldRef.block();
+  public Block eval(Page page) {
+    try (LongBlock valueBlock = (LongBlock) value.eval(page)) {
+      try (BytesRefBlock chronoFieldBlock = (BytesRefBlock) chronoField.eval(page)) {
         LongVector valueVector = valueBlock.asVector();
         if (valueVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), valueBlock, chronoFieldBlock));
+          return eval(page.getPositionCount(), valueBlock, chronoFieldBlock);
         }
         BytesRefVector chronoFieldVector = chronoFieldBlock.asVector();
         if (chronoFieldVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), valueBlock, chronoFieldBlock));
+          return eval(page.getPositionCount(), valueBlock, chronoFieldBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), valueVector, chronoFieldVector));
+        return eval(page.getPositionCount(), valueVector, chronoFieldVector);
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatConstantEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatConstantEvaluator.java
index bee94ff3e64af..1ef4b15860dde 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatConstantEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatConstantEvaluator.java
@@ -36,14 +36,13 @@ public DateFormatConstantEvaluator(EvalOperator.ExpressionEvaluator val, DateFor
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref valRef = val.eval(page)) {
-      LongBlock valBlock = (LongBlock) valRef.block();
+  public Block eval(Page page) {
+    try (LongBlock valBlock = (LongBlock) val.eval(page)) {
       LongVector valVector = valBlock.asVector();
       if (valVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), valBlock));
+        return eval(page.getPositionCount(), valBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valVector).asBlock());
+      return eval(page.getPositionCount(), valVector).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatEvaluator.java
index f84645a53ce82..5f8077f908b39 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatEvaluator.java
@@ -40,20 +40,18 @@ public DateFormatEvaluator(EvalOperator.ExpressionEvaluator val,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref valRef = val.eval(page)) {
-      LongBlock valBlock = (LongBlock) valRef.block();
-      try (Block.Ref formatterRef = formatter.eval(page)) {
-        BytesRefBlock formatterBlock = (BytesRefBlock) formatterRef.block();
+  public Block eval(Page page) {
+    try (LongBlock valBlock = (LongBlock) val.eval(page)) {
+      try (BytesRefBlock formatterBlock = (BytesRefBlock) formatter.eval(page)) {
         LongVector valVector = valBlock.asVector();
         if (valVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), valBlock, formatterBlock));
+          return eval(page.getPositionCount(), valBlock, formatterBlock);
         }
         BytesRefVector formatterVector = formatterBlock.asVector();
         if (formatterVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), valBlock, formatterBlock));
+          return eval(page.getPositionCount(), valBlock, formatterBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), valVector, formatterVector).asBlock());
+        return eval(page.getPositionCount(), valVector, formatterVector).asBlock();
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseConstantEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseConstantEvaluator.java
index cf0f196321fb6..84e141dcdf448 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseConstantEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseConstantEvaluator.java
@@ -42,14 +42,13 @@ public DateParseConstantEvaluator(Source source, EvalOperator.ExpressionEvaluato
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref valRef = val.eval(page)) {
-      BytesRefBlock valBlock = (BytesRefBlock) valRef.block();
+  public Block eval(Page page) {
+    try (BytesRefBlock valBlock = (BytesRefBlock) val.eval(page)) {
       BytesRefVector valVector = valBlock.asVector();
       if (valVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), valBlock));
+        return eval(page.getPositionCount(), valBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valVector));
+      return eval(page.getPositionCount(), valVector);
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseEvaluator.java
index 3deff136d8cff..233d2f45c93fa 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseEvaluator.java
@@ -45,20 +45,18 @@ public DateParseEvaluator(Source source, EvalOperator.ExpressionEvaluator val,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref valRef = val.eval(page)) {
-      BytesRefBlock valBlock = (BytesRefBlock) valRef.block();
-      try (Block.Ref formatterRef = formatter.eval(page)) {
-        BytesRefBlock formatterBlock = (BytesRefBlock) formatterRef.block();
+  public Block eval(Page page) {
+    try (BytesRefBlock valBlock = (BytesRefBlock) val.eval(page)) {
+      try (BytesRefBlock formatterBlock = (BytesRefBlock) formatter.eval(page)) {
         BytesRefVector valVector = valBlock.asVector();
         if (valVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), valBlock, formatterBlock));
+          return eval(page.getPositionCount(), valBlock, formatterBlock);
         }
         BytesRefVector formatterVector = formatterBlock.asVector();
         if (formatterVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), valBlock, formatterBlock));
+          return eval(page.getPositionCount(), valBlock, formatterBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), valVector, formatterVector));
+        return eval(page.getPositionCount(), valVector, formatterVector);
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncEvaluator.java
index 28e661e3900b1..ff31d753427d4 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncEvaluator.java
@@ -34,14 +34,13 @@ public DateTruncEvaluator(EvalOperator.ExpressionEvaluator fieldVal, Rounding.Pr
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref fieldValRef = fieldVal.eval(page)) {
-      LongBlock fieldValBlock = (LongBlock) fieldValRef.block();
+  public Block eval(Page page) {
+    try (LongBlock fieldValBlock = (LongBlock) fieldVal.eval(page)) {
       LongVector fieldValVector = fieldValBlock.asVector();
       if (fieldValVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), fieldValBlock));
+        return eval(page.getPositionCount(), fieldValBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), fieldValVector).asBlock());
+      return eval(page.getPositionCount(), fieldValVector).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/NowEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/NowEvaluator.java
index c257680f20f00..d4c04b724377e 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/NowEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/NowEvaluator.java
@@ -27,8 +27,8 @@ public NowEvaluator(long now, DriverContext driverContext) {
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    return Block.Ref.floating(eval(page.getPositionCount()).asBlock());
+  public Block eval(Page page) {
+    return eval(page.getPositionCount()).asBlock();
   }
 
   public LongVector eval(int positionCount) {
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatchEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatchEvaluator.java
index 22fa3beb8c7e4..c3a347433ff9f 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatchEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatchEvaluator.java
@@ -38,28 +38,25 @@ public CIDRMatchEvaluator(EvalOperator.ExpressionEvaluator ip,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref ipRef = ip.eval(page)) {
-      BytesRefBlock ipBlock = (BytesRefBlock) ipRef.block();
-      Block.Ref[] cidrsRefs = new Block.Ref[cidrs.length];
-      try (Releasable cidrsRelease = Releasables.wrap(cidrsRefs)) {
-        BytesRefBlock[] cidrsBlocks = new BytesRefBlock[cidrs.length];
+  public Block eval(Page page) {
+    try (BytesRefBlock ipBlock = (BytesRefBlock) ip.eval(page)) {
+      BytesRefBlock[] cidrsBlocks = new BytesRefBlock[cidrs.length];
+      try (Releasable cidrsRelease = Releasables.wrap(cidrsBlocks)) {
         for (int i = 0; i < cidrsBlocks.length; i++) {
-          cidrsRefs[i] = cidrs[i].eval(page);
-          cidrsBlocks[i] = (BytesRefBlock) cidrsRefs[i].block();
+          cidrsBlocks[i] = (BytesRefBlock)cidrs[i].eval(page);
         }
         BytesRefVector ipVector = ipBlock.asVector();
         if (ipVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), ipBlock, cidrsBlocks));
+          return eval(page.getPositionCount(), ipBlock, cidrsBlocks);
         }
         BytesRefVector[] cidrsVectors = new BytesRefVector[cidrs.length];
         for (int i = 0; i < cidrsBlocks.length; i++) {
           cidrsVectors[i] = cidrsBlocks[i].asVector();
           if (cidrsVectors[i] == null) {
-            return Block.Ref.floating(eval(page.getPositionCount(), ipBlock, cidrsBlocks));
+            return eval(page.getPositionCount(), ipBlock, cidrsBlocks);
           }
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), ipVector, cidrsVectors).asBlock());
+        return eval(page.getPositionCount(), ipVector, cidrsVectors).asBlock();
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsDoubleEvaluator.java
index dbadd5adf963c..b7e061e5e684b 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsDoubleEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsDoubleEvaluator.java
@@ -30,14 +30,13 @@ public AbsDoubleEvaluator(EvalOperator.ExpressionEvaluator fieldVal,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref fieldValRef = fieldVal.eval(page)) {
-      DoubleBlock fieldValBlock = (DoubleBlock) fieldValRef.block();
+  public Block eval(Page page) {
+    try (DoubleBlock fieldValBlock = (DoubleBlock) fieldVal.eval(page)) {
       DoubleVector fieldValVector = fieldValBlock.asVector();
       if (fieldValVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), fieldValBlock));
+        return eval(page.getPositionCount(), fieldValBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), fieldValVector).asBlock());
+      return eval(page.getPositionCount(), fieldValVector).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsIntEvaluator.java
index 7cc0692e2481a..9894a8ebcdce3 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsIntEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsIntEvaluator.java
@@ -29,14 +29,13 @@ public AbsIntEvaluator(EvalOperator.ExpressionEvaluator fieldVal, DriverContext
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref fieldValRef = fieldVal.eval(page)) {
-      IntBlock fieldValBlock = (IntBlock) fieldValRef.block();
+  public Block eval(Page page) {
+    try (IntBlock fieldValBlock = (IntBlock) fieldVal.eval(page)) {
       IntVector fieldValVector = fieldValBlock.asVector();
       if (fieldValVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), fieldValBlock));
+        return eval(page.getPositionCount(), fieldValBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), fieldValVector).asBlock());
+      return eval(page.getPositionCount(), fieldValVector).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsLongEvaluator.java
index 0ffee4cc699f8..ebbb754e28188 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsLongEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsLongEvaluator.java
@@ -29,14 +29,13 @@ public AbsLongEvaluator(EvalOperator.ExpressionEvaluator fieldVal, DriverContext
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref fieldValRef = fieldVal.eval(page)) {
-      LongBlock fieldValBlock = (LongBlock) fieldValRef.block();
+  public Block eval(Page page) {
+    try (LongBlock fieldValBlock = (LongBlock) fieldVal.eval(page)) {
       LongVector fieldValVector = fieldValBlock.asVector();
       if (fieldValVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), fieldValBlock));
+        return eval(page.getPositionCount(), fieldValBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), fieldValVector).asBlock());
+      return eval(page.getPositionCount(), fieldValVector).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AcosEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AcosEvaluator.java
index b818cd2f8adfc..ce43cb0d88d09 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AcosEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AcosEvaluator.java
@@ -36,14 +36,13 @@ public AcosEvaluator(Source source, EvalOperator.ExpressionEvaluator val,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref valRef = val.eval(page)) {
-      DoubleBlock valBlock = (DoubleBlock) valRef.block();
+  public Block eval(Page page) {
+    try (DoubleBlock valBlock = (DoubleBlock) val.eval(page)) {
       DoubleVector valVector = valBlock.asVector();
       if (valVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), valBlock));
+        return eval(page.getPositionCount(), valBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valVector));
+      return eval(page.getPositionCount(), valVector);
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AsinEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AsinEvaluator.java
index 923f27dd275b5..2b8168cd2abc7 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AsinEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AsinEvaluator.java
@@ -36,14 +36,13 @@ public AsinEvaluator(Source source, EvalOperator.ExpressionEvaluator val,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref valRef = val.eval(page)) {
-      DoubleBlock valBlock = (DoubleBlock) valRef.block();
+  public Block eval(Page page) {
+    try (DoubleBlock valBlock = (DoubleBlock) val.eval(page)) {
       DoubleVector valVector = valBlock.asVector();
       if (valVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), valBlock));
+        return eval(page.getPositionCount(), valBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valVector));
+      return eval(page.getPositionCount(), valVector);
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2Evaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2Evaluator.java
index 902a6eb0cc6a4..ac4d61502be33 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2Evaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2Evaluator.java
@@ -33,20 +33,18 @@ public Atan2Evaluator(EvalOperator.ExpressionEvaluator y, EvalOperator.Expressio
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref yRef = y.eval(page)) {
-      DoubleBlock yBlock = (DoubleBlock) yRef.block();
-      try (Block.Ref xRef = x.eval(page)) {
-        DoubleBlock xBlock = (DoubleBlock) xRef.block();
+  public Block eval(Page page) {
+    try (DoubleBlock yBlock = (DoubleBlock) y.eval(page)) {
+      try (DoubleBlock xBlock = (DoubleBlock) x.eval(page)) {
         DoubleVector yVector = yBlock.asVector();
         if (yVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), yBlock, xBlock));
+          return eval(page.getPositionCount(), yBlock, xBlock);
         }
         DoubleVector xVector = xBlock.asVector();
         if (xVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), yBlock, xBlock));
+          return eval(page.getPositionCount(), yBlock, xBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), yVector, xVector).asBlock());
+        return eval(page.getPositionCount(), yVector, xVector).asBlock();
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AtanEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AtanEvaluator.java
index 14ef9ab1b0e30..2ce4dac48fbf5 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AtanEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AtanEvaluator.java
@@ -29,14 +29,13 @@ public AtanEvaluator(EvalOperator.ExpressionEvaluator val, DriverContext driverC
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref valRef = val.eval(page)) {
-      DoubleBlock valBlock = (DoubleBlock) valRef.block();
+  public Block eval(Page page) {
+    try (DoubleBlock valBlock = (DoubleBlock) val.eval(page)) {
       DoubleVector valVector = valBlock.asVector();
       if (valVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), valBlock));
+        return eval(page.getPositionCount(), valBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valVector).asBlock());
+      return eval(page.getPositionCount(), valVector).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastIntToDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastIntToDoubleEvaluator.java
index 330b0d73aaf13..5b09822354480 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastIntToDoubleEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastIntToDoubleEvaluator.java
@@ -31,14 +31,13 @@ public CastIntToDoubleEvaluator(EvalOperator.ExpressionEvaluator v, DriverContex
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref vRef = v.eval(page)) {
-      IntBlock vBlock = (IntBlock) vRef.block();
+  public Block eval(Page page) {
+    try (IntBlock vBlock = (IntBlock) v.eval(page)) {
       IntVector vVector = vBlock.asVector();
       if (vVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), vBlock));
+        return eval(page.getPositionCount(), vBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), vVector).asBlock());
+      return eval(page.getPositionCount(), vVector).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastIntToLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastIntToLongEvaluator.java
index 411203a0cf950..0b9f3a5cd2a51 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastIntToLongEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastIntToLongEvaluator.java
@@ -31,14 +31,13 @@ public CastIntToLongEvaluator(EvalOperator.ExpressionEvaluator v, DriverContext
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref vRef = v.eval(page)) {
-      IntBlock vBlock = (IntBlock) vRef.block();
+  public Block eval(Page page) {
+    try (IntBlock vBlock = (IntBlock) v.eval(page)) {
       IntVector vVector = vBlock.asVector();
       if (vVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), vBlock));
+        return eval(page.getPositionCount(), vBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), vVector).asBlock());
+      return eval(page.getPositionCount(), vVector).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastIntToUnsignedLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastIntToUnsignedLongEvaluator.java
index a79cb9be2d5eb..ee228b79085b7 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastIntToUnsignedLongEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastIntToUnsignedLongEvaluator.java
@@ -32,14 +32,13 @@ public CastIntToUnsignedLongEvaluator(EvalOperator.ExpressionEvaluator v,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref vRef = v.eval(page)) {
-      IntBlock vBlock = (IntBlock) vRef.block();
+  public Block eval(Page page) {
+    try (IntBlock vBlock = (IntBlock) v.eval(page)) {
       IntVector vVector = vBlock.asVector();
       if (vVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), vBlock));
+        return eval(page.getPositionCount(), vBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), vVector).asBlock());
+      return eval(page.getPositionCount(), vVector).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastLongToDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastLongToDoubleEvaluator.java
index 92e85a7214afe..9a70690bf891d 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastLongToDoubleEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastLongToDoubleEvaluator.java
@@ -32,14 +32,13 @@ public CastLongToDoubleEvaluator(EvalOperator.ExpressionEvaluator v,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref vRef = v.eval(page)) {
-      LongBlock vBlock = (LongBlock) vRef.block();
+  public Block eval(Page page) {
+    try (LongBlock vBlock = (LongBlock) v.eval(page)) {
       LongVector vVector = vBlock.asVector();
       if (vVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), vBlock));
+        return eval(page.getPositionCount(), vBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), vVector).asBlock());
+      return eval(page.getPositionCount(), vVector).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastLongToUnsignedLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastLongToUnsignedLongEvaluator.java
index 3d59ff67c21d1..a258b2eeb7636 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastLongToUnsignedLongEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastLongToUnsignedLongEvaluator.java
@@ -30,14 +30,13 @@ public CastLongToUnsignedLongEvaluator(EvalOperator.ExpressionEvaluator v,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref vRef = v.eval(page)) {
-      LongBlock vBlock = (LongBlock) vRef.block();
+  public Block eval(Page page) {
+    try (LongBlock vBlock = (LongBlock) v.eval(page)) {
       LongVector vVector = vBlock.asVector();
       if (vVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), vBlock));
+        return eval(page.getPositionCount(), vBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), vVector).asBlock());
+      return eval(page.getPositionCount(), vVector).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastUnsignedLongToDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastUnsignedLongToDoubleEvaluator.java
index c3b880b9bbdb5..f57d0f4dae34d 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastUnsignedLongToDoubleEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CastUnsignedLongToDoubleEvaluator.java
@@ -32,14 +32,13 @@ public CastUnsignedLongToDoubleEvaluator(EvalOperator.ExpressionEvaluator v,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref vRef = v.eval(page)) {
-      LongBlock vBlock = (LongBlock) vRef.block();
+  public Block eval(Page page) {
+    try (LongBlock vBlock = (LongBlock) v.eval(page)) {
       LongVector vVector = vBlock.asVector();
       if (vVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), vBlock));
+        return eval(page.getPositionCount(), vBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), vVector).asBlock());
+      return eval(page.getPositionCount(), vVector).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilDoubleEvaluator.java
index 9c63adc45b978..fb25d318f7336 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilDoubleEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilDoubleEvaluator.java
@@ -29,14 +29,13 @@ public CeilDoubleEvaluator(EvalOperator.ExpressionEvaluator val, DriverContext d
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref valRef = val.eval(page)) {
-      DoubleBlock valBlock = (DoubleBlock) valRef.block();
+  public Block eval(Page page) {
+    try (DoubleBlock valBlock = (DoubleBlock) val.eval(page)) {
       DoubleVector valVector = valBlock.asVector();
       if (valVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), valBlock));
+        return eval(page.getPositionCount(), valBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valVector).asBlock());
+      return eval(page.getPositionCount(), valVector).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CosEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CosEvaluator.java
index 3b62f3f5e9a0e..7fb5063875834 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CosEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CosEvaluator.java
@@ -29,14 +29,13 @@ public CosEvaluator(EvalOperator.ExpressionEvaluator val, DriverContext driverCo
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref valRef = val.eval(page)) {
-      DoubleBlock valBlock = (DoubleBlock) valRef.block();
+  public Block eval(Page page) {
+    try (DoubleBlock valBlock = (DoubleBlock) val.eval(page)) {
       DoubleVector valVector = valBlock.asVector();
       if (valVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), valBlock));
+        return eval(page.getPositionCount(), valBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valVector).asBlock());
+      return eval(page.getPositionCount(), valVector).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshEvaluator.java
index b5c461a2ee9b5..ab862a62c6bfe 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshEvaluator.java
@@ -36,14 +36,13 @@ public CoshEvaluator(Source source, EvalOperator.ExpressionEvaluator val,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref valRef = val.eval(page)) {
-      DoubleBlock valBlock = (DoubleBlock) valRef.block();
+  public Block eval(Page page) {
+    try (DoubleBlock valBlock = (DoubleBlock) val.eval(page)) {
       DoubleVector valVector = valBlock.asVector();
       if (valVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), valBlock));
+        return eval(page.getPositionCount(), valBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valVector));
+      return eval(page.getPositionCount(), valVector);
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorDoubleEvaluator.java
index bf5c32f816892..99ceca3521883 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorDoubleEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorDoubleEvaluator.java
@@ -29,14 +29,13 @@ public FloorDoubleEvaluator(EvalOperator.ExpressionEvaluator val, DriverContext
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref valRef = val.eval(page)) {
-      DoubleBlock valBlock = (DoubleBlock) valRef.block();
+  public Block eval(Page page) {
+    try (DoubleBlock valBlock = (DoubleBlock) val.eval(page)) {
       DoubleVector valVector = valBlock.asVector();
       if (valVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), valBlock));
+        return eval(page.getPositionCount(), valBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valVector).asBlock());
+      return eval(page.getPositionCount(), valVector).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsFiniteEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsFiniteEvaluator.java
index 793cf4da31bb2..6ad3ccb6cb287 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsFiniteEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsFiniteEvaluator.java
@@ -31,14 +31,13 @@ public IsFiniteEvaluator(EvalOperator.ExpressionEvaluator val, DriverContext dri
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref valRef = val.eval(page)) {
-      DoubleBlock valBlock = (DoubleBlock) valRef.block();
+  public Block eval(Page page) {
+    try (DoubleBlock valBlock = (DoubleBlock) val.eval(page)) {
       DoubleVector valVector = valBlock.asVector();
       if (valVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), valBlock));
+        return eval(page.getPositionCount(), valBlock);
      }
-      return Block.Ref.floating(eval(page.getPositionCount(), valVector).asBlock());
+      return eval(page.getPositionCount(), valVector).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsInfiniteEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsInfiniteEvaluator.java
index 2522c4cd6a9fe..00b260467046c 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsInfiniteEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsInfiniteEvaluator.java
@@ -31,14 +31,13 @@ public IsInfiniteEvaluator(EvalOperator.ExpressionEvaluator val, DriverContext d
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref valRef = val.eval(page)) {
-      DoubleBlock valBlock = (DoubleBlock) valRef.block();
+  public Block eval(Page page) {
+    try (DoubleBlock valBlock = (DoubleBlock) val.eval(page)) {
       DoubleVector valVector = valBlock.asVector();
       if (valVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), valBlock));
+        return eval(page.getPositionCount(), valBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valVector).asBlock());
+      return eval(page.getPositionCount(), valVector).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsNaNEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsNaNEvaluator.java
index 955ff8e5cd355..d7639010d9533 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsNaNEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsNaNEvaluator.java
@@ -31,14 +31,13 @@ public IsNaNEvaluator(EvalOperator.ExpressionEvaluator val, DriverContext driver
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref valRef = val.eval(page)) {
-      DoubleBlock valBlock = (DoubleBlock) valRef.block();
+  public Block eval(Page page) {
+    try (DoubleBlock valBlock = (DoubleBlock) val.eval(page)) {
       DoubleVector valVector = valBlock.asVector();
       if (valVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), valBlock));
+        return eval(page.getPositionCount(), valBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valVector).asBlock());
+      return eval(page.getPositionCount(), valVector).asBlock();
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10DoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10DoubleEvaluator.java
index 66d4a9dfc2e53..6a42dadae78ea 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10DoubleEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10DoubleEvaluator.java
@@ -36,14 +36,13 @@ public Log10DoubleEvaluator(Source source, EvalOperator.ExpressionEvaluator val,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref valRef = val.eval(page)) {
-      DoubleBlock valBlock = (DoubleBlock) valRef.block();
+  public Block eval(Page page) {
+    try (DoubleBlock valBlock = (DoubleBlock) val.eval(page)) {
       DoubleVector valVector = valBlock.asVector();
       if (valVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), valBlock));
+        return eval(page.getPositionCount(), valBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valVector));
+      return eval(page.getPositionCount(), valVector);
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10IntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10IntEvaluator.java
index c64ec2ceed680..782e35e9a74ab 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10IntEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10IntEvaluator.java
@@ -37,14 +37,13 @@ public Log10IntEvaluator(Source source, EvalOperator.ExpressionEvaluator val,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref valRef = val.eval(page)) {
-      IntBlock valBlock = (IntBlock) valRef.block();
+  public Block eval(Page page) {
+    try (IntBlock valBlock = (IntBlock) val.eval(page)) {
       IntVector valVector = valBlock.asVector();
       if (valVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), valBlock));
+        return eval(page.getPositionCount(), valBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valVector));
+      return eval(page.getPositionCount(), valVector);
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10LongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10LongEvaluator.java
index b630b4b079bf7..cfcf56a637f32 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10LongEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10LongEvaluator.java
@@ -37,14 +37,13 @@ public Log10LongEvaluator(Source source, EvalOperator.ExpressionEvaluator val,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref valRef = val.eval(page)) {
-      LongBlock valBlock = (LongBlock) valRef.block();
+  public Block eval(Page page) {
+    try (LongBlock valBlock = (LongBlock) val.eval(page)) {
       LongVector valVector = valBlock.asVector();
       if (valVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), valBlock));
+        return eval(page.getPositionCount(), valBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valVector));
+      return eval(page.getPositionCount(), valVector);
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10UnsignedLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10UnsignedLongEvaluator.java
index c5d690f3c0ad4..1b092bcbfd8a6 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10UnsignedLongEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10UnsignedLongEvaluator.java
@@ -37,14 +37,13 @@ public Log10UnsignedLongEvaluator(Source source, EvalOperator.ExpressionEvaluato
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref valRef = val.eval(page)) {
-      LongBlock valBlock = (LongBlock) valRef.block();
+  public Block eval(Page page) {
+    try (LongBlock valBlock = (LongBlock) val.eval(page)) {
       LongVector valVector = valBlock.asVector();
       if (valVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), valBlock));
+        return eval(page.getPositionCount(), valBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valVector));
+      return eval(page.getPositionCount(), valVector);
     }
   }
 
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowEvaluator.java
similarity index 78%
rename from x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowDoubleEvaluator.java
rename to x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowEvaluator.java
index 104385e0e51ef..775cee816be7b 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowDoubleEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowEvaluator.java
@@ -21,7 +21,7 @@
  * {@link EvalOperator.ExpressionEvaluator} implementation for {@link Pow}.
  * This class is generated. Do not edit it.
  */
-public final class PowDoubleEvaluator implements EvalOperator.ExpressionEvaluator {
+public final class PowEvaluator implements EvalOperator.ExpressionEvaluator {
   private final Warnings warnings;
 
   private final EvalOperator.ExpressionEvaluator base;
@@ -30,7 +30,7 @@ public final class PowDoubleEvaluator implements EvalOperator.ExpressionEvaluato
 
   private final DriverContext driverContext;
 
-  public PowDoubleEvaluator(Source source, EvalOperator.ExpressionEvaluator base,
+  public PowEvaluator(Source source, EvalOperator.ExpressionEvaluator base,
       EvalOperator.ExpressionEvaluator exponent, DriverContext driverContext) {
     this.warnings = new Warnings(source);
     this.base = base;
@@ -39,20 +39,18 @@ public PowDoubleEvaluator(Source source, EvalOperator.ExpressionEvaluator base,
   }
 
   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref baseRef = base.eval(page)) {
-      DoubleBlock baseBlock = (DoubleBlock) baseRef.block();
-      try (Block.Ref exponentRef = exponent.eval(page)) {
-        DoubleBlock exponentBlock = (DoubleBlock) exponentRef.block();
+  public Block eval(Page page) {
+    try (DoubleBlock baseBlock = (DoubleBlock) base.eval(page)) {
+      try (DoubleBlock exponentBlock = (DoubleBlock) exponent.eval(page)) {
         DoubleVector baseVector = baseBlock.asVector();
         if (baseVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), baseBlock, exponentBlock));
+          return eval(page.getPositionCount(), baseBlock, exponentBlock);
        }
         DoubleVector exponentVector = exponentBlock.asVector();
         if (exponentVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), baseBlock, exponentBlock));
+          return eval(page.getPositionCount(), baseBlock, exponentBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), baseVector, exponentVector));
+        return eval(page.getPositionCount(), baseVector, exponentVector);
       }
     }
   }
@@ -95,7 +93,7 @@ public DoubleBlock eval(int positionCount, DoubleVector baseVector, DoubleVector
 
   @Override
   public String toString() {
-    return "PowDoubleEvaluator[" + "base=" + base + ", exponent=" + exponent + "]";
+    return "PowEvaluator[" + "base=" + base + ", exponent=" + exponent + "]";
   }
 
   @Override
@@ -118,13 +116,13 @@ public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory base,
     }
 
     @Override
-    public PowDoubleEvaluator get(DriverContext context) {
-      return new PowDoubleEvaluator(source, base.get(context), exponent.get(context), context);
+    public PowEvaluator get(DriverContext context) {
+      return new PowEvaluator(source, base.get(context), exponent.get(context), context);
     }
 
     @Override
    public String toString() {
-      return "PowDoubleEvaluator[" + "base=" + base + ", exponent=" + exponent + "]";
+      return "PowEvaluator[" + "base=" + base + ", exponent=" + exponent + "]";
     }
   }
 }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowIntEvaluator.java
deleted file mode 100644
index f9d0842a1ab74..0000000000000
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowIntEvaluator.java
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License
-// 2.0; you may not use this file except in compliance with the Elastic License
-// 2.0.
-package org.elasticsearch.xpack.esql.expression.function.scalar.math;
-
-import java.lang.ArithmeticException;
-import java.lang.Override;
-import java.lang.String;
-import org.elasticsearch.compute.data.Block;
-import org.elasticsearch.compute.data.DoubleBlock;
-import org.elasticsearch.compute.data.DoubleVector;
-import org.elasticsearch.compute.data.IntBlock;
-import org.elasticsearch.compute.data.Page;
-import org.elasticsearch.compute.operator.DriverContext;
-import org.elasticsearch.compute.operator.EvalOperator;
-import org.elasticsearch.core.Releasables;
-import org.elasticsearch.xpack.esql.expression.function.Warnings;
-import org.elasticsearch.xpack.ql.tree.Source;
-
-/**
- * {@link EvalOperator.ExpressionEvaluator} implementation for {@link Pow}.
- * This class is generated. Do not edit it.
- */ -public final class PowIntEvaluator implements EvalOperator.ExpressionEvaluator { - private final Warnings warnings; - - private final EvalOperator.ExpressionEvaluator base; - - private final EvalOperator.ExpressionEvaluator exponent; - - private final DriverContext driverContext; - - public PowIntEvaluator(Source source, EvalOperator.ExpressionEvaluator base, - EvalOperator.ExpressionEvaluator exponent, DriverContext driverContext) { - this.warnings = new Warnings(source); - this.base = base; - this.exponent = exponent; - this.driverContext = driverContext; - } - - @Override - public Block.Ref eval(Page page) { - try (Block.Ref baseRef = base.eval(page)) { - DoubleBlock baseBlock = (DoubleBlock) baseRef.block(); - try (Block.Ref exponentRef = exponent.eval(page)) { - DoubleBlock exponentBlock = (DoubleBlock) exponentRef.block(); - DoubleVector baseVector = baseBlock.asVector(); - if (baseVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), baseBlock, exponentBlock)); - } - DoubleVector exponentVector = exponentBlock.asVector(); - if (exponentVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), baseBlock, exponentBlock)); - } - return Block.Ref.floating(eval(page.getPositionCount(), baseVector, exponentVector)); - } - } - } - - public IntBlock eval(int positionCount, DoubleBlock baseBlock, DoubleBlock exponentBlock) { - try(IntBlock.Builder result = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { - position: for (int p = 0; p < positionCount; p++) { - if (baseBlock.isNull(p) || baseBlock.getValueCount(p) != 1) { - result.appendNull(); - continue position; - } - if (exponentBlock.isNull(p) || exponentBlock.getValueCount(p) != 1) { - result.appendNull(); - continue position; - } - try { - result.appendInt(Pow.processInt(baseBlock.getDouble(baseBlock.getFirstValueIndex(p)), exponentBlock.getDouble(exponentBlock.getFirstValueIndex(p)))); - } catch (ArithmeticException e) { - warnings.registerException(e); - result.appendNull(); - } - } - return result.build(); - } - } - - public IntBlock eval(int positionCount, DoubleVector baseVector, DoubleVector exponentVector) { - try(IntBlock.Builder result = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { - position: for (int p = 0; p < positionCount; p++) { - try { - result.appendInt(Pow.processInt(baseVector.getDouble(p), exponentVector.getDouble(p))); - } catch (ArithmeticException e) { - warnings.registerException(e); - result.appendNull(); - } - } - return result.build(); - } - } - - @Override - public String toString() { - return "PowIntEvaluator[" + "base=" + base + ", exponent=" + exponent + "]"; - } - - @Override - public void close() { - Releasables.closeExpectNoException(base, exponent); - } - - static class Factory implements EvalOperator.ExpressionEvaluator.Factory { - private final Source source; - - private final EvalOperator.ExpressionEvaluator.Factory base; - - private final EvalOperator.ExpressionEvaluator.Factory exponent; - - public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory base, - EvalOperator.ExpressionEvaluator.Factory exponent) { - this.source = source; - this.base = base; - this.exponent = exponent; - } - - @Override - public PowIntEvaluator get(DriverContext context) { - return new PowIntEvaluator(source, base.get(context), exponent.get(context), context); - } - - @Override - public String toString() { - return "PowIntEvaluator[" + "base=" + base + ", exponent=" + exponent + "]"; - } - } -}
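Every eval(Page) override in the surviving generated evaluators above and below changes the same way: the method now returns the Block itself instead of a Block.Ref wrapper, the input block is released through try-with-resources directly on the casted block, and the Block.Ref.floating(...) wrapping disappears. Condensed here for reference from the PowEvaluator hunk above (an illustrative sketch of the method shape only; the rest of each generated class, its warnings handling, and its Factory keep their existing form):

    // Before: evaluate the argument to a Block.Ref, unwrap it, and wrap the
    // result in a "floating" Block.Ref.
    @Override
    public Block.Ref eval(Page page) {
      try (Block.Ref baseRef = base.eval(page)) {
        DoubleBlock baseBlock = (DoubleBlock) baseRef.block();
        try (Block.Ref exponentRef = exponent.eval(page)) {
          DoubleBlock exponentBlock = (DoubleBlock) exponentRef.block();
          DoubleVector baseVector = baseBlock.asVector();
          if (baseVector == null) {
            return Block.Ref.floating(eval(page.getPositionCount(), baseBlock, exponentBlock));
          }
          DoubleVector exponentVector = exponentBlock.asVector();
          if (exponentVector == null) {
            return Block.Ref.floating(eval(page.getPositionCount(), baseBlock, exponentBlock));
          }
          return Block.Ref.floating(eval(page.getPositionCount(), baseVector, exponentVector));
        }
      }
    }

    // After: the upstream evaluator returns a Block that this method closes
    // via try-with-resources; the result Block is returned directly, with no
    // wrapper type at all.
    @Override
    public Block eval(Page page) {
      try (DoubleBlock baseBlock = (DoubleBlock) base.eval(page)) {
        try (DoubleBlock exponentBlock = (DoubleBlock) exponent.eval(page)) {
          DoubleVector baseVector = baseBlock.asVector();
          if (baseVector == null) {
            return eval(page.getPositionCount(), baseBlock, exponentBlock);
          }
          DoubleVector exponentVector = exponentBlock.asVector();
          if (exponentVector == null) {
            return eval(page.getPositionCount(), baseBlock, exponentBlock);
          }
          return eval(page.getPositionCount(), baseVector, exponentVector);
        }
      }
    }

The PowIntEvaluator deletion above and the PowLongEvaluator deletion below go further than the mechanical conversion: Pow evidently keeps a single double-valued implementation after this change, so the renamed PowEvaluator replaces all three former variants.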
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowLongEvaluator.java deleted file mode 100644 index 1aba4fe7f0655..0000000000000 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowLongEvaluator.java +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License -// 2.0; you may not use this file except in compliance with the Elastic License -// 2.0. -package org.elasticsearch.xpack.esql.expression.function.scalar.math; - -import java.lang.ArithmeticException; -import java.lang.Override; -import java.lang.String; -import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.DoubleBlock; -import org.elasticsearch.compute.data.DoubleVector; -import org.elasticsearch.compute.data.LongBlock; -import org.elasticsearch.compute.data.Page; -import org.elasticsearch.compute.operator.DriverContext; -import org.elasticsearch.compute.operator.EvalOperator; -import org.elasticsearch.core.Releasables; -import org.elasticsearch.xpack.esql.expression.function.Warnings; -import org.elasticsearch.xpack.ql.tree.Source; - -/** - * {@link EvalOperator.ExpressionEvaluator} implementation for {@link Pow}. - * This class is generated. Do not edit it. - */ -public final class PowLongEvaluator implements EvalOperator.ExpressionEvaluator { - private final Warnings warnings; - - private final EvalOperator.ExpressionEvaluator base; - - private final EvalOperator.ExpressionEvaluator exponent; - - private final DriverContext driverContext; - - public PowLongEvaluator(Source source, EvalOperator.ExpressionEvaluator base, - EvalOperator.ExpressionEvaluator exponent, DriverContext driverContext) { - this.warnings = new Warnings(source); - this.base = base; - this.exponent = exponent; - this.driverContext = driverContext; - } - - @Override - public Block.Ref eval(Page page) { - try (Block.Ref baseRef = base.eval(page)) { - DoubleBlock baseBlock = (DoubleBlock) baseRef.block(); - try (Block.Ref exponentRef = exponent.eval(page)) { - DoubleBlock exponentBlock = (DoubleBlock) exponentRef.block(); - DoubleVector baseVector = baseBlock.asVector(); - if (baseVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), baseBlock, exponentBlock)); - } - DoubleVector exponentVector = exponentBlock.asVector(); - if (exponentVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), baseBlock, exponentBlock)); - } - return Block.Ref.floating(eval(page.getPositionCount(), baseVector, exponentVector)); - } - } - } - - public LongBlock eval(int positionCount, DoubleBlock baseBlock, DoubleBlock exponentBlock) { - try(LongBlock.Builder result = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { - position: for (int p = 0; p < positionCount; p++) { - if (baseBlock.isNull(p) || baseBlock.getValueCount(p) != 1) { - result.appendNull(); - continue position; - } - if (exponentBlock.isNull(p) || exponentBlock.getValueCount(p) != 1) { - result.appendNull(); - continue position; - } - try { - result.appendLong(Pow.processLong(baseBlock.getDouble(baseBlock.getFirstValueIndex(p)), exponentBlock.getDouble(exponentBlock.getFirstValueIndex(p)))); - } catch (ArithmeticException e) { -
warnings.registerException(e); - result.appendNull(); - } - } - return result.build(); - } - } - - public LongBlock eval(int positionCount, DoubleVector baseVector, DoubleVector exponentVector) { - try(LongBlock.Builder result = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { - position: for (int p = 0; p < positionCount; p++) { - try { - result.appendLong(Pow.processLong(baseVector.getDouble(p), exponentVector.getDouble(p))); - } catch (ArithmeticException e) { - warnings.registerException(e); - result.appendNull(); - } - } - return result.build(); - } - } - - @Override - public String toString() { - return "PowLongEvaluator[" + "base=" + base + ", exponent=" + exponent + "]"; - } - - @Override - public void close() { - Releasables.closeExpectNoException(base, exponent); - } - - static class Factory implements EvalOperator.ExpressionEvaluator.Factory { - private final Source source; - - private final EvalOperator.ExpressionEvaluator.Factory base; - - private final EvalOperator.ExpressionEvaluator.Factory exponent; - - public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory base, - EvalOperator.ExpressionEvaluator.Factory exponent) { - this.source = source; - this.base = base; - this.exponent = exponent; - } - - @Override - public PowLongEvaluator get(DriverContext context) { - return new PowLongEvaluator(source, base.get(context), exponent.get(context), context); - } - - @Override - public String toString() { - return "PowLongEvaluator[" + "base=" + base + ", exponent=" + exponent + "]"; - } - } -} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundDoubleEvaluator.java index 0be1080fd8710..a658e73a3b44f 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundDoubleEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundDoubleEvaluator.java @@ -35,20 +35,18 @@ public RoundDoubleEvaluator(EvalOperator.ExpressionEvaluator val, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref valRef = val.eval(page)) { - DoubleBlock valBlock = (DoubleBlock) valRef.block(); - try (Block.Ref decimalsRef = decimals.eval(page)) { - LongBlock decimalsBlock = (LongBlock) decimalsRef.block(); + public Block eval(Page page) { + try (DoubleBlock valBlock = (DoubleBlock) val.eval(page)) { + try (LongBlock decimalsBlock = (LongBlock) decimals.eval(page)) { DoubleVector valVector = valBlock.asVector(); if (valVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), valBlock, decimalsBlock)); + return eval(page.getPositionCount(), valBlock, decimalsBlock); } LongVector decimalsVector = decimalsBlock.asVector(); if (decimalsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), valBlock, decimalsBlock)); + return eval(page.getPositionCount(), valBlock, decimalsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), valVector, decimalsVector).asBlock()); + return eval(page.getPositionCount(), valVector, decimalsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundDoubleNoDecimalsEvaluator.java 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundDoubleNoDecimalsEvaluator.java index e9b486acf110a..316655de1d7b7 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundDoubleNoDecimalsEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundDoubleNoDecimalsEvaluator.java @@ -30,14 +30,13 @@ public RoundDoubleNoDecimalsEvaluator(EvalOperator.ExpressionEvaluator val, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref valRef = val.eval(page)) { - DoubleBlock valBlock = (DoubleBlock) valRef.block(); + public Block eval(Page page) { + try (DoubleBlock valBlock = (DoubleBlock) val.eval(page)) { DoubleVector valVector = valBlock.asVector(); if (valVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), valBlock)); + return eval(page.getPositionCount(), valBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), valVector).asBlock()); + return eval(page.getPositionCount(), valVector).asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundIntEvaluator.java index a6a410aa94938..71ea5938afe48 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundIntEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundIntEvaluator.java @@ -35,20 +35,18 @@ public RoundIntEvaluator(EvalOperator.ExpressionEvaluator val, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref valRef = val.eval(page)) { - IntBlock valBlock = (IntBlock) valRef.block(); - try (Block.Ref decimalsRef = decimals.eval(page)) { - LongBlock decimalsBlock = (LongBlock) decimalsRef.block(); + public Block eval(Page page) { + try (IntBlock valBlock = (IntBlock) val.eval(page)) { + try (LongBlock decimalsBlock = (LongBlock) decimals.eval(page)) { IntVector valVector = valBlock.asVector(); if (valVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), valBlock, decimalsBlock)); + return eval(page.getPositionCount(), valBlock, decimalsBlock); } LongVector decimalsVector = decimalsBlock.asVector(); if (decimalsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), valBlock, decimalsBlock)); + return eval(page.getPositionCount(), valBlock, decimalsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), valVector, decimalsVector).asBlock()); + return eval(page.getPositionCount(), valVector, decimalsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundLongEvaluator.java index 5053e92935a82..eae45800fdee0 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundLongEvaluator.java @@ -33,20 +33,18 @@ public RoundLongEvaluator(EvalOperator.ExpressionEvaluator val, } @Override - public 
Block.Ref eval(Page page) { - try (Block.Ref valRef = val.eval(page)) { - LongBlock valBlock = (LongBlock) valRef.block(); - try (Block.Ref decimalsRef = decimals.eval(page)) { - LongBlock decimalsBlock = (LongBlock) decimalsRef.block(); + public Block eval(Page page) { + try (LongBlock valBlock = (LongBlock) val.eval(page)) { + try (LongBlock decimalsBlock = (LongBlock) decimals.eval(page)) { LongVector valVector = valBlock.asVector(); if (valVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), valBlock, decimalsBlock)); + return eval(page.getPositionCount(), valBlock, decimalsBlock); } LongVector decimalsVector = decimalsBlock.asVector(); if (decimalsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), valBlock, decimalsBlock)); + return eval(page.getPositionCount(), valBlock, decimalsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), valVector, decimalsVector).asBlock()); + return eval(page.getPositionCount(), valVector, decimalsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundUnsignedLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundUnsignedLongEvaluator.java index 56dcca6ae9420..5f8cb5370b213 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundUnsignedLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundUnsignedLongEvaluator.java @@ -33,20 +33,18 @@ public RoundUnsignedLongEvaluator(EvalOperator.ExpressionEvaluator val, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref valRef = val.eval(page)) { - LongBlock valBlock = (LongBlock) valRef.block(); - try (Block.Ref decimalsRef = decimals.eval(page)) { - LongBlock decimalsBlock = (LongBlock) decimalsRef.block(); + public Block eval(Page page) { + try (LongBlock valBlock = (LongBlock) val.eval(page)) { + try (LongBlock decimalsBlock = (LongBlock) decimals.eval(page)) { LongVector valVector = valBlock.asVector(); if (valVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), valBlock, decimalsBlock)); + return eval(page.getPositionCount(), valBlock, decimalsBlock); } LongVector decimalsVector = decimalsBlock.asVector(); if (decimalsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), valBlock, decimalsBlock)); + return eval(page.getPositionCount(), valBlock, decimalsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), valVector, decimalsVector).asBlock()); + return eval(page.getPositionCount(), valVector, decimalsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinEvaluator.java index 4f146c8380d2b..fd2f0b1e3de64 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinEvaluator.java @@ -29,14 +29,13 @@ public SinEvaluator(EvalOperator.ExpressionEvaluator val, DriverContext driverCo } @Override - public Block.Ref eval(Page page) { - try (Block.Ref valRef = val.eval(page)) { - 
DoubleBlock valBlock = (DoubleBlock) valRef.block(); + public Block eval(Page page) { + try (DoubleBlock valBlock = (DoubleBlock) val.eval(page)) { DoubleVector valVector = valBlock.asVector(); if (valVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), valBlock)); + return eval(page.getPositionCount(), valBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), valVector).asBlock()); + return eval(page.getPositionCount(), valVector).asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhEvaluator.java index 96c1c054b1063..342c1b86a873f 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhEvaluator.java @@ -36,14 +36,13 @@ public SinhEvaluator(Source source, EvalOperator.ExpressionEvaluator val, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref valRef = val.eval(page)) { - DoubleBlock valBlock = (DoubleBlock) valRef.block(); + public Block eval(Page page) { + try (DoubleBlock valBlock = (DoubleBlock) val.eval(page)) { DoubleVector valVector = valBlock.asVector(); if (valVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), valBlock)); + return eval(page.getPositionCount(), valBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), valVector)); + return eval(page.getPositionCount(), valVector); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtDoubleEvaluator.java index 9674ff97de891..7be90cb5c87c0 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtDoubleEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtDoubleEvaluator.java @@ -36,14 +36,13 @@ public SqrtDoubleEvaluator(Source source, EvalOperator.ExpressionEvaluator val, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref valRef = val.eval(page)) { - DoubleBlock valBlock = (DoubleBlock) valRef.block(); + public Block eval(Page page) { + try (DoubleBlock valBlock = (DoubleBlock) val.eval(page)) { DoubleVector valVector = valBlock.asVector(); if (valVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), valBlock)); + return eval(page.getPositionCount(), valBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), valVector)); + return eval(page.getPositionCount(), valVector); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtIntEvaluator.java index e7407c8aa43c8..d7a24ebafec97 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtIntEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtIntEvaluator.java @@ -37,14 +37,13 @@ public 
SqrtIntEvaluator(Source source, EvalOperator.ExpressionEvaluator val, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref valRef = val.eval(page)) { - IntBlock valBlock = (IntBlock) valRef.block(); + public Block eval(Page page) { + try (IntBlock valBlock = (IntBlock) val.eval(page)) { IntVector valVector = valBlock.asVector(); if (valVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), valBlock)); + return eval(page.getPositionCount(), valBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), valVector)); + return eval(page.getPositionCount(), valVector); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtLongEvaluator.java index 4ed28f4fc6735..57055641877c9 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtLongEvaluator.java @@ -37,14 +37,13 @@ public SqrtLongEvaluator(Source source, EvalOperator.ExpressionEvaluator val, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref valRef = val.eval(page)) { - LongBlock valBlock = (LongBlock) valRef.block(); + public Block eval(Page page) { + try (LongBlock valBlock = (LongBlock) val.eval(page)) { LongVector valVector = valBlock.asVector(); if (valVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), valBlock)); + return eval(page.getPositionCount(), valBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), valVector)); + return eval(page.getPositionCount(), valVector); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtUnsignedLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtUnsignedLongEvaluator.java index b01d1a8eea222..8eddd0293ae86 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtUnsignedLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtUnsignedLongEvaluator.java @@ -32,14 +32,13 @@ public SqrtUnsignedLongEvaluator(EvalOperator.ExpressionEvaluator val, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref valRef = val.eval(page)) { - LongBlock valBlock = (LongBlock) valRef.block(); + public Block eval(Page page) { + try (LongBlock valBlock = (LongBlock) val.eval(page)) { LongVector valVector = valBlock.asVector(); if (valVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), valBlock)); + return eval(page.getPositionCount(), valBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), valVector).asBlock()); + return eval(page.getPositionCount(), valVector).asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanEvaluator.java index 8b499e9be1c35..2ff4ccba94ae0 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanEvaluator.java +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanEvaluator.java @@ -29,14 +29,13 @@ public TanEvaluator(EvalOperator.ExpressionEvaluator val, DriverContext driverCo } @Override - public Block.Ref eval(Page page) { - try (Block.Ref valRef = val.eval(page)) { - DoubleBlock valBlock = (DoubleBlock) valRef.block(); + public Block eval(Page page) { + try (DoubleBlock valBlock = (DoubleBlock) val.eval(page)) { DoubleVector valVector = valBlock.asVector(); if (valVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), valBlock)); + return eval(page.getPositionCount(), valBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), valVector).asBlock()); + return eval(page.getPositionCount(), valVector).asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanhEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanhEvaluator.java index 605cb38492b5a..05cfc6446cdb6 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanhEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanhEvaluator.java @@ -29,14 +29,13 @@ public TanhEvaluator(EvalOperator.ExpressionEvaluator val, DriverContext driverC } @Override - public Block.Ref eval(Page page) { - try (Block.Ref valRef = val.eval(page)) { - DoubleBlock valBlock = (DoubleBlock) valRef.block(); + public Block eval(Page page) { + try (DoubleBlock valBlock = (DoubleBlock) val.eval(page)) { DoubleVector valVector = valBlock.asVector(); if (valVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), valBlock)); + return eval(page.getPositionCount(), valBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), valVector).asBlock()); + return eval(page.getPositionCount(), valVector).asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgDoubleEvaluator.java index e533eb6dd5f33..6a9278efd2f6a 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgDoubleEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgDoubleEvaluator.java @@ -34,29 +34,27 @@ public String name() { * Evaluate blocks containing at least one multivalued field. 
*/ @Override - public Block.Ref evalNullable(Block.Ref ref) { - try (ref) { - DoubleBlock v = (DoubleBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { - CompensatedSum work = new CompensatedSum(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - for (int i = first; i < end; i++) { - double value = v.getDouble(i); - MvAvg.process(work, value); - } - double result = MvAvg.finish(work, valueCount); - builder.appendDouble(result); + public Block evalNullable(Block fieldVal) { + DoubleBlock v = (DoubleBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + CompensatedSum work = new CompensatedSum(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + for (int i = first; i < end; i++) { + double value = v.getDouble(i); + MvAvg.process(work, value); + } + double result = MvAvg.finish(work, valueCount); + builder.appendDouble(result); } + return builder.build(); } } @@ -64,25 +62,23 @@ public Block.Ref evalNullable(Block.Ref ref) { * Evaluate blocks containing at least one multivalued field. */ @Override - public Block.Ref evalNotNullable(Block.Ref ref) { - try (ref) { - DoubleBlock v = (DoubleBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { - CompensatedSum work = new CompensatedSum(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - for (int i = first; i < end; i++) { - double value = v.getDouble(i); - MvAvg.process(work, value); - } - double result = MvAvg.finish(work, valueCount); - builder.appendDouble(result); + public Block evalNotNullable(Block fieldVal) { + DoubleBlock v = (DoubleBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { + CompensatedSum work = new CompensatedSum(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + for (int i = first; i < end; i++) { + double value = v.getDouble(i); + MvAvg.process(work, value); } - return Block.Ref.floating(builder.build().asBlock()); + double result = MvAvg.finish(work, valueCount); + builder.appendDouble(result); } + return builder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgIntEvaluator.java index 5b55cb44072b5..8f2abc5e759b4 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgIntEvaluator.java +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgIntEvaluator.java @@ -35,35 +35,33 @@ public String name() { * Evaluate blocks containing at least one multivalued field. */ @Override - public Block.Ref evalNullable(Block.Ref ref) { - try (ref) { - IntBlock v = (IntBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { - CompensatedSum work = new CompensatedSum(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - if (valueCount == 1) { - int value = v.getInt(first); - double result = MvAvg.single(value); - builder.appendDouble(result); - continue; - } - int end = first + valueCount; - for (int i = first; i < end; i++) { - int value = v.getInt(i); - MvAvg.process(work, value); - } - double result = MvAvg.finish(work, valueCount); + public Block evalNullable(Block fieldVal) { + IntBlock v = (IntBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + CompensatedSum work = new CompensatedSum(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; + } + int first = v.getFirstValueIndex(p); + if (valueCount == 1) { + int value = v.getInt(first); + double result = MvAvg.single(value); builder.appendDouble(result); + continue; + } + int end = first + valueCount; + for (int i = first; i < end; i++) { + int value = v.getInt(i); + MvAvg.process(work, value); } - return Block.Ref.floating(builder.build()); + double result = MvAvg.finish(work, valueCount); + builder.appendDouble(result); } + return builder.build(); } } @@ -71,31 +69,29 @@ public Block.Ref evalNullable(Block.Ref ref) { * Evaluate blocks containing at least one multivalued field. 
*/ @Override - public Block.Ref evalNotNullable(Block.Ref ref) { - try (ref) { - IntBlock v = (IntBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { - CompensatedSum work = new CompensatedSum(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - if (valueCount == 1) { - int value = v.getInt(first); - double result = MvAvg.single(value); - builder.appendDouble(result); - continue; - } - int end = first + valueCount; - for (int i = first; i < end; i++) { - int value = v.getInt(i); - MvAvg.process(work, value); - } - double result = MvAvg.finish(work, valueCount); + public Block evalNotNullable(Block fieldVal) { + IntBlock v = (IntBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { + CompensatedSum work = new CompensatedSum(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + if (valueCount == 1) { + int value = v.getInt(first); + double result = MvAvg.single(value); builder.appendDouble(result); + continue; + } + int end = first + valueCount; + for (int i = first; i < end; i++) { + int value = v.getInt(i); + MvAvg.process(work, value); } - return Block.Ref.floating(builder.build().asBlock()); + double result = MvAvg.finish(work, valueCount); + builder.appendDouble(result); } + return builder.build().asBlock(); } } @@ -103,26 +99,24 @@ public Block.Ref evalNotNullable(Block.Ref ref) { * Evaluate blocks containing only single valued fields. */ @Override - public Block.Ref evalSingleValuedNullable(Block.Ref ref) { - try (ref) { - IntBlock v = (IntBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { - CompensatedSum work = new CompensatedSum(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - assert valueCount == 1; - int first = v.getFirstValueIndex(p); - int value = v.getInt(first); - double result = MvAvg.single(value); - builder.appendDouble(result); + public Block evalSingleValuedNullable(Block fieldVal) { + IntBlock v = (IntBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + CompensatedSum work = new CompensatedSum(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + assert valueCount == 1; + int first = v.getFirstValueIndex(p); + int value = v.getInt(first); + double result = MvAvg.single(value); + builder.appendDouble(result); } + return builder.build(); } } @@ -130,22 +124,20 @@ public Block.Ref evalSingleValuedNullable(Block.Ref ref) { * Evaluate blocks containing only single valued fields. 
*/ @Override - public Block.Ref evalSingleValuedNotNullable(Block.Ref ref) { - try (ref) { - IntBlock v = (IntBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { - CompensatedSum work = new CompensatedSum(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - assert valueCount == 1; - int first = v.getFirstValueIndex(p); - int value = v.getInt(first); - double result = MvAvg.single(value); - builder.appendDouble(result); - } - return Block.Ref.floating(builder.build().asBlock()); + public Block evalSingleValuedNotNullable(Block fieldVal) { + IntBlock v = (IntBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { + CompensatedSum work = new CompensatedSum(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + assert valueCount == 1; + int first = v.getFirstValueIndex(p); + int value = v.getInt(first); + double result = MvAvg.single(value); + builder.appendDouble(result); } + return builder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgLongEvaluator.java index 21f40e02e92ff..b01424846c4a7 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgLongEvaluator.java @@ -35,35 +35,33 @@ public String name() { * Evaluate blocks containing at least one multivalued field. 
*/ @Override - public Block.Ref evalNullable(Block.Ref ref) { - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { - CompensatedSum work = new CompensatedSum(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - if (valueCount == 1) { - long value = v.getLong(first); - double result = MvAvg.single(value); - builder.appendDouble(result); - continue; - } - int end = first + valueCount; - for (int i = first; i < end; i++) { - long value = v.getLong(i); - MvAvg.process(work, value); - } - double result = MvAvg.finish(work, valueCount); + public Block evalNullable(Block fieldVal) { + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + CompensatedSum work = new CompensatedSum(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; + } + int first = v.getFirstValueIndex(p); + if (valueCount == 1) { + long value = v.getLong(first); + double result = MvAvg.single(value); builder.appendDouble(result); + continue; + } + int end = first + valueCount; + for (int i = first; i < end; i++) { + long value = v.getLong(i); + MvAvg.process(work, value); } - return Block.Ref.floating(builder.build()); + double result = MvAvg.finish(work, valueCount); + builder.appendDouble(result); } + return builder.build(); } } @@ -71,31 +69,29 @@ public Block.Ref evalNullable(Block.Ref ref) { * Evaluate blocks containing at least one multivalued field. 
*/ @Override - public Block.Ref evalNotNullable(Block.Ref ref) { - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { - CompensatedSum work = new CompensatedSum(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - if (valueCount == 1) { - long value = v.getLong(first); - double result = MvAvg.single(value); - builder.appendDouble(result); - continue; - } - int end = first + valueCount; - for (int i = first; i < end; i++) { - long value = v.getLong(i); - MvAvg.process(work, value); - } - double result = MvAvg.finish(work, valueCount); + public Block evalNotNullable(Block fieldVal) { + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { + CompensatedSum work = new CompensatedSum(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + if (valueCount == 1) { + long value = v.getLong(first); + double result = MvAvg.single(value); builder.appendDouble(result); + continue; + } + int end = first + valueCount; + for (int i = first; i < end; i++) { + long value = v.getLong(i); + MvAvg.process(work, value); } - return Block.Ref.floating(builder.build().asBlock()); + double result = MvAvg.finish(work, valueCount); + builder.appendDouble(result); } + return builder.build().asBlock(); } } @@ -103,26 +99,24 @@ public Block.Ref evalNotNullable(Block.Ref ref) { * Evaluate blocks containing only single valued fields. */ @Override - public Block.Ref evalSingleValuedNullable(Block.Ref ref) { - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { - CompensatedSum work = new CompensatedSum(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - assert valueCount == 1; - int first = v.getFirstValueIndex(p); - long value = v.getLong(first); - double result = MvAvg.single(value); - builder.appendDouble(result); + public Block evalSingleValuedNullable(Block fieldVal) { + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + CompensatedSum work = new CompensatedSum(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + assert valueCount == 1; + int first = v.getFirstValueIndex(p); + long value = v.getLong(first); + double result = MvAvg.single(value); + builder.appendDouble(result); } + return builder.build(); } } @@ -130,22 +124,20 @@ public Block.Ref evalSingleValuedNullable(Block.Ref ref) { * Evaluate blocks containing only single valued fields. 
*/ @Override - public Block.Ref evalSingleValuedNotNullable(Block.Ref ref) { - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { - CompensatedSum work = new CompensatedSum(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - assert valueCount == 1; - int first = v.getFirstValueIndex(p); - long value = v.getLong(first); - double result = MvAvg.single(value); - builder.appendDouble(result); - } - return Block.Ref.floating(builder.build().asBlock()); + public Block evalSingleValuedNotNullable(Block fieldVal) { + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { + CompensatedSum work = new CompensatedSum(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + assert valueCount == 1; + int first = v.getFirstValueIndex(p); + long value = v.getLong(first); + double result = MvAvg.single(value); + builder.appendDouble(result); } + return builder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgUnsignedLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgUnsignedLongEvaluator.java index 92ebb14d9a29d..41e18cf1424a3 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgUnsignedLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgUnsignedLongEvaluator.java @@ -36,35 +36,33 @@ public String name() { * Evaluate blocks containing at least one multivalued field. 
*/ @Override - public Block.Ref evalNullable(Block.Ref ref) { - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { - CompensatedSum work = new CompensatedSum(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - if (valueCount == 1) { - long value = v.getLong(first); - double result = MvAvg.singleUnsignedLong(value); - builder.appendDouble(result); - continue; - } - int end = first + valueCount; - for (int i = first; i < end; i++) { - long value = v.getLong(i); - MvAvg.processUnsignedLong(work, value); - } - double result = MvAvg.finish(work, valueCount); + public Block evalNullable(Block fieldVal) { + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + CompensatedSum work = new CompensatedSum(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; + } + int first = v.getFirstValueIndex(p); + if (valueCount == 1) { + long value = v.getLong(first); + double result = MvAvg.singleUnsignedLong(value); builder.appendDouble(result); + continue; + } + int end = first + valueCount; + for (int i = first; i < end; i++) { + long value = v.getLong(i); + MvAvg.processUnsignedLong(work, value); } - return Block.Ref.floating(builder.build()); + double result = MvAvg.finish(work, valueCount); + builder.appendDouble(result); } + return builder.build(); } } @@ -72,31 +70,29 @@ public Block.Ref evalNullable(Block.Ref ref) { * Evaluate blocks containing at least one multivalued field. 
*/ @Override - public Block.Ref evalNotNullable(Block.Ref ref) { - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { - CompensatedSum work = new CompensatedSum(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - if (valueCount == 1) { - long value = v.getLong(first); - double result = MvAvg.singleUnsignedLong(value); - builder.appendDouble(result); - continue; - } - int end = first + valueCount; - for (int i = first; i < end; i++) { - long value = v.getLong(i); - MvAvg.processUnsignedLong(work, value); - } - double result = MvAvg.finish(work, valueCount); + public Block evalNotNullable(Block fieldVal) { + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { + CompensatedSum work = new CompensatedSum(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + if (valueCount == 1) { + long value = v.getLong(first); + double result = MvAvg.singleUnsignedLong(value); builder.appendDouble(result); + continue; + } + int end = first + valueCount; + for (int i = first; i < end; i++) { + long value = v.getLong(i); + MvAvg.processUnsignedLong(work, value); } - return Block.Ref.floating(builder.build().asBlock()); + double result = MvAvg.finish(work, valueCount); + builder.appendDouble(result); } + return builder.build().asBlock(); } } @@ -104,26 +100,24 @@ public Block.Ref evalNotNullable(Block.Ref ref) { * Evaluate blocks containing only single valued fields. */ @Override - public Block.Ref evalSingleValuedNullable(Block.Ref ref) { - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { - CompensatedSum work = new CompensatedSum(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - assert valueCount == 1; - int first = v.getFirstValueIndex(p); - long value = v.getLong(first); - double result = MvAvg.singleUnsignedLong(value); - builder.appendDouble(result); + public Block evalSingleValuedNullable(Block fieldVal) { + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + CompensatedSum work = new CompensatedSum(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + assert valueCount == 1; + int first = v.getFirstValueIndex(p); + long value = v.getLong(first); + double result = MvAvg.singleUnsignedLong(value); + builder.appendDouble(result); } + return builder.build(); } } @@ -131,22 +125,20 @@ public Block.Ref evalSingleValuedNullable(Block.Ref ref) { * Evaluate blocks containing only single valued fields. 
*/ @Override - public Block.Ref evalSingleValuedNotNullable(Block.Ref ref) { - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { - CompensatedSum work = new CompensatedSum(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - assert valueCount == 1; - int first = v.getFirstValueIndex(p); - long value = v.getLong(first); - double result = MvAvg.singleUnsignedLong(value); - builder.appendDouble(result); - } - return Block.Ref.floating(builder.build().asBlock()); + public Block evalSingleValuedNotNullable(Block fieldVal) { + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { + CompensatedSum work = new CompensatedSum(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + assert valueCount == 1; + int first = v.getFirstValueIndex(p); + long value = v.getLong(first); + double result = MvAvg.singleUnsignedLong(value); + builder.appendDouble(result); } + return builder.build().asBlock(); } }
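The multivalue evaluators (the MvAvg family above, the MvMax family starting below) get the matching treatment in their evalNullable/evalNotNullable/evalSingleValued* methods, with one extra wrinkle: the incoming value now arrives as a plain Block parameter and the method no longer closes it, where previously it closed the Block.Ref via try (ref). That reads as ownership of the input block staying with the calling framework, though the caller is not shown in this diff. Reduced to its shape from the MvAvgDoubleEvaluator hunk above (sketch only; the per-position loops are unchanged and elided here):

    // Before: the generated method owned and closed the incoming ref.
    @Override
    public Block.Ref evalNullable(Block.Ref ref) {
      try (ref) {
        DoubleBlock v = (DoubleBlock) ref.block();
        int positionCount = v.getPositionCount();
        try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) {
          // ... per-position loop appending averages or nulls ...
          return Block.Ref.floating(builder.build());
        }
      }
    }

    // After: the block is a plain parameter and is not closed in this method;
    // only the result builder is managed here.
    @Override
    public Block evalNullable(Block fieldVal) {
      DoubleBlock v = (DoubleBlock) fieldVal;
      int positionCount = v.getPositionCount();
      try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) {
        // ... same per-position loop ...
        return builder.build();
      }
    }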
*/ @Override - public Block.Ref evalNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNullable(ref); + public Block evalNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNullable(fieldVal); } - try (ref) { - BooleanBlock v = (BooleanBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (BooleanBlock.Builder builder = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - boolean value = v.getBoolean(first); - for (int i = first + 1; i < end; i++) { - boolean next = v.getBoolean(i); - value = MvMax.process(value, next); - } - boolean result = value; - builder.appendBoolean(result); + BooleanBlock v = (BooleanBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BooleanBlock.Builder builder = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + boolean value = v.getBoolean(first); + for (int i = first + 1; i < end; i++) { + boolean next = v.getBoolean(i); + value = MvMax.process(value, next); + } + boolean result = value; + builder.appendBoolean(result); } + return builder.build(); } } @@ -67,72 +65,66 @@ public Block.Ref evalNullable(Block.Ref ref) { * Evaluate blocks containing at least one multivalued field. */ @Override - public Block.Ref evalNotNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNotNullable(ref); + public Block evalNotNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNotNullable(fieldVal); } - try (ref) { - BooleanBlock v = (BooleanBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (BooleanVector.FixedBuilder builder = driverContext.blockFactory().newBooleanVectorFixedBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - boolean value = v.getBoolean(first); - for (int i = first + 1; i < end; i++) { - boolean next = v.getBoolean(i); - value = MvMax.process(value, next); - } - boolean result = value; - builder.appendBoolean(result); + BooleanBlock v = (BooleanBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BooleanVector.FixedBuilder builder = driverContext.blockFactory().newBooleanVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + boolean value = v.getBoolean(first); + for (int i = first + 1; i < end; i++) { + boolean next = v.getBoolean(i); + value = MvMax.process(value, next); } - return Block.Ref.floating(builder.build().asBlock()); + boolean result = value; + builder.appendBoolean(result); } + return builder.build().asBlock(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. 
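Both MvMax entry points first check mvSortedAscending() and branch to an evalAscending* variant that indexes a single element per position instead of scanning the whole range; for ascending values the maximum must sit at the last index, so MvMax.ascendingIndex(valueCount) can only resolve to valueCount - 1 (and MvMin's counterpart to 0). A standalone sketch of the shortcut, with illustrative method names:

final class AscendingFastPathSketch {
    // General case: scan every value in the position's range.
    static boolean plainMax(boolean[] values, int first, int count) {
        boolean max = values[first];
        for (int i = first + 1; i < first + count; i++) {
            max = max || values[i];   // boolean "max": true beats false
        }
        return max;
    }

    // Ascending case: the maximum is simply the last value in the range.
    static boolean sortedMax(boolean[] values, int first, int count) {
        return values[first + count - 1]; // ascendingIndex(count) == count - 1
    }

    public static void main(String[] args) {
        boolean[] sorted = {false, false, true};
        System.out.println(plainMax(sorted, 0, 3));  // true
        System.out.println(sortedMax(sorted, 0, 3)); // true, without scanning
    }
}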
*/ - private Block.Ref evalAscendingNullable(Block.Ref ref) { - try (ref) { - BooleanBlock v = (BooleanBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (BooleanBlock.Builder builder = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int idx = MvMax.ascendingIndex(valueCount); - boolean result = v.getBoolean(first + idx); - builder.appendBoolean(result); + private Block evalAscendingNullable(Block fieldVal) { + BooleanBlock v = (BooleanBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BooleanBlock.Builder builder = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int idx = MvMax.ascendingIndex(valueCount); + boolean result = v.getBoolean(first + idx); + builder.appendBoolean(result); } + return builder.build(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. */ - private Block.Ref evalAscendingNotNullable(Block.Ref ref) { - try (ref) { - BooleanBlock v = (BooleanBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (BooleanVector.FixedBuilder builder = driverContext.blockFactory().newBooleanVectorFixedBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int idx = MvMax.ascendingIndex(valueCount); - boolean result = v.getBoolean(first + idx); - builder.appendBoolean(result); - } - return Block.Ref.floating(builder.build().asBlock()); + private Block evalAscendingNotNullable(Block fieldVal) { + BooleanBlock v = (BooleanBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BooleanVector.FixedBuilder builder = driverContext.blockFactory().newBooleanVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int idx = MvMax.ascendingIndex(valueCount); + boolean result = v.getBoolean(first + idx); + builder.appendBoolean(result); } + return builder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxBytesRefEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxBytesRefEvaluator.java index b88b2641952d7..6f1469e365336 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxBytesRefEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxBytesRefEvaluator.java @@ -35,34 +35,32 @@ public String name() { * Evaluate blocks containing at least one multivalued field. 
*/ @Override - public Block.Ref evalNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNullable(ref); + public Block evalNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNullable(fieldVal); } - try (ref) { - BytesRefBlock v = (BytesRefBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { - BytesRef firstScratch = new BytesRef(); - BytesRef nextScratch = new BytesRef(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - BytesRef value = v.getBytesRef(first, firstScratch); - for (int i = first + 1; i < end; i++) { - BytesRef next = v.getBytesRef(i, nextScratch); - MvMax.process(value, next); - } - BytesRef result = value; - builder.appendBytesRef(result); + BytesRefBlock v = (BytesRefBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef firstScratch = new BytesRef(); + BytesRef nextScratch = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + BytesRef value = v.getBytesRef(first, firstScratch); + for (int i = first + 1; i < end; i++) { + BytesRef next = v.getBytesRef(i, nextScratch); + MvMax.process(value, next); + } + BytesRef result = value; + builder.appendBytesRef(result); } + return builder.build(); } } @@ -70,78 +68,72 @@ public Block.Ref evalNullable(Block.Ref ref) { * Evaluate blocks containing at least one multivalued field. 
*/ @Override - public Block.Ref evalNotNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNotNullable(ref); + public Block evalNotNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNotNullable(fieldVal); } - try (ref) { - BytesRefBlock v = (BytesRefBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (BytesRefVector.Builder builder = driverContext.blockFactory().newBytesRefVectorBuilder(positionCount)) { - BytesRef firstScratch = new BytesRef(); - BytesRef nextScratch = new BytesRef(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - BytesRef value = v.getBytesRef(first, firstScratch); - for (int i = first + 1; i < end; i++) { - BytesRef next = v.getBytesRef(i, nextScratch); - MvMax.process(value, next); - } - BytesRef result = value; - builder.appendBytesRef(result); + BytesRefBlock v = (BytesRefBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BytesRefVector.Builder builder = driverContext.blockFactory().newBytesRefVectorBuilder(positionCount)) { + BytesRef firstScratch = new BytesRef(); + BytesRef nextScratch = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + BytesRef value = v.getBytesRef(first, firstScratch); + for (int i = first + 1; i < end; i++) { + BytesRef next = v.getBytesRef(i, nextScratch); + MvMax.process(value, next); } - return Block.Ref.floating(builder.build().asBlock()); + BytesRef result = value; + builder.appendBytesRef(result); } + return builder.build().asBlock(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. */ - private Block.Ref evalAscendingNullable(Block.Ref ref) { - try (ref) { - BytesRefBlock v = (BytesRefBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { - BytesRef firstScratch = new BytesRef(); - BytesRef nextScratch = new BytesRef(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int idx = MvMax.ascendingIndex(valueCount); - BytesRef result = v.getBytesRef(first + idx, firstScratch); - builder.appendBytesRef(result); + private Block evalAscendingNullable(Block fieldVal) { + BytesRefBlock v = (BytesRefBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef firstScratch = new BytesRef(); + BytesRef nextScratch = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int idx = MvMax.ascendingIndex(valueCount); + BytesRef result = v.getBytesRef(first + idx, firstScratch); + builder.appendBytesRef(result); } + return builder.build(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. 
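The BytesRef variants differ from the primitive ones in one detail: they allocate firstScratch and nextScratch once and pass them into every getBytesRef call, so reading a value fills an existing holder instead of allocating a fresh object per element. A minimal sketch of that scratch-reuse pattern with a hypothetical Slice type (the real code uses Lucene's BytesRef):

// Hypothetical reusable view over a shared byte array, mimicking how the
// generated code reuses BytesRef "scratch" holders rather than allocating
// a new object for every value it reads.
final class Slice {
    byte[] bytes;
    int offset;
    int length;
}

final class ScratchReuseSketch {
    // Fill the caller-provided scratch instead of returning a new object.
    static Slice get(byte[] data, int[] offsets, int i, Slice scratch) {
        scratch.bytes = data;
        scratch.offset = offsets[i];
        scratch.length = offsets[i + 1] - offsets[i];
        return scratch;
    }

    public static void main(String[] args) {
        byte[] data = "abcdef".getBytes(java.nio.charset.StandardCharsets.UTF_8);
        int[] offsets = {0, 2, 6};   // two values: "ab" and "cdef"
        Slice scratch = new Slice(); // allocated once, reused per value
        for (int i = 0; i < 2; i++) {
            Slice v = get(data, offsets, i, scratch);
            System.out.println(new String(v.bytes, v.offset, v.length,
                java.nio.charset.StandardCharsets.UTF_8));
        }
    }
}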
*/ - private Block.Ref evalAscendingNotNullable(Block.Ref ref) { - try (ref) { - BytesRefBlock v = (BytesRefBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (BytesRefVector.Builder builder = driverContext.blockFactory().newBytesRefVectorBuilder(positionCount)) { - BytesRef firstScratch = new BytesRef(); - BytesRef nextScratch = new BytesRef(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int idx = MvMax.ascendingIndex(valueCount); - BytesRef result = v.getBytesRef(first + idx, firstScratch); - builder.appendBytesRef(result); - } - return Block.Ref.floating(builder.build().asBlock()); + private Block evalAscendingNotNullable(Block fieldVal) { + BytesRefBlock v = (BytesRefBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BytesRefVector.Builder builder = driverContext.blockFactory().newBytesRefVectorBuilder(positionCount)) { + BytesRef firstScratch = new BytesRef(); + BytesRef nextScratch = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int idx = MvMax.ascendingIndex(valueCount); + BytesRef result = v.getBytesRef(first + idx, firstScratch); + builder.appendBytesRef(result); } + return builder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxDoubleEvaluator.java index cdbca7f534cf3..34e51c2d6f221 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxDoubleEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxDoubleEvaluator.java @@ -33,32 +33,30 @@ public String name() { * Evaluate blocks containing at least one multivalued field. 
*/ @Override - public Block.Ref evalNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNullable(ref); + public Block evalNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNullable(fieldVal); } - try (ref) { - DoubleBlock v = (DoubleBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - double value = v.getDouble(first); - for (int i = first + 1; i < end; i++) { - double next = v.getDouble(i); - value = MvMax.process(value, next); - } - double result = value; - builder.appendDouble(result); + DoubleBlock v = (DoubleBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + double value = v.getDouble(first); + for (int i = first + 1; i < end; i++) { + double next = v.getDouble(i); + value = MvMax.process(value, next); + } + double result = value; + builder.appendDouble(result); } + return builder.build(); } } @@ -66,72 +64,66 @@ public Block.Ref evalNullable(Block.Ref ref) { * Evaluate blocks containing at least one multivalued field. */ @Override - public Block.Ref evalNotNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNotNullable(ref); + public Block evalNotNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNotNullable(fieldVal); } - try (ref) { - DoubleBlock v = (DoubleBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - double value = v.getDouble(first); - for (int i = first + 1; i < end; i++) { - double next = v.getDouble(i); - value = MvMax.process(value, next); - } - double result = value; - builder.appendDouble(result); + DoubleBlock v = (DoubleBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + double value = v.getDouble(first); + for (int i = first + 1; i < end; i++) { + double next = v.getDouble(i); + value = MvMax.process(value, next); } - return Block.Ref.floating(builder.build().asBlock()); + double result = value; + builder.appendDouble(result); } + return builder.build().asBlock(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. 
*/ - private Block.Ref evalAscendingNullable(Block.Ref ref) { - try (ref) { - DoubleBlock v = (DoubleBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int idx = MvMax.ascendingIndex(valueCount); - double result = v.getDouble(first + idx); - builder.appendDouble(result); + private Block evalAscendingNullable(Block fieldVal) { + DoubleBlock v = (DoubleBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int idx = MvMax.ascendingIndex(valueCount); + double result = v.getDouble(first + idx); + builder.appendDouble(result); } + return builder.build(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. */ - private Block.Ref evalAscendingNotNullable(Block.Ref ref) { - try (ref) { - DoubleBlock v = (DoubleBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int idx = MvMax.ascendingIndex(valueCount); - double result = v.getDouble(first + idx); - builder.appendDouble(result); - } - return Block.Ref.floating(builder.build().asBlock()); + private Block evalAscendingNotNullable(Block fieldVal) { + DoubleBlock v = (DoubleBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int idx = MvMax.ascendingIndex(valueCount); + double result = v.getDouble(first + idx); + builder.appendDouble(result); } + return builder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxIntEvaluator.java index a670bb5e43044..5382f2dff2fd8 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxIntEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxIntEvaluator.java @@ -33,32 +33,30 @@ public String name() { * Evaluate blocks containing at least one multivalued field. 
*/ @Override - public Block.Ref evalNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNullable(ref); + public Block evalNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNullable(fieldVal); } - try (ref) { - IntBlock v = (IntBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - int value = v.getInt(first); - for (int i = first + 1; i < end; i++) { - int next = v.getInt(i); - value = MvMax.process(value, next); - } - int result = value; - builder.appendInt(result); + IntBlock v = (IntBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + int value = v.getInt(first); + for (int i = first + 1; i < end; i++) { + int next = v.getInt(i); + value = MvMax.process(value, next); + } + int result = value; + builder.appendInt(result); } + return builder.build(); } } @@ -66,72 +64,66 @@ public Block.Ref evalNullable(Block.Ref ref) { * Evaluate blocks containing at least one multivalued field. */ @Override - public Block.Ref evalNotNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNotNullable(ref); + public Block evalNotNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNotNullable(fieldVal); } - try (ref) { - IntBlock v = (IntBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (IntVector.FixedBuilder builder = driverContext.blockFactory().newIntVectorFixedBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - int value = v.getInt(first); - for (int i = first + 1; i < end; i++) { - int next = v.getInt(i); - value = MvMax.process(value, next); - } - int result = value; - builder.appendInt(result); + IntBlock v = (IntBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (IntVector.FixedBuilder builder = driverContext.blockFactory().newIntVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + int value = v.getInt(first); + for (int i = first + 1; i < end; i++) { + int next = v.getInt(i); + value = MvMax.process(value, next); } - return Block.Ref.floating(builder.build().asBlock()); + int result = value; + builder.appendInt(result); } + return builder.build().asBlock(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. 
*/ - private Block.Ref evalAscendingNullable(Block.Ref ref) { - try (ref) { - IntBlock v = (IntBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int idx = MvMax.ascendingIndex(valueCount); - int result = v.getInt(first + idx); - builder.appendInt(result); + private Block evalAscendingNullable(Block fieldVal) { + IntBlock v = (IntBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int idx = MvMax.ascendingIndex(valueCount); + int result = v.getInt(first + idx); + builder.appendInt(result); } + return builder.build(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. */ - private Block.Ref evalAscendingNotNullable(Block.Ref ref) { - try (ref) { - IntBlock v = (IntBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (IntVector.FixedBuilder builder = driverContext.blockFactory().newIntVectorFixedBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int idx = MvMax.ascendingIndex(valueCount); - int result = v.getInt(first + idx); - builder.appendInt(result); - } - return Block.Ref.floating(builder.build().asBlock()); + private Block evalAscendingNotNullable(Block fieldVal) { + IntBlock v = (IntBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (IntVector.FixedBuilder builder = driverContext.blockFactory().newIntVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int idx = MvMax.ascendingIndex(valueCount); + int result = v.getInt(first + idx); + builder.appendInt(result); } + return builder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxLongEvaluator.java index 5b713429785f0..331d070315ea6 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxLongEvaluator.java @@ -33,32 +33,30 @@ public String name() { * Evaluate blocks containing at least one multivalued field. 
*/ @Override - public Block.Ref evalNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNullable(ref); + public Block evalNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNullable(fieldVal); } - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - long value = v.getLong(first); - for (int i = first + 1; i < end; i++) { - long next = v.getLong(i); - value = MvMax.process(value, next); - } - long result = value; - builder.appendLong(result); + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + long value = v.getLong(first); + for (int i = first + 1; i < end; i++) { + long next = v.getLong(i); + value = MvMax.process(value, next); + } + long result = value; + builder.appendLong(result); } + return builder.build(); } } @@ -66,72 +64,66 @@ public Block.Ref evalNullable(Block.Ref ref) { * Evaluate blocks containing at least one multivalued field. */ @Override - public Block.Ref evalNotNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNotNullable(ref); + public Block evalNotNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNotNullable(fieldVal); } - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (LongVector.FixedBuilder builder = driverContext.blockFactory().newLongVectorFixedBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - long value = v.getLong(first); - for (int i = first + 1; i < end; i++) { - long next = v.getLong(i); - value = MvMax.process(value, next); - } - long result = value; - builder.appendLong(result); + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (LongVector.FixedBuilder builder = driverContext.blockFactory().newLongVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + long value = v.getLong(first); + for (int i = first + 1; i < end; i++) { + long next = v.getLong(i); + value = MvMax.process(value, next); } - return Block.Ref.floating(builder.build().asBlock()); + long result = value; + builder.appendLong(result); } + return builder.build().asBlock(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. 
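The other split that recurs in every evaluator is Nullable versus NotNullable: the nullable path uses a Block builder so it can appendNull() for empty positions, while the not-nullable path, which the caller only takes when every position is known to hold a value, uses a fixed-size vector builder and converts the denser result with asBlock(). A rough standalone analogue of the two paths, with plain Java arrays standing in for blocks and vectors:

final class BuilderSplitSketch {
    // Nullable path: boxed output so an empty position can become null.
    static Long[] evalNullable(long[][] positions) {
        Long[] out = new Long[positions.length];
        for (int p = 0; p < positions.length; p++) {
            out[p] = positions[p].length == 0 ? null : positions[p][0]; // appendNull analogue
        }
        return out;
    }

    // Not-nullable path: dense primitive output, no null bookkeeping at all.
    static long[] evalNotNullable(long[][] positions) {
        long[] out = new long[positions.length];
        for (int p = 0; p < positions.length; p++) {
            out[p] = positions[p][0]; // guaranteed non-empty by the caller
        }
        return out;
    }

    public static void main(String[] args) {
        long[][] sparse = { {7}, {}, {9} };
        System.out.println(java.util.Arrays.toString(evalNullable(sparse)));  // [7, null, 9]
        long[][] dense = { {7}, {9} };
        System.out.println(java.util.Arrays.toString(evalNotNullable(dense))); // [7, 9]
    }
}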
*/ - private Block.Ref evalAscendingNullable(Block.Ref ref) { - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int idx = MvMax.ascendingIndex(valueCount); - long result = v.getLong(first + idx); - builder.appendLong(result); + private Block evalAscendingNullable(Block fieldVal) { + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int idx = MvMax.ascendingIndex(valueCount); + long result = v.getLong(first + idx); + builder.appendLong(result); } + return builder.build(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. */ - private Block.Ref evalAscendingNotNullable(Block.Ref ref) { - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (LongVector.FixedBuilder builder = driverContext.blockFactory().newLongVectorFixedBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int idx = MvMax.ascendingIndex(valueCount); - long result = v.getLong(first + idx); - builder.appendLong(result); - } - return Block.Ref.floating(builder.build().asBlock()); + private Block evalAscendingNotNullable(Block fieldVal) { + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (LongVector.FixedBuilder builder = driverContext.blockFactory().newLongVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int idx = MvMax.ascendingIndex(valueCount); + long result = v.getLong(first + idx); + builder.appendLong(result); } + return builder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianDoubleEvaluator.java index 7049c6a10e0e1..4870712f8f2fb 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianDoubleEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianDoubleEvaluator.java @@ -34,29 +34,27 @@ public String name() { * Evaluate blocks containing at least one multivalued field. 
*/ @Override - public Block.Ref evalNullable(Block.Ref ref) { - try (ref) { - DoubleBlock v = (DoubleBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { - MvMedian.Doubles work = new MvMedian.Doubles(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - for (int i = first; i < end; i++) { - double value = v.getDouble(i); - MvMedian.process(work, value); - } - double result = MvMedian.finish(work); - builder.appendDouble(result); + public Block evalNullable(Block fieldVal) { + DoubleBlock v = (DoubleBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + MvMedian.Doubles work = new MvMedian.Doubles(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + for (int i = first; i < end; i++) { + double value = v.getDouble(i); + MvMedian.process(work, value); + } + double result = MvMedian.finish(work); + builder.appendDouble(result); } + return builder.build(); } } @@ -64,25 +62,23 @@ public Block.Ref evalNullable(Block.Ref ref) { * Evaluate blocks containing at least one multivalued field. */ @Override - public Block.Ref evalNotNullable(Block.Ref ref) { - try (ref) { - DoubleBlock v = (DoubleBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { - MvMedian.Doubles work = new MvMedian.Doubles(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - for (int i = first; i < end; i++) { - double value = v.getDouble(i); - MvMedian.process(work, value); - } - double result = MvMedian.finish(work); - builder.appendDouble(result); + public Block evalNotNullable(Block fieldVal) { + DoubleBlock v = (DoubleBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { + MvMedian.Doubles work = new MvMedian.Doubles(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + for (int i = first; i < end; i++) { + double value = v.getDouble(i); + MvMedian.process(work, value); } - return Block.Ref.floating(builder.build().asBlock()); + double result = MvMedian.finish(work); + builder.appendDouble(result); } + return builder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianIntEvaluator.java index 1b9d2bfcdc642..83376cb634a8f 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianIntEvaluator.java +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianIntEvaluator.java @@ -33,32 +33,30 @@ public String name() { * Evaluate blocks containing at least one multivalued field. */ @Override - public Block.Ref evalNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNullable(ref); + public Block evalNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNullable(fieldVal); } - try (ref) { - IntBlock v = (IntBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { - MvMedian.Ints work = new MvMedian.Ints(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - for (int i = first; i < end; i++) { - int value = v.getInt(i); - MvMedian.process(work, value); - } - int result = MvMedian.finish(work); - builder.appendInt(result); + IntBlock v = (IntBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { + MvMedian.Ints work = new MvMedian.Ints(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + for (int i = first; i < end; i++) { + int value = v.getInt(i); + MvMedian.process(work, value); + } + int result = MvMedian.finish(work); + builder.appendInt(result); } + return builder.build(); } } @@ -66,72 +64,66 @@ public Block.Ref evalNullable(Block.Ref ref) { * Evaluate blocks containing at least one multivalued field. 
*/ @Override - public Block.Ref evalNotNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNotNullable(ref); + public Block evalNotNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNotNullable(fieldVal); } - try (ref) { - IntBlock v = (IntBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (IntVector.FixedBuilder builder = driverContext.blockFactory().newIntVectorFixedBuilder(positionCount)) { - MvMedian.Ints work = new MvMedian.Ints(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - for (int i = first; i < end; i++) { - int value = v.getInt(i); - MvMedian.process(work, value); - } - int result = MvMedian.finish(work); - builder.appendInt(result); + IntBlock v = (IntBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (IntVector.FixedBuilder builder = driverContext.blockFactory().newIntVectorFixedBuilder(positionCount)) { + MvMedian.Ints work = new MvMedian.Ints(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + for (int i = first; i < end; i++) { + int value = v.getInt(i); + MvMedian.process(work, value); } - return Block.Ref.floating(builder.build().asBlock()); + int result = MvMedian.finish(work); + builder.appendInt(result); } + return builder.build().asBlock(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. */ - private Block.Ref evalAscendingNullable(Block.Ref ref) { - try (ref) { - IntBlock v = (IntBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { - MvMedian.Ints work = new MvMedian.Ints(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int result = MvMedian.ascending(v, first, valueCount); - builder.appendInt(result); + private Block evalAscendingNullable(Block fieldVal) { + IntBlock v = (IntBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { + MvMedian.Ints work = new MvMedian.Ints(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int result = MvMedian.ascending(v, first, valueCount); + builder.appendInt(result); } + return builder.build(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. 
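MvMedian gets the same ascending shortcut, but with index arithmetic instead of a single lookup: on sorted input, MvMedian.ascending can take the middle element for an odd count and combine the two middle elements for an even one, skipping the collect-and-finish work the general path does. A self-contained sketch over a sorted int range (the even case sums in long so it cannot overflow):

final class SortedMedianSketch {
    // Median of values[first .. first + count), assuming ascending order.
    static int ascendingMedian(int[] values, int first, int count) {
        int middle = first + count / 2;
        if ((count & 1) == 1) {
            return values[middle];  // odd: the single middle element
        }
        long sum = (long) values[middle - 1] + values[middle];
        return (int) (sum / 2);     // even: mean of the two middle elements
    }

    public static void main(String[] args) {
        System.out.println(ascendingMedian(new int[] {1, 3, 9}, 0, 3));     // 3
        System.out.println(ascendingMedian(new int[] {1, 3, 9, 11}, 0, 4)); // 6
    }
}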
*/ - private Block.Ref evalAscendingNotNullable(Block.Ref ref) { - try (ref) { - IntBlock v = (IntBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (IntVector.FixedBuilder builder = driverContext.blockFactory().newIntVectorFixedBuilder(positionCount)) { - MvMedian.Ints work = new MvMedian.Ints(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int result = MvMedian.ascending(v, first, valueCount); - builder.appendInt(result); - } - return Block.Ref.floating(builder.build().asBlock()); + private Block evalAscendingNotNullable(Block fieldVal) { + IntBlock v = (IntBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (IntVector.FixedBuilder builder = driverContext.blockFactory().newIntVectorFixedBuilder(positionCount)) { + MvMedian.Ints work = new MvMedian.Ints(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int result = MvMedian.ascending(v, first, valueCount); + builder.appendInt(result); } + return builder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianLongEvaluator.java index 6a2221366ea38..bf324d4db4f72 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianLongEvaluator.java @@ -34,32 +34,30 @@ public String name() { * Evaluate blocks containing at least one multivalued field. 
*/ @Override - public Block.Ref evalNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNullable(ref); + public Block evalNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNullable(fieldVal); } - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { - MvMedian.Longs work = new MvMedian.Longs(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - for (int i = first; i < end; i++) { - long value = v.getLong(i); - MvMedian.process(work, value); - } - long result = MvMedian.finish(work); - builder.appendLong(result); + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + MvMedian.Longs work = new MvMedian.Longs(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + for (int i = first; i < end; i++) { + long value = v.getLong(i); + MvMedian.process(work, value); + } + long result = MvMedian.finish(work); + builder.appendLong(result); } + return builder.build(); } } @@ -67,72 +65,66 @@ public Block.Ref evalNullable(Block.Ref ref) { * Evaluate blocks containing at least one multivalued field. */ @Override - public Block.Ref evalNotNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNotNullable(ref); + public Block evalNotNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNotNullable(fieldVal); } - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (LongVector.FixedBuilder builder = driverContext.blockFactory().newLongVectorFixedBuilder(positionCount)) { - MvMedian.Longs work = new MvMedian.Longs(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - for (int i = first; i < end; i++) { - long value = v.getLong(i); - MvMedian.process(work, value); - } - long result = MvMedian.finish(work); - builder.appendLong(result); + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (LongVector.FixedBuilder builder = driverContext.blockFactory().newLongVectorFixedBuilder(positionCount)) { + MvMedian.Longs work = new MvMedian.Longs(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + for (int i = first; i < end; i++) { + long value = v.getLong(i); + MvMedian.process(work, value); } - return Block.Ref.floating(builder.build().asBlock()); + long result = MvMedian.finish(work); + builder.appendLong(result); } + return builder.build().asBlock(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. 
*/ - private Block.Ref evalAscendingNullable(Block.Ref ref) { - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { - MvMedian.Longs work = new MvMedian.Longs(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - long result = MvMedian.ascending(v, first, valueCount); - builder.appendLong(result); + private Block evalAscendingNullable(Block fieldVal) { + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + MvMedian.Longs work = new MvMedian.Longs(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + long result = MvMedian.ascending(v, first, valueCount); + builder.appendLong(result); } + return builder.build(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. */ - private Block.Ref evalAscendingNotNullable(Block.Ref ref) { - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (LongVector.FixedBuilder builder = driverContext.blockFactory().newLongVectorFixedBuilder(positionCount)) { - MvMedian.Longs work = new MvMedian.Longs(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - long result = MvMedian.ascending(v, first, valueCount); - builder.appendLong(result); - } - return Block.Ref.floating(builder.build().asBlock()); + private Block evalAscendingNotNullable(Block fieldVal) { + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (LongVector.FixedBuilder builder = driverContext.blockFactory().newLongVectorFixedBuilder(positionCount)) { + MvMedian.Longs work = new MvMedian.Longs(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + long result = MvMedian.ascending(v, first, valueCount); + builder.appendLong(result); } + return builder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianUnsignedLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianUnsignedLongEvaluator.java index 3864d0253ba5f..3f95ba060f825 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianUnsignedLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianUnsignedLongEvaluator.java @@ -34,32 +34,30 @@ public String name() { * Evaluate blocks containing at least one multivalued field. 
*/ @Override - public Block.Ref evalNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNullable(ref); + public Block evalNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNullable(fieldVal); } - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { - MvMedian.Longs work = new MvMedian.Longs(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - for (int i = first; i < end; i++) { - long value = v.getLong(i); - MvMedian.processUnsignedLong(work, value); - } - long result = MvMedian.finishUnsignedLong(work); - builder.appendLong(result); + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + MvMedian.Longs work = new MvMedian.Longs(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + for (int i = first; i < end; i++) { + long value = v.getLong(i); + MvMedian.processUnsignedLong(work, value); + } + long result = MvMedian.finishUnsignedLong(work); + builder.appendLong(result); } + return builder.build(); } } @@ -67,72 +65,66 @@ public Block.Ref evalNullable(Block.Ref ref) { * Evaluate blocks containing at least one multivalued field. */ @Override - public Block.Ref evalNotNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNotNullable(ref); + public Block evalNotNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNotNullable(fieldVal); } - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (LongVector.FixedBuilder builder = driverContext.blockFactory().newLongVectorFixedBuilder(positionCount)) { - MvMedian.Longs work = new MvMedian.Longs(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - for (int i = first; i < end; i++) { - long value = v.getLong(i); - MvMedian.processUnsignedLong(work, value); - } - long result = MvMedian.finishUnsignedLong(work); - builder.appendLong(result); + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (LongVector.FixedBuilder builder = driverContext.blockFactory().newLongVectorFixedBuilder(positionCount)) { + MvMedian.Longs work = new MvMedian.Longs(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + for (int i = first; i < end; i++) { + long value = v.getLong(i); + MvMedian.processUnsignedLong(work, value); } - return Block.Ref.floating(builder.build().asBlock()); + long result = MvMedian.finishUnsignedLong(work); + builder.appendLong(result); } + return builder.build().asBlock(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. 
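The UnsignedLong evaluators store values in ordinary long blocks, so the processUnsignedLong/finishUnsignedLong/ascendingUnsignedLong helpers they call must reinterpret those bits as unsigned 64-bit quantities when comparing and averaging. A sketch of the two primitives such code needs; unsignedAvg and its carry trick are illustrative, not the MvMedian implementation:

final class UnsignedLongSketch {
    // Unsigned mean of two 64-bit values without overflowing:
    // half of each operand, plus the carry bit both halves dropped.
    static long unsignedAvg(long a, long b) {
        return (a >>> 1) + (b >>> 1) + (a & b & 1L);
    }

    public static void main(String[] args) {
        long big = 0xFFFFFFFFFFFFFFFEL; // 2^64 - 2 when read as unsigned
        long avg = unsignedAvg(big, big);
        // Unsigned comparison and printing come from the JDK itself.
        System.out.println(Long.compareUnsigned(big, 1L) > 0); // true
        System.out.println(Long.toUnsignedString(avg));        // 18446744073709551614
    }
}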
*/ - private Block.Ref evalAscendingNullable(Block.Ref ref) { - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { - MvMedian.Longs work = new MvMedian.Longs(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - long result = MvMedian.ascendingUnsignedLong(v, first, valueCount); - builder.appendLong(result); + private Block evalAscendingNullable(Block fieldVal) { + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + MvMedian.Longs work = new MvMedian.Longs(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + long result = MvMedian.ascendingUnsignedLong(v, first, valueCount); + builder.appendLong(result); } + return builder.build(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. */ - private Block.Ref evalAscendingNotNullable(Block.Ref ref) { - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (LongVector.FixedBuilder builder = driverContext.blockFactory().newLongVectorFixedBuilder(positionCount)) { - MvMedian.Longs work = new MvMedian.Longs(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - long result = MvMedian.ascendingUnsignedLong(v, first, valueCount); - builder.appendLong(result); - } - return Block.Ref.floating(builder.build().asBlock()); + private Block evalAscendingNotNullable(Block fieldVal) { + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (LongVector.FixedBuilder builder = driverContext.blockFactory().newLongVectorFixedBuilder(positionCount)) { + MvMedian.Longs work = new MvMedian.Longs(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + long result = MvMedian.ascendingUnsignedLong(v, first, valueCount); + builder.appendLong(result); } + return builder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinBooleanEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinBooleanEvaluator.java index 032607382ae6b..a8546837479a8 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinBooleanEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinBooleanEvaluator.java @@ -34,32 +34,30 @@ public String name() { * Evaluate blocks containing at least one multivalued field. 
*/ @Override - public Block.Ref evalNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNullable(ref); + public Block evalNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNullable(fieldVal); } - try (ref) { - BooleanBlock v = (BooleanBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (BooleanBlock.Builder builder = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - boolean value = v.getBoolean(first); - for (int i = first + 1; i < end; i++) { - boolean next = v.getBoolean(i); - value = MvMin.process(value, next); - } - boolean result = value; - builder.appendBoolean(result); + BooleanBlock v = (BooleanBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BooleanBlock.Builder builder = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + boolean value = v.getBoolean(first); + for (int i = first + 1; i < end; i++) { + boolean next = v.getBoolean(i); + value = MvMin.process(value, next); + } + boolean result = value; + builder.appendBoolean(result); } + return builder.build(); } } @@ -67,72 +65,66 @@ public Block.Ref evalNullable(Block.Ref ref) { * Evaluate blocks containing at least one multivalued field. */ @Override - public Block.Ref evalNotNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNotNullable(ref); + public Block evalNotNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNotNullable(fieldVal); } - try (ref) { - BooleanBlock v = (BooleanBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (BooleanVector.FixedBuilder builder = driverContext.blockFactory().newBooleanVectorFixedBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - boolean value = v.getBoolean(first); - for (int i = first + 1; i < end; i++) { - boolean next = v.getBoolean(i); - value = MvMin.process(value, next); - } - boolean result = value; - builder.appendBoolean(result); + BooleanBlock v = (BooleanBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BooleanVector.FixedBuilder builder = driverContext.blockFactory().newBooleanVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + boolean value = v.getBoolean(first); + for (int i = first + 1; i < end; i++) { + boolean next = v.getBoolean(i); + value = MvMin.process(value, next); } - return Block.Ref.floating(builder.build().asBlock()); + boolean result = value; + builder.appendBoolean(result); } + return builder.build().asBlock(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. 
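* <p>A hedged guess at {@code MvMin.ascendingIndex(valueCount)}, which this diff
* calls but never shows: the minimum of an ascending run is its first value, so
* the helper presumably collapses the per-position scan into a constant index
* (a matching {@code MvMax} helper would mirror it with {@code valueCount - 1}):
* <pre>{@code
* static int ascendingIndex(int valueCount) {
*   return 0; // min of an ascending run is always the first element
* }
* }</pre>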
*/ - private Block.Ref evalAscendingNullable(Block.Ref ref) { - try (ref) { - BooleanBlock v = (BooleanBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (BooleanBlock.Builder builder = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int idx = MvMin.ascendingIndex(valueCount); - boolean result = v.getBoolean(first + idx); - builder.appendBoolean(result); + private Block evalAscendingNullable(Block fieldVal) { + BooleanBlock v = (BooleanBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BooleanBlock.Builder builder = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int idx = MvMin.ascendingIndex(valueCount); + boolean result = v.getBoolean(first + idx); + builder.appendBoolean(result); } + return builder.build(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. */ - private Block.Ref evalAscendingNotNullable(Block.Ref ref) { - try (ref) { - BooleanBlock v = (BooleanBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (BooleanVector.FixedBuilder builder = driverContext.blockFactory().newBooleanVectorFixedBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int idx = MvMin.ascendingIndex(valueCount); - boolean result = v.getBoolean(first + idx); - builder.appendBoolean(result); - } - return Block.Ref.floating(builder.build().asBlock()); + private Block evalAscendingNotNullable(Block fieldVal) { + BooleanBlock v = (BooleanBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BooleanVector.FixedBuilder builder = driverContext.blockFactory().newBooleanVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int idx = MvMin.ascendingIndex(valueCount); + boolean result = v.getBoolean(first + idx); + builder.appendBoolean(result); } + return builder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinBytesRefEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinBytesRefEvaluator.java index 05d792775af1d..f00e7272ae378 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinBytesRefEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinBytesRefEvaluator.java @@ -35,34 +35,32 @@ public String name() { * Evaluate blocks containing at least one multivalued field. 
*/ @Override - public Block.Ref evalNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNullable(ref); + public Block evalNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNullable(fieldVal); } - try (ref) { - BytesRefBlock v = (BytesRefBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { - BytesRef firstScratch = new BytesRef(); - BytesRef nextScratch = new BytesRef(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - BytesRef value = v.getBytesRef(first, firstScratch); - for (int i = first + 1; i < end; i++) { - BytesRef next = v.getBytesRef(i, nextScratch); - MvMin.process(value, next); - } - BytesRef result = value; - builder.appendBytesRef(result); + BytesRefBlock v = (BytesRefBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef firstScratch = new BytesRef(); + BytesRef nextScratch = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + BytesRef value = v.getBytesRef(first, firstScratch); + for (int i = first + 1; i < end; i++) { + BytesRef next = v.getBytesRef(i, nextScratch); + MvMin.process(value, next); + } + BytesRef result = value; + builder.appendBytesRef(result); } + return builder.build(); } } @@ -70,78 +68,72 @@ public Block.Ref evalNullable(Block.Ref ref) { * Evaluate blocks containing at least one multivalued field. 
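* <p>The not-nullable variant below can use a fixed-size vector builder because
* every position is guaranteed to yield exactly one non-null result, and the
* vector is wrapped with {@code asBlock()} on the way out. A minimal sketch of
* the dispatch a caller performs over the two variants (assuming the
* {@code Block.mayHaveNulls()} accessor; names illustrative):
* <pre>{@code
* Block evalField(Block fieldVal) {
*   return fieldVal.mayHaveNulls()
*       ? evalNullable(fieldVal)      // block builder, may appendNull()
*       : evalNotNullable(fieldVal);  // dense vector builder, asBlock() at the end
* }
* }</pre>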
*/ @Override - public Block.Ref evalNotNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNotNullable(ref); + public Block evalNotNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNotNullable(fieldVal); } - try (ref) { - BytesRefBlock v = (BytesRefBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (BytesRefVector.Builder builder = driverContext.blockFactory().newBytesRefVectorBuilder(positionCount)) { - BytesRef firstScratch = new BytesRef(); - BytesRef nextScratch = new BytesRef(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - BytesRef value = v.getBytesRef(first, firstScratch); - for (int i = first + 1; i < end; i++) { - BytesRef next = v.getBytesRef(i, nextScratch); - MvMin.process(value, next); - } - BytesRef result = value; - builder.appendBytesRef(result); + BytesRefBlock v = (BytesRefBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BytesRefVector.Builder builder = driverContext.blockFactory().newBytesRefVectorBuilder(positionCount)) { + BytesRef firstScratch = new BytesRef(); + BytesRef nextScratch = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + BytesRef value = v.getBytesRef(first, firstScratch); + for (int i = first + 1; i < end; i++) { + BytesRef next = v.getBytesRef(i, nextScratch); + MvMin.process(value, next); } - return Block.Ref.floating(builder.build().asBlock()); + BytesRef result = value; + builder.appendBytesRef(result); } + return builder.build().asBlock(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. */ - private Block.Ref evalAscendingNullable(Block.Ref ref) { - try (ref) { - BytesRefBlock v = (BytesRefBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { - BytesRef firstScratch = new BytesRef(); - BytesRef nextScratch = new BytesRef(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int idx = MvMin.ascendingIndex(valueCount); - BytesRef result = v.getBytesRef(first + idx, firstScratch); - builder.appendBytesRef(result); + private Block evalAscendingNullable(Block fieldVal) { + BytesRefBlock v = (BytesRefBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef firstScratch = new BytesRef(); + BytesRef nextScratch = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int idx = MvMin.ascendingIndex(valueCount); + BytesRef result = v.getBytesRef(first + idx, firstScratch); + builder.appendBytesRef(result); } + return builder.build(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. 
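* <p>The {@code firstScratch}/{@code nextScratch} pair threaded through these
* BytesRef loops exists because {@code getBytesRef(i, scratch)} re-targets the
* scratch at the block's backing bytes instead of copying. A usage sketch,
* assuming those view semantics:
* <pre>{@code
* static void scan(BytesRefBlock v, int first, int end) {
*   BytesRef scratch = new BytesRef();
*   for (int i = first; i < end; i++) {
*     BytesRef value = v.getBytesRef(i, scratch); // points scratch at position i
*     // consume value now: the next call re-targets the same scratch
*   }
* }
* }</pre>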
*/ - private Block.Ref evalAscendingNotNullable(Block.Ref ref) { - try (ref) { - BytesRefBlock v = (BytesRefBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (BytesRefVector.Builder builder = driverContext.blockFactory().newBytesRefVectorBuilder(positionCount)) { - BytesRef firstScratch = new BytesRef(); - BytesRef nextScratch = new BytesRef(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int idx = MvMin.ascendingIndex(valueCount); - BytesRef result = v.getBytesRef(first + idx, firstScratch); - builder.appendBytesRef(result); - } - return Block.Ref.floating(builder.build().asBlock()); + private Block evalAscendingNotNullable(Block fieldVal) { + BytesRefBlock v = (BytesRefBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BytesRefVector.Builder builder = driverContext.blockFactory().newBytesRefVectorBuilder(positionCount)) { + BytesRef firstScratch = new BytesRef(); + BytesRef nextScratch = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int idx = MvMin.ascendingIndex(valueCount); + BytesRef result = v.getBytesRef(first + idx, firstScratch); + builder.appendBytesRef(result); } + return builder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinDoubleEvaluator.java index 9652d869e0726..5cd7ee9039a33 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinDoubleEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinDoubleEvaluator.java @@ -33,32 +33,30 @@ public String name() { * Evaluate blocks containing at least one multivalued field. 
*/ @Override - public Block.Ref evalNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNullable(ref); + public Block evalNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNullable(fieldVal); } - try (ref) { - DoubleBlock v = (DoubleBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - double value = v.getDouble(first); - for (int i = first + 1; i < end; i++) { - double next = v.getDouble(i); - value = MvMin.process(value, next); - } - double result = value; - builder.appendDouble(result); + DoubleBlock v = (DoubleBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + double value = v.getDouble(first); + for (int i = first + 1; i < end; i++) { + double next = v.getDouble(i); + value = MvMin.process(value, next); + } + double result = value; + builder.appendDouble(result); } + return builder.build(); } } @@ -66,72 +64,66 @@ public Block.Ref evalNullable(Block.Ref ref) { * Evaluate blocks containing at least one multivalued field. */ @Override - public Block.Ref evalNotNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNotNullable(ref); + public Block evalNotNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNotNullable(fieldVal); } - try (ref) { - DoubleBlock v = (DoubleBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - double value = v.getDouble(first); - for (int i = first + 1; i < end; i++) { - double next = v.getDouble(i); - value = MvMin.process(value, next); - } - double result = value; - builder.appendDouble(result); + DoubleBlock v = (DoubleBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + double value = v.getDouble(first); + for (int i = first + 1; i < end; i++) { + double next = v.getDouble(i); + value = MvMin.process(value, next); } - return Block.Ref.floating(builder.build().asBlock()); + double result = value; + builder.appendDouble(result); } + return builder.build().asBlock(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. 
*/ - private Block.Ref evalAscendingNullable(Block.Ref ref) { - try (ref) { - DoubleBlock v = (DoubleBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int idx = MvMin.ascendingIndex(valueCount); - double result = v.getDouble(first + idx); - builder.appendDouble(result); + private Block evalAscendingNullable(Block fieldVal) { + DoubleBlock v = (DoubleBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int idx = MvMin.ascendingIndex(valueCount); + double result = v.getDouble(first + idx); + builder.appendDouble(result); } + return builder.build(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. */ - private Block.Ref evalAscendingNotNullable(Block.Ref ref) { - try (ref) { - DoubleBlock v = (DoubleBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int idx = MvMin.ascendingIndex(valueCount); - double result = v.getDouble(first + idx); - builder.appendDouble(result); - } - return Block.Ref.floating(builder.build().asBlock()); + private Block evalAscendingNotNullable(Block fieldVal) { + DoubleBlock v = (DoubleBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int idx = MvMin.ascendingIndex(valueCount); + double result = v.getDouble(first + idx); + builder.appendDouble(result); } + return builder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinIntEvaluator.java index ff52d235cecc5..93b4612f898ad 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinIntEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinIntEvaluator.java @@ -33,32 +33,30 @@ public String name() { * Evaluate blocks containing at least one multivalued field. 
*/ @Override - public Block.Ref evalNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNullable(ref); + public Block evalNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNullable(fieldVal); } - try (ref) { - IntBlock v = (IntBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - int value = v.getInt(first); - for (int i = first + 1; i < end; i++) { - int next = v.getInt(i); - value = MvMin.process(value, next); - } - int result = value; - builder.appendInt(result); + IntBlock v = (IntBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + int value = v.getInt(first); + for (int i = first + 1; i < end; i++) { + int next = v.getInt(i); + value = MvMin.process(value, next); + } + int result = value; + builder.appendInt(result); } + return builder.build(); } } @@ -66,72 +64,66 @@ public Block.Ref evalNullable(Block.Ref ref) { * Evaluate blocks containing at least one multivalued field. */ @Override - public Block.Ref evalNotNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNotNullable(ref); + public Block evalNotNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNotNullable(fieldVal); } - try (ref) { - IntBlock v = (IntBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (IntVector.FixedBuilder builder = driverContext.blockFactory().newIntVectorFixedBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - int value = v.getInt(first); - for (int i = first + 1; i < end; i++) { - int next = v.getInt(i); - value = MvMin.process(value, next); - } - int result = value; - builder.appendInt(result); + IntBlock v = (IntBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (IntVector.FixedBuilder builder = driverContext.blockFactory().newIntVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + int value = v.getInt(first); + for (int i = first + 1; i < end; i++) { + int next = v.getInt(i); + value = MvMin.process(value, next); } - return Block.Ref.floating(builder.build().asBlock()); + int result = value; + builder.appendInt(result); } + return builder.build().asBlock(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. 
*/ - private Block.Ref evalAscendingNullable(Block.Ref ref) { - try (ref) { - IntBlock v = (IntBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int idx = MvMin.ascendingIndex(valueCount); - int result = v.getInt(first + idx); - builder.appendInt(result); + private Block evalAscendingNullable(Block fieldVal) { + IntBlock v = (IntBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int idx = MvMin.ascendingIndex(valueCount); + int result = v.getInt(first + idx); + builder.appendInt(result); } + return builder.build(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. */ - private Block.Ref evalAscendingNotNullable(Block.Ref ref) { - try (ref) { - IntBlock v = (IntBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (IntVector.FixedBuilder builder = driverContext.blockFactory().newIntVectorFixedBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int idx = MvMin.ascendingIndex(valueCount); - int result = v.getInt(first + idx); - builder.appendInt(result); - } - return Block.Ref.floating(builder.build().asBlock()); + private Block evalAscendingNotNullable(Block fieldVal) { + IntBlock v = (IntBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (IntVector.FixedBuilder builder = driverContext.blockFactory().newIntVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int idx = MvMin.ascendingIndex(valueCount); + int result = v.getInt(first + idx); + builder.appendInt(result); } + return builder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinLongEvaluator.java index b940eead096a4..9c974caecc40d 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinLongEvaluator.java @@ -33,32 +33,30 @@ public String name() { * Evaluate blocks containing at least one multivalued field. 
*/ @Override - public Block.Ref evalNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNullable(ref); + public Block evalNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNullable(fieldVal); } - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - long value = v.getLong(first); - for (int i = first + 1; i < end; i++) { - long next = v.getLong(i); - value = MvMin.process(value, next); - } - long result = value; - builder.appendLong(result); + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + long value = v.getLong(first); + for (int i = first + 1; i < end; i++) { + long next = v.getLong(i); + value = MvMin.process(value, next); + } + long result = value; + builder.appendLong(result); } + return builder.build(); } } @@ -66,72 +64,66 @@ public Block.Ref evalNullable(Block.Ref ref) { * Evaluate blocks containing at least one multivalued field. */ @Override - public Block.Ref evalNotNullable(Block.Ref ref) { - if (ref.block().mvSortedAscending()) { - return evalAscendingNotNullable(ref); + public Block evalNotNullable(Block fieldVal) { + if (fieldVal.mvSortedAscending()) { + return evalAscendingNotNullable(fieldVal); } - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (LongVector.FixedBuilder builder = driverContext.blockFactory().newLongVectorFixedBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - long value = v.getLong(first); - for (int i = first + 1; i < end; i++) { - long next = v.getLong(i); - value = MvMin.process(value, next); - } - long result = value; - builder.appendLong(result); + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (LongVector.FixedBuilder builder = driverContext.blockFactory().newLongVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + long value = v.getLong(first); + for (int i = first + 1; i < end; i++) { + long next = v.getLong(i); + value = MvMin.process(value, next); } - return Block.Ref.floating(builder.build().asBlock()); + long result = value; + builder.appendLong(result); } + return builder.build().asBlock(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. 
*/ - private Block.Ref evalAscendingNullable(Block.Ref ref) { - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int idx = MvMin.ascendingIndex(valueCount); - long result = v.getLong(first + idx); - builder.appendLong(result); + private Block evalAscendingNullable(Block fieldVal) { + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int idx = MvMin.ascendingIndex(valueCount); + long result = v.getLong(first + idx); + builder.appendLong(result); } + return builder.build(); } } /** * Evaluate blocks containing at least one multivalued field and all multivalued fields are in ascending order. */ - private Block.Ref evalAscendingNotNullable(Block.Ref ref) { - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (LongVector.FixedBuilder builder = driverContext.blockFactory().newLongVectorFixedBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int idx = MvMin.ascendingIndex(valueCount); - long result = v.getLong(first + idx); - builder.appendLong(result); - } - return Block.Ref.floating(builder.build().asBlock()); + private Block evalAscendingNotNullable(Block fieldVal) { + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (LongVector.FixedBuilder builder = driverContext.blockFactory().newLongVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int idx = MvMin.ascendingIndex(valueCount); + long result = v.getLong(first + idx); + builder.appendLong(result); } + return builder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumDoubleEvaluator.java index d5d29942491c6..cc54ebad77667 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumDoubleEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumDoubleEvaluator.java @@ -34,29 +34,27 @@ public String name() { * Evaluate blocks containing at least one multivalued field. 
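* <p>{@code CompensatedSum}, used below, is a Kahan-style compensated summation:
* it carries the low-order error lost by each floating-point add so long runs of
* doubles do not drift. A self-contained sketch of the algorithm (not the
* class's actual API):
* <pre>{@code
* static double kahanSum(double[] values) {
*   double sum = 0, comp = 0;   // comp holds the running rounding error
*   for (double value : values) {
*     double y = value - comp;  // re-inject the error lost last time
*     double t = sum + y;
*     comp = (t - sum) - y;     // what t failed to absorb from y
*     sum = t;
*   }
*   return sum;
* }
* }</pre>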
*/ @Override - public Block.Ref evalNullable(Block.Ref ref) { - try (ref) { - DoubleBlock v = (DoubleBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { - CompensatedSum work = new CompensatedSum(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - for (int i = first; i < end; i++) { - double value = v.getDouble(i); - MvSum.process(work, value); - } - double result = MvSum.finish(work); - builder.appendDouble(result); + public Block evalNullable(Block fieldVal) { + DoubleBlock v = (DoubleBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + CompensatedSum work = new CompensatedSum(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; } - return Block.Ref.floating(builder.build()); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + for (int i = first; i < end; i++) { + double value = v.getDouble(i); + MvSum.process(work, value); + } + double result = MvSum.finish(work); + builder.appendDouble(result); } + return builder.build(); } } @@ -64,25 +62,23 @@ public Block.Ref evalNullable(Block.Ref ref) { * Evaluate blocks containing at least one multivalued field. */ @Override - public Block.Ref evalNotNullable(Block.Ref ref) { - try (ref) { - DoubleBlock v = (DoubleBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { - CompensatedSum work = new CompensatedSum(); - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - for (int i = first; i < end; i++) { - double value = v.getDouble(i); - MvSum.process(work, value); - } - double result = MvSum.finish(work); - builder.appendDouble(result); + public Block evalNotNullable(Block fieldVal) { + DoubleBlock v = (DoubleBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { + CompensatedSum work = new CompensatedSum(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + for (int i = first; i < end; i++) { + double value = v.getDouble(i); + MvSum.process(work, value); } - return Block.Ref.floating(builder.build().asBlock()); + double result = MvSum.finish(work); + builder.appendDouble(result); } + return builder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumIntEvaluator.java index 8375a303eb7cb..bd24d4a917e84 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumIntEvaluator.java +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumIntEvaluator.java @@ -39,34 +39,32 @@ public String name() { * Evaluate blocks containing at least one multivalued field. */ @Override - public Block.Ref evalNullable(Block.Ref ref) { - try (ref) { - IntBlock v = (IntBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - try { - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - int value = v.getInt(first); - for (int i = first + 1; i < end; i++) { - int next = v.getInt(i); - value = MvSum.process(value, next); - } - int result = value; - builder.appendInt(result); - } catch (ArithmeticException e) { - warnings.registerException(e); - builder.appendNull(); + public Block evalNullable(Block fieldVal) { + IntBlock v = (IntBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; + } + try { + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + int value = v.getInt(first); + for (int i = first + 1; i < end; i++) { + int next = v.getInt(i); + value = MvSum.process(value, next); } + int result = value; + builder.appendInt(result); + } catch (ArithmeticException e) { + warnings.registerException(e); + builder.appendNull(); } - return Block.Ref.floating(builder.build()); } + return builder.build(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumLongEvaluator.java index 625c3756bb5af..823d6fa17bee2 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumLongEvaluator.java @@ -39,34 +39,32 @@ public String name() { * Evaluate blocks containing at least one multivalued field. 
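* <p>The {@code catch (ArithmeticException e)} in the int and long sum loops is
* consistent with {@code MvSum.process} delegating to {@code Math.addExact}, so
* overflow surfaces as an exception and the evaluator emits null for that
* position after registering a warning. A hedged sketch of such an overload:
* <pre>{@code
* static long process(long current, long next) {
*   return Math.addExact(current, next); // throws ArithmeticException on overflow
* }
* }</pre>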
*/ @Override - public Block.Ref evalNullable(Block.Ref ref) { - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - try { - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - long value = v.getLong(first); - for (int i = first + 1; i < end; i++) { - long next = v.getLong(i); - value = MvSum.process(value, next); - } - long result = value; - builder.appendLong(result); - } catch (ArithmeticException e) { - warnings.registerException(e); - builder.appendNull(); + public Block evalNullable(Block fieldVal) { + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; + } + try { + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + long value = v.getLong(first); + for (int i = first + 1; i < end; i++) { + long next = v.getLong(i); + value = MvSum.process(value, next); } + long result = value; + builder.appendLong(result); + } catch (ArithmeticException e) { + warnings.registerException(e); + builder.appendNull(); } - return Block.Ref.floating(builder.build()); } + return builder.build(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumUnsignedLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumUnsignedLongEvaluator.java index 407b0be54b4a5..8203b46b57a51 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumUnsignedLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumUnsignedLongEvaluator.java @@ -39,34 +39,32 @@ public String name() { * Evaluate blocks containing at least one multivalued field. 
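* <p>{@code MvSum.processUnsignedLong} is not shown in this diff. An unsigned
* 64-bit add overflows exactly when the wrapped sum compares unsigned-less than
* either operand, so a hypothetical helper with the same throw-on-overflow shape
* could look like this (not the actual implementation, which also has to respect
* ESQL's internal unsigned-long encoding):
* <pre>{@code
* static long addUnsignedExact(long a, long b) { // hypothetical helper
*   long sum = a + b;                            // wraps modulo 2^64
*   if (Long.compareUnsigned(sum, a) < 0) {
*     throw new ArithmeticException("unsigned long overflow");
*   }
*   return sum;
* }
* }</pre>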
*/ @Override - public Block.Ref evalNullable(Block.Ref ref) { - try (ref) { - LongBlock v = (LongBlock) ref.block(); - int positionCount = v.getPositionCount(); - try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - int valueCount = v.getValueCount(p); - if (valueCount == 0) { - builder.appendNull(); - continue; - } - try { - int first = v.getFirstValueIndex(p); - int end = first + valueCount; - long value = v.getLong(first); - for (int i = first + 1; i < end; i++) { - long next = v.getLong(i); - value = MvSum.processUnsignedLong(value, next); - } - long result = value; - builder.appendLong(result); - } catch (ArithmeticException e) { - warnings.registerException(e); - builder.appendNull(); + public Block evalNullable(Block fieldVal) { + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; + } + try { + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + long value = v.getLong(first); + for (int i = first + 1; i < end; i++) { + long next = v.getLong(i); + value = MvSum.processUnsignedLong(value, next); } + long result = value; + builder.appendLong(result); + } catch (ArithmeticException e) { + warnings.registerException(e); + builder.appendNull(); } - return Block.Ref.floating(builder.build()); } + return builder.build(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatEvaluator.java index bb935ac2e91c6..2b3045d29c70f 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatEvaluator.java @@ -38,22 +38,20 @@ public ConcatEvaluator(BreakingBytesRefBuilder scratch, EvalOperator.ExpressionE } @Override - public Block.Ref eval(Page page) { - Block.Ref[] valuesRefs = new Block.Ref[values.length]; - try (Releasable valuesRelease = Releasables.wrap(valuesRefs)) { - BytesRefBlock[] valuesBlocks = new BytesRefBlock[values.length]; + public Block eval(Page page) { + BytesRefBlock[] valuesBlocks = new BytesRefBlock[values.length]; + try (Releasable valuesRelease = Releasables.wrap(valuesBlocks)) { for (int i = 0; i < valuesBlocks.length; i++) { - valuesRefs[i] = values[i].eval(page); - valuesBlocks[i] = (BytesRefBlock) valuesRefs[i].block(); + valuesBlocks[i] = (BytesRefBlock)values[i].eval(page); } BytesRefVector[] valuesVectors = new BytesRefVector[values.length]; for (int i = 0; i < valuesBlocks.length; i++) { valuesVectors[i] = valuesBlocks[i].asVector(); if (valuesVectors[i] == null) { - return Block.Ref.floating(eval(page.getPositionCount(), valuesBlocks)); + return eval(page.getPositionCount(), valuesBlocks); } } - return Block.Ref.floating(eval(page.getPositionCount(), valuesVectors).asBlock()); + return eval(page.getPositionCount(), valuesVectors).asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/EndsWithEvaluator.java 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/EndsWithEvaluator.java index c0ac9f41fd99e..b1cadf96b80cd 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/EndsWithEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/EndsWithEvaluator.java @@ -36,20 +36,18 @@ public EndsWithEvaluator(EvalOperator.ExpressionEvaluator str, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref strRef = str.eval(page)) { - BytesRefBlock strBlock = (BytesRefBlock) strRef.block(); - try (Block.Ref suffixRef = suffix.eval(page)) { - BytesRefBlock suffixBlock = (BytesRefBlock) suffixRef.block(); + public Block eval(Page page) { + try (BytesRefBlock strBlock = (BytesRefBlock) str.eval(page)) { + try (BytesRefBlock suffixBlock = (BytesRefBlock) suffix.eval(page)) { BytesRefVector strVector = strBlock.asVector(); if (strVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), strBlock, suffixBlock)); + return eval(page.getPositionCount(), strBlock, suffixBlock); } BytesRefVector suffixVector = suffixBlock.asVector(); if (suffixVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), strBlock, suffixBlock)); + return eval(page.getPositionCount(), strBlock, suffixBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), strVector, suffixVector).asBlock()); + return eval(page.getPositionCount(), strVector, suffixVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrimEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrimEvaluator.java index 5df651e0013f4..034cf5ddc5727 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrimEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrimEvaluator.java @@ -30,14 +30,13 @@ public LTrimEvaluator(EvalOperator.ExpressionEvaluator val, DriverContext driver } @Override - public Block.Ref eval(Page page) { - try (Block.Ref valRef = val.eval(page)) { - BytesRefBlock valBlock = (BytesRefBlock) valRef.block(); + public Block eval(Page page) { + try (BytesRefBlock valBlock = (BytesRefBlock) val.eval(page)) { BytesRefVector valVector = valBlock.asVector(); if (valVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), valBlock)); + return eval(page.getPositionCount(), valBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), valVector).asBlock()); + return eval(page.getPositionCount(), valVector).asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftEvaluator.java index 35b4efc290bbf..b2cbbc8ed9cf6 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftEvaluator.java @@ -45,20 +45,18 @@ public LeftEvaluator(BytesRef out, UnicodeUtil.UTF8CodePoint cp, } @Override - public Block.Ref eval(Page 
page) { - try (Block.Ref strRef = str.eval(page)) { - BytesRefBlock strBlock = (BytesRefBlock) strRef.block(); - try (Block.Ref lengthRef = length.eval(page)) { - IntBlock lengthBlock = (IntBlock) lengthRef.block(); + public Block eval(Page page) { + try (BytesRefBlock strBlock = (BytesRefBlock) str.eval(page)) { + try (IntBlock lengthBlock = (IntBlock) length.eval(page)) { BytesRefVector strVector = strBlock.asVector(); if (strVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), strBlock, lengthBlock)); + return eval(page.getPositionCount(), strBlock, lengthBlock); } IntVector lengthVector = lengthBlock.asVector(); if (lengthVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), strBlock, lengthBlock)); + return eval(page.getPositionCount(), strBlock, lengthBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), strVector, lengthVector).asBlock()); + return eval(page.getPositionCount(), strVector, lengthVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthEvaluator.java index 45d4438bec986..2896de06f656d 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthEvaluator.java @@ -32,14 +32,13 @@ public LengthEvaluator(EvalOperator.ExpressionEvaluator val, DriverContext drive } @Override - public Block.Ref eval(Page page) { - try (Block.Ref valRef = val.eval(page)) { - BytesRefBlock valBlock = (BytesRefBlock) valRef.block(); + public Block eval(Page page) { + try (BytesRefBlock valBlock = (BytesRefBlock) val.eval(page)) { BytesRefVector valVector = valBlock.asVector(); if (valVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), valBlock)); + return eval(page.getPositionCount(), valBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), valVector).asBlock()); + return eval(page.getPositionCount(), valVector).asBlock(); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrimEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrimEvaluator.java index 86aa88b64bd8b..a2d1d6bb34384 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrimEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrimEvaluator.java @@ -30,14 +30,13 @@ public RTrimEvaluator(EvalOperator.ExpressionEvaluator val, DriverContext driver } @Override - public Block.Ref eval(Page page) { - try (Block.Ref valRef = val.eval(page)) { - BytesRefBlock valBlock = (BytesRefBlock) valRef.block(); + public Block eval(Page page) { + try (BytesRefBlock valBlock = (BytesRefBlock) val.eval(page)) { BytesRefVector valVector = valBlock.asVector(); if (valVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), valBlock)); + return eval(page.getPositionCount(), valBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), valVector).asBlock()); + return eval(page.getPositionCount(), valVector).asBlock(); } } diff --git 
a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceConstantEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceConstantEvaluator.java
index 929ed6b9b0d45..b3af24d2f6851 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceConstantEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceConstantEvaluator.java
@@ -44,20 +44,18 @@ public ReplaceConstantEvaluator(Source source, EvalOperator.ExpressionEvaluator
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref strRef = str.eval(page)) {
-      BytesRefBlock strBlock = (BytesRefBlock) strRef.block();
-      try (Block.Ref newStrRef = newStr.eval(page)) {
-        BytesRefBlock newStrBlock = (BytesRefBlock) newStrRef.block();
+  public Block eval(Page page) {
+    try (BytesRefBlock strBlock = (BytesRefBlock) str.eval(page)) {
+      try (BytesRefBlock newStrBlock = (BytesRefBlock) newStr.eval(page)) {
         BytesRefVector strVector = strBlock.asVector();
         if (strVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), strBlock, newStrBlock));
+          return eval(page.getPositionCount(), strBlock, newStrBlock);
         }
         BytesRefVector newStrVector = newStrBlock.asVector();
         if (newStrVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), strBlock, newStrBlock));
+          return eval(page.getPositionCount(), strBlock, newStrBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), strVector, newStrVector));
+        return eval(page.getPositionCount(), strVector, newStrVector);
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceEvaluator.java
index 9d742bd9fb7ef..89013fd3ca2f1 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceEvaluator.java
@@ -44,26 +44,23 @@ public ReplaceEvaluator(Source source, EvalOperator.ExpressionEvaluator str,
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref strRef = str.eval(page)) {
-      BytesRefBlock strBlock = (BytesRefBlock) strRef.block();
-      try (Block.Ref regexRef = regex.eval(page)) {
-        BytesRefBlock regexBlock = (BytesRefBlock) regexRef.block();
-        try (Block.Ref newStrRef = newStr.eval(page)) {
-          BytesRefBlock newStrBlock = (BytesRefBlock) newStrRef.block();
+  public Block eval(Page page) {
+    try (BytesRefBlock strBlock = (BytesRefBlock) str.eval(page)) {
+      try (BytesRefBlock regexBlock = (BytesRefBlock) regex.eval(page)) {
+        try (BytesRefBlock newStrBlock = (BytesRefBlock) newStr.eval(page)) {
           BytesRefVector strVector = strBlock.asVector();
           if (strVector == null) {
-            return Block.Ref.floating(eval(page.getPositionCount(), strBlock, regexBlock, newStrBlock));
+            return eval(page.getPositionCount(), strBlock, regexBlock, newStrBlock);
           }
           BytesRefVector regexVector = regexBlock.asVector();
           if (regexVector == null) {
-            return Block.Ref.floating(eval(page.getPositionCount(), strBlock, regexBlock, newStrBlock));
+            return eval(page.getPositionCount(), strBlock, regexBlock, newStrBlock);
           }
           BytesRefVector newStrVector = newStrBlock.asVector();
           if (newStrVector == null) {
-            return Block.Ref.floating(eval(page.getPositionCount(), strBlock, regexBlock, newStrBlock));
+            return eval(page.getPositionCount(), strBlock, regexBlock, newStrBlock);
           }
-          return Block.Ref.floating(eval(page.getPositionCount(), strVector, regexVector, newStrVector));
+          return eval(page.getPositionCount(), strVector, regexVector, newStrVector);
         }
       }
     }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightEvaluator.java
index a4e6083a26eb0..1e3094ed8d5d3 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightEvaluator.java
@@ -45,20 +45,18 @@ public RightEvaluator(BytesRef out, UnicodeUtil.UTF8CodePoint cp,
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref strRef = str.eval(page)) {
-      BytesRefBlock strBlock = (BytesRefBlock) strRef.block();
-      try (Block.Ref lengthRef = length.eval(page)) {
-        IntBlock lengthBlock = (IntBlock) lengthRef.block();
+  public Block eval(Page page) {
+    try (BytesRefBlock strBlock = (BytesRefBlock) str.eval(page)) {
+      try (IntBlock lengthBlock = (IntBlock) length.eval(page)) {
         BytesRefVector strVector = strBlock.asVector();
         if (strVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), strBlock, lengthBlock));
+          return eval(page.getPositionCount(), strBlock, lengthBlock);
         }
         IntVector lengthVector = lengthBlock.asVector();
         if (lengthVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), strBlock, lengthBlock));
+          return eval(page.getPositionCount(), strBlock, lengthBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), strVector, lengthVector).asBlock());
+        return eval(page.getPositionCount(), strVector, lengthVector).asBlock();
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitSingleByteEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitSingleByteEvaluator.java
index 257fa867c5f2c..bb5b3569934c0 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitSingleByteEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitSingleByteEvaluator.java
@@ -38,14 +38,13 @@ public SplitSingleByteEvaluator(EvalOperator.ExpressionEvaluator str, byte delim
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref strRef = str.eval(page)) {
-      BytesRefBlock strBlock = (BytesRefBlock) strRef.block();
+  public Block eval(Page page) {
+    try (BytesRefBlock strBlock = (BytesRefBlock) str.eval(page)) {
       BytesRefVector strVector = strBlock.asVector();
       if (strVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), strBlock));
+        return eval(page.getPositionCount(), strBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), strVector));
+      return eval(page.getPositionCount(), strVector);
     }
   }

diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitVariableEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitVariableEvaluator.java
index 4996cae1da7d3..d80d8d65c3606 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitVariableEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitVariableEvaluator.java
@@ -38,20 +38,18 @@ public SplitVariableEvaluator(EvalOperator.ExpressionEvaluator str,
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref strRef = str.eval(page)) {
-      BytesRefBlock strBlock = (BytesRefBlock) strRef.block();
-      try (Block.Ref delimRef = delim.eval(page)) {
-        BytesRefBlock delimBlock = (BytesRefBlock) delimRef.block();
+  public Block eval(Page page) {
+    try (BytesRefBlock strBlock = (BytesRefBlock) str.eval(page)) {
+      try (BytesRefBlock delimBlock = (BytesRefBlock) delim.eval(page)) {
         BytesRefVector strVector = strBlock.asVector();
         if (strVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), strBlock, delimBlock));
+          return eval(page.getPositionCount(), strBlock, delimBlock);
         }
         BytesRefVector delimVector = delimBlock.asVector();
         if (delimVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), strBlock, delimBlock));
+          return eval(page.getPositionCount(), strBlock, delimBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), strVector, delimVector));
+        return eval(page.getPositionCount(), strVector, delimVector);
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWithEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWithEvaluator.java
index 7579b2aa2696d..564dd1b7760be 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWithEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWithEvaluator.java
@@ -36,20 +36,18 @@ public StartsWithEvaluator(EvalOperator.ExpressionEvaluator str,
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref strRef = str.eval(page)) {
-      BytesRefBlock strBlock = (BytesRefBlock) strRef.block();
-      try (Block.Ref prefixRef = prefix.eval(page)) {
-        BytesRefBlock prefixBlock = (BytesRefBlock) prefixRef.block();
+  public Block eval(Page page) {
+    try (BytesRefBlock strBlock = (BytesRefBlock) str.eval(page)) {
+      try (BytesRefBlock prefixBlock = (BytesRefBlock) prefix.eval(page)) {
         BytesRefVector strVector = strBlock.asVector();
         if (strVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), strBlock, prefixBlock));
+          return eval(page.getPositionCount(), strBlock, prefixBlock);
         }
         BytesRefVector prefixVector = prefixBlock.asVector();
         if (prefixVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), strBlock, prefixBlock));
+          return eval(page.getPositionCount(), strBlock, prefixBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), strVector, prefixVector).asBlock());
+        return eval(page.getPositionCount(), strVector, prefixVector).asBlock();
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringEvaluator.java
index 8b9b02ed7ec08..f0b4b0363ebc5 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringEvaluator.java
@@ -40,26 +40,23 @@ public SubstringEvaluator(EvalOperator.ExpressionEvaluator str,
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref strRef = str.eval(page)) {
-      BytesRefBlock strBlock = (BytesRefBlock) strRef.block();
-      try (Block.Ref startRef = start.eval(page)) {
-        IntBlock startBlock = (IntBlock) startRef.block();
-        try (Block.Ref lengthRef = length.eval(page)) {
-          IntBlock lengthBlock = (IntBlock) lengthRef.block();
+  public Block eval(Page page) {
+    try (BytesRefBlock strBlock = (BytesRefBlock) str.eval(page)) {
+      try (IntBlock startBlock = (IntBlock) start.eval(page)) {
+        try (IntBlock lengthBlock = (IntBlock) length.eval(page)) {
           BytesRefVector strVector = strBlock.asVector();
           if (strVector == null) {
-            return Block.Ref.floating(eval(page.getPositionCount(), strBlock, startBlock, lengthBlock));
+            return eval(page.getPositionCount(), strBlock, startBlock, lengthBlock);
           }
           IntVector startVector = startBlock.asVector();
           if (startVector == null) {
-            return Block.Ref.floating(eval(page.getPositionCount(), strBlock, startBlock, lengthBlock));
+            return eval(page.getPositionCount(), strBlock, startBlock, lengthBlock);
           }
           IntVector lengthVector = lengthBlock.asVector();
           if (lengthVector == null) {
-            return Block.Ref.floating(eval(page.getPositionCount(), strBlock, startBlock, lengthBlock));
+            return eval(page.getPositionCount(), strBlock, startBlock, lengthBlock);
           }
-          return Block.Ref.floating(eval(page.getPositionCount(), strVector, startVector, lengthVector).asBlock());
+          return eval(page.getPositionCount(), strVector, startVector, lengthVector).asBlock();
         }
       }
     }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringNoLengthEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringNoLengthEvaluator.java
index 27d2465875dce..a410df8bbdc69 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringNoLengthEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringNoLengthEvaluator.java
@@ -36,20 +36,18 @@ public SubstringNoLengthEvaluator(EvalOperator.ExpressionEvaluator str,
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref strRef = str.eval(page)) {
-      BytesRefBlock strBlock = (BytesRefBlock) strRef.block();
-      try (Block.Ref startRef = start.eval(page)) {
-        IntBlock startBlock = (IntBlock) startRef.block();
+  public Block eval(Page page) {
+    try (BytesRefBlock strBlock = (BytesRefBlock) str.eval(page)) {
+      try (IntBlock startBlock = (IntBlock) start.eval(page)) {
         BytesRefVector strVector = strBlock.asVector();
         if (strVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), strBlock, startBlock));
+          return eval(page.getPositionCount(), strBlock, startBlock);
         }
         IntVector startVector = startBlock.asVector();
         if (startVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), strBlock, startBlock));
+          return eval(page.getPositionCount(), strBlock, startBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), strVector, startVector).asBlock());
+        return eval(page.getPositionCount(), strVector, startVector).asBlock();
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/TrimEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/TrimEvaluator.java
index f9a6e19e5293a..38b42070e96a6 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/TrimEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/TrimEvaluator.java
@@ -30,14 +30,13 @@ public TrimEvaluator(EvalOperator.ExpressionEvaluator val, DriverContext driverC
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref valRef = val.eval(page)) {
-      BytesRefBlock valBlock = (BytesRefBlock) valRef.block();
+  public Block eval(Page page) {
+    try (BytesRefBlock valBlock = (BytesRefBlock) val.eval(page)) {
       BytesRefVector valVector = valBlock.asVector();
       if (valVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), valBlock));
+        return eval(page.getPositionCount(), valBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), valVector).asBlock());
+      return eval(page.getPositionCount(), valVector).asBlock();
     }
   }

diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddDatetimesEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddDatetimesEvaluator.java
index d24c4700a2dde..f484a77c30ed2 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddDatetimesEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddDatetimesEvaluator.java
@@ -41,14 +41,13 @@ public AddDatetimesEvaluator(Source source, EvalOperator.ExpressionEvaluator dat
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref datetimeRef = datetime.eval(page)) {
-      LongBlock datetimeBlock = (LongBlock) datetimeRef.block();
+  public Block eval(Page page) {
+    try (LongBlock datetimeBlock = (LongBlock) datetime.eval(page)) {
       LongVector datetimeVector = datetimeBlock.asVector();
       if (datetimeVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), datetimeBlock));
+        return eval(page.getPositionCount(), datetimeBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), datetimeVector));
+      return eval(page.getPositionCount(), datetimeVector);
     }
   }

diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddDoublesEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddDoublesEvaluator.java
index 1029295315ec3..1e9cf33ae39e2 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddDoublesEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddDoublesEvaluator.java
@@ -33,20 +33,18 @@ public AddDoublesEvaluator(EvalOperator.ExpressionEvaluator lhs,
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref lhsRef = lhs.eval(page)) {
-      DoubleBlock lhsBlock = (DoubleBlock) lhsRef.block();
-      try (Block.Ref rhsRef = rhs.eval(page)) {
-        DoubleBlock rhsBlock = (DoubleBlock) rhsRef.block();
+  public Block eval(Page page) {
+    try (DoubleBlock lhsBlock = (DoubleBlock) lhs.eval(page)) {
+      try (DoubleBlock rhsBlock = (DoubleBlock) rhs.eval(page)) {
        DoubleVector lhsVector = lhsBlock.asVector();
         if (lhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
         DoubleVector rhsVector = rhsBlock.asVector();
         if (rhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock());
+        return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock();
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddIntsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddIntsEvaluator.java
index 276a8d9c477b6..e7a3b57479b99 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddIntsEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddIntsEvaluator.java
@@ -39,20 +39,18 @@ public AddIntsEvaluator(Source source, EvalOperator.ExpressionEvaluator lhs,
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref lhsRef = lhs.eval(page)) {
-      IntBlock lhsBlock = (IntBlock) lhsRef.block();
-      try (Block.Ref rhsRef = rhs.eval(page)) {
-        IntBlock rhsBlock = (IntBlock) rhsRef.block();
+  public Block eval(Page page) {
+    try (IntBlock lhsBlock = (IntBlock) lhs.eval(page)) {
+      try (IntBlock rhsBlock = (IntBlock) rhs.eval(page)) {
         IntVector lhsVector = lhsBlock.asVector();
         if (lhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
         IntVector rhsVector = rhsBlock.asVector();
         if (rhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector));
+        return eval(page.getPositionCount(), lhsVector, rhsVector);
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddLongsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddLongsEvaluator.java
index fc39808f7c2f1..d2e029ff276b8 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddLongsEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddLongsEvaluator.java
@@ -39,20 +39,18 @@ public AddLongsEvaluator(Source source, EvalOperator.ExpressionEvaluator lhs,
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref lhsRef = lhs.eval(page)) {
-      LongBlock lhsBlock = (LongBlock) lhsRef.block();
-      try (Block.Ref rhsRef = rhs.eval(page)) {
-        LongBlock rhsBlock = (LongBlock) rhsRef.block();
+  public Block eval(Page page) {
+    try (LongBlock lhsBlock = (LongBlock) lhs.eval(page)) {
+      try (LongBlock rhsBlock = (LongBlock) rhs.eval(page)) {
         LongVector lhsVector = lhsBlock.asVector();
         if (lhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
         LongVector rhsVector = rhsBlock.asVector();
         if (rhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector));
+        return eval(page.getPositionCount(), lhsVector, rhsVector);
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddUnsignedLongsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddUnsignedLongsEvaluator.java
index 0d8bae4fdb2d2..54b7b8df88178 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddUnsignedLongsEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddUnsignedLongsEvaluator.java
@@ -39,20 +39,18 @@ public AddUnsignedLongsEvaluator(Source source, EvalOperator.ExpressionEvaluator
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref lhsRef = lhs.eval(page)) {
-      LongBlock lhsBlock = (LongBlock) lhsRef.block();
-      try (Block.Ref rhsRef = rhs.eval(page)) {
-        LongBlock rhsBlock = (LongBlock) rhsRef.block();
+  public Block eval(Page page) {
+    try (LongBlock lhsBlock = (LongBlock) lhs.eval(page)) {
+      try (LongBlock rhsBlock = (LongBlock) rhs.eval(page)) {
         LongVector lhsVector = lhsBlock.asVector();
         if (lhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
         LongVector rhsVector = rhsBlock.asVector();
         if (rhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector));
+        return eval(page.getPositionCount(), lhsVector, rhsVector);
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivDoublesEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivDoublesEvaluator.java
index 4a7caa5a488d9..f906d83b19ce4 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivDoublesEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivDoublesEvaluator.java
@@ -33,20 +33,18 @@ public DivDoublesEvaluator(EvalOperator.ExpressionEvaluator lhs,
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref lhsRef = lhs.eval(page)) {
-      DoubleBlock lhsBlock = (DoubleBlock) lhsRef.block();
-      try (Block.Ref rhsRef = rhs.eval(page)) {
-        DoubleBlock rhsBlock = (DoubleBlock) rhsRef.block();
+  public Block eval(Page page) {
+    try (DoubleBlock lhsBlock = (DoubleBlock) lhs.eval(page)) {
+      try (DoubleBlock rhsBlock = (DoubleBlock) rhs.eval(page)) {
         DoubleVector lhsVector = lhsBlock.asVector();
         if (lhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
         DoubleVector rhsVector = rhsBlock.asVector();
         if (rhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock());
+        return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock();
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivIntsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivIntsEvaluator.java
index 4bb8c64959635..53cfbd8540e33 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivIntsEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivIntsEvaluator.java
@@ -39,20 +39,18 @@ public DivIntsEvaluator(Source source, EvalOperator.ExpressionEvaluator lhs,
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref lhsRef = lhs.eval(page)) {
-      IntBlock lhsBlock = (IntBlock) lhsRef.block();
-      try (Block.Ref rhsRef = rhs.eval(page)) {
-        IntBlock rhsBlock = (IntBlock) rhsRef.block();
+  public Block eval(Page page) {
+    try (IntBlock lhsBlock = (IntBlock) lhs.eval(page)) {
+      try (IntBlock rhsBlock = (IntBlock) rhs.eval(page)) {
         IntVector lhsVector = lhsBlock.asVector();
         if (lhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
         IntVector rhsVector = rhsBlock.asVector();
         if (rhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector));
+        return eval(page.getPositionCount(), lhsVector, rhsVector);
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivLongsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivLongsEvaluator.java
index 133c6c9bc5503..31f62d3d729c5 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivLongsEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivLongsEvaluator.java
@@ -39,20 +39,18 @@ public DivLongsEvaluator(Source source, EvalOperator.ExpressionEvaluator lhs,
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref lhsRef = lhs.eval(page)) {
-      LongBlock lhsBlock = (LongBlock) lhsRef.block();
-      try (Block.Ref rhsRef = rhs.eval(page)) {
-        LongBlock rhsBlock = (LongBlock) rhsRef.block();
+  public Block eval(Page page) {
+    try (LongBlock lhsBlock = (LongBlock) lhs.eval(page)) {
+      try (LongBlock rhsBlock = (LongBlock) rhs.eval(page)) {
         LongVector lhsVector = lhsBlock.asVector();
         if (lhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
         LongVector rhsVector = rhsBlock.asVector();
         if (rhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector));
+        return eval(page.getPositionCount(), lhsVector, rhsVector);
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivUnsignedLongsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivUnsignedLongsEvaluator.java
index af160626c730d..104208de1e13f 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivUnsignedLongsEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivUnsignedLongsEvaluator.java
@@ -39,20 +39,18 @@ public DivUnsignedLongsEvaluator(Source source, EvalOperator.ExpressionEvaluator
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref lhsRef = lhs.eval(page)) {
-      LongBlock lhsBlock = (LongBlock) lhsRef.block();
-      try (Block.Ref rhsRef = rhs.eval(page)) {
-        LongBlock rhsBlock = (LongBlock) rhsRef.block();
+  public Block eval(Page page) {
+    try (LongBlock lhsBlock = (LongBlock) lhs.eval(page)) {
+      try (LongBlock rhsBlock = (LongBlock) rhs.eval(page)) {
         LongVector lhsVector = lhsBlock.asVector();
         if (lhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
         LongVector rhsVector = rhsBlock.asVector();
         if (rhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector));
+        return eval(page.getPositionCount(), lhsVector, rhsVector);
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModDoublesEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModDoublesEvaluator.java
index f194c1f1a1fbb..6d4f2d08b0b6e 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModDoublesEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModDoublesEvaluator.java
@@ -33,20 +33,18 @@ public ModDoublesEvaluator(EvalOperator.ExpressionEvaluator lhs,
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref lhsRef = lhs.eval(page)) {
-      DoubleBlock lhsBlock = (DoubleBlock) lhsRef.block();
-      try (Block.Ref rhsRef = rhs.eval(page)) {
-        DoubleBlock rhsBlock = (DoubleBlock) rhsRef.block();
+  public Block eval(Page page) {
+    try (DoubleBlock lhsBlock = (DoubleBlock) lhs.eval(page)) {
+      try (DoubleBlock rhsBlock = (DoubleBlock) rhs.eval(page)) {
         DoubleVector lhsVector = lhsBlock.asVector();
         if (lhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
         DoubleVector rhsVector = rhsBlock.asVector();
         if (rhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock());
+        return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock();
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModIntsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModIntsEvaluator.java
index 2cac7f276c1ce..1f6979179627d 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModIntsEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModIntsEvaluator.java
@@ -39,20 +39,18 @@ public ModIntsEvaluator(Source source, EvalOperator.ExpressionEvaluator lhs,
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref lhsRef = lhs.eval(page)) {
-      IntBlock lhsBlock = (IntBlock) lhsRef.block();
-      try (Block.Ref rhsRef = rhs.eval(page)) {
-        IntBlock rhsBlock = (IntBlock) rhsRef.block();
+  public Block eval(Page page) {
+    try (IntBlock lhsBlock = (IntBlock) lhs.eval(page)) {
+      try (IntBlock rhsBlock = (IntBlock) rhs.eval(page)) {
         IntVector lhsVector = lhsBlock.asVector();
         if (lhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
         IntVector rhsVector = rhsBlock.asVector();
         if (rhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector));
+        return eval(page.getPositionCount(), lhsVector, rhsVector);
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModLongsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModLongsEvaluator.java
index 37e2d31928ab9..3bc252c5cd059 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModLongsEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModLongsEvaluator.java
@@ -39,20 +39,18 @@ public ModLongsEvaluator(Source source, EvalOperator.ExpressionEvaluator lhs,
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref lhsRef = lhs.eval(page)) {
-      LongBlock lhsBlock = (LongBlock) lhsRef.block();
-      try (Block.Ref rhsRef = rhs.eval(page)) {
-        LongBlock rhsBlock = (LongBlock) rhsRef.block();
+  public Block eval(Page page) {
+    try (LongBlock lhsBlock = (LongBlock) lhs.eval(page)) {
+      try (LongBlock rhsBlock = (LongBlock) rhs.eval(page)) {
         LongVector lhsVector = lhsBlock.asVector();
         if (lhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
         LongVector rhsVector = rhsBlock.asVector();
         if (rhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector));
+        return eval(page.getPositionCount(), lhsVector, rhsVector);
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModUnsignedLongsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModUnsignedLongsEvaluator.java
index 97e386e1921af..a18a99c7e220f 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModUnsignedLongsEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModUnsignedLongsEvaluator.java
@@ -39,20 +39,18 @@ public ModUnsignedLongsEvaluator(Source source, EvalOperator.ExpressionEvaluator
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref lhsRef = lhs.eval(page)) {
-      LongBlock lhsBlock = (LongBlock) lhsRef.block();
-      try (Block.Ref rhsRef = rhs.eval(page)) {
-        LongBlock rhsBlock = (LongBlock) rhsRef.block();
+  public Block eval(Page page) {
+    try (LongBlock lhsBlock = (LongBlock) lhs.eval(page)) {
+      try (LongBlock rhsBlock = (LongBlock) rhs.eval(page)) {
         LongVector lhsVector = lhsBlock.asVector();
         if (lhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
         LongVector rhsVector = rhsBlock.asVector();
         if (rhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector));
+        return eval(page.getPositionCount(), lhsVector, rhsVector);
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulDoublesEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulDoublesEvaluator.java
index d9f3ae0729c46..4ab6801f66b92 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulDoublesEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulDoublesEvaluator.java
@@ -33,20 +33,18 @@ public MulDoublesEvaluator(EvalOperator.ExpressionEvaluator lhs,
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref lhsRef = lhs.eval(page)) {
-      DoubleBlock lhsBlock = (DoubleBlock) lhsRef.block();
-      try (Block.Ref rhsRef = rhs.eval(page)) {
-        DoubleBlock rhsBlock = (DoubleBlock) rhsRef.block();
+  public Block eval(Page page) {
+    try (DoubleBlock lhsBlock = (DoubleBlock) lhs.eval(page)) {
+      try (DoubleBlock rhsBlock = (DoubleBlock) rhs.eval(page)) {
         DoubleVector lhsVector = lhsBlock.asVector();
         if (lhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
         DoubleVector rhsVector = rhsBlock.asVector();
         if (rhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock());
+        return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock();
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulIntsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulIntsEvaluator.java
index b100f23023a43..9926668c5e505 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulIntsEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulIntsEvaluator.java
@@ -39,20 +39,18 @@ public MulIntsEvaluator(Source source, EvalOperator.ExpressionEvaluator lhs,
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref lhsRef = lhs.eval(page)) {
-      IntBlock lhsBlock = (IntBlock) lhsRef.block();
-      try (Block.Ref rhsRef = rhs.eval(page)) {
-        IntBlock rhsBlock = (IntBlock) rhsRef.block();
+  public Block eval(Page page) {
+    try (IntBlock lhsBlock = (IntBlock) lhs.eval(page)) {
+      try (IntBlock rhsBlock = (IntBlock) rhs.eval(page)) {
         IntVector lhsVector = lhsBlock.asVector();
         if (lhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
         IntVector rhsVector = rhsBlock.asVector();
         if (rhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector));
+        return eval(page.getPositionCount(), lhsVector, rhsVector);
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulLongsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulLongsEvaluator.java
index 33fe21db3b478..8be74005e1940 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulLongsEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulLongsEvaluator.java
@@ -39,20 +39,18 @@ public MulLongsEvaluator(Source source, EvalOperator.ExpressionEvaluator lhs,
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref lhsRef = lhs.eval(page)) {
-      LongBlock lhsBlock = (LongBlock) lhsRef.block();
-      try (Block.Ref rhsRef = rhs.eval(page)) {
-        LongBlock rhsBlock = (LongBlock) rhsRef.block();
+  public Block eval(Page page) {
+    try (LongBlock lhsBlock = (LongBlock) lhs.eval(page)) {
+      try (LongBlock rhsBlock = (LongBlock) rhs.eval(page)) {
         LongVector lhsVector = lhsBlock.asVector();
         if (lhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
         LongVector rhsVector = rhsBlock.asVector();
         if (rhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector));
+        return eval(page.getPositionCount(), lhsVector, rhsVector);
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulUnsignedLongsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulUnsignedLongsEvaluator.java
index 4f6df9f79786e..4ba489dc65f06 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulUnsignedLongsEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulUnsignedLongsEvaluator.java
@@ -39,20 +39,18 @@ public MulUnsignedLongsEvaluator(Source source, EvalOperator.ExpressionEvaluator
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref lhsRef = lhs.eval(page)) {
-      LongBlock lhsBlock = (LongBlock) lhsRef.block();
-      try (Block.Ref rhsRef = rhs.eval(page)) {
-        LongBlock rhsBlock = (LongBlock) rhsRef.block();
+  public Block eval(Page page) {
+    try (LongBlock lhsBlock = (LongBlock) lhs.eval(page)) {
+      try (LongBlock rhsBlock = (LongBlock) rhs.eval(page)) {
         LongVector lhsVector = lhsBlock.asVector();
         if (lhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
         LongVector rhsVector = rhsBlock.asVector();
         if (rhsVector == null) {
-          return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock));
+          return eval(page.getPositionCount(), lhsBlock, rhsBlock);
         }
-        return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector));
+        return eval(page.getPositionCount(), lhsVector, rhsVector);
       }
     }
   }
diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegDoublesEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegDoublesEvaluator.java
index 4705bd343547c..330b3afa3df19 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegDoublesEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegDoublesEvaluator.java
@@ -29,14 +29,13 @@ public NegDoublesEvaluator(EvalOperator.ExpressionEvaluator v, DriverContext dri
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref vRef = v.eval(page)) {
-      DoubleBlock vBlock = (DoubleBlock) vRef.block();
+  public Block eval(Page page) {
+    try (DoubleBlock vBlock = (DoubleBlock) v.eval(page)) {
       DoubleVector vVector = vBlock.asVector();
       if (vVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), vBlock));
+        return eval(page.getPositionCount(), vBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), vVector).asBlock());
+      return eval(page.getPositionCount(), vVector).asBlock();
     }
   }

diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegIntsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegIntsEvaluator.java
index 8783f91eed95a..9691099b03924 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegIntsEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegIntsEvaluator.java
@@ -36,14 +36,13 @@ public NegIntsEvaluator(Source source, EvalOperator.ExpressionEvaluator v,
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref vRef = v.eval(page)) {
-      IntBlock vBlock = (IntBlock) vRef.block();
+  public Block eval(Page page) {
+    try (IntBlock vBlock = (IntBlock) v.eval(page)) {
       IntVector vVector = vBlock.asVector();
       if (vVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), vBlock));
+        return eval(page.getPositionCount(), vBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), vVector));
+      return eval(page.getPositionCount(), vVector);
     }
   }

diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegLongsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegLongsEvaluator.java
index 9f4dc7d8c9ed5..4d8ee14d4569b 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegLongsEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegLongsEvaluator.java
@@ -36,14 +36,13 @@ public NegLongsEvaluator(Source source, EvalOperator.ExpressionEvaluator v,
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref vRef = v.eval(page)) {
-      LongBlock vBlock = (LongBlock) vRef.block();
+  public Block eval(Page page) {
+    try (LongBlock vBlock = (LongBlock) v.eval(page)) {
       LongVector vVector = vBlock.asVector();
       if (vVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), vBlock));
+        return eval(page.getPositionCount(), vBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), vVector));
+      return eval(page.getPositionCount(), vVector);
     }
   }

diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubDatetimesEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubDatetimesEvaluator.java
index ec8125ebde0ea..de81736c42abf 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubDatetimesEvaluator.java
+++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubDatetimesEvaluator.java
@@ -41,14 +41,13 @@ public SubDatetimesEvaluator(Source source, EvalOperator.ExpressionEvaluator dat
   }

   @Override
-  public Block.Ref eval(Page page) {
-    try (Block.Ref datetimeRef = datetime.eval(page)) {
-      LongBlock datetimeBlock = (LongBlock) datetimeRef.block();
+  public Block eval(Page page) {
+    try (LongBlock datetimeBlock = (LongBlock) datetime.eval(page)) {
       LongVector datetimeVector = datetimeBlock.asVector();
       if (datetimeVector == null) {
-        return Block.Ref.floating(eval(page.getPositionCount(), datetimeBlock));
+        return eval(page.getPositionCount(), datetimeBlock);
       }
-      return Block.Ref.floating(eval(page.getPositionCount(), datetimeVector));
+      return eval(page.getPositionCount(), datetimeVector);
     }
   }

diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubDoublesEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubDoublesEvaluator.java
index 76c06dd137a18..6609d6cfbb4ae 100644
--- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubDoublesEvaluator.java
a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubDoublesEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubDoublesEvaluator.java @@ -33,20 +33,18 @@ public SubDoublesEvaluator(EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - DoubleBlock lhsBlock = (DoubleBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - DoubleBlock rhsBlock = (DoubleBlock) rhsRef.block(); + public Block eval(Page page) { + try (DoubleBlock lhsBlock = (DoubleBlock) lhs.eval(page)) { + try (DoubleBlock rhsBlock = (DoubleBlock) rhs.eval(page)) { DoubleVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } DoubleVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector).asBlock()); + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubIntsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubIntsEvaluator.java index 842a8a5b42ea1..4013cdd240dd0 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubIntsEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubIntsEvaluator.java @@ -39,20 +39,18 @@ public SubIntsEvaluator(Source source, EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - IntBlock lhsBlock = (IntBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - IntBlock rhsBlock = (IntBlock) rhsRef.block(); + public Block eval(Page page) { + try (IntBlock lhsBlock = (IntBlock) lhs.eval(page)) { + try (IntBlock rhsBlock = (IntBlock) rhs.eval(page)) { IntVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } IntVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector)); + return eval(page.getPositionCount(), lhsVector, rhsVector); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubLongsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubLongsEvaluator.java index 79e2879f2861a..7528750da15f8 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubLongsEvaluator.java +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubLongsEvaluator.java @@ -39,20 +39,18 @@ public SubLongsEvaluator(Source source, EvalOperator.ExpressionEvaluator lhs, } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - LongBlock lhsBlock = (LongBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - LongBlock rhsBlock = (LongBlock) rhsRef.block(); + public Block eval(Page page) { + try (LongBlock lhsBlock = (LongBlock) lhs.eval(page)) { + try (LongBlock rhsBlock = (LongBlock) rhs.eval(page)) { LongVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } LongVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector)); + return eval(page.getPositionCount(), lhsVector, rhsVector); } } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubUnsignedLongsEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubUnsignedLongsEvaluator.java index c71855990c257..6c2a31db0a6f0 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubUnsignedLongsEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubUnsignedLongsEvaluator.java @@ -39,20 +39,18 @@ public SubUnsignedLongsEvaluator(Source source, EvalOperator.ExpressionEvaluator } @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhsRef = lhs.eval(page)) { - LongBlock lhsBlock = (LongBlock) lhsRef.block(); - try (Block.Ref rhsRef = rhs.eval(page)) { - LongBlock rhsBlock = (LongBlock) rhsRef.block(); + public Block eval(Page page) { + try (LongBlock lhsBlock = (LongBlock) lhs.eval(page)) { + try (LongBlock rhsBlock = (LongBlock) rhs.eval(page)) { LongVector lhsVector = lhsBlock.asVector(); if (lhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } LongVector rhsVector = rhsBlock.asVector(); if (rhsVector == null) { - return Block.Ref.floating(eval(page.getPositionCount(), lhsBlock, rhsBlock)); + return eval(page.getPositionCount(), lhsBlock, rhsBlock); } - return Block.Ref.floating(eval(page.getPositionCount(), lhsVector, rhsVector)); + return eval(page.getPositionCount(), lhsVector, rhsVector); } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java index bc648678984d5..12ec974142f62 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java @@ -8,9 +8,11 @@ package org.elasticsearch.xpack.esql.action; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BytesRefBlock; @@ -24,6 +26,7 @@ import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.versionfield.Version; @@ -186,6 +189,17 @@ protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Pa return builder.value(UnsupportedValueSource.UNSUPPORTED_OUTPUT); } }; + case "_source" -> new PositionToXContent(block) { + @Override + protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) + throws IOException { + BytesRef val = ((BytesRefBlock) block).getBytesRef(valueIndex, scratch); + try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, new BytesArray(val))) { + parser.nextToken(); + return builder.copyCurrentStructure(parser); + } + } + }; default -> throw new IllegalArgumentException("can't convert values of type [" + type + "]"); }; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java index 5196cbb0dfd1c..c467f0dfc9075 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java @@ -30,7 +30,6 @@ import org.elasticsearch.xpack.esql.plugin.QueryPragmas; import java.io.IOException; -import java.time.ZoneId; import java.util.ArrayList; import java.util.List; import java.util.Locale; @@ -59,7 +58,6 @@ public class EsqlQueryRequest extends ActionRequest implements CompositeIndicesR private static final ParseField QUERY_FIELD = new ParseField("query"); private static final ParseField COLUMNAR_FIELD = new ParseField("columnar"); - private static final ParseField TIME_ZONE_FIELD = new ParseField("time_zone"); private static final ParseField FILTER_FIELD = new ParseField("filter"); private static final ParseField PRAGMA_FIELD = new ParseField("pragma"); private static final ParseField PARAMS_FIELD = new ParseField("params"); @@ -69,7 +67,6 @@ public class EsqlQueryRequest extends ActionRequest implements CompositeIndicesR private String query; private boolean columnar; - private ZoneId zoneId; private Locale locale; private QueryBuilder filter; private QueryPragmas pragmas = new QueryPragmas(Settings.EMPTY); @@ -109,14 +106,6 @@ public boolean columnar() { return columnar; } - public void zoneId(ZoneId zoneId) { - this.zoneId = zoneId; - } - - public ZoneId zoneId() { - return zoneId; - } - public void locale(Locale locale) { this.locale = locale; } @@ -157,7 +146,6 @@ private static ObjectParser objectParser(Supplier parser = new ObjectParser<>("esql/query", false, supplier); parser.declareString(EsqlQueryRequest::query, QUERY_FIELD); parser.declareBoolean(EsqlQueryRequest::columnar, COLUMNAR_FIELD); - parser.declareString((request, zoneId) -> request.zoneId(ZoneId.of(zoneId)), TIME_ZONE_FIELD); parser.declareObject(EsqlQueryRequest::filter, (p, c) -> 
AbstractQueryBuilder.parseTopLevelQuery(p), FILTER_FIELD); parser.declareObject( EsqlQueryRequest::pragmas, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestBuilder.java index 8d57e606e5b91..1d23fcbf8e05b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestBuilder.java @@ -12,8 +12,6 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; -import java.time.ZoneId; - public class EsqlQueryRequestBuilder extends ActionRequestBuilder { public EsqlQueryRequestBuilder(ElasticsearchClient client, EsqlQueryAction action, EsqlQueryRequest request) { @@ -34,11 +32,6 @@ public EsqlQueryRequestBuilder columnar(boolean columnar) { return this; } - public EsqlQueryRequestBuilder timeZone(ZoneId zoneId) { - request.zoneId(zoneId); - return this; - } - public EsqlQueryRequestBuilder filter(QueryBuilder filter) { request.filter(filter); return this; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java index a5194b1695c2c..3d91eafc8e033 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java @@ -10,11 +10,14 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockStreamInput; @@ -32,17 +35,22 @@ import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.versionfield.Version; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.function.Function; @@ -254,6 +262,17 @@ private static Object valueAt(String dataType, Block block, int offset, BytesRef case "boolean" -> ((BooleanBlock) block).getBoolean(offset); case "version" -> new Version(((BytesRefBlock) block).getBytesRef(offset, scratch)).toString(); case "unsupported" -> 
UnsupportedValueSource.UNSUPPORTED_OUTPUT; + case "_source" -> { + BytesRef val = ((BytesRefBlock) block).getBytesRef(offset, scratch); + try { + try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, new BytesArray(val))) { + parser.nextToken(); + yield parser.mapOrdered(); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } default -> throw EsqlIllegalArgumentException.illegalDataType(dataType); }; } @@ -287,6 +306,18 @@ private static Page valuesToPage(List dataTypes, List> valu case "boolean" -> ((BooleanBlock.Builder) builder).appendBoolean(((Boolean) value)); case "null" -> builder.appendNull(); case "version" -> ((BytesRefBlock.Builder) builder).appendBytesRef(new Version(value.toString()).toBytesRef()); + case "_source" -> { + @SuppressWarnings("unchecked") + Map o = (Map) value; + try { + try (XContentBuilder sourceBuilder = JsonXContent.contentBuilder()) { + sourceBuilder.map(o); + ((BytesRefBlock.Builder) builder).appendBytesRef(BytesReference.bytes(sourceBuilder).toBytesRef()); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } default -> throw EsqlIllegalArgumentException.illegalDataType(dataTypes.get(c)); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 557da9639a086..a8462703a2b37 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -81,6 +81,9 @@ import static org.elasticsearch.xpack.ql.type.DataTypes.NESTED; public class Analyzer extends ParameterizedRuleExecutor { + static final List NO_FIELDS = List.of( + new ReferenceAttribute(Source.EMPTY, "", DataTypes.NULL, null, Nullability.TRUE, null, false) + ); private static final Iterable> rules; static { @@ -142,7 +145,7 @@ protected LogicalPlan rule(EsqlUnresolvedRelation plan, AnalyzerContext context) EsIndex esIndex = context.indexResolution().get(); var attributes = mappingAsAttributes(plan.source(), esIndex.mapping()); attributes.addAll(plan.metadataFields()); - return new EsRelation(plan.source(), esIndex, attributes); + return new EsRelation(plan.source(), esIndex, attributes.isEmpty() ? 
NO_FIELDS : attributes); } } @@ -256,8 +259,10 @@ public static List<NamedExpression> calculateEnrichFields( List<NamedExpression> enrichFields, EnrichPolicy policy ) { - Map<String, Attribute> fieldMap = mapping.stream().collect(Collectors.toMap(NamedExpression::name, Function.identity())); - fieldMap.remove(policy.getMatchField()); + Set<String> policyEnrichFieldSet = new HashSet<>(policy.getEnrichFields()); + Map<String, Attribute> fieldMap = mapping.stream() + .filter(e -> policyEnrichFieldSet.contains(e.name())) + .collect(Collectors.toMap(NamedExpression::name, Function.identity())); List<NamedExpression> result = new ArrayList<>(); if (enrichFields == null || enrichFields.isEmpty()) { // use the policy to infer the enrich fields diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java index 5ac551103f338..844cfde286072 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java @@ -56,6 +56,7 @@ public String describe() { public Operator get(DriverContext driverContext) { return new EnrichLookupOperator( sessionId, + driverContext, parentTask, maxOutstandingRequests, inputChannel, @@ -70,6 +71,7 @@ public Operator get(DriverContext driverContext) { public EnrichLookupOperator( String sessionId, + DriverContext driverContext, CancellableTask parentTask, int maxOutstandingRequests, int inputChannel, @@ -79,7 +81,7 @@ public EnrichLookupOperator( String matchField, List<NamedExpression> enrichFields ) { - super(maxOutstandingRequests); + super(driverContext, maxOutstandingRequests); this.sessionId = sessionId; this.parentTask = parentTask; this.inputChannel = inputChannel; @@ -106,7 +108,7 @@ protected void performAsync(Page inputPage, ActionListener<Page> listener) { } @Override - public void close() { + protected void doClose() { // TODO: Maybe create a sub-task as the parent task of all the lookup tasks // then cancel it when this operator terminates early (e.g., have enough result). 
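// Editor's note: the constructor/close changes above follow a template-method split:
// the base async operator (AsyncOperator in the compute module, per the super() call)
// now receives the DriverContext and owns the public close() lifecycle, while subclasses
// override doClose() for their own cleanup only. A simplified sketch of that contract;
// the base-class body here is an assumption for illustration, not the real AsyncOperator:
abstract static class AsyncOperatorSketch {
    private final DriverContext driverContext; // used by the base class to release buffered pages
    private boolean closed;

    AsyncOperatorSketch(DriverContext driverContext, int maxOutstandingRequests) {
        this.driverContext = driverContext;
    }

    public final void close() {
        if (closed == false) {
            closed = true;
            // base class drains and releases any buffered pages first (details omitted),
            // then delegates subclass-specific cleanup:
            doClose();
        }
    }

    // subclasses implement this instead of overriding close() directly
    protected abstract void doClose();
}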
} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java index 98c1397d97860..8dc5bdaeca393 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.UnavailableShardsException; -import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterState; @@ -25,6 +24,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.compute.OwningChannelActionListener; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockStreamInput; @@ -38,6 +38,9 @@ import org.elasticsearch.compute.operator.OutputOperator; import org.elasticsearch.compute.operator.ProjectOperator; import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.RefCounted; +import org.elasticsearch.core.Releasables; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.shard.ShardId; @@ -125,7 +128,12 @@ public EnrichLookupService( this.executor = transportService.getThreadPool().executor(EsqlPlugin.ESQL_THREAD_POOL_NAME); this.bigArrays = bigArrays; this.blockFactory = blockFactory; - transportService.registerRequestHandler(LOOKUP_ACTION_NAME, this.executor, LookupRequest::new, new TransportHandler()); + transportService.registerRequestHandler( + LOOKUP_ACTION_NAME, + this.executor, + in -> new LookupRequest(in, blockFactory), + new TransportHandler() + ); } public void lookupAsync( @@ -164,7 +172,11 @@ public void lookupAsync( lookupRequest, parentTask, TransportRequestOptions.EMPTY, - new ActionListenerResponseHandler<>(listener.map(r -> r.page), LookupResponse::new, executor) + new ActionListenerResponseHandler<>( + listener.map(LookupResponse::takePage), + in -> new LookupResponse(in, blockFactory), + executor + ) ); } }, listener::onFailure)); @@ -226,11 +238,11 @@ private void doLookup( ActionListener listener ) { Block inputBlock = inputPage.getBlock(0); - if (inputBlock.areAllValuesNull()) { - listener.onResponse(createNullResponse(inputPage.getPositionCount(), extractFields)); - return; - } try { + if (inputBlock.areAllValuesNull()) { + listener.onResponse(createNullResponse(inputPage.getPositionCount(), extractFields)); + return; + } ShardSearchRequest shardSearchRequest = new ShardSearchRequest(shardId, 0, AliasFilter.EMPTY); SearchContext searchContext = searchService.createSearchContext(shardSearchRequest, SearchService.NO_TIMEOUT); listener = ActionListener.runBefore(listener, searchContext::close); @@ -239,33 +251,45 @@ private void doLookup( final SourceOperator queryOperator = switch (matchType) { case "match", "range" -> { QueryList queryList = QueryList.termQueryList(fieldType, searchExecutionContext, inputBlock); - 
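// Editor's note: the all-null short-circuit above moved inside the try block for a reason
// worth restating: in an async handler, any failure, including one on the "fast path",
// must reach the caller through the ActionListener; an exception thrown before the
// listener is wired up is lost at the transport boundary. Generic restatement
// (handleRequest, doRealWork and nullResponse are hypothetical names):
void handleRequest(Page inputPage, ActionListener<Page> listener) {
    try {
        Block inputBlock = inputPage.getBlock(0);
        if (inputBlock.areAllValuesNull()) {
            listener.onResponse(nullResponse(inputPage.getPositionCount())); // hypothetical all-null page
            return;
        }
        doRealWork(inputPage, listener); // hypothetical: builds and runs the lookup driver
    } catch (Exception e) {
        listener.onFailure(e); // never let the exception escape the handler
    }
}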
yield new EnrichQuerySourceOperator(queryList, searchExecutionContext.getIndexReader()); + yield new EnrichQuerySourceOperator(blockFactory, queryList, searchExecutionContext.getIndexReader()); } default -> throw new EsqlIllegalArgumentException("illegal match type " + matchType); }; List intermediateOperators = new ArrayList<>(extractFields.size() + 2); final ElementType[] mergingTypes = new ElementType[extractFields.size()]; - // extract-field operators + + // load the fields + List fields = new ArrayList<>(extractFields.size()); for (int i = 0; i < extractFields.size(); i++) { NamedExpression extractField = extractFields.get(i); final ElementType elementType = LocalExecutionPlanner.toElementType(extractField.dataType()); mergingTypes[i] = elementType; - var sources = BlockReaderFactories.factories( + var loaders = BlockReaderFactories.loaders( List.of(searchContext), extractField instanceof Alias a ? ((NamedExpression) a.child()).name() : extractField.name(), EsqlDataTypes.isUnsupported(extractField.dataType()) ); - intermediateOperators.add( - new ValuesSourceReaderOperator(BlockFactory.getNonBreakingInstance(), sources, 0, extractField.name()) - ); + fields.add(new ValuesSourceReaderOperator.FieldInfo(extractField.name(), loaders)); } + intermediateOperators.add( + new ValuesSourceReaderOperator( + blockFactory, + fields, + List.of(new ValuesSourceReaderOperator.ShardContext(searchContext.searcher().getIndexReader(), () -> { + throw new UnsupportedOperationException("can't load _source as part of enrich"); + })), + 0 + ) + ); + // drop docs block intermediateOperators.add(droppingBlockOperator(extractFields.size() + 2, 0)); boolean singleLeaf = searchContext.searcher().getLeafContexts().size() == 1; + // merging field-values by position final int[] mergingChannels = IntStream.range(0, extractFields.size()).map(i -> i + 1).toArray(); intermediateOperators.add( - new MergePositionsOperator(singleLeaf, inputPage.getPositionCount(), 0, mergingChannels, mergingTypes) + new MergePositionsOperator(singleLeaf, inputPage.getPositionCount(), 0, mergingChannels, mergingTypes, blockFactory) ); AtomicReference result = new AtomicReference<>(); OutputOperator outputOperator = new OutputOperator(List.of(), Function.identity(), result::set); @@ -297,12 +321,18 @@ private void doLookup( } } - private static Page createNullResponse(int positionCount, List extractFields) { + private Page createNullResponse(int positionCount, List extractFields) { final Block[] blocks = new Block[extractFields.size()]; - for (int i = 0; i < extractFields.size(); i++) { - blocks[i] = Block.constantNullBlock(positionCount); + try { + for (int i = 0; i < extractFields.size(); i++) { + blocks[i] = blockFactory.newConstantNullBlock(positionCount); + } + return new Page(blocks); + } finally { + if (blocks[blocks.length - 1] == null) { + Releasables.close(blocks); + } } - return new Page(blocks); } private static Operator droppingBlockOperator(int totalBlocks, int droppingPosition) { @@ -319,7 +349,8 @@ private static Operator droppingBlockOperator(int totalBlocks, int droppingPosit private class TransportHandler implements TransportRequestHandler { @Override public void messageReceived(LookupRequest request, TransportChannel channel, Task task) { - ActionListener listener = new ChannelActionListener<>(channel); + request.incRef(); + ActionListener listener = ActionListener.runBefore(new OwningChannelActionListener<>(channel), request::decRef); doLookup( request.sessionId, (CancellableTask) task, @@ -340,6 +371,9 @@ private 
static class LookupRequest extends TransportRequest implements IndicesRe private final String matchField; private final Page inputPage; private final List extractFields; + // TODO: Remove this workaround once we have Block RefCount + private final Page toRelease; + private final RefCounted refs = AbstractRefCounted.of(this::releasePage); LookupRequest( String sessionId, @@ -354,17 +388,18 @@ private static class LookupRequest extends TransportRequest implements IndicesRe this.matchType = matchType; this.matchField = matchField; this.inputPage = inputPage; + this.toRelease = null; this.extractFields = extractFields; } - LookupRequest(StreamInput in) throws IOException { + LookupRequest(StreamInput in, BlockFactory blockFactory) throws IOException { super(in); this.sessionId = in.readString(); this.shardId = new ShardId(in); this.matchType = in.readString(); this.matchField = in.readString(); - // TODO real BlockFactory - this.inputPage = new Page(new BlockStreamInput(in, BlockFactory.getNonBreakingInstance())); + this.inputPage = new Page(new BlockStreamInput(in, blockFactory)); + this.toRelease = inputPage; PlanStreamInput planIn = new PlanStreamInput(in, PlanNameRegistry.INSTANCE, in.namedWriteableRegistry(), null); this.extractFields = planIn.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readNamedExpression)); } @@ -400,6 +435,32 @@ public String getDescription() { } }; } + + private void releasePage() { + if (toRelease != null) { + Releasables.closeExpectNoException(toRelease::releaseBlocks); + } + } + + @Override + public void incRef() { + refs.incRef(); + } + + @Override + public boolean tryIncRef() { + return refs.tryIncRef(); + } + + @Override + public boolean decRef() { + return refs.decRef(); + } + + @Override + public boolean hasReferences() { + return refs.hasReferences(); + } } private static String lookupDescription( @@ -427,20 +488,52 @@ private static String lookupDescription( } private static class LookupResponse extends TransportResponse { - private final Page page; + private Page page; + private final RefCounted refs = AbstractRefCounted.of(this::releasePage); LookupResponse(Page page) { this.page = page; } - LookupResponse(StreamInput in) throws IOException { - // TODO real BlockFactory - this.page = new Page(new BlockStreamInput(in, BlockFactory.getNonBreakingInstance())); + LookupResponse(StreamInput in, BlockFactory blockFactory) throws IOException { + this.page = new Page(new BlockStreamInput(in, blockFactory)); } @Override public void writeTo(StreamOutput out) throws IOException { page.writeTo(out); } + + Page takePage() { + var p = page; + page = null; + return p; + } + + private void releasePage() { + if (page != null) { + Releasables.closeExpectNoException(page::releaseBlocks); + } + } + + @Override + public void incRef() { + refs.incRef(); + } + + @Override + public boolean tryIncRef() { + return refs.tryIncRef(); + } + + @Override + public boolean decRef() { + return refs.decRef(); + } + + @Override + public boolean hasReferences() { + return refs.hasReferences(); + } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java index 1e21886a7ac4b..246849896bcdf 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java @@ -9,12 +9,12 @@ import 
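// Editor's note: LookupRequest and LookupResponse above share one ref-counting shape:
// the message owns a Page of circuit-breaker-accounted blocks, delegates RefCounted to
// an AbstractRefCounted, and releases the page exactly once when the last reference
// drops; takePage() transfers ownership out before that happens. Condensed sketch
// (TrackedPageMessage is our name; the overrides mirror the ones in this diff):
static final class TrackedPageMessage extends TransportResponse {
    private Page page;
    private final RefCounted refs = AbstractRefCounted.of(this::releasePage);

    TrackedPageMessage(Page page) {
        this.page = page;
    }

    /** Transfers ownership of the page to the caller; the message will no longer release it. */
    Page takePage() {
        Page p = page;
        page = null;
        return p;
    }

    private void releasePage() {
        if (page != null) {
            Releasables.closeExpectNoException(page::releaseBlocks);
        }
    }

    @Override
    public void incRef() { refs.incRef(); }

    @Override
    public boolean tryIncRef() { return refs.tryIncRef(); }

    @Override
    public boolean decRef() { return refs.decRef(); }

    @Override
    public boolean hasReferences() { return refs.hasReferences(); }

    @Override
    public void writeTo(StreamOutput out) throws IOException { page.writeTo(out); }
}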
org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; -import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.compute.OwningChannelActionListener; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; @@ -110,7 +110,7 @@ public void messageReceived(ResolveRequest request, TransportChannel channel, Ta String policyName = request.policyName; EnrichPolicy policy = policies().get(policyName); ThreadContext threadContext = threadPool.getThreadContext(); - ActionListener listener = new ChannelActionListener<>(channel); + ActionListener listener = new OwningChannelActionListener<>(channel); listener = ContextPreservingActionListener.wrapPreservingContext(listener, threadContext); try (ThreadContext.StoredContext ignored = threadContext.stashWithOrigin(ClientHelper.ENRICH_ORIGIN)) { indexResolver.resolveAsMergedMapping( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichQuerySourceOperator.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichQuerySourceOperator.java index de785e161d527..8d23a59779e6b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichQuerySourceOperator.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichQuerySourceOperator.java @@ -16,13 +16,13 @@ import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; -import org.apache.lucene.util.ArrayUtil; -import org.elasticsearch.compute.data.ConstantIntVector; +import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DocVector; -import org.elasticsearch.compute.data.IntArrayVector; import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.core.Releasables; import java.io.IOException; import java.io.UncheckedIOException; @@ -34,6 +34,7 @@ */ final class EnrichQuerySourceOperator extends SourceOperator { + private final BlockFactory blockFactory; private final QueryList queryList; private int queryPosition; private Weight weight = null; @@ -41,7 +42,8 @@ final class EnrichQuerySourceOperator extends SourceOperator { private int leafIndex = 0; private final IndexSearcher searcher; - EnrichQuerySourceOperator(QueryList queryList, IndexReader indexReader) { + EnrichQuerySourceOperator(BlockFactory blockFactory, QueryList queryList, IndexReader indexReader) { + this.blockFactory = blockFactory; this.queryList = queryList; this.indexReader = indexReader; this.searcher = new IndexSearcher(indexReader); @@ -92,22 +94,30 @@ private Page queryOneLeaf(Weight weight, int leafIndex) throws IOException { if (scorer == null) { return null; } - DocCollector collector = new DocCollector(); - scorer.score(collector, leafReaderContext.reader().getLiveDocs()); - final int matches = collector.matches; - DocVector docVector = new DocVector( - new ConstantIntVector(0, matches), - new 
ConstantIntVector(leafIndex, matches), - new IntArrayVector(collector.docs, matches), - true - ); - IntBlock positionBlock = new ConstantIntVector(queryPosition, matches).asBlock(); - return new Page(docVector.asBlock(), positionBlock); + IntVector docs = null, segments = null, shards = null; + boolean success = false; + try (IntVector.Builder docsBuilder = blockFactory.newIntVectorBuilder(1)) { + scorer.score(new DocCollector(docsBuilder), leafReaderContext.reader().getLiveDocs()); + docs = docsBuilder.build(); + final int positionCount = docs.getPositionCount(); + segments = blockFactory.newConstantIntVector(leafIndex, positionCount); + shards = blockFactory.newConstantIntVector(0, positionCount); + var positions = blockFactory.newConstantIntBlockWith(queryPosition, positionCount); + success = true; + return new Page(new DocVector(shards, segments, docs, true).asBlock(), positions); + } finally { + if (success == false) { + Releasables.close(docs, shards, segments); + } + } } private static class DocCollector implements LeafCollector { - int matches = 0; - int[] docs = new int[0]; + final IntVector.Builder docIds; + + DocCollector(IntVector.Builder docIds) { + this.docIds = docIds; + } @Override public void setScorer(Scorable scorer) { @@ -115,9 +125,8 @@ public void setScorer(Scorable scorer) { } @Override - public void collect(int doc) throws IOException { - docs = ArrayUtil.grow(docs, matches + 1); - docs[matches++] = doc; + public void collect(int doc) { + docIds.appendInt(doc); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/MergePositionsOperator.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/MergePositionsOperator.java index e9a9512fe23c9..89447807db5b9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/MergePositionsOperator.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/MergePositionsOperator.java @@ -8,10 +8,12 @@ package org.elasticsearch.xpack.esql.enrich; import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import java.util.Arrays; @@ -53,8 +55,16 @@ final class MergePositionsOperator implements Operator { private PositionBuilder positionBuilder = null; private Page outputPage; - - MergePositionsOperator(boolean singleMode, int positionCount, int positionChannel, int[] mergingChannels, ElementType[] mergingTypes) { + private final BlockFactory blockFactory; + + MergePositionsOperator( + boolean singleMode, + int positionCount, + int positionChannel, + int[] mergingChannels, + ElementType[] mergingTypes, + BlockFactory blockFactory + ) { if (mergingChannels.length != mergingTypes.length) { throw new IllegalArgumentException( "Merging channels don't match merging types; channels=" @@ -63,14 +73,21 @@ final class MergePositionsOperator implements Operator { + Arrays.toString(mergingTypes) ); } + this.blockFactory = blockFactory; this.singleMode = singleMode; this.positionCount = positionCount; this.positionChannel = positionChannel; this.mergingChannels = mergingChannels; this.mergingTypes = mergingTypes; this.outputBuilders = new Block.Builder[mergingTypes.length]; - for (int i = 0; i < mergingTypes.length; i++) { - 
outputBuilders[i] = mergingTypes[i].newBlockBuilder(positionCount); + try { + for (int i = 0; i < mergingTypes.length; i++) { + outputBuilders[i] = mergingTypes[i].newBlockBuilder(positionCount, blockFactory); + } + } finally { + if (outputBuilders[outputBuilders.length - 1] == null) { + Releasables.close(outputBuilders); + } } } @@ -81,56 +98,77 @@ public boolean needsInput() { @Override public void addInput(Page page) { - final IntBlock positions = page.getBlock(positionChannel); - final int currentPosition = positions.getInt(0); - if (singleMode) { - fillNullUpToPosition(currentPosition); - for (int i = 0; i < mergingChannels.length; i++) { - int channel = mergingChannels[i]; - outputBuilders[i].appendAllValuesToCurrentPosition(page.getBlock(channel)); - } - filledPositions++; - } else { - if (positionBuilder != null && positionBuilder.position != currentPosition) { - flushPositionBuilder(); - } - if (positionBuilder == null) { - positionBuilder = new PositionBuilder(currentPosition, mergingTypes); + try { + final IntBlock positions = page.getBlock(positionChannel); + final int currentPosition = positions.getInt(0); + if (singleMode) { + fillNullUpToPosition(currentPosition); + for (int i = 0; i < mergingChannels.length; i++) { + int channel = mergingChannels[i]; + outputBuilders[i].appendAllValuesToCurrentPosition(page.getBlock(channel)); + } + filledPositions++; + } else { + if (positionBuilder != null && positionBuilder.position != currentPosition) { + flushPositionBuilder(); + } + if (positionBuilder == null) { + positionBuilder = new PositionBuilder(currentPosition, mergingTypes, blockFactory); + } + positionBuilder.combine(page, mergingChannels); } - positionBuilder.combine(page, mergingChannels); + } finally { + Releasables.closeExpectNoException(page::releaseBlocks); } } - static final class PositionBuilder { + static final class PositionBuilder implements Releasable { private final int position; private final Block.Builder[] builders; - PositionBuilder(int position, ElementType[] elementTypes) { + PositionBuilder(int position, ElementType[] elementTypes, BlockFactory blockFactory) { this.position = position; this.builders = new Block.Builder[elementTypes.length]; - for (int i = 0; i < builders.length; i++) { - builders[i] = elementTypes[i].newBlockBuilder(1); + try { + for (int i = 0; i < builders.length; i++) { + builders[i] = elementTypes[i].newBlockBuilder(1, blockFactory); + } + } finally { + if (builders[builders.length - 1] == null) { + Releasables.close(builders); + } } } void combine(Page page, int[] channels) { for (int i = 0; i < channels.length; i++) { - builders[i].appendAllValuesToCurrentPosition(page.getBlock(channels[i])); + Block block = page.getBlock(channels[i]); + builders[i].appendAllValuesToCurrentPosition(block); } } void buildTo(Block.Builder[] output) { for (int i = 0; i < output.length; i++) { - output[i].appendAllValuesToCurrentPosition(builders[i].build()); + try (var b = builders[i]; Block block = b.build()) { + output[i].appendAllValuesToCurrentPosition(block); + } } } + + @Override + public void close() { + Releasables.close(builders); + } } private void flushPositionBuilder() { fillNullUpToPosition(positionBuilder.position); filledPositions++; - positionBuilder.buildTo(outputBuilders); - positionBuilder = null; + try (var p = positionBuilder) { + p.buildTo(outputBuilders); + } finally { + positionBuilder = null; + } } private void fillNullUpToPosition(int position) { @@ -148,14 +186,10 @@ public void finish() { flushPositionBuilder(); } 
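// Editor's note: the builder allocation above (and in createNullResponse earlier) relies
// on a compact invariant for partial-failure cleanup: slots are filled in order, so if the
// loop threw, the last slot is still null, and only then is the allocated prefix released.
// Generic restatement of the pattern (types, estimatedSize and blockFactory stand for
// whatever is in scope at the call site):
Block.Builder[] builders = new Block.Builder[types.length];
try {
    for (int i = 0; i < types.length; i++) {
        builders[i] = types[i].newBlockBuilder(estimatedSize, blockFactory);
    }
} finally {
    if (builders[builders.length - 1] == null) {
        // allocation did not finish; close whatever was created (Releasables.close skips nulls)
        Releasables.close(builders);
    }
}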
fillNullUpToPosition(positionCount); - try { - Block[] blocks = Arrays.stream(outputBuilders).map(Block.Builder::build).toArray(Block[]::new); - outputPage = new Page(blocks); - finished = true; - assert outputPage.getPositionCount() == positionCount; - } finally { - Releasables.closeExpectNoException(outputBuilders); - } + final Block[] blocks = Block.Builder.buildAll(outputBuilders); + outputPage = new Page(blocks); + assert outputPage.getPositionCount() == positionCount; + finished = true; } @Override @@ -172,6 +206,10 @@ public Page getOutput() { @Override public void close() { - + Releasables.close(Releasables.wrap(outputBuilders), positionBuilder, () -> { + if (outputPage != null) { + outputPage.releaseBlocks(); + } + }); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java index b9ef94d587556..132df0d3a5afd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java @@ -86,14 +86,14 @@ record BooleanLogicExpressionEvaluator(BinaryLogic bl, ExpressionEvaluator leftE implements ExpressionEvaluator { @Override - public Block.Ref eval(Page page) { - try (Block.Ref lhs = leftEval.eval(page); Block.Ref rhs = rightEval.eval(page)) { - Vector lhsVector = lhs.block().asVector(); - Vector rhsVector = rhs.block().asVector(); + public Block eval(Page page) { + try (Block lhs = leftEval.eval(page); Block rhs = rightEval.eval(page)) { + Vector lhsVector = lhs.asVector(); + Vector rhsVector = rhs.asVector(); if (lhsVector != null && rhsVector != null) { - return Block.Ref.floating(eval((BooleanVector) lhsVector, (BooleanVector) rhsVector)); + return eval((BooleanVector) lhsVector, (BooleanVector) rhsVector); } - return Block.Ref.floating(eval(lhs.block(), rhs.block())); + return eval(lhs, rhs); } } @@ -165,8 +165,10 @@ static class Attributes extends ExpressionMapper { public ExpressionEvaluator.Factory map(Attribute attr, Layout layout) { record Attribute(int channel) implements ExpressionEvaluator { @Override - public Block.Ref eval(Page page) { - return new Block.Ref(page.getBlock(channel), page); + public Block eval(Page page) { + Block block = page.getBlock(channel); + block.incRef(); + return block; } @Override @@ -193,8 +195,8 @@ static class Literals extends ExpressionMapper { public ExpressionEvaluator.Factory map(Literal lit, Layout layout) { record LiteralsEvaluator(DriverContext context, Literal lit) implements ExpressionEvaluator { @Override - public Block.Ref eval(Page page) { - return Block.Ref.floating(block(lit, context.blockFactory(), page.getPositionCount())); + public Block eval(Page page) { + return block(lit, context.blockFactory(), page.getPositionCount()); } @Override @@ -261,12 +263,10 @@ public String toString() { record IsNullEvaluator(DriverContext driverContext, EvalOperator.ExpressionEvaluator field) implements ExpressionEvaluator { @Override - public Block.Ref eval(Page page) { - try (Block.Ref fieldBlock = field.eval(page)) { - if (fieldBlock.block().asVector() != null) { - return Block.Ref.floating( - BooleanBlock.newConstantBlockWith(false, page.getPositionCount(), driverContext.blockFactory()) - ); + public Block eval(Page page) { + try (Block fieldBlock = field.eval(page)) { + if (fieldBlock.asVector() != null) { + return BooleanBlock.newConstantBlockWith(false, page.getPositionCount(), 
driverContext.blockFactory()); } try ( BooleanVector.FixedBuilder builder = BooleanVector.newVectorFixedBuilder( @@ -275,9 +275,9 @@ public Block.Ref eval(Page page) { ) ) { for (int p = 0; p < page.getPositionCount(); p++) { - builder.appendBoolean(fieldBlock.block().isNull(p)); + builder.appendBoolean(fieldBlock.isNull(p)); } - return Block.Ref.floating(builder.build().asBlock()); + return builder.build().asBlock(); } } } @@ -317,12 +317,10 @@ record IsNotNullEvaluator(DriverContext driverContext, EvalOperator.ExpressionEv implements EvalOperator.ExpressionEvaluator { @Override - public Block.Ref eval(Page page) { - try (Block.Ref fieldBlock = field.eval(page)) { - if (fieldBlock.block().asVector() != null) { - return Block.Ref.floating( - BooleanBlock.newConstantBlockWith(true, page.getPositionCount(), driverContext.blockFactory()) - ); + public Block eval(Page page) { + try (Block fieldBlock = field.eval(page)) { + if (fieldBlock.asVector() != null) { + return BooleanBlock.newConstantBlockWith(true, page.getPositionCount(), driverContext.blockFactory()); } try ( BooleanVector.FixedBuilder builder = BooleanVector.newVectorFixedBuilder( @@ -331,9 +329,9 @@ public Block.Ref eval(Page page) { ) ) { for (int p = 0; p < page.getPositionCount(); p++) { - builder.appendBoolean(fieldBlock.block().isNull(p) == false); + builder.appendBoolean(fieldBlock.isNull(p) == false); } - return Block.Ref.floating(builder.build().asBlock()); + return builder.build().asBlock(); } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/mapper/EvaluatorMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/mapper/EvaluatorMapper.java index 90699bfaf83df..3ab555799ee34 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/mapper/EvaluatorMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/mapper/EvaluatorMapper.java @@ -37,8 +37,8 @@ public interface EvaluatorMapper { default Object fold() { return toJavaObject(toEvaluator(e -> driverContext -> new ExpressionEvaluator() { @Override - public Block.Ref eval(Page page) { - return Block.Ref.floating(fromArrayRow(driverContext.blockFactory(), e.fold())[0]); + public Block eval(Page page) { + return fromArrayRow(driverContext.blockFactory(), e.fold())[0]; } @Override @@ -49,6 +49,6 @@ public void close() {} // TODO maybe this should have a small fixed limit? 
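// Editor's note: with Block.Ref removed, the evaluator contract in this file becomes:
// eval(page) returns a Block that carries its own reference, and the caller must close it.
// An evaluator that merely forwards an input channel therefore incRefs the page's block
// before returning, as the Attribute mapper above does. Minimal restatement
// (ChannelEvaluator is an illustrative name):
record ChannelEvaluator(int channel) implements EvalOperator.ExpressionEvaluator {
    @Override
    public Block eval(Page page) {
        Block block = page.getBlock(channel);
        block.incRef(); // the page keeps its reference; the caller gets an independent one
        return block;
    }

    @Override
    public void close() {}
}
// Callers consume the result in try-with-resources, e.g. try (Block b = evaluator.eval(page)) { ... }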
new BlockFactory(new NoopCircuitBreaker(CircuitBreaker.REQUEST), BigArrays.NON_RECYCLING_INSTANCE) ) - ).eval(new Page(1)).block(), 0); + ).eval(new Page(1)), 0); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java index 2be60292dff6f..6ef37abf5a9b4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java @@ -13,7 +13,6 @@ import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BooleanVector; import org.elasticsearch.compute.data.Page; -import org.elasticsearch.compute.data.Vector; import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.core.Releasables; @@ -47,7 +46,7 @@ public ExpressionEvaluator.Factory map(In in, Layout layout) { record InExpressionEvaluator(List listEvaluators) implements EvalOperator.ExpressionEvaluator { @Override - public Block.Ref eval(Page page) { + public Block eval(Page page) { int positionCount = page.getPositionCount(); boolean[] values = new boolean[positionCount]; BitSet nulls = new BitSet(positionCount); // at least one evaluation resulted in NULL on a row @@ -55,22 +54,21 @@ public Block.Ref eval(Page page) { for (int i = 0; i < listEvaluators().size(); i++) { var evaluator = listEvaluators.get(i); - try (Block.Ref ref = evaluator.eval(page)) { - - Vector vector = ref.block().asVector(); + try (BooleanBlock block = (BooleanBlock) evaluator.eval(page)) { + BooleanVector vector = block.asVector(); if (vector != null) { - updateValues((BooleanVector) vector, values); + updateValues(vector, values); } else { - if (ref.block().areAllValuesNull()) { + if (block.areAllValuesNull()) { nullInValues = true; } else { - updateValues((BooleanBlock) ref.block(), values, nulls); + updateValues(block, values, nulls); } } } } - return Block.Ref.floating(evalWithNulls(values, nulls, nullInValues)); + return evalWithNulls(values, nulls, nullInValues); } private static void updateValues(BooleanVector vector, boolean[] values) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java index a3d08e4cb6306..caef1fe0de627 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java @@ -212,7 +212,7 @@ private record CaseEvaluator( EvalOperator.ExpressionEvaluator elseVal ) implements EvalOperator.ExpressionEvaluator { @Override - public Block.Ref eval(Page page) { + public Block eval(Page page) { /* * We have to evaluate lazily so any errors or warnings that would be * produced by the right hand side are avoided. 
And so if anything @@ -231,26 +231,25 @@ public Block.Ref eval(Page page) { ); try (Releasable ignored = limited::releaseBlocks) { for (ConditionEvaluator condition : conditions) { - try (Block.Ref conditionRef = condition.condition.eval(limited)) { - BooleanBlock b = (BooleanBlock) conditionRef.block(); + try (BooleanBlock b = (BooleanBlock) condition.condition.eval(limited)) { if (b.isNull(0)) { continue; } if (false == b.getBoolean(b.getFirstValueIndex(0))) { continue; } - try (Block.Ref valueRef = condition.value.eval(limited)) { - result.copyFrom(valueRef.block(), 0, 1); + try (Block values = condition.value.eval(limited)) { + result.copyFrom(values, 0, 1); continue position; } } } - try (Block.Ref elseRef = elseVal.eval(limited)) { - result.copyFrom(elseRef.block(), 0, 1); + try (Block values = elseVal.eval(limited)) { + result.copyFrom(values, 0, 1); } } } - return Block.Ref.floating(result.build()); + return result.build(); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java index c717fafa877a9..0da3717f758bf 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java @@ -100,18 +100,21 @@ protected AbstractEvaluator(DriverContext driverContext, EvalOperator.Expression /** * Called when evaluating a {@link Block} that contains null values. + * @return the returned Block has its own reference and the caller is responsible for releasing it. */ protected abstract Block evalBlock(Block b); /** * Called when evaluating a {@link Block} that does not contain null values. + * @return the returned Block has its own reference and the caller is responsible for releasing it. */ protected abstract Block evalVector(Vector v); - public Block.Ref eval(Page page) { - try (Block.Ref ref = fieldEvaluator.eval(page)) { - Vector vector = ref.block().asVector(); - return Block.Ref.floating(vector == null ? evalBlock(ref.block()) : evalVector(vector)); + @Override + public final Block eval(Page page) { + try (Block block = fieldEvaluator.eval(page)) { + Vector vector = block.asVector(); + return vector == null ? 
evalBlock(block) : evalVector(vector); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Pow.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Pow.java index 48db81fefbc98..9e160e7c2f15f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Pow.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Pow.java @@ -25,7 +25,6 @@ import java.util.Objects; import java.util.function.Function; -import static org.elasticsearch.xpack.esql.expression.function.scalar.math.Cast.cast; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.SECOND; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isNumeric; @@ -37,13 +36,13 @@ public class Pow extends ScalarFunction implements OptionalArgument, EvaluatorMa public Pow( Source source, - @Param(name = "base", type = { "integer", "long", "double" }) Expression base, - @Param(name = "exponent", type = { "integer", "double" }) Expression exponent + @Param(name = "base", type = { "integer", "unsigned_long", "long", "double" }) Expression base, + @Param(name = "exponent", type = { "integer", "unsigned_long", "long", "double" }) Expression exponent ) { super(source, Arrays.asList(base, exponent)); this.base = base; this.exponent = exponent; - this.dataType = determineDataType(base, exponent); + this.dataType = DataTypes.DOUBLE; } @Override @@ -70,65 +69,19 @@ public Object fold() { return EvaluatorMapper.super.fold(); } - @Evaluator(extraName = "Double", warnExceptions = { ArithmeticException.class }) + @Evaluator(warnExceptions = { ArithmeticException.class }) static double process(double base, double exponent) { return validateAsDouble(base, exponent); } - @Evaluator(extraName = "Long", warnExceptions = { ArithmeticException.class }) - static long processLong(double base, double exponent) { - return exponent == 1 ? validateAsLong(base) : validateAsLong(base, exponent); - } - - @Evaluator(extraName = "Int", warnExceptions = { ArithmeticException.class }) - static int processInt(double base, double exponent) { - return exponent == 1 ? 
validateAsInt(base) : validateAsInt(base, exponent); - } - private static double validateAsDouble(double base, double exponent) { double result = Math.pow(base, exponent); - if (Double.isNaN(result)) { - throw new ArithmeticException("invalid result: pow(" + base + ", " + exponent + ")"); + if (Double.isNaN(result) || Double.isInfinite(result)) { + throw new ArithmeticException("invalid result when computing pow"); } return result; } - private static long validateAsLong(double base, double exponent) { - double result = Math.pow(base, exponent); - if (Double.isNaN(result)) { - throw new ArithmeticException("invalid result: pow(" + base + ", " + exponent + ")"); - } - return validateAsLong(result); - } - - private static long validateAsLong(double value) { - if (Double.compare(value, Long.MAX_VALUE) > 0) { - throw new ArithmeticException("long overflow"); - } - if (Double.compare(value, Long.MIN_VALUE) < 0) { - throw new ArithmeticException("long overflow"); - } - return (long) value; - } - - private static int validateAsInt(double base, double exponent) { - double result = Math.pow(base, exponent); - if (Double.isNaN(result)) { - throw new ArithmeticException("invalid result: pow(" + base + ", " + exponent + ")"); - } - return validateAsInt(result); - } - - private static int validateAsInt(double value) { - if (Double.compare(value, Integer.MAX_VALUE) > 0) { - throw new ArithmeticException("integer overflow"); - } - if (Double.compare(value, Integer.MIN_VALUE) < 0) { - throw new ArithmeticException("integer overflow"); - } - return (int) value; - } - @Override public final Expression replaceChildren(List newChildren) { return new Pow(source(), newChildren.get(0), newChildren.get(1)); @@ -152,16 +105,6 @@ public DataType dataType() { return dataType; } - private static DataType determineDataType(Expression base, Expression exponent) { - if (base.dataType().isRational() || exponent.dataType().isRational()) { - return DataTypes.DOUBLE; - } - if (base.dataType().size() == Long.BYTES || exponent.dataType().size() == Long.BYTES) { - return DataTypes.LONG; - } - return DataTypes.INTEGER; - } - @Override public ScriptTemplate asScript() { throw new UnsupportedOperationException("functions do not support scripting"); @@ -169,27 +112,9 @@ public ScriptTemplate asScript() { @Override public ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { - var baseEvaluator = toEvaluator.apply(base); - var exponentEvaluator = toEvaluator.apply(exponent); - if (dataType == DataTypes.DOUBLE) { - return new PowDoubleEvaluator.Factory( - source(), - cast(base.dataType(), DataTypes.DOUBLE, baseEvaluator), - cast(exponent.dataType(), DataTypes.DOUBLE, exponentEvaluator) - ); - } else if (dataType == DataTypes.LONG) { - return new PowLongEvaluator.Factory( - source(), - cast(base.dataType(), DataTypes.DOUBLE, baseEvaluator), - cast(exponent.dataType(), DataTypes.DOUBLE, exponentEvaluator) - ); - } else { - return new PowIntEvaluator.Factory( - source(), - cast(base.dataType(), DataTypes.DOUBLE, baseEvaluator), - cast(exponent.dataType(), DataTypes.DOUBLE, exponentEvaluator) - ); - } + var baseEval = Cast.cast(base.dataType(), DataTypes.DOUBLE, toEvaluator.apply(base)); + var expEval = Cast.cast(exponent.dataType(), DataTypes.DOUBLE, toEvaluator.apply(exponent)); + return new PowEvaluator.Factory(source(), baseEval, expEval); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java index 196137336bee5..e3bb8212aebab 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java @@ -64,30 +64,37 @@ protected AbstractEvaluator(EvalOperator.ExpressionEvaluator field) { * that it's producing an "array vector" because it only ever emits single * valued fields and no null values. Building an array vector directly is * generally faster than building it via a {@link Block.Builder}. + * + * @return the returned Block has its own reference and the caller is responsible for releasing it. */ - protected abstract Block.Ref evalNotNullable(Block.Ref fieldVal); + protected abstract Block evalNotNullable(Block fieldVal); /** - * Called to evaluate single valued fields when the target block does not - * have null values. + * Called to evaluate single valued fields when the target block does not have null values. + * + * @return the returned Block has its own reference and the caller is responsible for releasing it. */ - protected Block.Ref evalSingleValuedNotNullable(Block.Ref fieldRef) { + protected Block evalSingleValuedNotNullable(Block fieldRef) { + fieldRef.incRef(); return fieldRef; } @Override - public final Block.Ref eval(Page page) { - Block.Ref ref = field.eval(page); - if (ref.block().mayHaveMultivaluedFields() == false) { - if (ref.block().mayHaveNulls()) { - return evalSingleValuedNullable(ref); + public final Block eval(Page page) { + try (Block block = field.eval(page)) { + if (block.mayHaveMultivaluedFields()) { + if (block.mayHaveNulls()) { + return evalNullable(block); + } else { + return evalNotNullable(block); + } + } + if (block.mayHaveNulls()) { + return evalSingleValuedNullable(block); + } else { + return evalSingleValuedNotNullable(block); } - return evalSingleValuedNotNullable(ref); - } - if (ref.block().mayHaveNulls()) { - return evalNullable(ref); } - return evalNotNullable(ref); } } @@ -105,21 +112,28 @@ protected AbstractNullableEvaluator(EvalOperator.ExpressionEvaluator field) { /** * Called when evaluating a {@link Block} that contains null values. + * @return the returned Block has its own reference and the caller is responsible for releasing it. */ - protected abstract Block.Ref evalNullable(Block.Ref fieldVal); + protected abstract Block evalNullable(Block fieldVal); /** - * Called to evaluate single valued fields when the target block has null - * values. + * Called to evaluate single valued fields when the target block has null values. + * @return the returned Block has its own reference and the caller is responsible for releasing it. */ - protected Block.Ref evalSingleValuedNullable(Block.Ref fieldRef) { + protected Block evalSingleValuedNullable(Block fieldRef) { + fieldRef.incRef(); return fieldRef; } @Override - public Block.Ref eval(Page page) { - Block.Ref ref = field.eval(page); - return ref.block().mayHaveMultivaluedFields() ? 
evalNullable(ref) : evalSingleValuedNullable(ref); + public Block eval(Page page) { + try (Block block = field.eval(page)) { + if (block.mayHaveMultivaluedFields()) { + return evalNullable(block); + } else { + return evalSingleValuedNullable(block); + } + } } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcat.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcat.java index a187bb41ee235..9f5c492d7fe7c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcat.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcat.java @@ -120,11 +120,8 @@ private static class Evaluator implements ExpressionEvaluator { } @Override - public final Block.Ref eval(Page page) { - try (Block.Ref fieldRef = field.eval(page); Block.Ref delimRef = delim.eval(page)) { - BytesRefBlock fieldVal = (BytesRefBlock) fieldRef.block(); - BytesRefBlock delimVal = (BytesRefBlock) delimRef.block(); - + public final Block eval(Page page) { + try (BytesRefBlock fieldVal = (BytesRefBlock) field.eval(page); BytesRefBlock delimVal = (BytesRefBlock) delim.eval(page)) { int positionCount = page.getPositionCount(); try (BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(positionCount, context.blockFactory())) { BytesRefBuilder work = new BytesRefBuilder(); // TODO BreakingBytesRefBuilder so we don't blow past circuit breakers @@ -155,7 +152,7 @@ public final Block.Ref eval(Page page) { } builder.appendBytesRef(work.get()); } - return Block.Ref.floating(builder.build()); + return builder.build(); } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java index 528c0b6d5f0cb..6d1446f4cccf4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java @@ -95,46 +95,38 @@ protected String name() { } @Override - protected Block.Ref evalNullable(Block.Ref ref) { - try (ref; IntBlock.Builder builder = IntBlock.newBlockBuilder(ref.block().getPositionCount(), driverContext.blockFactory())) { - for (int p = 0; p < ref.block().getPositionCount(); p++) { - int valueCount = ref.block().getValueCount(p); + protected Block evalNullable(Block block) { + try (var builder = IntBlock.newBlockBuilder(block.getPositionCount(), driverContext.blockFactory())) { + for (int p = 0; p < block.getPositionCount(); p++) { + int valueCount = block.getValueCount(p); if (valueCount == 0) { builder.appendNull(); continue; } builder.appendInt(valueCount); } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @Override - protected Block.Ref evalNotNullable(Block.Ref ref) { - try ( - ref; - IntVector.FixedBuilder builder = IntVector.newVectorFixedBuilder( - ref.block().getPositionCount(), - driverContext.blockFactory() - ) - ) { - for (int p = 0; p < ref.block().getPositionCount(); p++) { - builder.appendInt(ref.block().getValueCount(p)); + protected Block evalNotNullable(Block block) { + try (var builder = IntVector.newVectorFixedBuilder(block.getPositionCount(), driverContext.blockFactory())) { + for (int 
p = 0; p < block.getPositionCount(); p++) { + builder.appendInt(block.getValueCount(p)); } - return Block.Ref.floating(builder.build().asBlock()); + return builder.build().asBlock(); } } @Override - protected Block.Ref evalSingleValuedNullable(Block.Ref ref) { + protected Block evalSingleValuedNullable(Block ref) { return evalNullable(ref); } @Override - protected Block.Ref evalSingleValuedNotNullable(Block.Ref ref) { - try (ref) { - return Block.Ref.floating(driverContext.blockFactory().newConstantIntBlockWith(1, ref.block().getPositionCount())); - } + protected Block evalSingleValuedNotNullable(Block ref) { + return driverContext.blockFactory().newConstantIntBlockWith(1, ref.getPositionCount()); } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java index d0fe387d680db..ea95971e1b7b6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java @@ -143,7 +143,7 @@ private record CoalesceEvaluator(DriverContext driverContext, ElementType result implements EvalOperator.ExpressionEvaluator { @Override - public Block.Ref eval(Page page) { + public Block eval(Page page) { /* * We have to evaluate lazily so any errors or warnings that would be * produced by the right hand side are avoided. And so if anything @@ -163,9 +163,9 @@ public Block.Ref eval(Page page) { ); try (Releasable ignored = limited::releaseBlocks) { for (EvalOperator.ExpressionEvaluator eval : evaluators) { - try (Block.Ref ref = eval.eval(limited)) { - if (false == ref.block().isNull(0)) { - result.copyFrom(ref.block(), 0, 1); + try (Block block = eval.eval(limited)) { + if (false == block.isNull(0)) { + result.copyFrom(block, 0, 1); continue position; } } @@ -173,7 +173,7 @@ public Block.Ref eval(Page page) { result.appendNull(); } } - return Block.Ref.floating(result.build()); + return result.build(); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrim.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrim.java index 952c3314af80a..382f64fcf831c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrim.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrim.java @@ -12,6 +12,8 @@ import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.TypeResolutions; @@ -27,8 +29,9 @@ * Removes leading whitespaces from a string. 
*/ public class LTrim extends UnaryScalarFunction implements EvaluatorMapper { - public LTrim(Source source, Expression field) { - super(source, field); + @FunctionInfo(returnType = { "keyword", "text" }, description = "Removes leading whitespaces from a string.") + public LTrim(Source source, @Param(name = "str", type = { "keyword", "text" }) Expression str) { + super(source, str); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrim.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrim.java index 273a032a90ed3..98fc93b4f6acc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrim.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrim.java @@ -12,6 +12,8 @@ import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.TypeResolutions; @@ -27,8 +29,9 @@ * Removes trailing whitespaces from a string. */ public class RTrim extends UnaryScalarFunction implements EvaluatorMapper { - public RTrim(Source source, Expression field) { - super(source, field); + @FunctionInfo(returnType = { "keyword", "text" }, description = "Removes trailing whitespaces from a string.") + public RTrim(Source source, @Param(name = "str", type = { "keyword", "text" }) Expression str) { + super(source, str); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Trim.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Trim.java index b865199c1c2ae..ce15d1db0f8f9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Trim.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Trim.java @@ -12,6 +12,8 @@ import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.TypeResolutions; @@ -27,8 +29,8 @@ * Removes leading and trailing whitespaces from a string. 
*/ public final class Trim extends UnaryScalarFunction implements EvaluatorMapper { - - public Trim(Source source, Expression str) { + @FunctionInfo(returnType = { "keyword", "text" }, description = "Removes leading and trailing whitespaces from a string.") + public Trim(Source source, @Param(name = "str", type = { "keyword", "text" }) Expression str) { super(source, str); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index e7409543ca68e..3ae19ceef4d08 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -115,7 +115,6 @@ protected static List> rules() { "Operator Optimization", new CombineProjections(), new CombineEvals(), - new ReplaceDuplicateAggWithEval(), new PruneEmptyPlans(), new PropagateEmptyRelation(), new ConvertStringToByteRef(), @@ -149,7 +148,13 @@ protected static List> rules() { ); var skip = new Batch<>("Skip Compute", new SkipQueryOnLimitZero()); - var cleanup = new Batch<>("Clean Up", new ReplaceLimitAndSortAsTopN()); + var cleanup = new Batch<>( + "Clean Up", + new ReplaceDuplicateAggWithEval(), + // pushing down limits again, because ReplaceDuplicateAggWithEval could create new Project nodes that can still be optimized + new PushDownAndCombineLimits(), + new ReplaceLimitAndSortAsTopN() + ); var defaultTopN = new Batch<>("Add default TopN", new AddDefaultTopN()); var label = new Batch<>("Set as Optimized", Limiter.ONCE, new SetAsOptimized()); @@ -281,7 +286,10 @@ protected LogicalPlan rule(UnaryPlan plan) { // eliminate lower project but first replace the aliases in the upper one return p.withProjections(combineProjections(project.projections(), p.projections())); } else if (child instanceof Aggregate a) { - return new Aggregate(a.source(), a.child(), a.groupings(), combineProjections(project.projections(), a.aggregates())); + var aggs = a.aggregates(); + var newAggs = combineProjections(project.projections(), aggs); + var newGroups = replacePrunedAliasesUsedInGroupBy(a.groupings(), aggs, newAggs); + return new Aggregate(a.source(), a.child(), newGroups, newAggs); } } @@ -320,6 +328,39 @@ private List combineProjections(List return replaced; } + /** + * Replace grouping alias previously contained in the aggregations that might have been projected away. 
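// Editor's note: LTrim, RTrim and Trim above all gain the same documentation metadata:
// a @FunctionInfo on the constructor declaring return types and a description, and a
// @Param on each argument declaring its name and accepted types; as I understand it,
// the function registry reads these to generate the function docs and signatures.
// Sketch of the shape for a hypothetical unary string function:
public class MyStringFunction extends UnaryScalarFunction implements EvaluatorMapper {
    @FunctionInfo(returnType = { "keyword", "text" }, description = "Describe the transformation here.")
    public MyStringFunction(Source source, @Param(name = "str", type = { "keyword", "text" }) Expression str) {
        super(source, str);
    }
}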
+ */ + private List replacePrunedAliasesUsedInGroupBy( + List groupings, + List oldAggs, + List newAggs + ) { + AttributeMap removedAliases = new AttributeMap<>(); + AttributeSet currentAliases = new AttributeSet(Expressions.asAttributes(newAggs)); + + // record only removed aliases + for (NamedExpression ne : oldAggs) { + if (ne instanceof Alias alias) { + var attr = ne.toAttribute(); + if (currentAliases.contains(attr) == false) { + removedAliases.put(attr, alias.child()); + } + } + } + + if (removedAliases.isEmpty()) { + return groupings; + } + + var newGroupings = new ArrayList(groupings.size()); + for (Expression group : groupings) { + newGroupings.add(group.transformUp(Attribute.class, a -> removedAliases.resolve(a, a))); + } + + return newGroupings; + } + public static Expression trimNonTopLevelAliases(Expression e) { if (e instanceof Alias a) { return new Alias(a.source(), a.name(), a.qualifier(), trimAliases(a.child()), a.id()); @@ -786,7 +827,7 @@ protected LogicalPlan rule(OrderBy orderBy) { if (child instanceof OrderBy childOrder) { // combine orders - return new OrderBy(orderBy.source(), childOrder.child(), CollectionUtils.combine(orderBy.order(), childOrder.order())); + return new OrderBy(orderBy.source(), childOrder.child(), orderBy.order()); } else if (child instanceof Project) { return pushDownPastProject(orderBy); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java index 49caf0e4618bd..d5763f28f6394 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java @@ -134,12 +134,12 @@ public PlanFactory visitDissectCommand(EsqlBaseParser.DissectCommandContext ctx) referenceKeys.iterator().next() ); } - List keys = parser.outputKeys() - .stream() - .map(x -> new ReferenceAttribute(src, x, DataTypes.KEYWORD)) - .map(Attribute.class::cast) - .toList(); - + List keys = new ArrayList<>(); + for (var x : parser.outputKeys()) { + if (x.isEmpty() == false) { + keys.add(new ReferenceAttribute(src, x, DataTypes.KEYWORD)); + } + } return new Dissect(src, p, expression(ctx.primaryExpression()), new Dissect.Parser(pattern, appendSeparator, parser), keys); } catch (DissectException e) { throw new ParsingException(src, "Invalid pattern for dissect: [{}]", pattern); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java index f73ab716cb534..f1647ff15d9d0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java @@ -19,7 +19,7 @@ import org.elasticsearch.compute.lucene.ValuesSourceReaderOperator; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.OrdinalsGroupingOperator; -import org.elasticsearch.index.mapper.BlockDocValuesReader; +import org.elasticsearch.index.mapper.BlockLoader; import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -62,33 +62,23 @@ public List searchContexts() { @Override public final PhysicalOperation 
fieldExtractPhysicalOperation(FieldExtractExec fieldExtractExec, PhysicalOperation source) { Layout.Builder layout = source.layout.builder(); - var sourceAttr = fieldExtractExec.sourceAttribute(); - - PhysicalOperation op = source; + List readers = searchContexts.stream() + .map(s -> new ValuesSourceReaderOperator.ShardContext(s.searcher().getIndexReader(), s::newSourceLoader)) + .toList(); + List fields = new ArrayList<>(); + int docChannel = source.layout.get(sourceAttr.id()).channel(); for (Attribute attr : fieldExtractExec.attributesToExtract()) { if (attr instanceof FieldAttribute fa && fa.getExactInfo().hasExact()) { attr = fa.exactAttribute(); } layout.append(attr); - Layout previousLayout = op.layout; - DataType dataType = attr.dataType(); String fieldName = attr.name(); - List factories = BlockReaderFactories.factories( - searchContexts, - fieldName, - EsqlDataTypes.isUnsupported(dataType) - ); - - int docChannel = previousLayout.get(sourceAttr.id()).channel(); - - op = op.with( - new ValuesSourceReaderOperator.ValuesSourceReaderOperatorFactory(factories, docChannel, fieldName), - layout.build() - ); + List loaders = BlockReaderFactories.loaders(searchContexts, fieldName, EsqlDataTypes.isUnsupported(dataType)); + fields.add(new ValuesSourceReaderOperator.FieldInfo(fieldName, loaders)); } - return op; + return source.with(new ValuesSourceReaderOperator.Factory(fields, readers, docChannel), layout.build()); } public static Function querySupplier(QueryBuilder queryBuilder) { @@ -170,10 +160,14 @@ public final Operator.OperatorFactory ordinalGroupingOperatorFactory( ) { var sourceAttribute = FieldExtractExec.extractSourceAttributesFrom(aggregateExec.child()); int docChannel = source.layout.get(sourceAttribute.id()).channel(); + List shardContexts = searchContexts.stream() + .map(s -> new ValuesSourceReaderOperator.ShardContext(s.searcher().getIndexReader(), s::newSourceLoader)) + .toList(); // The grouping-by values are ready, let's group on them directly. // Costin: why are they ready and not already exposed in the layout? 
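// A rough sketch of what the factory below now receives (field name hypothetical): for two shards, // loaders = BlockReaderFactories.loaders(searchContexts, "color", false) yields one BlockLoader per shard, // and shardContexts holds one ShardContext per shard, so ordinals resolve against the right index reader.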
return new OrdinalsGroupingOperator.OrdinalsGroupingOperatorFactory( - BlockReaderFactories.factories(searchContexts, attrSource.name(), EsqlDataTypes.isUnsupported(attrSource.dataType())), + BlockReaderFactories.loaders(searchContexts, attrSource.name(), EsqlDataTypes.isUnsupported(attrSource.dataType())), + shardContexts, groupElementType, docChannel, attrSource.name(), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java index 9a76bc0865865..74cc4ab999808 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java @@ -13,7 +13,6 @@ import org.elasticsearch.compute.Describable; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; -import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.lucene.LuceneCountOperator; @@ -290,6 +289,7 @@ public static ElementType toElementType(DataType dataType) { if (dataType == DataTypes.KEYWORD || dataType == DataTypes.TEXT || dataType == DataTypes.IP + || dataType == DataTypes.SOURCE || dataType == DataTypes.VERSION || dataType == DataTypes.UNSUPPORTED) { return ElementType.BYTES_REF; @@ -335,8 +335,10 @@ private static Function alignPageToAttributes(List attrs, var blocks = new Block[mappedPosition.length]; for (int i = 0; i < blocks.length; i++) { blocks[i] = p.getBlock(mappedPosition[i]); + blocks[i].incRef(); } - return p.newPageAndRelease(blocks); + p.releaseBlocks(); + return new Page(blocks); } : Function.identity(); return transformer; @@ -360,11 +362,10 @@ private PhysicalOperation planExchangeSink(ExchangeSinkExec exchangeSink, LocalE // the outputs are going to be similar except for the bool "seen" flags which are added in below List blocks = new ArrayList<>(asList(localExec.supplier().get())); if (blocks.size() > 0) { - Block boolBlock = BooleanBlock.newConstantBlockWith(true, 1); for (int i = 0, s = output.size(); i < s; i++) { var out = output.get(i); if (out.dataType() == DataTypes.BOOLEAN) { - blocks.add(i, boolBlock); + blocks.add(i, BlockFactory.getNonBreakingInstance().newConstantBooleanBlockWith(true, 1)); } } } @@ -584,16 +585,17 @@ private PhysicalOperation planProject(ProjectExec project, LocalExecutionPlanner inputId = ne.id(); } Layout.ChannelAndType input = source.layout.get(inputId); - Layout.ChannelSet channelSet = inputChannelToOutputIds.computeIfAbsent( - input.channel(), - ignore -> new Layout.ChannelSet(new HashSet<>(), input.type()) - ); + Layout.ChannelSet channelSet = inputChannelToOutputIds.get(input.channel()); + if (channelSet == null) { + channelSet = new Layout.ChannelSet(new HashSet<>(), input.type()); + channelSet.nameIds().add(ne.id()); + layout.append(channelSet); + } else { + channelSet.nameIds().add(ne.id()); + } if (channelSet.type() != input.type()) { throw new IllegalArgumentException("type mismatch for aliases"); } - channelSet.nameIds().add(ne.id()); - - layout.append(channelSet); projectionList.add(input.channel()); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index 
e10469a4ff97d..c28867f89c981 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -10,11 +10,10 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchShardsAction; import org.elasticsearch.action.search.SearchShardsGroup; import org.elasticsearch.action.search.SearchShardsRequest; import org.elasticsearch.action.search.SearchShardsResponse; -import org.elasticsearch.action.support.ChannelActionListener; +import org.elasticsearch.action.search.TransportSearchShardsAction; import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.action.support.RefCountingRunnable; @@ -25,6 +24,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.compute.OwningChannelActionListener; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.Driver; @@ -52,6 +52,7 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestOptions; @@ -68,6 +69,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -341,6 +343,14 @@ private void computeTargetNodes( String[] originalIndices, ActionListener> listener ) { + var remoteIndices = transportService.getRemoteClusterService().groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, originalIndices); + remoteIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + if (remoteIndices.isEmpty() == false) { + listener.onFailure( + new IllegalArgumentException("ES|QL does not yet support querying remote indices " + Arrays.toString(originalIndices)) + ); + return; + } // Ideally, the search_shards API should be called before the field-caps API; however, this can lead // to a situation where the column structure (i.e., matched data types) differs depending on the query. 
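// For the guard above, e.g. with originalIndices = ["remote1:logs-*", "logs-local"] (hypothetical patterns), // groupIndices returns { "remote1" -> logs-*, LOCAL_CLUSTER_GROUP_KEY -> logs-local }; removing the local // group leaves a non-empty map, which fails the request early with the "remote indices" message above.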
ThreadContext threadContext = transportService.getThreadPool().getThreadContext(); @@ -393,7 +403,7 @@ private void computeTargetNodes( ); transportService.sendChildRequest( transportService.getLocalNode(), - SearchShardsAction.NAME, + TransportSearchShardsAction.TYPE.name(), searchShardsRequest, parentTask, TransportRequestOptions.EMPTY, @@ -426,7 +436,7 @@ public void messageReceived(DataNodeRequest request, TransportChannel channel, Task task) final var sessionId = request.sessionId(); final var exchangeSink = exchangeService.getSinkHandler(sessionId); parentTask.addListener(() -> exchangeService.finishSinkHandler(sessionId, new TaskCancelledException("task cancelled"))); - final ActionListener<Void> listener = new ChannelActionListener<>(channel).map(nullValue -> new DataNodeResponse()); + final ActionListener<Void> listener = new OwningChannelActionListener<>(channel).map(nullValue -> new DataNodeResponse()); acquireSearchContexts(request.shardIds(), request.aliasFilters(), ActionListener.wrap(searchContexts -> { var computeContext = new ComputeContext(sessionId, searchContexts, request.configuration(), null, exchangeSink); runCompute(parentTask, computeContext, request.plan(), ActionListener.wrap(unused -> { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java index bdb475be925ce..de4af3497d80d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java @@ -88,7 +88,7 @@ protected void doExecute(Task task, EsqlQueryRequest request, ActionListener<EsqlQueryResponse> listener) { EsqlConfiguration configuration = new EsqlConfiguration( - request.zoneId() != null ? request.zoneId() : ZoneOffset.UTC, + ZoneOffset.UTC, request.locale() != null ? request.locale() : Locale.US, // TODO: plug-in security null, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java index b554ccb2920aa..e419be2b7e1fc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java @@ -10,8 +10,10 @@ import org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.PointValues; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.Terms; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; @@ -321,20 +323,30 @@ public Scorer scorer(LeafReaderContext context) throws IOException { * can't do that because we need to check the number of fields.
*/ if (lfd instanceof LeafNumericFieldData n) { - return scorer(nextScorer, n); + return scorer(context, nextScorer, n); } if (lfd instanceof LeafOrdinalsFieldData o) { - return scorer(nextScorer, o); + return scorer(context, nextScorer, o); } return scorer(nextScorer, lfd); } - private Scorer scorer(Scorer nextScorer, LeafNumericFieldData lfd) { + private Scorer scorer(LeafReaderContext context, Scorer nextScorer, LeafNumericFieldData lfd) throws IOException { SortedNumericDocValues sortedNumerics = lfd.getLongValues(); if (DocValues.unwrapSingleton(sortedNumerics) != null) { - // Segment contains only single valued fields. - stats.numericSingle++; - return nextScorer; + /* + * Segment contains only single valued fields. But it's possible + * that some fields have 0 values. The most surefire way to check + * is to look at the index for the data. If there isn't an index + * this isn't going to work - but if there is we can compare the + * number of documents in the index to the number of values in it - + * if they are the same we've got a dense singleton. + */ + PointValues points = context.reader().getPointValues(fieldData.getFieldName()); + if (points != null && points.getDocCount() == context.reader().maxDoc()) { + stats.numericSingle++; + return nextScorer; + } } TwoPhaseIterator nextIterator = nextScorer.twoPhaseIterator(); if (nextIterator == null) { @@ -353,12 +365,22 @@ private Scorer scorer(Scorer nextScorer, LeafNumericFieldData lfd) { ); } - private Scorer scorer(Scorer nextScorer, LeafOrdinalsFieldData lfd) { + private Scorer scorer(LeafReaderContext context, Scorer nextScorer, LeafOrdinalsFieldData lfd) throws IOException { SortedSetDocValues sortedSet = lfd.getOrdinalsValues(); if (DocValues.unwrapSingleton(sortedSet) != null) { - // Segment contains only single valued fields. - stats.ordinalsSingle++; - return nextScorer; + /* + * Segment contains only single valued fields. But it's possible + * that some fields have 0 values. The most surefire way to check + * is to look at the index for the data. If there isn't an index + * this isn't going to work - but if there is we can compare the + * number of documents in the index to the number of values in it - + * if they are the same we've got a dense singleton. 
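+ * For example (numbers hypothetical), a segment with terms.getDocCount() == 100 and + * maxDoc() == 100 is dense: combined with the singleton doc-values check above, every + * document holds exactly one value, so the wrapped scorer can be returned unchanged.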
+ */ + Terms terms = context.reader().terms(fieldData.getFieldName()); + if (terms != null && terms.getDocCount() == context.reader().maxDoc()) { + stats.ordinalsSingle++; + return nextScorer; + } } TwoPhaseIterator nextIterator = nextScorer.twoPhaseIterator(); if (nextIterator == null) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index d1a073f64fe81..7adcb0a1f9623 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -27,6 +27,7 @@ import org.elasticsearch.xpack.esql.optimizer.PhysicalPlanOptimizer; import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.parser.TypedParamValue; +import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.Keep; import org.elasticsearch.xpack.esql.plan.logical.RegexExtract; import org.elasticsearch.xpack.esql.plan.physical.EstimatesRowSize; @@ -37,6 +38,7 @@ import org.elasticsearch.xpack.ql.expression.Alias; import org.elasticsearch.xpack.ql.expression.Attribute; import org.elasticsearch.xpack.ql.expression.AttributeSet; +import org.elasticsearch.xpack.ql.expression.EmptyAttribute; import org.elasticsearch.xpack.ql.expression.MetadataAttribute; import org.elasticsearch.xpack.ql.expression.UnresolvedStar; import org.elasticsearch.xpack.ql.expression.function.FunctionRegistry; @@ -185,12 +187,8 @@ private void preAnalyzeIndices(LogicalPlan parsed, ActionListener void preAnalyzeIndices(LogicalPlan parsed, ActionListener void preAnalyzeIndices(LogicalPlan parsed, ActionListener fieldNames(LogicalPlan parsed) { + static Set fieldNames(LogicalPlan parsed, Set enrichPolicyMatchFields) { if (false == parsed.anyMatch(plan -> plan instanceof Aggregate || plan instanceof Project)) { // no explicit columns selection, for example "from employees" return IndexResolver.ALL_FIELDS; @@ -242,6 +243,12 @@ static Set fieldNames(LogicalPlan parsed) { for (Attribute extracted : re.extractedFields()) { references.removeIf(attr -> matchByName(attr, extracted.qualifiedName(), false)); } + } else if (p instanceof Enrich) { + AttributeSet enrichRefs = p.references(); + // Enrich adds an EmptyAttribute if no match field is specified + // The exact name of the field will be added later as part of enrichPolicyMatchFields Set + enrichRefs.removeIf(attr -> attr instanceof EmptyAttribute); + references.addAll(enrichRefs); } else { references.addAll(p.references()); if (p instanceof Keep) { @@ -266,10 +273,13 @@ static Set fieldNames(LogicalPlan parsed) { // otherwise, in some edge cases, we will fail to ask for "*" (all fields) instead references.removeIf(a -> a instanceof MetadataAttribute || MetadataAttribute.isSupported(a.qualifiedName())); Set fieldNames = references.names(); - if (fieldNames.isEmpty()) { - return IndexResolver.ALL_FIELDS; + if (fieldNames.isEmpty() && enrichPolicyMatchFields.isEmpty()) { + // there cannot be an empty list of fields, we'll ask the simplest and lightest one instead: _index + return IndexResolver.INDEX_METADATA_FIELD; } else { fieldNames.addAll(subfields(fieldNames)); + fieldNames.addAll(enrichPolicyMatchFields); + fieldNames.addAll(subfields(enrichPolicyMatchFields)); return fieldNames; } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java index b8ba722b989ad..61a739c786dac 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java @@ -34,6 +34,7 @@ import static org.elasticsearch.xpack.ql.type.DataTypes.OBJECT; import static org.elasticsearch.xpack.ql.type.DataTypes.SCALED_FLOAT; import static org.elasticsearch.xpack.ql.type.DataTypes.SHORT; +import static org.elasticsearch.xpack.ql.type.DataTypes.SOURCE; import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT; import static org.elasticsearch.xpack.ql.type.DataTypes.UNSIGNED_LONG; import static org.elasticsearch.xpack.ql.type.DataTypes.UNSUPPORTED; @@ -64,6 +65,7 @@ public final class EsqlDataTypes { OBJECT, NESTED, SCALED_FLOAT, + SOURCE, VERSION, UNSIGNED_LONG ).sorted(Comparator.comparing(DataType::typeName)).toList(); @@ -158,6 +160,7 @@ public static boolean isRepresentable(DataType t) { && t != SHORT && t != FLOAT && t != SCALED_FLOAT + && t != SOURCE && t != HALF_FLOAT; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 13581710f7c53..99b21225e1985 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -9,6 +9,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.Randomness; @@ -215,7 +216,7 @@ public CsvTests(String fileName, String groupName, String testName, Integer line public final void test() throws Throwable { try { - assumeTrue("Test " + testName + " is not enabled", isEnabled(testName)); + assumeTrue("Test " + testName + " is not enabled", isEnabled(testName, Version.CURRENT)); doTest(); } catch (Throwable th) { throw reworkException(th); @@ -228,7 +229,7 @@ protected final boolean enableWarningsCheck() { } public boolean logResults() { - return false; + return true; } private void doTest() throws Exception { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java index dd25148c958d0..b1b492b28076e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.xpack.esql.parser.TypedParamValue; import java.io.IOException; -import java.time.ZoneId; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -42,7 +41,6 @@ public class EsqlQueryRequestTests extends ESTestCase { public void testParseFields() throws IOException { String query = randomAlphaOfLengthBetween(1, 100); boolean columnar = randomBoolean(); - ZoneId zoneId = randomZone(); Locale locale = randomLocale(random()); QueryBuilder filter = randomQueryBuilder(); @@ -53,16 +51,14 @@ public void testParseFields() throws IOException { { "query": "%s", "columnar": %s, - "time_zone": "%s", "locale": "%s", "filter": %s - %s""", query, columnar, zoneId, locale.toLanguageTag(), filter, 
paramsString); + %s""", query, columnar, locale.toLanguageTag(), filter, paramsString); EsqlQueryRequest request = parseEsqlQueryRequest(json); assertEquals(query, request.query()); assertEquals(columnar, request.columnar()); - assertEquals(zoneId, request.zoneId()); assertEquals(locale.toLanguageTag(), request.locale().toLanguageTag()); assertEquals(locale, request.locale()); assertEquals(filter, request.filter()); @@ -77,8 +73,8 @@ public void testRejectUnknownFields() { assertParserErrorMessage(""" { "query": "foo", - "time_z0ne": "Z" - }""", "unknown field [time_z0ne] did you mean [time_zone]?"); + "columbar": true + }""", "unknown field [columbar] did you mean [columnar]?"); assertParserErrorMessage(""" { @@ -90,7 +86,7 @@ public void testRejectUnknownFields() { public void testMissingQueryIsNotValidation() throws IOException { EsqlQueryRequest request = parseEsqlQueryRequest(""" { - "time_zone": "Z" + "columnar": true }"""); assertNotNull(request.validate()); assertThat(request.validate().getMessage(), containsString("[query] is required")); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java index d71d0074c7ec0..3316f76c44680 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; @@ -32,6 +33,7 @@ import org.elasticsearch.test.AbstractChunkedSerializingTestCase; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.type.DataType; @@ -40,6 +42,8 @@ import org.junit.After; import org.junit.Before; +import java.io.IOException; +import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.List; @@ -112,6 +116,20 @@ private Page randomPage(List columns) { ); case "version" -> ((BytesRefBlock.Builder) builder).appendBytesRef(new Version(randomIdentifier()).toBytesRef()); case "null" -> builder.appendNull(); + case "_source" -> { + try { + ((BytesRefBlock.Builder) builder).appendBytesRef( + BytesReference.bytes( + JsonXContent.contentBuilder() + .startObject() + .field(randomAlphaOfLength(3), randomAlphaOfLength(10)) + .endObject() + ).toBytesRef() + ); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } default -> throw new UnsupportedOperationException("unsupported data type [" + c + "]"); } return builder.build(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 6e75eea75f655..8990433a5155d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -7,13 +7,26 @@ package org.elasticsearch.xpack.esql.analysis; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.enrich.EnrichPolicy; +import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; +import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.expression.function.aggregate.Max; import org.elasticsearch.xpack.esql.plan.logical.EsqlUnresolvedRelation; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.Row; +import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; +import org.elasticsearch.xpack.esql.session.EsqlSession; +import org.elasticsearch.xpack.esql.type.EsqlDataTypeRegistry; import org.elasticsearch.xpack.ql.expression.Alias; import org.elasticsearch.xpack.ql.expression.Attribute; import org.elasticsearch.xpack.ql.expression.Expressions; @@ -24,22 +37,30 @@ import org.elasticsearch.xpack.ql.expression.UnresolvedAttribute; import org.elasticsearch.xpack.ql.index.EsIndex; import org.elasticsearch.xpack.ql.index.IndexResolution; +import org.elasticsearch.xpack.ql.index.IndexResolver; import org.elasticsearch.xpack.ql.plan.TableIdentifier; import org.elasticsearch.xpack.ql.plan.logical.Aggregate; import org.elasticsearch.xpack.ql.plan.logical.EsRelation; import org.elasticsearch.xpack.ql.plan.logical.Filter; import org.elasticsearch.xpack.ql.plan.logical.Limit; +import org.elasticsearch.xpack.ql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.ql.plan.logical.OrderBy; import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; import org.elasticsearch.xpack.ql.type.TypesTests; +import java.io.IOException; +import java.io.InputStream; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.stream.IntStream; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.TEST_VERIFIER; import static org.elasticsearch.xpack.esql.EsqlTestUtils.as; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.configuration; import static org.elasticsearch.xpack.esql.EsqlTestUtils.withDefaultLimitWarning; +import static org.elasticsearch.xpack.esql.analysis.Analyzer.NO_FIELDS; import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.analyze; import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.analyzer; import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.loadMapping; @@ -68,7 +89,7 @@ public void testIndexResolution() { var plan = analyzer.analyze(UNRESOLVED_RELATION); var limit = as(plan, Limit.class); - assertEquals(new EsRelation(EMPTY, idx, false), limit.child()); + assertEquals(new EsRelation(EMPTY, idx, NO_FIELDS), limit.child()); } public void testFailOnUnresolvedIndex() { @@ -86,7 +107,7 @@ public void testIndexWithClusterResolution() { var plan = analyzer.analyze(UNRESOLVED_RELATION); var limit = as(plan, Limit.class); - 
assertEquals(new EsRelation(EMPTY, idx, false), limit.child()); + assertEquals(new EsRelation(EMPTY, idx, NO_FIELDS), limit.child()); } public void testAttributeResolution() { @@ -1120,6 +1141,61 @@ public void testAggsWithoutAggAndFollowingCommand() throws Exception { assertEquals(agg.groupings(), agg.aggregates()); } + public void testEmptyEsRelationOnLimitZeroWithCount() throws IOException { + var query = """ + from test* + | stats count=count(*) + | sort count desc + | limit 0"""; + var plan = analyzeWithEmptyFieldCapsResponse(query); + var limit = as(plan, Limit.class); + limit = as(limit.child(), Limit.class); + assertThat(limit.limit().fold(), equalTo(0)); + var orderBy = as(limit.child(), OrderBy.class); + var agg = as(orderBy.child(), Aggregate.class); + assertEmptyEsRelation(agg.child()); + } + + public void testEmptyEsRelationOnConstantEvalAndKeep() throws IOException { + var query = """ + from test* + | eval c = 1 + | keep c + | limit 2"""; + var plan = analyzeWithEmptyFieldCapsResponse(query); + var limit = as(plan, Limit.class); + limit = as(limit.child(), Limit.class); + assertThat(limit.limit().fold(), equalTo(2)); + var project = as(limit.child(), EsqlProject.class); + var eval = as(project.child(), Eval.class); + assertEmptyEsRelation(eval.child()); + } + + public void testEmptyEsRelationOnConstantEvalAndStats() throws IOException { + var query = """ + from test* + | limit 10 + | eval x = 1 + | stats c = count(x)"""; + var plan = analyzeWithEmptyFieldCapsResponse(query); + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + var eval = as(agg.child(), Eval.class); + limit = as(eval.child(), Limit.class); + assertThat(limit.limit().fold(), equalTo(10)); + assertEmptyEsRelation(limit.child()); + } + + public void testEmptyEsRelationOnCountStar() throws IOException { + var query = """ + from test* + | stats c = count(*)"""; + var plan = analyzeWithEmptyFieldCapsResponse(query); + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertEmptyEsRelation(agg.child()); + } + public void testUnsupportedFieldsInStats() { var errorMsg = "Cannot use field [point] with unsupported type [geo_point]"; @@ -1311,6 +1387,27 @@ public void testEnrichExcludesPolicyKey() { assertThat(e.getMessage(), containsString("Unknown column [id]")); } + public void testEnrichFieldsIncludeMatchField() { + String query = """ + FROM test + | EVAL x = to_string(languages) + | ENRICH languages ON x + | KEEP language_name, language_code + """; + IndexResolution testIndex = loadMapping("mapping-basic.json", "test"); + IndexResolution languageIndex = loadMapping("mapping-languages.json", "languages"); + var enrichPolicy = new EnrichPolicy("match", null, List.of("unused"), "language_code", List.of("language_code", "language_name")); + EnrichResolution enrichResolution = new EnrichResolution( + Set.of(new EnrichPolicyResolution("languages", enrichPolicy, languageIndex)), + Set.of("languages") + ); + AnalyzerContext context = new AnalyzerContext(configuration(query), new EsqlFunctionRegistry(), testIndex, enrichResolution); + Analyzer analyzer = new Analyzer(context, TEST_VERIFIER); + LogicalPlan plan = analyze(query, analyzer); + var limit = as(plan, Limit.class); + assertThat(Expressions.names(limit.output()), contains("language_name", "language_code")); + } + public void testChainedEvalFieldsUse() { var query = "from test | eval x0 = pow(salary, 1), x1 = pow(x0, 2), x2 = pow(x1, 3)"; int additionalEvals = randomIntBetween(0, 5); @@ -1361,4 
+1458,30 @@ protected List filteredWarnings() { return withDefaultLimitWarning(super.filteredWarnings()); } + private static LogicalPlan analyzeWithEmptyFieldCapsResponse(String query) throws IOException { + IndexResolution resolution = IndexResolver.mergedMappings( + EsqlDataTypeRegistry.INSTANCE, + "test*", + readFieldCapsResponse("empty_field_caps_response.json"), + EsqlSession::specificValidity, + IndexResolver.PRESERVE_PROPERTIES, + IndexResolver.INDEX_METADATA_FIELD + ); + var analyzer = analyzer(resolution, TEST_VERIFIER, configuration(query)); + return analyze(query, analyzer); + } + + private static FieldCapabilitiesResponse readFieldCapsResponse(String resourceName) throws IOException { + InputStream stream = AnalyzerTests.class.getResourceAsStream("/" + resourceName); + BytesReference ref = Streams.readFully(stream); + XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, ref, XContentType.JSON); + return FieldCapabilitiesResponse.fromXContent(parser); + } + + private void assertEmptyEsRelation(LogicalPlan plan) { + assertThat(plan, instanceOf(EsRelation.class)); + EsRelation esRelation = (EsRelation) plan; + assertThat(esRelation.output(), equalTo(NO_FIELDS)); + assertTrue(esRelation.index().mapping().isEmpty()); + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichQuerySourceOperatorTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichQuerySourceOperatorTests.java index 107f749aefa0f..c06ed820b9983 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichQuerySourceOperatorTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichQuerySourceOperatorTests.java @@ -19,7 +19,13 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.tests.store.MockDirectoryWrapper; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockUtils; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.DocBlock; @@ -31,6 +37,8 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.test.ESTestCase; +import org.junit.After; +import org.junit.Before; import java.util.HashMap; import java.util.HashSet; @@ -43,6 +51,21 @@ public class EnrichQuerySourceOperatorTests extends ESTestCase { + private BlockFactory blockFactory; + + @Before + public void setupBlockFactory() { + BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, ByteSizeValue.ofGb(1)).withCircuitBreaking(); + CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); + this.blockFactory = new BlockFactory(breaker, bigArrays); + } + + @After + public void allBreakersEmpty() throws Exception { + MockBigArrays.ensureAllArraysAreReleased(); + assertThat(blockFactory.breaker().getUsed(), equalTo(0L)); + } + public void testQueries() throws Exception { MockDirectoryWrapper dir = newMockDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(); @@ -66,18 +89,19 @@ public void testQueries() throws 
Exception { DirectoryReader reader = DirectoryReader.open(writer); writer.close(); - BytesRefBlock inputTerms = BytesRefBlock.newBlockBuilder(6) - .appendBytesRef(new BytesRef("b2")) - .beginPositionEntry() - .appendBytesRef(new BytesRef("c1")) - .appendBytesRef(new BytesRef("a2")) - .endPositionEntry() - .appendBytesRef(new BytesRef("z2")) - .appendNull() - .appendBytesRef(new BytesRef("a3")) - .appendNull() - .build(); - + final BytesRefBlock inputTerms; + try (BytesRefBlock.Builder termBuilder = blockFactory.newBytesRefBlockBuilder(6)) { + termBuilder.appendBytesRef(new BytesRef("b2")) + .beginPositionEntry() + .appendBytesRef(new BytesRef("c1")) + .appendBytesRef(new BytesRef("a2")) + .endPositionEntry() + .appendBytesRef(new BytesRef("z2")) + .appendNull() + .appendBytesRef(new BytesRef("a3")) + .appendNull(); + inputTerms = termBuilder.build(); + } MappedFieldType uidField = new KeywordFieldMapper.KeywordFieldType("uid"); QueryList queryList = QueryList.termQueryList(uidField, mock(SearchExecutionContext.class), inputTerms); assertThat(queryList.getPositionCount(), equalTo(6)); @@ -95,7 +119,7 @@ public void testQueries() throws Exception { // 3 -> [] -> [] // 4 -> [a1] -> [3] // 5 -> [] -> [] - EnrichQuerySourceOperator queryOperator = new EnrichQuerySourceOperator(queryList, reader); + EnrichQuerySourceOperator queryOperator = new EnrichQuerySourceOperator(blockFactory, queryList, reader); { Page p0 = queryOperator.getOutput(); assertNotNull(p0); @@ -106,6 +130,7 @@ public void testQueries() throws Exception { Block positions = p0.getBlock(1); assertThat(BlockUtils.toJavaObject(positions, 0), equalTo(0)); assertThat(BlockUtils.toJavaObject(positions, 1), equalTo(0)); + p0.releaseBlocks(); } { Page p1 = queryOperator.getOutput(); @@ -119,6 +144,7 @@ public void testQueries() throws Exception { assertThat(BlockUtils.toJavaObject(positions, 0), equalTo(1)); assertThat(BlockUtils.toJavaObject(positions, 1), equalTo(1)); assertThat(BlockUtils.toJavaObject(positions, 2), equalTo(1)); + p1.releaseBlocks(); } { Page p2 = queryOperator.getOutput(); @@ -136,6 +162,7 @@ public void testQueries() throws Exception { assertThat(docs.getInt(0), equalTo(3)); Block positions = p4.getBlock(1); assertThat(BlockUtils.toJavaObject(positions, 0), equalTo(4)); + p4.releaseBlocks(); } { Page p5 = queryOperator.getOutput(); @@ -147,7 +174,7 @@ public void testQueries() throws Exception { assertNull(p6); } assertTrue(queryOperator.isFinished()); - IOUtils.close(reader, dir); + IOUtils.close(reader, dir, inputTerms); } public void testRandomMatchQueries() throws Exception { @@ -171,25 +198,28 @@ public void testRandomMatchQueries() throws Exception { Map> expectedPositions = new HashMap<>(); int numPositions = randomIntBetween(1, 1000); - BytesRefBlock.Builder inputTerms = BytesRefBlock.newBlockBuilder(numPositions); - for (int i = 0; i < numPositions; i++) { - if (randomBoolean()) { - String term = randomFrom(terms.keySet()); - inputTerms.appendBytesRef(new BytesRef(term)); - Integer position = terms.get(term); - expectedPositions.put(i, Set.of(position)); - } else { + final BytesRefBlock inputTerms; + try (BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(numPositions)) { + for (int i = 0; i < numPositions; i++) { if (randomBoolean()) { - inputTerms.appendNull(); + String term = randomFrom(terms.keySet()); + builder.appendBytesRef(new BytesRef(term)); + Integer position = terms.get(term); + expectedPositions.put(i, Set.of(position)); } else { - String term = "other-" + 
randomIntBetween(1, 100); - inputTerms.appendBytesRef(new BytesRef(term)); + if (randomBoolean()) { + builder.appendNull(); + } else { + String term = "other-" + randomIntBetween(1, 100); + builder.appendBytesRef(new BytesRef(term)); + } } } + inputTerms = builder.build(); } MappedFieldType uidField = new KeywordFieldMapper.KeywordFieldType("uid"); - QueryList queryList = QueryList.termQueryList(uidField, mock(SearchExecutionContext.class), inputTerms.build()); - EnrichQuerySourceOperator queryOperator = new EnrichQuerySourceOperator(queryList, reader); + var queryList = QueryList.termQueryList(uidField, mock(SearchExecutionContext.class), inputTerms); + EnrichQuerySourceOperator queryOperator = new EnrichQuerySourceOperator(blockFactory, queryList, reader); Map> actualPositions = new HashMap<>(); while (queryOperator.isFinished() == false) { Page page = queryOperator.getOutput(); @@ -201,10 +231,11 @@ public void testRandomMatchQueries() throws Exception { int position = positions.getInt(i); actualPositions.computeIfAbsent(position, k -> new HashSet<>()).add(doc); } + page.releaseBlocks(); } } assertThat(actualPositions, equalTo(expectedPositions)); - IOUtils.close(reader, dir); + IOUtils.close(reader, dir, inputTerms); } private static IntVector getDocVector(Page page, int blockIndex) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/MergePositionsOperatorTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/MergePositionsOperatorTests.java index 0a0f6e5217044..80d127fc81907 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/MergePositionsOperatorTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/MergePositionsOperatorTests.java @@ -8,9 +8,14 @@ package org.elasticsearch.xpack.esql.enrich; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockUtils; import org.elasticsearch.compute.data.BytesRefBlock; -import org.elasticsearch.compute.data.ConstantIntVector; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.Page; @@ -22,58 +27,79 @@ public class MergePositionsOperatorTests extends ESTestCase { - public void testSimple() { + public void testSimple() throws Exception { + BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, ByteSizeValue.ofGb(1)).withCircuitBreaking(); + CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); + BlockFactory blockFactory = new BlockFactory(breaker, bigArrays); MergePositionsOperator mergeOperator = new MergePositionsOperator( randomBoolean(), 7, 0, new int[] { 1, 2 }, - new ElementType[] { ElementType.BYTES_REF, ElementType.INT } + new ElementType[] { ElementType.BYTES_REF, ElementType.INT }, + blockFactory ); - mergeOperator.addInput( - new Page( - new ConstantIntVector(1, 1).asBlock(), - BytesRefBlock.newBlockBuilder(1).appendBytesRef(new BytesRef("w0")).build(), - IntBlock.newBlockBuilder(1).appendNull().build() - ) - ); - mergeOperator.addInput( - new Page( - new ConstantIntVector(2, 1).asBlock(), - BytesRefBlock.newBlockBuilder(1) - 
.beginPositionEntry() + { + final IntBlock b1 = blockFactory.newConstantIntBlockWith(1, 1); + final BytesRefBlock b2; + try (var builder = blockFactory.newBytesRefBlockBuilder(1)) { + b2 = builder.appendBytesRef(new BytesRef("w0")).build(); + } + final IntBlock b3; + try (var builder = blockFactory.newIntBlockBuilder(1)) { + b3 = builder.appendNull().build(); + } + mergeOperator.addInput(new Page(b1, b2, b3)); + } + { + final IntBlock b1 = blockFactory.newConstantIntBlockWith(2, 1); + final BytesRefBlock b2; + try (var builder = blockFactory.newBytesRefBlockBuilder(1)) { + b2 = builder.beginPositionEntry() .appendBytesRef(new BytesRef("a1")) .appendBytesRef(new BytesRef("c1")) .endPositionEntry() - .build(), - IntBlock.newBlockBuilder(1).appendNull().build() - ) - ); - mergeOperator.addInput( - new Page( - new ConstantIntVector(3, 2).asBlock(), - BytesRefBlock.newBlockBuilder(1) - .appendBytesRef(new BytesRef("f5")) + .build(); + } + final IntBlock b3; + try (var builder = blockFactory.newIntBlockBuilder(1)) { + b3 = builder.appendNull().build(); + } + mergeOperator.addInput(new Page(b1, b2, b3)); + } + { + final IntBlock b1 = blockFactory.newConstantIntBlockWith(3, 2); + final BytesRefBlock b2; + try (var builder = blockFactory.newBytesRefBlockBuilder(2)) { + b2 = builder.appendBytesRef(new BytesRef("f5")) .beginPositionEntry() .appendBytesRef(new BytesRef("k1")) .appendBytesRef(new BytesRef("k2")) .endPositionEntry() - .build(), - IntBlock.newBlockBuilder(1).appendInt(2020).appendInt(2021).build() - ) - ); - mergeOperator.addInput( - new Page( - new ConstantIntVector(5, 1).asBlock(), - BytesRefBlock.newBlockBuilder(1) - .beginPositionEntry() + .build(); + } + final IntBlock b3; + try (var builder = blockFactory.newIntBlockBuilder(2)) { + b3 = builder.appendInt(2020).appendInt(2021).build(); + } + mergeOperator.addInput(new Page(b1, b2, b3)); + } + { + final IntBlock b1 = blockFactory.newConstantIntBlockWith(5, 1); + final BytesRefBlock b2; + try (var builder = blockFactory.newBytesRefBlockBuilder(1)) { + b2 = builder.beginPositionEntry() .appendBytesRef(new BytesRef("r2")) .appendBytesRef(new BytesRef("k2")) .endPositionEntry() - .build(), - IntBlock.newBlockBuilder(1).appendInt(2023).build() - ) - ); + .build(); + } + final IntBlock b3; + try (var builder = blockFactory.newIntBlockBuilder(1)) { + b3 = builder.appendInt(2023).build(); + } + mergeOperator.addInput(new Page(b1, b2, b3)); + } mergeOperator.finish(); Page out = mergeOperator.getOutput(); assertTrue(mergeOperator.isFinished()); @@ -98,5 +124,8 @@ public void testSimple() { assertTrue(f2.isNull(4)); assertThat(BlockUtils.toJavaObject(f2, 5), equalTo(2023)); assertTrue(f2.isNull(6)); + mergeOperator.close(); + out.releaseBlocks(); + MockBigArrays.ensureAllArraysAreReleased(); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index 094ecc9bfe569..64b93cc6eae5d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -12,6 +12,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.bytes.BytesReference; import 
org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; @@ -30,6 +31,7 @@ import org.elasticsearch.indices.CrankyCircuitBreakerService; import org.elasticsearch.logging.LogManager; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.esql.evaluator.EvalMapper; import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Greatest; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; @@ -52,8 +54,13 @@ import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.TestRule; +import org.junit.runner.Description; +import org.junit.runners.model.Statement; import java.io.IOException; +import java.io.UncheckedIOException; import java.nio.file.Files; import java.nio.file.Path; import java.time.Duration; @@ -111,17 +118,26 @@ public static Literal randomLiteral(DataType type) { case "text" -> new BytesRef(randomAlphaOfLength(50)); case "version" -> randomVersion().toBytesRef(); case "null" -> null; + case "_source" -> { + try { + yield BytesReference.bytes( + JsonXContent.contentBuilder().startObject().field(randomAlphaOfLength(3), randomAlphaOfLength(10)).endObject() + ).toBytesRef(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } default -> throw new IllegalArgumentException("can't make random values for [" + type.typeName() + "]"); }, type); } protected TestCaseSupplier.TestCase testCase; - protected static Iterable parameterSuppliersFromTypedData(List cases) { + protected static Iterable parameterSuppliersFromTypedData(List suppliers) { // TODO rename this method to something more descriptive. Javadoc. And make sure all parameters are "representable" types. - List parameters = new ArrayList<>(cases.size()); - for (TestCaseSupplier element : cases) { - parameters.add(new Object[] { element }); + List parameters = new ArrayList<>(suppliers.size()); + for (TestCaseSupplier supplier : suppliers) { + parameters.add(new Object[] { supplier }); } return parameters; } @@ -217,8 +233,8 @@ private void testEvaluate(boolean readFloating) { // TODO should we convert unsigned_long into BigDecimal so it's easier to assert? Object result; try (ExpressionEvaluator evaluator = evaluator(expression).get(driverContext())) { - try (Block.Ref ref = evaluator.eval(row(testCase.getDataValues()))) { - result = toJavaObject(ref.block(), 0); + try (Block block = evaluator.eval(row(testCase.getDataValues()))) { + result = toJavaObject(block, 0); } } assertThat(result, not(equalTo(Double.NaN))); @@ -369,18 +385,17 @@ private void testEvaluateBlock(BlockFactory inputBlockFactory, DriverContext con } } Expression expression = readFloating ? 
buildDeepCopyOfFieldExpression(testCase) : buildFieldExpression(testCase); - try (ExpressionEvaluator eval = evaluator(expression).get(context); Block.Ref ref = eval.eval(new Page(manyPositionsBlocks))) { - assertThat(ref.block().getPositionCount(), equalTo(ref.block().getPositionCount())); + try (ExpressionEvaluator eval = evaluator(expression).get(context); Block block = eval.eval(new Page(manyPositionsBlocks))) { for (int p = 0; p < positions; p++) { if (nullPositions.contains(p)) { - assertThat(toJavaObject(ref.block(), p), allNullsMatcher()); + assertThat(toJavaObject(block, p), allNullsMatcher()); continue; } - assertThat(toJavaObject(ref.block(), p), testCase.getMatcher()); + assertThat(toJavaObject(block, p), testCase.getMatcher()); } assertThat( "evaluates to tracked block", - ref.block().blockFactory(), + block.blockFactory(), either(sameInstance(context.blockFactory())).or(sameInstance(inputBlockFactory)) ); } @@ -412,8 +427,8 @@ public final void testSimpleWithNulls() { // TODO replace this with nulls insert data.add(simpleData.get(b)); } } - try (Block.Ref ref = eval.eval(new Page(blocks))) { - assertSimpleWithNulls(data, ref.block(), i); + try (Block block = eval.eval(new Page(blocks))) { + assertSimpleWithNulls(data, block, i); } } } @@ -440,8 +455,8 @@ public final void testEvaluateInManyThreads() throws ExecutionException, Interru futures.add(exec.submit(() -> { try (EvalOperator.ExpressionEvaluator eval = evalSupplier.get(driverContext())) { for (int c = 0; c < count; c++) { - try (Block.Ref ref = eval.eval(page)) { - assertThat(toJavaObject(ref.block(), 0), testCase.getMatcher()); + try (Block block = eval.eval(page)) { + assertThat(toJavaObject(block, 0), testCase.getMatcher()); } } } @@ -493,13 +508,34 @@ public void testSerializationOfSimple() { assertSerialization(buildFieldExpression(testCase)); } + private static boolean ranAllTests = false; + + @ClassRule + public static TestRule rule = new TestRule() { + @Override + public Statement apply(Statement base, Description description) { + for (Description d : description.getChildren()) { + if (d.getChildren().size() > 1) { + ranAllTests = true; + return base; + } + } + return base; + } + }; + @AfterClass public static void testFunctionInfo() { + if (ranAllTests == false) { + LogManager.getLogger(getTestClass()).info("Skipping function info checks because we're running a portion of the tests"); + return; + } FunctionDefinition definition = definition(); if (definition == null) { LogManager.getLogger(getTestClass()).info("Skipping function info checks because the function isn't registered"); return; } + LogManager.getLogger(getTestClass()).info("Running function info checks"); EsqlFunctionRegistry.FunctionDescription description = EsqlFunctionRegistry.description(definition); List args = description.args(); @@ -648,6 +684,30 @@ protected static List errorsForCasesWithoutExamples(List failureForCasesWithoutExamples(List testCaseSuppliers) { + typesRequired(testCaseSuppliers); + List suppliers = new ArrayList<>(testCaseSuppliers.size()); + suppliers.addAll(testCaseSuppliers); + + Set> valid = testCaseSuppliers.stream().map(TestCaseSupplier::types).collect(Collectors.toSet()); + List> validPerPosition = validPerPosition(valid); + + testCaseSuppliers.stream() + .map(s -> s.types().size()) + .collect(Collectors.toSet()) + .stream() + .flatMap(count -> allPermutations(count)) + .filter(types -> valid.contains(types) == false) + .map(types -> new TestCaseSupplier("type error for " + TestCaseSupplier.nameFromTypes(types), types, 
() -> { + throw new IllegalStateException("must implement a case for " + types); + })) + .forEach(suppliers::add); + return suppliers; + } + private static void typesRequired(List suppliers) { String bad = suppliers.stream().filter(s -> s.types() == null).map(s -> s.name()).collect(Collectors.joining("\n")); if (bad.equals("") == false) { @@ -762,6 +822,10 @@ public static void renderSignature() throws IOException { if (System.getProperty("generateDocs") == null) { return; } + if (ranAllTests == false) { + LogManager.getLogger(getTestClass()).info("Skipping rendering signature because we're running a portion of the tests"); + return; + } FunctionDefinition definition = definition(); if (definition == null) { LogManager.getLogger(getTestClass()).info("Skipping rendering signature because the function isn't registered"); @@ -906,4 +970,11 @@ static Version randomVersion() { default -> throw new IllegalArgumentException(); }; } + + /** + * All string types (keyword, text, match_only_text, etc). + */ + protected static DataType[] strings() { + return EsqlDataTypes.types().stream().filter(DataTypes::isString).toArray(DataType[]::new); + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/DeepCopy.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/DeepCopy.java index 4f217a68b8535..5b67011a818ab 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/DeepCopy.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/DeepCopy.java @@ -37,9 +37,9 @@ public EvalOperator.ExpressionEvaluator.Factory toEvaluator( private final EvalOperator.ExpressionEvaluator child = childEval.get(ctx); @Override - public Block.Ref eval(Page page) { - try (Block.Ref ref = child.eval(page)) { - return Block.Ref.floating(BlockUtils.deepCopyOf(ref.block(), ctx.blockFactory())); + public Block eval(Page page) { + try (Block block = child.eval(page)) { + return BlockUtils.deepCopyOf(block, ctx.blockFactory()); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java index e49776db1edea..8603cea9e873c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java @@ -26,7 +26,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.function.DoubleBinaryOperator; +import java.util.function.BinaryOperator; import java.util.function.DoubleFunction; import java.util.function.Function; import java.util.function.IntFunction; @@ -138,27 +138,28 @@ public static List forBinaryCastingToDouble( String name, String lhsName, String rhsName, - DoubleBinaryOperator expected, + BinaryOperator expected, Double lhsMin, Double lhsMax, Double rhsMin, Double rhsMax, List warnings ) { - List suppliers = new ArrayList<>(); - List lhsSuppliers = new ArrayList<>(); - List rhsSuppliers = new ArrayList<>(); - - lhsSuppliers.addAll(intCases(lhsMin.intValue(), lhsMax.intValue())); - lhsSuppliers.addAll(longCases(lhsMin.longValue(), lhsMax.longValue())); - lhsSuppliers.addAll(ulongCases(BigInteger.valueOf((long) Math.ceil(lhsMin)), BigInteger.valueOf((long) Math.floor(lhsMax)))); - lhsSuppliers.addAll(doubleCases(lhsMin, 
lhsMax)); - - rhsSuppliers.addAll(intCases(rhsMin.intValue(), rhsMax.intValue())); - rhsSuppliers.addAll(longCases(rhsMin.longValue(), rhsMax.longValue())); - rhsSuppliers.addAll(ulongCases(BigInteger.valueOf((long) Math.ceil(rhsMin)), BigInteger.valueOf((long) Math.floor(rhsMax)))); - rhsSuppliers.addAll(doubleCases(rhsMin, rhsMax)); + List lhsSuppliers = castToDoubleSuppliersFromRange(lhsMin, lhsMax); + List rhsSuppliers = castToDoubleSuppliersFromRange(rhsMin, rhsMax); + return forBinaryCastingToDouble(name, lhsName, rhsName, expected, lhsSuppliers, rhsSuppliers, warnings); + } + public static List forBinaryCastingToDouble( + String name, + String lhsName, + String rhsName, + BinaryOperator expected, + List lhsSuppliers, + List rhsSuppliers, + List warnings + ) { + List suppliers = new ArrayList<>(); for (TypedDataSupplier lhsSupplier : lhsSuppliers) { for (TypedDataSupplier rhsSupplier : rhsSuppliers) { String caseName = lhsSupplier.name() + ", " + rhsSupplier.name(); @@ -182,7 +183,7 @@ public static List forBinaryCastingToDouble( List.of(lhsTyped, rhsTyped), name + "[" + lhsName + "=" + lhsEvalName + ", " + rhsName + "=" + rhsEvalName + "]", DataTypes.DOUBLE, - equalTo(expected.applyAsDouble(lhs.doubleValue(), rhs.doubleValue())) + equalTo(expected.apply(lhs.doubleValue(), rhs.doubleValue())) ); for (String warning : warnings) { testCase = testCase.withWarning(warning); @@ -195,6 +196,15 @@ public static List forBinaryCastingToDouble( return suppliers; } + public static List castToDoubleSuppliersFromRange(Double Min, Double Max) { + List suppliers = new ArrayList<>(); + suppliers.addAll(intCases(Min.intValue(), Max.intValue())); + suppliers.addAll(longCases(Min.longValue(), Max.longValue())); + suppliers.addAll(ulongCases(BigInteger.valueOf((long) Math.ceil(Min)), BigInteger.valueOf((long) Math.floor(Max)))); + suppliers.addAll(doubleCases(Min, Max)); + return suppliers; + } + /** * Generate positive test cases for a unary function operating on an {@link DataTypes#INTEGER}. */ diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractScalarFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractScalarFunctionTestCase.java index ae46592a90ac1..08783823fc00f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractScalarFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractScalarFunctionTestCase.java @@ -76,13 +76,6 @@ public Set sortedTypesSet(DataType[] validTypes, DataType... additiona return mergedSet; } - /** - * All string types (keyword, text, match_only_text, etc). For passing to {@link #required} or {@link #optional}. - */ - protected static DataType[] strings() { - return EsqlDataTypes.types().stream().filter(DataTypes::isString).toArray(DataType[]::new); - } - /** * All integer types (long, int, short, byte). For passing to {@link #required} or {@link #optional}. 
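* For example, {@code required(integers())} declares an argument that must be one of those types.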
*/ diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java index f776e1a4655d2..838044c8b90f6 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java @@ -90,9 +90,9 @@ public void testEvalCase() { testCase(caseExpr -> { try ( EvalOperator.ExpressionEvaluator eval = caseExpr.toEvaluator(child -> evaluator(child)).get(driverContext()); - Block.Ref ref = eval.eval(new Page(IntBlock.newConstantBlockWith(0, 1))) + Block block = eval.eval(new Page(IntBlock.newConstantBlockWith(0, 1))) ) { - return toJavaObject(ref.block(), 0); + return toJavaObject(block, 0); } }); } @@ -148,12 +148,12 @@ public void testCaseWithIncompatibleTypes() { public void testCaseIsLazy() { Case caseExpr = caseExpr(true, 1, true, 2); - try (Block.Ref ref = caseExpr.toEvaluator(child -> { + try (Block block = caseExpr.toEvaluator(child -> { Object value = child.fold(); if (value != null && value.equals(2)) { return dvrCtx -> new EvalOperator.ExpressionEvaluator() { @Override - public Block.Ref eval(Page page) { + public Block eval(Page page) { fail("Unexpected evaluation of 4th argument"); return null; } @@ -164,7 +164,7 @@ public void close() {} } return evaluator(child); }).get(driverContext()).eval(new Page(IntBlock.newConstantBlockWith(0, 1)))) { - assertEquals(1, toJavaObject(ref.block(), 0)); + assertEquals(1, toJavaObject(block, 0)); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowTests.java index 58f56e54c7245..c8b316d8e6bfb 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowTests.java @@ -10,7 +10,6 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.ql.expression.Expression; @@ -21,8 +20,6 @@ import java.util.List; import java.util.function.Supplier; -import static org.hamcrest.Matchers.equalTo; - public class PowTests extends AbstractScalarFunctionTestCase { public PowTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) { this.testCase = testCaseSupplier.get(); @@ -30,415 +27,63 @@ public PowTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSu @ParametersFactory public static Iterable<Object[]> parameters() { - return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("pow(<double>, <int>)", () -> { - double base = 1 / randomDouble(); - int exponent = between(-30, 30); - return new TestCaseSupplier.TestCase( + // Positive number bases + List<TestCaseSupplier> suppliers = TestCaseSupplier.forBinaryCastingToDouble( + "PowEvaluator", + "base", + "exponent", + Math::pow, + // 143^143 is still representable, but 144^144 is infinite + 1d, + 143d, + -143d, + 143d, + List.of() + ); + // Anything to 0 is 1 + 
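// (Math.pow returns 1.0 for pow(x, ±0) for every x, even a NaN or infinite base, so any double is a fair base here) + 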
suppliers.addAll( + TestCaseSupplier.forBinaryCastingToDouble( + "PowEvaluator", + "base", + "exponent", + (b, e) -> 1d, + // the base may be any double at all + TestCaseSupplier.castToDoubleSuppliersFromRange(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY), List.of( - new TestCaseSupplier.TypedData(base, DataTypes.DOUBLE, "arg"), - new TestCaseSupplier.TypedData(exponent, DataTypes.INTEGER, "exp") + new TestCaseSupplier.TypedDataSupplier("<0 double>", () -> 0d, DataTypes.DOUBLE), + new TestCaseSupplier.TypedDataSupplier("<-0 double>", () -> -0d, DataTypes.DOUBLE) ), - "PowDoubleEvaluator[base=Attribute[channel=0], exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", - DataTypes.DOUBLE, - equalTo(Math.pow(base, exponent)) - ); - }), - new TestCaseSupplier( - "pow(NaN, 1)", - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(Double.NaN, DataTypes.DOUBLE, "base"), - new TestCaseSupplier.TypedData(1.0d, DataTypes.DOUBLE, "exp") - ), - "PowDoubleEvaluator[base=Attribute[channel=0], exponent=Attribute[channel=1]]", - DataTypes.DOUBLE, - equalTo(null) - ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.") - .withWarning("Line -1:-1: java.lang.ArithmeticException: invalid result: pow(NaN, 1.0)") - ), - new TestCaseSupplier( - "pow(1, NaN)", - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(1.0d, DataTypes.DOUBLE, "base"), - new TestCaseSupplier.TypedData(Double.NaN, DataTypes.DOUBLE, "exp") - ), - "PowDoubleEvaluator[base=Attribute[channel=0], exponent=Attribute[channel=1]]", - DataTypes.DOUBLE, - equalTo(null) - ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.") - .withWarning("Line -1:-1: java.lang.ArithmeticException: invalid result: pow(1.0, NaN)") - ), - new TestCaseSupplier( - "pow(NaN, 0)", - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(Double.NaN, DataTypes.DOUBLE, "base"), - new TestCaseSupplier.TypedData(0d, DataTypes.DOUBLE, "exp") - ), - "PowDoubleEvaluator[base=Attribute[channel=0], exponent=Attribute[channel=1]]", - DataTypes.DOUBLE, - equalTo(1d) - ) - ), - new TestCaseSupplier( - "pow(0, 0)", - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(0d, DataTypes.DOUBLE, "base"), - new TestCaseSupplier.TypedData(0d, DataTypes.DOUBLE, "exp") - ), - "PowDoubleEvaluator[base=Attribute[channel=0], exponent=Attribute[channel=1]]", - DataTypes.DOUBLE, - equalTo(1d) - ) - ), - new TestCaseSupplier( - "pow(1, 1)", - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(1, DataTypes.INTEGER, "base"), - new TestCaseSupplier.TypedData(1, DataTypes.INTEGER, "base") - ), - "PowIntEvaluator[base=CastIntToDoubleEvaluator[v=Attribute[channel=0]], " - + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", - DataTypes.INTEGER, - equalTo(1) - ) - ), - new TestCaseSupplier( - "pow(integer, 0)", - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(randomValueOtherThan(0, ESTestCase::randomInt), DataTypes.INTEGER, "base"), - new TestCaseSupplier.TypedData(0, DataTypes.INTEGER, "exp") - ), - "PowIntEvaluator[base=CastIntToDoubleEvaluator[v=Attribute[channel=0]], " - + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", - DataTypes.INTEGER, - equalTo(1) - ) - ), - new TestCaseSupplier("pow(integer, 2)", () -> { - int base = randomIntBetween(-1000, 1000); 
return new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(base, DataTypes.INTEGER, "base"), - new TestCaseSupplier.TypedData(2, DataTypes.INTEGER, "exp") - ), - "PowIntEvaluator[base=CastIntToDoubleEvaluator[v=Attribute[channel=0]], " - + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", - DataTypes.INTEGER, - equalTo((int) Math.pow(base, 2)) - ); - }), - new TestCaseSupplier( - "integer overflow case", - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(Integer.MAX_VALUE, DataTypes.INTEGER, "base"), - new TestCaseSupplier.TypedData(2, DataTypes.INTEGER, "exp") - ), - "PowIntEvaluator[base=CastIntToDoubleEvaluator[v=Attribute[channel=0]], " - + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", - DataTypes.INTEGER, - equalTo(null) - ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.") - .withWarning("Line -1:-1: java.lang.ArithmeticException: integer overflow") - ), - new TestCaseSupplier( - "long overflow case", - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(Long.MAX_VALUE, DataTypes.LONG, "base"), - new TestCaseSupplier.TypedData(2, DataTypes.INTEGER, "exp") - ), - "PowLongEvaluator[base=CastLongToDoubleEvaluator[v=Attribute[channel=0]], " - + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", - DataTypes.LONG, - equalTo(null) - ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.") - .withWarning("Line -1:-1: java.lang.ArithmeticException: long overflow") - ), - new TestCaseSupplier( - "pow(2, 0.5) == sqrt(2)", - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(2, DataTypes.INTEGER, "base"), - new TestCaseSupplier.TypedData(0.5, DataTypes.DOUBLE, "exp") - ), - "PowDoubleEvaluator[base=CastIntToDoubleEvaluator[v=Attribute[channel=0]], exponent=Attribute[channel=1]]", - DataTypes.DOUBLE, - equalTo(Math.sqrt(2)) - ) - ), - new TestCaseSupplier( - "pow(2.0, 0.5) == sqrt(2)", - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(2d, DataTypes.DOUBLE, "base"), - new TestCaseSupplier.TypedData(0.5, DataTypes.DOUBLE, "exp") - ), - "PowDoubleEvaluator[base=Attribute[channel=0], exponent=Attribute[channel=1]]", - DataTypes.DOUBLE, - equalTo(Math.sqrt(2)) - ) - ), - new TestCaseSupplier("pow(integer, double)", () -> { - // Positive numbers to a non-integer power - int base = randomIntBetween(1, 1000); - double exp = randomDoubleBetween(-10.0, 10.0, true); - double expected = Math.pow(base, exp); - TestCaseSupplier.TestCase testCase = new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(base, DataTypes.INTEGER, "base"), - new TestCaseSupplier.TypedData(exp, DataTypes.DOUBLE, "exp") - ), - "PowDoubleEvaluator[base=CastIntToDoubleEvaluator[v=Attribute[channel=0]], exponent=Attribute[channel=1]]", - DataTypes.DOUBLE, - equalTo(expected) - ); - return testCase; - }), - new TestCaseSupplier("fractional power of negative integer is null", () -> { - // Negative numbers to a non-integer power are NaN - int base = randomIntBetween(-1000, -1); - double exp = randomDouble(); // between 0 and 1 - TestCaseSupplier.TestCase testCase = new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(base, DataTypes.INTEGER, "base"), - new TestCaseSupplier.TypedData(exp, DataTypes.DOUBLE, "exp") - ), - "PowDoubleEvaluator[base=CastIntToDoubleEvaluator[v=Attribute[channel=0]], 
exponent=Attribute[channel=1]]", - DataTypes.DOUBLE, - equalTo(null) - ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.") - .withWarning("Line -1:-1: java.lang.ArithmeticException: invalid result: pow(" + (double) base + ", " + exp + ")"); - return testCase; - }), - new TestCaseSupplier( - "pow(123, -1)", - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(123, DataTypes.INTEGER, "base"), - new TestCaseSupplier.TypedData(-1, DataTypes.INTEGER, "exp") - ), - "PowIntEvaluator[base=CastIntToDoubleEvaluator[v=Attribute[channel=0]], " - + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", - DataTypes.INTEGER, - equalTo(0) - ) - ), - new TestCaseSupplier( - "pow(123L, -1)", - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(123L, DataTypes.LONG, "base"), - new TestCaseSupplier.TypedData(-1, DataTypes.INTEGER, "exp") - ), - "PowLongEvaluator[base=CastLongToDoubleEvaluator[v=Attribute[channel=0]], " - + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", - DataTypes.LONG, - equalTo(0L) - ) - ), - new TestCaseSupplier( - "pow(123D, -1)", - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(123.0, DataTypes.DOUBLE, "base"), - new TestCaseSupplier.TypedData(-1, DataTypes.INTEGER, "exp") - ), - "PowDoubleEvaluator[base=Attribute[channel=0], exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", - DataTypes.DOUBLE, - equalTo(1D / 123D) - ) - ), - new TestCaseSupplier("pow(integer, 1)", () -> { - int base = randomInt(); - return new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(base, DataTypes.INTEGER, "base"), - new TestCaseSupplier.TypedData(1, DataTypes.INTEGER, "exp") - ), - "PowIntEvaluator[base=CastIntToDoubleEvaluator[v=Attribute[channel=0]], " - + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", - DataTypes.INTEGER, - equalTo(base) - ); - }), - new TestCaseSupplier( - "pow(1L, 1)", - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(1L, DataTypes.LONG, "base"), - new TestCaseSupplier.TypedData(1, DataTypes.INTEGER, "exp") - ), - "PowLongEvaluator[base=CastLongToDoubleEvaluator[v=Attribute[channel=0]], " - + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", - DataTypes.LONG, - equalTo(1L) - ) - ), - new TestCaseSupplier("pow(long, 1)", () -> { - // Avoid double precision loss - long base = randomLongBetween(-1L << 51, 1L << 51); - return new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(base, DataTypes.LONG, "base"), - new TestCaseSupplier.TypedData(1, DataTypes.INTEGER, "exp") - ), - "PowLongEvaluator[base=CastLongToDoubleEvaluator[v=Attribute[channel=0]], " - + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", - DataTypes.LONG, - equalTo(base) - ); - }), - new TestCaseSupplier("long-double overflow", () -> { - long base = 4339622345450989181L; // Not exactly representable as a double - long expected = 4339622345450989056L; - return new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(base, DataTypes.LONG, "base"), - new TestCaseSupplier.TypedData(1, DataTypes.INTEGER, "exp") - ), - "PowLongEvaluator[base=CastLongToDoubleEvaluator[v=Attribute[channel=0]], " - + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", - DataTypes.LONG, - equalTo(expected) - ); - }), - new TestCaseSupplier("pow(long, 0)", () -> { - long base = randomLong(); - return new 
TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(base, DataTypes.LONG, "base"), - new TestCaseSupplier.TypedData(0, DataTypes.INTEGER, "exp") - ), - "PowLongEvaluator[base=CastLongToDoubleEvaluator[v=Attribute[channel=0]], " - + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", - DataTypes.LONG, - equalTo(1L) - ); - }), - new TestCaseSupplier("pow(long, 2)", () -> { - long base = randomLongBetween(-1000, 1000); - return new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(base, DataTypes.LONG, "base"), - new TestCaseSupplier.TypedData(2, DataTypes.INTEGER, "exp") - ), - "PowLongEvaluator[base=CastLongToDoubleEvaluator[v=Attribute[channel=0]], " - + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", - DataTypes.LONG, - equalTo((long) Math.pow(base, 2)) - ); - }), - new TestCaseSupplier("pow(long, double)", () -> { - // Negative numbers to non-integer power are NaN - long base = randomLongBetween(1, 1000); - double exp = randomDoubleBetween(-10.0, 10.0, true); - double expected = Math.pow(base, exp); - TestCaseSupplier.TestCase testCase = new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(base, DataTypes.LONG, "base"), - new TestCaseSupplier.TypedData(exp, DataTypes.DOUBLE, "exp") - ), - "PowDoubleEvaluator[base=CastLongToDoubleEvaluator[v=Attribute[channel=0]], exponent=Attribute[channel=1]]", - DataTypes.DOUBLE, - equalTo(expected) - ); - return testCase; - }), - new TestCaseSupplier( - "pow(1D, 1)", - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(1D, DataTypes.DOUBLE, "base"), - new TestCaseSupplier.TypedData(1, DataTypes.INTEGER, "exp") - ), - "PowDoubleEvaluator[base=Attribute[channel=0], exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", - DataTypes.DOUBLE, - equalTo(1D) + List.of() + ) + ); + + // Add null cases before the rest of the error cases, so messages are correct. + suppliers = anyNullIsNull(true, suppliers); + + // Overflow should be null + suppliers.addAll( + TestCaseSupplier.forBinaryCastingToDouble( + "PowEvaluator", + "base", + "exponent", + (b, e) -> null, + // 143^143 is still representable, but 144^144 is infinite + 144d, + Double.POSITIVE_INFINITY, + 144d, + Double.POSITIVE_INFINITY, + List.of( + "Line -1:-1: evaluation of [] failed, treating result as null. 
Only first 20 failures recorded.", + "Line -1:-1: java.lang.ArithmeticException: invalid result when computing pow" ) - ), - new TestCaseSupplier("pow(double, 1)", () -> { - double base; - if (randomBoolean()) { - base = randomDouble(); - } else { - // Sometimes pick a large number - base = 1 / randomDouble(); - } - return new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(base, DataTypes.DOUBLE, "base"), - new TestCaseSupplier.TypedData(1, DataTypes.INTEGER, "exp") - ), - "PowDoubleEvaluator[base=Attribute[channel=0], exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", - DataTypes.DOUBLE, - equalTo(base) - ); - }), - new TestCaseSupplier("pow(double, 0)", () -> { - double base; - if (randomBoolean()) { - base = randomDouble(); - } else { - // Sometimes pick a large number - base = 1 / randomDouble(); - } - return new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(base, DataTypes.DOUBLE, "base"), - new TestCaseSupplier.TypedData(0, DataTypes.INTEGER, "exp") - ), - "PowDoubleEvaluator[base=Attribute[channel=0], exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", - DataTypes.DOUBLE, - equalTo(1D) - ); - }), - new TestCaseSupplier("pow(double, 2)", () -> { - double base = randomDoubleBetween(-1000, 1000, true); - return new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(base, DataTypes.DOUBLE, "base"), - new TestCaseSupplier.TypedData(2, DataTypes.INTEGER, "exp") - ), - "PowDoubleEvaluator[base=Attribute[channel=0], exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", - DataTypes.DOUBLE, - equalTo(Math.pow(base, 2)) - ); - }), - new TestCaseSupplier("pow(double, double)", () -> { - // Negative numbers to a non-integer power are NaN - double base = randomDoubleBetween(0, 1000, true); - double exp = randomDoubleBetween(-10.0, 10.0, true); - TestCaseSupplier.TestCase testCase = new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(base, DataTypes.DOUBLE, "base"), - new TestCaseSupplier.TypedData(exp, DataTypes.DOUBLE, "exp") - ), - "PowDoubleEvaluator[base=Attribute[channel=0], exponent=Attribute[channel=1]]", - DataTypes.DOUBLE, - equalTo(Math.pow(base, exp)) - ); - return testCase; - }) - )); + ) + ); + return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(suppliers)); } @Override protected DataType expectedType(List argTypes) { - var base = argTypes.get(0); - var exp = argTypes.get(1); - if (base.isRational() || exp.isRational()) { - return DataTypes.DOUBLE; - } else if (base.size() == Long.BYTES || exp.size() == Long.BYTES) { - return DataTypes.LONG; - } else { - return DataTypes.INTEGER; - } + return DataTypes.DOUBLE; } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundTests.java index 8f10e2d11c5f1..5ef485e8ba441 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundTests.java @@ -113,19 +113,19 @@ public void testExamples() { private Object process(Number val) { try ( - Block.Ref ref = evaluator(new Round(Source.EMPTY, field("val", typeOf(val)), null)).get(driverContext()).eval(row(List.of(val))) + Block block = evaluator(new Round(Source.EMPTY, field("val", typeOf(val)), 
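/* a null decimals argument exercises the one-argument ROUND, i.e. rounding to 0 decimal places */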
null)).get(driverContext()).eval(row(List.of(val))) ) { - return toJavaObject(ref.block(), 0); + return toJavaObject(block, 0); } } private Object process(Number val, int decimals) { try ( - Block.Ref ref = evaluator(new Round(Source.EMPTY, field("val", typeOf(val)), field("decimals", DataTypes.INTEGER))).get( + Block block = evaluator(new Round(Source.EMPTY, field("val", typeOf(val)), field("decimals", DataTypes.INTEGER))).get( driverContext() ).eval(row(List.of(val, decimals))) ) { - return toJavaObject(ref.block(), 0); + return toJavaObject(block, 0); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java index 8db6b1bbd0c93..328f94b9c87e7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java @@ -89,7 +89,7 @@ public void testCoalesceIsLazy() { if (child == evil) { return dvrCtx -> new EvalOperator.ExpressionEvaluator() { @Override - public Block.Ref eval(Page page) { + public Block eval(Page page) { throw new AssertionError("shouldn't be called"); } @@ -101,9 +101,9 @@ public void close() {} }; try ( EvalOperator.ExpressionEvaluator eval = exp.toEvaluator(map).get(driverContext()); - Block.Ref ref = eval.eval(row(testCase.getDataValues())) + Block block = eval.eval(row(testCase.getDataValues())) ) { - assertThat(toJavaObject(ref.block(), 0), testCase.getMatcher()); + assertThat(toJavaObject(block, 0), testCase.getMatcher()); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullTests.java index b80b64a0783ba..2c0864d0a8fdc 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullTests.java @@ -10,45 +10,64 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; -import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; -import org.elasticsearch.xpack.ql.expression.Literal; import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNotNull; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; import org.hamcrest.Matcher; +import java.util.ArrayList; import java.util.List; import java.util.function.Supplier; import static org.hamcrest.Matchers.equalTo; -public class IsNotNullTests extends AbstractScalarFunctionTestCase { +public class IsNotNullTests extends AbstractFunctionTestCase { public IsNotNullTests(@Name("TestCase") Supplier 
testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @ParametersFactory public static Iterable parameters() { - return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Keyword Not Null", () -> { - return new TestCaseSupplier.TestCase( - List.of(new TestCaseSupplier.TypedData(new BytesRef("cat"), DataTypes.KEYWORD, "exp")), - "IsNotNullEvaluator[field=Attribute[channel=0]]", - DataTypes.BOOLEAN, - equalTo(true) + List suppliers = new ArrayList<>(); + for (DataType type : EsqlDataTypes.types()) { + if (false == EsqlDataTypes.isRepresentable(type)) { + continue; + } + if (type != DataTypes.NULL) { + suppliers.add( + new TestCaseSupplier( + "non-null " + type.typeName(), + List.of(type), + () -> new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(randomLiteral(type).value(), type, "v")), + "IsNotNullEvaluator[field=Attribute[channel=0]]", + DataTypes.BOOLEAN, + equalTo(true) + ) + ) + ); + } + suppliers.add( + new TestCaseSupplier( + "null " + type.typeName(), + List.of(type), + () -> new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(null, type, "v")), + "IsNotNullEvaluator[field=Attribute[channel=0]]", + DataTypes.BOOLEAN, + equalTo(false) + ) + ) ); - }))); - } - - @Override - protected DataType expectedType(List argTypes) { - return DataTypes.BOOLEAN; + } + return parameterSuppliersFromTypedData(failureForCasesWithoutExamples(suppliers)); } @Override @@ -56,27 +75,11 @@ protected void assertSimpleWithNulls(List data, Block value, int nullBlo assertFalse(((BooleanBlock) value).asVector().getBoolean(0)); } - @Override - protected List argSpec() { - return List.of(required(EsqlDataTypes.types().toArray(DataType[]::new))); - } - @Override protected Expression build(Source source, List args) { return new IsNotNull(Source.EMPTY, args.get(0)); } - public void testAllTypes() { - for (DataType type : EsqlDataTypes.types()) { - if (DataTypes.isPrimitive(type) == false) { - continue; - } - Literal lit = randomLiteral(EsqlDataTypes.widenSmallNumericTypes(type)); - assertThat(new IsNotNull(Source.EMPTY, lit).fold(), equalTo(lit.value() != null)); - assertThat(new IsNotNull(Source.EMPTY, new Literal(Source.EMPTY, null, type)).fold(), equalTo(false)); - } - } - @Override protected Matcher allNullsMatcher() { return equalTo(false); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullTests.java index 3702d4814ce02..c6c67d67375db 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullTests.java @@ -10,45 +10,64 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; -import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; -import org.elasticsearch.xpack.ql.expression.Literal; import 
org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNull; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; import org.hamcrest.Matcher; +import java.util.ArrayList; import java.util.List; import java.util.function.Supplier; import static org.hamcrest.Matchers.equalTo; -public class IsNullTests extends AbstractScalarFunctionTestCase { +public class IsNullTests extends AbstractFunctionTestCase { public IsNullTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @ParametersFactory public static Iterable parameters() { - return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Keyword is Null", () -> { - return new TestCaseSupplier.TestCase( - List.of(new TestCaseSupplier.TypedData(new BytesRef("cat"), DataTypes.KEYWORD, "exp")), - "IsNullEvaluator[field=Attribute[channel=0]]", - DataTypes.BOOLEAN, - equalTo(false) + List suppliers = new ArrayList<>(); + for (DataType type : EsqlDataTypes.types()) { + if (false == EsqlDataTypes.isRepresentable(type)) { + continue; + } + if (type != DataTypes.NULL) { + suppliers.add( + new TestCaseSupplier( + "non-null " + type.typeName(), + List.of(type), + () -> new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(randomLiteral(type).value(), type, "v")), + "IsNullEvaluator[field=Attribute[channel=0]]", + DataTypes.BOOLEAN, + equalTo(false) + ) + ) + ); + } + suppliers.add( + new TestCaseSupplier( + "null " + type.typeName(), + List.of(type), + () -> new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(null, type, "v")), + "IsNullEvaluator[field=Attribute[channel=0]]", + DataTypes.BOOLEAN, + equalTo(true) + ) + ) ); - }))); - } - - @Override - protected DataType expectedType(List argTypes) { - return DataTypes.BOOLEAN; + } + return parameterSuppliersFromTypedData(failureForCasesWithoutExamples(suppliers)); } @Override @@ -56,27 +75,11 @@ protected void assertSimpleWithNulls(List data, Block value, int nullBlo assertTrue(((BooleanBlock) value).asVector().getBoolean(0)); } - @Override - protected List argSpec() { - return List.of(required(EsqlDataTypes.types().toArray(DataType[]::new))); - } - @Override protected Expression build(Source source, List args) { return new IsNull(Source.EMPTY, args.get(0)); } - public void testAllTypes() { - for (DataType type : EsqlDataTypes.types()) { - if (DataTypes.isPrimitive(type) == false) { - continue; - } - Literal lit = randomLiteral(EsqlDataTypes.widenSmallNumericTypes(type)); - assertThat(new IsNull(Source.EMPTY, lit).fold(), equalTo(lit.value() == null)); - assertThat(new IsNull(Source.EMPTY, new Literal(Source.EMPTY, null, type)).fold(), equalTo(true)); - } - } - @Override protected Matcher allNullsMatcher() { return equalTo(true); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java index fdb9387b410ff..229abbcdb187d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java @@ -8,8 +8,8 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.string; import org.apache.lucene.util.BytesRef; +import 
org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; -import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.ql.type.DataType; import java.util.ArrayList; @@ -18,11 +18,11 @@ import static org.hamcrest.Matchers.equalTo; -public abstract class AbstractTrimTests extends AbstractScalarFunctionTestCase { +public abstract class AbstractTrimTests extends AbstractFunctionTestCase { static Iterable parameters(String name, boolean trimLeading, boolean trimTrailing) { List suppliers = new ArrayList<>(); for (DataType type : strings()) { - suppliers.add(new TestCaseSupplier("no whitespace/" + type, () -> { + suppliers.add(new TestCaseSupplier("no whitespace/" + type, List.of(type), () -> { String text = randomAlphaOfLength(8); return testCase(name, type, text, text); })); @@ -40,17 +40,17 @@ static Iterable parameters(String name, boolean trimLeading, boolean t Map.entry("information separator one", new char[] { '\u001F' }), Map.entry("whitespace", new char[] { ' ', '\t', '\n', '\u000B', '\f', '\r', '\u001C', '\u001D', '\u001E', '\u001F' }) )) { - suppliers.add(new TestCaseSupplier(type + "/leading " + whitespaces.getKey(), () -> { + suppliers.add(new TestCaseSupplier(type + "/leading " + whitespaces.getKey(), List.of(type), () -> { String text = randomAlphaOfLength(8); String withWhitespace = randomWhiteSpace(whitespaces.getValue()) + text; return testCase(name, type, withWhitespace, trimLeading ? text : withWhitespace); })); - suppliers.add(new TestCaseSupplier(type + "/trailing " + whitespaces.getKey(), () -> { + suppliers.add(new TestCaseSupplier(type + "/trailing " + whitespaces.getKey(), List.of(type), () -> { String text = randomAlphaOfLength(8); String withWhitespace = text + randomWhiteSpace(whitespaces.getValue()); return testCase(name, type, withWhitespace, trimTrailing ? text : withWhitespace); })); - suppliers.add(new TestCaseSupplier(type + "/leading and trailing " + whitespaces.getKey(), () -> { + suppliers.add(new TestCaseSupplier(type + "/leading and trailing " + whitespaces.getKey(), List.of(type), () -> { String text = randomAlphaOfLength(8); String leadingWhitespace = randomWhiteSpace(whitespaces.getValue()); String trailingWhitespace = randomWhiteSpace(whitespaces.getValue()); @@ -61,13 +61,13 @@ static Iterable parameters(String name, boolean trimLeading, boolean t (trimLeading ? "" : leadingWhitespace) + text + (trimTrailing ? 
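/* the expected value keeps exactly the whitespace runs the function under test does not trim */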
"" : trailingWhitespace) ); })); - suppliers.add(new TestCaseSupplier(type + "/all " + whitespaces.getKey(), () -> { + suppliers.add(new TestCaseSupplier(type + "/all " + whitespaces.getKey(), List.of(type), () -> { String text = randomWhiteSpace(whitespaces.getValue()); return testCase(name, type, text, ""); })); } } - return parameterSuppliersFromTypedData(suppliers); + return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); } private static TestCaseSupplier.TestCase testCase(String name, DataType type, String data, String expected) { @@ -79,16 +79,6 @@ private static TestCaseSupplier.TestCase testCase(String name, DataType type, St ); } - @Override - protected final List argSpec() { - return List.of(required(strings())); - } - - @Override - protected final DataType expectedType(List argTypes) { - return argTypes.get(0); - } - private static String randomWhiteSpace(char[] whitespaces) { char[] randomWhitespace = new char[randomIntBetween(1, 8)]; for (int i = 0; i < randomWhitespace.length; i++) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java index 32e894e5282d5..8b05b7478f570 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java @@ -135,9 +135,9 @@ public void testSomeConstant() { try ( EvalOperator.ExpressionEvaluator eval = evaluator(expression).get(driverContext()); - Block.Ref ref = eval.eval(row(fieldValues)) + Block block = eval.eval(row(fieldValues)) ) { - assertThat(toJavaObject(ref.block(), 0), testCase.getMatcher()); + assertThat(toJavaObject(block, 0), testCase.getMatcher()); } } @@ -150,7 +150,7 @@ private void testOversized(int totalLen, List mix, List fiel Exception e = expectThrows(EsqlClientException.class, () -> { try ( EvalOperator.ExpressionEvaluator eval = evaluator(expression).get(driverContext()); - Block.Ref ref = eval.eval(row(fieldValues)); + Block block = eval.eval(row(fieldValues)); ) {} }); assertThat(e.getMessage(), is("concatenating more than [1048576] bytes is not supported")); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java index edbfec2bc5d85..316bb679f2b70 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java @@ -201,12 +201,12 @@ private String process(String str, int length) { EvalOperator.ExpressionEvaluator eval = evaluator( new Left(Source.EMPTY, field("str", DataTypes.KEYWORD), new Literal(Source.EMPTY, length, DataTypes.INTEGER)) ).get(driverContext()); - Block.Ref ref = eval.eval(row(List.of(new BytesRef(str)))) + Block block = eval.eval(row(List.of(new BytesRef(str)))) ) { - if (ref.block().isNull(0)) { + if (block.isNull(0)) { return null; } - BytesRef resultByteRef = ((BytesRef) toJavaObject(ref.block(), 0)); + BytesRef resultByteRef = ((BytesRef) toJavaObject(block, 0)); return resultByteRef == null ? 
null : resultByteRef.utf8ToString(); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java index 04b8dd4079028..0eeb312512b30 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java @@ -203,12 +203,12 @@ private String process(String str, int length) { EvalOperator.ExpressionEvaluator eval = evaluator( new Right(Source.EMPTY, field("str", DataTypes.KEYWORD), new Literal(Source.EMPTY, length, DataTypes.INTEGER)) ).get(driverContext()); - Block.Ref ref = eval.eval(row(List.of(new BytesRef(str)))) + Block block = eval.eval(row(List.of(new BytesRef(str)))) ) { - if (ref.block().isNull(0)) { + if (block.isNull(0)) { return null; } - BytesRef resultByteRef = ((BytesRef) toJavaObject(ref.block(), 0)); + BytesRef resultByteRef = ((BytesRef) toJavaObject(block, 0)); return resultByteRef == null ? null : resultByteRef.utf8ToString(); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java index 3926cc46dd883..abaa382637882 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java @@ -96,8 +96,8 @@ public void testConstantDelimiter() { */ assert ':' == 58; assertThat(eval.toString(), equalTo("SplitSingleByteEvaluator[str=Attribute[channel=0], delim=58]")); - try (Block.Ref ref = eval.eval(new Page(BytesRefBlock.newConstantBlockWith(new BytesRef("foo:bar"), 1)))) { - assertThat(toJavaObject(ref.block(), 0), equalTo(List.of(new BytesRef("foo"), new BytesRef("bar")))); + try (Block block = eval.eval(new Page(BytesRefBlock.newConstantBlockWith(new BytesRef("foo:bar"), 1)))) { + assertThat(toJavaObject(block, 0), equalTo(List.of(new BytesRef("foo"), new BytesRef("bar")))); } } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java index 722b2bea8060a..fd9cb29ec62c4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java @@ -139,9 +139,9 @@ private String process(String str, int start, Integer length) { length == null ? null : new Literal(Source.EMPTY, length, DataTypes.INTEGER) ) ).get(driverContext()); - Block.Ref ref = eval.eval(row(List.of(new BytesRef(str)))) + Block block = eval.eval(row(List.of(new BytesRef(str)))) ) { - return ref.block().isNull(0) ? null : ((BytesRef) toJavaObject(ref.block(), 0)).utf8ToString(); + return block.isNull(0) ? 
null : ((BytesRef) toJavaObject(block, 0)).utf8ToString(); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/AbstractBinaryOperatorTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/AbstractBinaryOperatorTestCase.java index 6b81fbb1a2fdd..cc677787c50c6 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/AbstractBinaryOperatorTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/AbstractBinaryOperatorTestCase.java @@ -94,8 +94,8 @@ public final void testApplyToAllTypes() { Source src = new Source(Location.EMPTY, lhsType.typeName() + " " + rhsType.typeName()); if (isRepresentable(lhsType) && isRepresentable(rhsType)) { op = build(src, field("lhs", lhsType), field("rhs", rhsType)); - try (Block.Ref ref = evaluator(op).get(driverContext()).eval(row(List.of(lhs.value(), rhs.value())))) { - result = toJavaObject(ref.block(), 0); + try (Block block = evaluator(op).get(driverContext()).eval(row(List.of(lhs.value(), rhs.value())))) { + result = toJavaObject(block, 0); } } else { op = build(src, lhs, rhs); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegTests.java index dc365234351c6..727f0dcb9804d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegTests.java @@ -180,8 +180,8 @@ public void testEdgeCases() { private Object process(Object val) { if (testCase.allTypesAreRepresentable()) { Neg neg = new Neg(Source.EMPTY, field("val", typeOf(val))); - try (Block.Ref ref = evaluator(neg).get(driverContext()).eval(row(List.of(val)))) { - return toJavaObject(ref.block(), 0); + try (Block block = evaluator(neg).get(driverContext()).eval(row(List.of(val)))) { + return toJavaObject(block, 0); } } else { // just fold if type is not representable Neg neg = new Neg(Source.EMPTY, new Literal(Source.EMPTY, val, typeOf(val))); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index f63026c28279a..352dccc046588 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -780,7 +780,7 @@ public void testCombineOrderBy() { | sort salary"""); var topN = as(plan, TopN.class); - assertThat(orderNames(topN), contains("salary", "emp_no")); + assertThat(orderNames(topN), contains("salary")); as(topN.child(), EsRelation.class); } @@ -792,7 +792,7 @@ public void testCombineOrderByThroughEval() { | sort x"""); var topN = as(plan, TopN.class); - assertThat(orderNames(topN), contains("x", "emp_no")); + assertThat(orderNames(topN), contains("x")); var eval = as(topN.child(), Eval.class); as(eval.child(), EsRelation.class); } @@ -806,7 +806,7 @@ public void testCombineOrderByThroughEvalWithTwoDefs() { | sort z"""); var topN = as(plan, TopN.class); - assertThat(orderNames(topN), contains("z", 
"emp_no")); + assertThat(orderNames(topN), contains("z")); var eval = as(topN.child(), Eval.class); assertThat(Expressions.names(eval.fields()), contains("x", "y", "z")); as(eval.child(), EsRelation.class); @@ -820,7 +820,7 @@ public void testCombineOrderByThroughDissect() { | sort x"""); var topN = as(plan, TopN.class); - assertThat(orderNames(topN), contains("x", "emp_no")); + assertThat(orderNames(topN), contains("x")); var dissect = as(topN.child(), Dissect.class); as(dissect.child(), EsRelation.class); } @@ -833,7 +833,7 @@ public void testCombineOrderByThroughGrok() { | sort x"""); var topN = as(plan, TopN.class); - assertThat(orderNames(topN), contains("x", "emp_no")); + assertThat(orderNames(topN), contains("x")); var grok = as(topN.child(), Grok.class); as(grok.child(), EsRelation.class); } @@ -847,7 +847,7 @@ public void testCombineOrderByThroughProject() { var keep = as(plan, Project.class); var topN = as(keep.child(), TopN.class); - assertThat(orderNames(topN), contains("salary", "emp_no")); + assertThat(orderNames(topN), contains("salary")); as(topN.child(), EsRelation.class); } @@ -862,7 +862,7 @@ public void testCombineOrderByThroughProjectAndEval() { var keep = as(plan, Project.class); var topN = as(keep.child(), TopN.class); - assertThat(orderNames(topN), contains("salary", "emp_no")); + assertThat(orderNames(topN), contains("salary")); var eval = as(topN.child(), Eval.class); assertThat(Expressions.names(eval.fields()), contains("e")); as(eval.child(), EsRelation.class); @@ -878,7 +878,7 @@ public void testCombineOrderByThroughProjectWithAlias() { var keep = as(plan, Project.class); var topN = as(keep.child(), TopN.class); - assertThat(orderNames(topN), contains("salary", "emp_no")); + assertThat(orderNames(topN), contains("salary")); as(topN.child(), EsRelation.class); } @@ -890,7 +890,7 @@ public void testCombineOrderByThroughFilter() { | sort salary"""); var topN = as(plan, TopN.class); - assertThat(orderNames(topN), contains("salary", "emp_no")); + assertThat(orderNames(topN), contains("salary")); var filter = as(topN.child(), Filter.class); as(filter.child(), EsRelation.class); } @@ -998,7 +998,7 @@ public void testMultipleMvExpandWithSortAndLimit() { var keep = as(plan, EsqlProject.class); var topN = as(keep.child(), TopN.class); assertThat(topN.limit().fold(), equalTo(5)); - assertThat(orderNames(topN), contains("salary", "first_name")); + assertThat(orderNames(topN), contains("salary")); var limit = as(topN.child(), Limit.class); assertThat(limit.limit().fold(), equalTo(5)); var mvExp = as(limit.child(), MvExpand.class); @@ -1313,10 +1313,10 @@ public void testCombineMultipleOrderByAndLimits() { var keep = as(plan, Project.class); var topN = as(keep.child(), TopN.class); - assertThat(orderNames(topN), contains("emp_no", "first_name")); + assertThat(orderNames(topN), contains("emp_no")); var filter = as(topN.child(), Filter.class); var topN2 = as(filter.child(), TopN.class); - assertThat(orderNames(topN2), contains("salary", "emp_no")); + assertThat(orderNames(topN2), contains("salary")); as(topN2.child(), EsRelation.class); } @@ -1357,12 +1357,6 @@ public void testDontPruneSameFieldDifferentDirectionSortClauses() { new FieldAttribute(EMPTY, "emp_no", mapping.get("emp_no")), Order.OrderDirection.DESC, Order.NullsPosition.FIRST - ), - new Order( - EMPTY, - new FieldAttribute(EMPTY, "salary", mapping.get("salary")), - Order.OrderDirection.ASC, - Order.NullsPosition.LAST ) ) ); @@ -1406,12 +1400,6 @@ public void testPruneRedundantSortClauses() { new 
FieldAttribute(EMPTY, "emp_no", mapping.get("emp_no")), Order.OrderDirection.DESC, Order.NullsPosition.LAST - ), - new Order( - EMPTY, - new FieldAttribute(EMPTY, "salary", mapping.get("salary")), - Order.OrderDirection.DESC, - Order.NullsPosition.LAST ) ) ); @@ -1436,12 +1424,6 @@ public void testDontPruneSameFieldDifferentDirectionSortClauses_UsingAlias() { new FieldAttribute(EMPTY, "emp_no", mapping.get("emp_no")), Order.OrderDirection.ASC, Order.NullsPosition.LAST - ), - new Order( - EMPTY, - new FieldAttribute(EMPTY, "emp_no", mapping.get("emp_no")), - Order.OrderDirection.DESC, - Order.NullsPosition.FIRST ) ) ); @@ -2472,6 +2454,73 @@ public void testMvExpandFoldable() { var row = as(expand.child(), Row.class); } + /** + * Expected + * Limit[500[INTEGER]] + * \_Aggregate[[a{r}#2],[COUNT([2a][KEYWORD]) AS bar]] + * \_Row[[1[INTEGER] AS a]] + */ + public void testRenameStatsDropGroup() { + LogicalPlan plan = optimizedPlan(""" + row a = 1 + | rename a AS foo + | stats bar = count(*) by foo + | drop foo"""); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(Expressions.names(agg.groupings()), contains("a")); + var row = as(agg.child(), Row.class); + } + + /** + * Expected + * Limit[500[INTEGER]] + * \_Aggregate[[a{r}#2, bar{r}#8],[COUNT([2a][KEYWORD]) AS baz, b{r}#4 AS bar]] + * \_Row[[1[INTEGER] AS a, 2[INTEGER] AS b]] + */ + public void testMultipleRenameStatsDropGroup() { + LogicalPlan plan = optimizedPlan(""" + row a = 1, b = 2 + | rename a AS foo, b as bar + | stats baz = count(*) by foo, bar + | drop foo"""); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(Expressions.names(agg.groupings()), contains("a", "bar")); + var row = as(agg.child(), Row.class); + } + + /** + * Expected + * Limit[500[INTEGER]] + * \_Aggregate[[emp_no{f}#11, bar{r}#4],[MAX(salary{f}#16) AS baz, gender{f}#13 AS bar]] + * \_EsRelation[test][_meta_field{f}#17, emp_no{f}#11, first_name{f}#12, ..] 
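+ * (notation: name{f}#n is a field attribute read from the index, name{r}#n a reference attribute created by the plan, and #n the attribute id)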
+ */ + public void testMultipleRenameStatsDropGroupMultirow() { + LogicalPlan plan = optimizedPlan(""" + from test + | rename emp_no AS foo, gender as bar + | stats baz = max(salary) by foo, bar + | drop foo"""); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(Expressions.names(agg.groupings()), contains("emp_no", "bar")); + var row = as(agg.child(), EsRelation.class); + } + + public void testLimitZeroUsesLocalRelation() { + LogicalPlan plan = optimizedPlan(""" + from test + | stats count=count(*) + | sort count desc + | limit 0"""); + + assertThat(plan, instanceOf(LocalRelation.class)); + } + private T aliased(Expression exp, Class clazz) { var alias = as(exp, Alias.class); return as(alias.child(), clazz); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java index 985127731a0d1..53e2a2e412fcd 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java @@ -636,6 +636,7 @@ public void testDissectPattern() { "from a | dissect foo \"%{bar}\" append_separator=3", "Invalid value for dissect append_separator: expected a string, but was [3]" ); + expectError("from a | dissect foo \"%{}\"", "Invalid pattern for dissect: [%{}]"); } public void testGrokPattern() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java index f2cddce199928..c8c8029f994cc 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java @@ -24,6 +24,8 @@ import org.elasticsearch.compute.lucene.LuceneTopNSourceOperator; import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; import org.elasticsearch.index.cache.query.TrivialQueryCachingPolicy; import org.elasticsearch.index.mapper.MapperServiceTestCase; import org.elasticsearch.search.internal.ContextIndexSearcher; @@ -67,13 +69,15 @@ public static Iterable parameters() throws Exception { private Directory directory = newDirectory(); private IndexReader reader; + private final ArrayList releasables = new ArrayList<>(); + public LocalExecutionPlannerTests(@Name("estimatedRowSizeIsHuge") boolean estimatedRowSizeIsHuge) { this.estimatedRowSizeIsHuge = estimatedRowSizeIsHuge; } @After public void closeIndex() throws IOException { - IOUtils.close(reader, directory); + IOUtils.close(reader, directory, () -> Releasables.close(releasables), releasables::clear); } public void testLuceneSourceOperatorHugeRowSize() throws IOException { @@ -157,6 +161,7 @@ private EsPhysicalOperationProviders esPhysicalOperationProviders() throws IOExc new TestSearchContext(createSearchExecutionContext(createMapperService(mapping(b -> {})), searcher), null, searcher) ); } + releasables.addAll(searchContexts); return new EsPhysicalOperationProviders(searchContexts); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java index cc5b05537c4c6..a6eacae2857e7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java @@ -57,8 +57,11 @@ interface Setup { public static List params() { List params = new ArrayList<>(); for (String fieldType : new String[] { "long", "integer", "short", "byte", "double", "float", "keyword" }) { - params.add(new Object[] { new StandardSetup(fieldType, false) }); - params.add(new Object[] { new StandardSetup(fieldType, true) }); + for (boolean multivaluedField : new boolean[] { true, false }) { + for (boolean allowEmpty : new boolean[] { true, false }) { + params.add(new Object[] { new StandardSetup(fieldType, multivaluedField, allowEmpty, 100) }); + } + } } params.add(new Object[] { new FieldMissingSetup() }); return params; @@ -196,7 +199,7 @@ private void testCase(SingleValueQuery.Builder builder, boolean rewritesToMatchN } } - private record StandardSetup(String fieldType, boolean multivaluedField) implements Setup { + private record StandardSetup(String fieldType, boolean multivaluedField, boolean empty, int count) implements Setup { @Override public XContentBuilder mapping(XContentBuilder builder) throws IOException { builder.startObject("i").field("type", "long").endObject(); @@ -207,27 +210,32 @@ public XContentBuilder mapping(XContentBuilder builder) throws IOException { @Override public List> build(RandomIndexWriter iw) throws IOException { List> fieldValues = new ArrayList<>(100); - for (int i = 0; i < 100; i++) { - // i == 10 forces at least one multivalued field when we're configured for multivalued fields - boolean makeMultivalued = multivaluedField && (i == 10 || randomBoolean()); - List values; - if (makeMultivalued) { - int count = between(2, 10); - Set set = new HashSet<>(count); - while (set.size() < count) { - set.add(randomValue()); - } - values = List.copyOf(set); - } else { - values = List.of(randomValue()); - } + for (int i = 0; i < count; i++) { + List values = values(i); fieldValues.add(values); iw.addDocument(docFor(i, values)); } - return fieldValues; } + private List values(int i) { + // i == 10 forces at least one multivalued field when we're configured for multivalued fields + boolean makeMultivalued = multivaluedField && (i == 10 || randomBoolean()); + if (makeMultivalued) { + int count = between(2, 10); + Set set = new HashSet<>(count); + while (set.size() < count) { + set.add(randomValue()); + } + return List.copyOf(set); + } + // i == 0 forces at least one empty field when we're configured for empty fields + if (empty && (i == 0 || randomBoolean())) { + return List.of(); + } + return List.of(randomValue()); + } + private Object randomValue() { return switch (fieldType) { case "long" -> randomLong(); @@ -279,7 +287,7 @@ public void assertStats(SingleValueQuery.Builder builder, boolean subHasTwoPhase assertThat(builder.stats().bytesApprox(), equalTo(0)); assertThat(builder.stats().bytesNoApprox(), equalTo(0)); - if (multivaluedField) { + if (multivaluedField || empty) { assertThat(builder.stats().numericSingle(), greaterThanOrEqualTo(0)); if (subHasTwoPhase) { assertThat(builder.stats().numericMultiNoApprox(), equalTo(0)); @@ -300,7 +308,7 @@ public void assertStats(SingleValueQuery.Builder builder, boolean subHasTwoPhase assertThat(builder.stats().numericMultiApprox(), equalTo(0)); 
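// keyword fields go through the ordinals path, so the numeric and bytes counters should stay pinned at zero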
assertThat(builder.stats().bytesApprox(), equalTo(0)); assertThat(builder.stats().bytesNoApprox(), equalTo(0)); - if (multivaluedField) { + if (multivaluedField || empty) { assertThat(builder.stats().ordinalsSingle(), greaterThanOrEqualTo(0)); if (subHasTwoPhase) { assertThat(builder.stats().ordinalsMultiNoApprox(), equalTo(0)); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java index 5736353223f6e..0aaf4a1a18e32 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java @@ -10,9 +10,11 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.parser.EsqlParser; +import java.util.Collections; import java.util.Set; import static org.elasticsearch.xpack.ql.index.IndexResolver.ALL_FIELDS; +import static org.elasticsearch.xpack.ql.index.IndexResolver.INDEX_METADATA_FIELD; import static org.hamcrest.Matchers.equalTo; public class IndexResolverFieldNamesTests extends ESTestCase { @@ -27,6 +29,10 @@ public void testBasicFromCommandWithMetadata() { assertFieldNames("from test [metadata _index, _id, _version]", ALL_FIELDS); } + public void testBasicEvalAndDrop() { + assertFieldNames("from test | eval x = 1 | drop x", ALL_FIELDS); + } + public void testSimple1() { assertFieldNames( "from employees | sort emp_no | keep emp_no, still_hired | limit 3", @@ -278,18 +284,24 @@ public void testMultivalueInput1() { | keep emp_no, a, b, c""", Set.of("emp_no", "emp_no.*", "job_positions", "job_positions.*")); } + public void testLimitZero() { + assertFieldNames(""" + FROM employees + | LIMIT 0""", ALL_FIELDS); + } + public void testDocsDropHeight() { assertFieldNames(""" FROM employees | DROP height - | LIMIT 0""", Set.of("*")); + | LIMIT 0""", ALL_FIELDS); } public void testDocsDropHeightWithWildcard() { assertFieldNames(""" FROM employees | DROP height* - | LIMIT 0""", Set.of("*")); + | LIMIT 0""", ALL_FIELDS); } public void testDocsEval() { @@ -312,7 +324,7 @@ public void testDocsKeepDoubleWildcard() { assertFieldNames(""" FROM employees | KEEP h*, * - | LIMIT 0""", Set.of("*")); + | LIMIT 0""", ALL_FIELDS); } public void testDocsRename() { @@ -339,7 +351,7 @@ public void testDocsStats() { } public void testSortWithLimitOne_DropHeight() { - assertFieldNames("from employees | sort languages | limit 1 | drop height*", Set.of("*")); + assertFieldNames("from employees | sort languages | limit 1 | drop height*", ALL_FIELDS); } public void testDropAllColumns() { @@ -378,7 +390,7 @@ public void testUselessEnrich() { from employees | eval x = "abc" | enrich languages_policy on x - | limit 1""", Set.of("*")); + | limit 1""", ALL_FIELDS); } public void testSimpleSortLimit() { @@ -608,11 +620,11 @@ public void testMultivalueInput() { } public void testSelectAll() { - assertFieldNames("FROM apps [metadata _id]", Set.of("*")); + assertFieldNames("FROM apps [metadata _id]", ALL_FIELDS); } public void testFilterById() { - assertFieldNames("FROM apps [metadata _id]| WHERE _id == \"4\"", Set.of("*")); + assertFieldNames("FROM apps [metadata _id]| WHERE _id == \"4\"", ALL_FIELDS); } public void testKeepId() { @@ -640,7 +652,7 @@ public void testConcatId() { } public void testStatsOnId() { - assertFieldNames("FROM apps [metadata _id] | stats c = count(_id), d = 
count_distinct(_id)", Set.of("*")); + assertFieldNames("FROM apps [metadata _id] | stats c = count(_id), d = count_distinct(_id)", INDEX_METADATA_FIELD); } public void testStatsOnIdByGroup() { @@ -758,7 +770,7 @@ public void testRenameDrop() { | rename hire_date as x, emp_no as y | drop first_name, last_name, gender, birth_date, salary, languages*, height*, still_hired, avg_worked_seconds, job_positions, is_rehired, salary_change* - | limit 5""", Set.of("*")); + | limit 5""", ALL_FIELDS); } public void testMaxOfLong() { @@ -960,7 +972,7 @@ public void testRenameReuseAlias() { assertFieldNames(""" from test | rename emp_no as e, first_name as e - """, Set.of("*")); + """, ALL_FIELDS); } public void testIfDuplicateNamesGroupingHasPriority() { @@ -1027,7 +1039,7 @@ public void testEvalOverride() { } public void testBasicWildcardKeep() { - assertFieldNames("from test | keep *", Set.of("*")); + assertFieldNames("from test | keep *", ALL_FIELDS); } public void testBasicWildcardKeep2() { @@ -1041,7 +1053,7 @@ public void testWildcardKeep() { assertFieldNames(""" from test | keep first_name, *, last_name - """, Set.of("*")); + """, ALL_FIELDS); } public void testProjectThenDropName() { @@ -1073,21 +1085,21 @@ public void testProjectDropPattern() { from test | keep * | drop *_name - """, Set.of("*")); + """, ALL_FIELDS); } public void testProjectDropNoStarPattern() { assertFieldNames(""" from test | drop *_name - """, Set.of("*")); + """, ALL_FIELDS); } public void testProjectOrderPatternWithRest() { assertFieldNames(""" from test | keep *name, *, emp_no - """, Set.of("*")); + """, ALL_FIELDS); } public void testProjectDropPatternAndKeepOthers() { @@ -1105,18 +1117,70 @@ public void testAliasesThatGetDropped() { | where first_name like "%A" | eval first_name = concat(first_name, "xyz") | drop first_name - """, Set.of("*")); + """, ALL_FIELDS); } public void testWhereClauseNoProjection() { assertFieldNames(""" from test | where first_name is not null - """, Set.of("*")); + """, ALL_FIELDS); + } + + public void testCountAllGrouped() { + assertFieldNames(""" + from test + | stats c = count(*) by languages + | rename languages as l + | sort l DESC + """, Set.of("languages", "languages.*")); + } + + public void testCountAllAndOtherStatGrouped() { + assertFieldNames(""" + from test + | stats c = count(*), min = min(emp_no) by languages + | sort languages + """, Set.of("emp_no", "emp_no.*", "languages", "languages.*")); + } + + public void testCountAllWithEval() { + assertFieldNames(""" + from test + | rename languages as l + | stats min = min(salary) by l + | eval x = min + 1 + | stats ca = count(*), cx = count(x) by l + | sort l + """, Set.of("languages", "languages.*", "salary", "salary.*")); + } + + public void testCountStar() { + assertFieldNames(""" + from test + | stats count=count(*) + | sort count desc + | limit 0 + """, INDEX_METADATA_FIELD); + } + + public void testEnrichOnDefaultFieldWithKeep() { + Set<String> fieldNames = EsqlSession.fieldNames(parser.createStatement(""" + from employees + | enrich languages_policy + | keep emp_no"""), Set.of("language_name")); + assertThat(fieldNames, equalTo(Set.of("emp_no", "emp_no.*", "language_name", "language_name.*"))); + } + + public void testEnrichOnDefaultField() { + Set<String> fieldNames = EsqlSession.fieldNames(parser.createStatement(""" + from employees + | enrich languages_policy"""), Set.of("language_name")); + assertThat(fieldNames, equalTo(ALL_FIELDS)); } private void assertFieldNames(String query, Set<String> expected) { - Set<String> fieldNames =
EsqlSession.fieldNames(parser.createStatement(query)); + Set<String> fieldNames = EsqlSession.fieldNames(parser.createStatement(query), Collections.emptySet()); assertThat(fieldNames, equalTo(expected)); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistryTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistryTests.java index 1b8ab355cc2eb..e4fa78fac0dee 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistryTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistryTests.java @@ -57,7 +57,8 @@ private void resolve(String esTypeName, TimeSeriesParams.MetricType metricType, "idx-*", caps, EsqlSession::specificValidity, - IndexResolver.PRESERVE_PROPERTIES + IndexResolver.PRESERVE_PROPERTIES, + null ); EsField f = resolution.get().mapping().get(fieldCap.getName()); diff --git a/x-pack/plugin/esql/src/test/resources/empty_field_caps_response.json b/x-pack/plugin/esql/src/test/resources/empty_field_caps_response.json new file mode 100644 index 0000000000000..fe8b293e3c0b9 --- /dev/null +++ b/x-pack/plugin/esql/src/test/resources/empty_field_caps_response.json @@ -0,0 +1,16 @@ +{ + "indices": [ + "test1", + "test2" + ], + "fields": { + "_index": { + "_index": { + "type": "_index", + "metadata_field": true, + "searchable": true, + "aggregatable": true + } + } + } +} diff --git a/x-pack/plugin/fleet/src/internalClusterTest/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsActionIT.java b/x-pack/plugin/fleet/src/internalClusterTest/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsActionIT.java index 4f2a623fa2def..5c87fe8dd6c19 100644 --- a/x-pack/plugin/fleet/src/internalClusterTest/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsActionIT.java +++ b/x-pack/plugin/fleet/src/internalClusterTest/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsActionIT.java @@ -74,7 +74,7 @@ public void testGetGlobalCheckpoints() throws Exception { final int totalDocuments = shards * 3; for (int i = 0; i < totalDocuments; ++i) { - client().prepareIndex(indexName).setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + prepareIndex(indexName).setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); } final GetGlobalCheckpointsAction.Request request2 = new GetGlobalCheckpointsAction.Request( @@ -118,7 +118,7 @@ public void testPollGlobalCheckpointAdvancement() throws Exception { final int totalDocuments = between(25, 50); new Thread(() -> { for (int i = 0; i < totalDocuments; ++i) { - client().prepareIndex(indexName).setId(Integer.toString(i)).setSource("{}", XContentType.JSON).execute(); + prepareIndex(indexName).setId(Integer.toString(i)).setSource("{}", XContentType.JSON).execute(); } }).start(); @@ -147,7 +147,7 @@ public void testPollGlobalCheckpointAdvancementTimeout() { final int totalDocuments = 30; for (int i = 0; i < totalDocuments; ++i) { - client().prepareIndex(indexName).setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + prepareIndex(indexName).setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); } final GetGlobalCheckpointsAction.Request request = new GetGlobalCheckpointsAction.Request( @@ -258,7 +258,7 @@ public void testWaitOnIndexCreated() throws Exception { indicesAdmin().prepareCreate(indexName) .setSettings(indexSettings(1, 0).put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(),
Translog.Durability.REQUEST)) .get(); - client().prepareIndex(indexName).setId(Integer.toString(0)).setSource("{}", XContentType.JSON).get(); + prepareIndex(indexName).setId(Integer.toString(0)).setSource("{}", XContentType.JSON).get(); GetGlobalCheckpointsAction.Response response = future.actionGet(); long elapsed = TimeValue.timeValueNanos(System.nanoTime() - start).seconds(); @@ -335,7 +335,7 @@ public void testWaitOnPrimaryShardsReady() throws Exception { ActionFuture<GetGlobalCheckpointsAction.Response> future = client().execute(GetGlobalCheckpointsAction.INSTANCE, request); Thread.sleep(randomIntBetween(10, 100)); updateIndexSettings(Settings.builder().put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "node", ""), indexName); - client().prepareIndex(indexName).setId(Integer.toString(0)).setSource("{}", XContentType.JSON).get(); + prepareIndex(indexName).setId(Integer.toString(0)).setSource("{}", XContentType.JSON).get(); GetGlobalCheckpointsAction.Response response = future.actionGet(); long elapsed = TimeValue.timeValueNanos(System.nanoTime() - start).seconds(); @@ -361,7 +361,7 @@ public void testWaitOnPrimaryShardThrottled() throws Exception { Thread.sleep(randomIntBetween(10, 100)); updateClusterSettings(Settings.builder().putNull(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.getKey())); - client().prepareIndex(indexName).setId(Integer.toString(0)).setSource("{}", XContentType.JSON).get(); + prepareIndex(indexName).setId(Integer.toString(0)).setSource("{}", XContentType.JSON).get(); var response = future.actionGet(); long elapsed = TimeValue.timeValueNanos(System.nanoTime() - start).seconds(); diff --git a/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/rest/RestFleetMultiSearchAction.java b/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/rest/RestFleetMultiSearchAction.java index 308af9b89e074..c177bea2e63ca 100644 --- a/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/rest/RestFleetMultiSearchAction.java +++ b/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/rest/RestFleetMultiSearchAction.java @@ -7,9 +7,9 @@ package org.elasticsearch.xpack.fleet.rest; -import org.elasticsearch.action.search.MultiSearchAction; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.TransportMultiSearchAction; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; @@ -18,7 +18,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.rest.action.search.RestMultiSearchAction; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.usage.SearchUsageHolder; @@ -112,7 +112,11 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli return channel -> { final RestCancellableNodeClient cancellableClient = new RestCancellableNodeClient(client, request.getHttpChannel()); - cancellableClient.execute(MultiSearchAction.INSTANCE, multiSearchRequest, new RestChunkedToXContentListener<>(channel)); + cancellableClient.execute( + TransportMultiSearchAction.TYPE, + multiSearchRequest, + new RestRefCountedChunkedToXContentListener<>(channel) +
); }; } diff --git a/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/rest/RestFleetSearchAction.java b/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/rest/RestFleetSearchAction.java index 178db0229ca58..3a09fe1d18382 100644 --- a/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/rest/RestFleetSearchAction.java +++ b/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/rest/RestFleetSearchAction.java @@ -8,8 +8,8 @@ package org.elasticsearch.xpack.fleet.rest; import org.elasticsearch.Version; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.core.TimeValue; @@ -96,7 +96,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli return channel -> { RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); - cancelClient.execute(SearchAction.INSTANCE, searchRequest, new RestChunkedToXContentListener<>(channel)); + cancelClient.execute(TransportSearchAction.TYPE, searchRequest, new RestChunkedToXContentListener<>(channel)); }; } diff --git a/x-pack/plugin/fleet/src/test/java/org/elasticsearch/xpack/fleet/action/PostSecretResponseTests.java b/x-pack/plugin/fleet/src/test/java/org/elasticsearch/xpack/fleet/action/PostSecretResponseTests.java index 6100bdd30c9cf..c47c2e553a509 100644 --- a/x-pack/plugin/fleet/src/test/java/org/elasticsearch/xpack/fleet/action/PostSecretResponseTests.java +++ b/x-pack/plugin/fleet/src/test/java/org/elasticsearch/xpack/fleet/action/PostSecretResponseTests.java @@ -24,6 +24,7 @@ protected PostSecretResponse createTestInstance() { @Override protected PostSecretResponse mutateInstance(PostSecretResponse instance) { - return new PostSecretResponse(randomAlphaOfLengthBetween(2, 10)); + String id = randomValueOtherThan(instance.id(), () -> randomAlphaOfLengthBetween(2, 10)); + return new PostSecretResponse(id); } } diff --git a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java index 0b1693e0c3712..5f219bd8ce592 100644 --- a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java +++ b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java @@ -9,12 +9,11 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.search.ClosePointInTimeAction; import org.elasticsearch.action.search.ClosePointInTimeRequest; -import org.elasticsearch.action.search.OpenPointInTimeAction; import org.elasticsearch.action.search.OpenPointInTimeRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.search.TransportClosePointInTimeAction; +import org.elasticsearch.action.search.TransportOpenPointInTimeAction; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; @@ -49,7 +48,9 @@ import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -73,8 +74,7 @@ public void testTimestampRangeRecalculatedOnStalePrimaryAllocation() throws IOEx createIndex("index", 1, 1); - final DocWriteResponse indexResponse = client().prepareIndex("index") - .setSource(DataStream.TIMESTAMP_FIELD_NAME, "2010-01-06T02:03:04.567Z") + final DocWriteResponse indexResponse = prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, "2010-01-06T02:03:04.567Z") .get(); ensureGreen("index"); @@ -171,7 +171,7 @@ public void testTimestampFieldTypeExposedByAllIndicesServices() throws Exception ensureGreen("index"); if (randomBoolean()) { - client().prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, date).get(); + prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, date).get(); } for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { @@ -224,37 +224,40 @@ public void testRetryPointInTime() throws Exception { ); int numDocs = randomIntBetween(1, 100); for (int i = 0; i < numDocs; i++) { - client().prepareIndex(indexName).setSource("created_date", "2011-02-02").get(); + prepareIndex(indexName).setSource("created_date", "2011-02-02").get(); } assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(indexName)).actionGet()); final OpenPointInTimeRequest openPointInTimeRequest = new OpenPointInTimeRequest(indexName).indicesOptions( IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED ).keepAlive(TimeValue.timeValueMinutes(2)); - final String pitId = client().execute(OpenPointInTimeAction.INSTANCE, openPointInTimeRequest).actionGet().getPointInTimeId(); + final String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openPointInTimeRequest).actionGet().getPointInTimeId(); try { - SearchResponse resp = prepareSearch().setIndices(indexName) - .setPreference(null) - .setPointInTime(new PointInTimeBuilder(pitId)) - .get(); - assertNoFailures(resp); - assertThat(resp.pointInTimeId(), equalTo(pitId)); - assertHitCount(resp, numDocs); + assertNoFailuresAndResponse( + prepareSearch().setIndices(indexName).setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), + searchResponse -> { + assertThat(searchResponse.pointInTimeId(), equalTo(pitId)); + assertHitCount(searchResponse, numDocs); + } + ); internalCluster().restartNode(assignedNode); ensureGreen(indexName); - resp = prepareSearch().setIndices(indexName) - .setQuery(new RangeQueryBuilder("created_date").gte("2011-01-01").lte("2011-12-12")) - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setPreference(null) - .setPreFilterShardSize(between(1, 10)) - .setAllowPartialSearchResults(true) - .setPointInTime(new PointInTimeBuilder(pitId)) - .get(); - assertNoFailures(resp); - assertThat(resp.pointInTimeId(), equalTo(pitId)); - assertHitCount(resp, 
numDocs); + + assertNoFailuresAndResponse( + prepareSearch().setIndices(indexName) + .setQuery(new RangeQueryBuilder("created_date").gte("2011-01-01").lte("2011-12-12")) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setPreference(null) + .setPreFilterShardSize(between(1, 10)) + .setAllowPartialSearchResults(true) + .setPointInTime(new PointInTimeBuilder(pitId)), + searchResponse -> { + assertThat(searchResponse.pointInTimeId(), equalTo(pitId)); + assertHitCount(searchResponse, numDocs); + } + ); } finally { assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(indexName).setFreeze(false)).actionGet()); - client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(pitId)).actionGet(); + client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet(); } } @@ -265,13 +268,13 @@ public void testPointInTimeWithDeletedIndices() { int index1 = randomIntBetween(10, 50); for (int i = 0; i < index1; i++) { String id = Integer.toString(i); - client().prepareIndex("index-1").setId(id).setSource("value", i).get(); + prepareIndex("index-1").setId(id).setSource("value", i).get(); } int index2 = randomIntBetween(10, 50); for (int i = 0; i < index2; i++) { String id = Integer.toString(i); - client().prepareIndex("index-2").setId(id).setSource("value", i).get(); + prepareIndex("index-2").setId(id).setSource("value", i).get(); } assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("index-1", "index-2")).actionGet()); @@ -279,23 +282,24 @@ public void testPointInTimeWithDeletedIndices() { IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED ).keepAlive(TimeValue.timeValueMinutes(2)); - final String pitId = client().execute(OpenPointInTimeAction.INSTANCE, openPointInTimeRequest).actionGet().getPointInTimeId(); + final String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openPointInTimeRequest).actionGet().getPointInTimeId(); try { indicesAdmin().prepareDelete("index-1").get(); // Return partial results if allow partial search result is allowed - SearchResponse resp = prepareSearch().setPreference(null) - .setAllowPartialSearchResults(true) - .setPointInTime(new PointInTimeBuilder(pitId)) - .get(); - assertFailures(resp); - assertHitCount(resp, index2); + assertResponse( + prepareSearch().setPreference(null).setAllowPartialSearchResults(true).setPointInTime(new PointInTimeBuilder(pitId)), + searchResponse -> { + assertFailures(searchResponse); + assertHitCount(searchResponse, index2); + } + ); // Fails if allow partial search result is not allowed expectThrows( ElasticsearchException.class, prepareSearch().setPreference(null).setAllowPartialSearchResults(false).setPointInTime(new PointInTimeBuilder(pitId))::get ); } finally { - client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(pitId)).actionGet(); + client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet(); } } @@ -305,7 +309,7 @@ public void testOpenPointInTimeWithNoIndexMatched() { int numDocs = randomIntBetween(10, 50); for (int i = 0; i < numDocs; i++) { String id = Integer.toString(i); - client().prepareIndex("test-index").setId(id).setSource("value", i).get(); + prepareIndex("test-index").setId(id).setSource("value", i).get(); } assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("test-index")).actionGet()); // include the frozen indices @@ -313,13 +317,16 @@ public void testOpenPointInTimeWithNoIndexMatched() { final OpenPointInTimeRequest 
openPointInTimeRequest = new OpenPointInTimeRequest("test-*").indicesOptions( IndicesOptions.strictExpandOpenAndForbidClosed() ).keepAlive(TimeValue.timeValueMinutes(2)); - final String pitId = client().execute(OpenPointInTimeAction.INSTANCE, openPointInTimeRequest).actionGet().getPointInTimeId(); + final String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openPointInTimeRequest) + .actionGet() + .getPointInTimeId(); try { - SearchResponse resp = prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)).get(); - assertNoFailures(resp); - assertHitCount(resp, numDocs); + assertNoFailuresAndResponse( + prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), + searchResponse -> assertHitCount(searchResponse, numDocs) + ); } finally { - client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(pitId)).actionGet(); + client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet(); } } // exclude the frozen indices @@ -327,13 +334,13 @@ public void testOpenPointInTimeWithNoIndexMatched() { final OpenPointInTimeRequest openPointInTimeRequest = new OpenPointInTimeRequest("test-*").keepAlive( TimeValue.timeValueMinutes(2) ); - final String pitId = client().execute(OpenPointInTimeAction.INSTANCE, openPointInTimeRequest).actionGet().getPointInTimeId(); + final String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openPointInTimeRequest) + .actionGet() + .getPointInTimeId(); try { - SearchResponse resp = prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)).get(); - assertNoFailures(resp); - assertHitCount(resp, 0); + assertHitCountAndNoFailures(prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), 0); } finally { - client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(pitId)).actionGet(); + client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet(); } } } diff --git a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexRecoveryTests.java b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexRecoveryTests.java index 74655a39089fe..8bc32f0171f29 100644 --- a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexRecoveryTests.java +++ b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexRecoveryTests.java @@ -61,9 +61,7 @@ public void testRecoverExistingReplica() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, randomIntBetween(0, 50)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)) - .collect(toList()) + IntStream.range(0, randomIntBetween(0, 50)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); ensureGreen(indexName); indicesAdmin().prepareFlush(indexName).get(); diff --git a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java index 6b028e5ea0815..faeaee2da9307 100644 --- a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java +++ 
b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java @@ -12,14 +12,14 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.search.ClosePointInTimeAction; import org.elasticsearch.action.search.ClosePointInTimeRequest; -import org.elasticsearch.action.search.OpenPointInTimeAction; import org.elasticsearch.action.search.OpenPointInTimeRequest; import org.elasticsearch.action.search.OpenPointInTimeResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.search.TransportClosePointInTimeAction; +import org.elasticsearch.action.search.TransportOpenPointInTimeAction; import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; @@ -86,20 +86,20 @@ protected Collection<Class<? extends Plugin>> getPlugins() { String openReaders(TimeValue keepAlive, String... indices) { OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).indicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED) .keepAlive(keepAlive); - final OpenPointInTimeResponse response = client().execute(OpenPointInTimeAction.INSTANCE, request).actionGet(); + final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); return response.getPointInTimeId(); } public void testCloseFreezeAndOpen() throws Exception { String indexName = "index"; createIndex(indexName, Settings.builder().put("index.number_of_shards", 2).build()); - client().prepareIndex(indexName).setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex(indexName).setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex(indexName).setId("3").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(indexName).setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(indexName).setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(indexName).setId("3").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(indexName)).actionGet()); expectThrows( ClusterBlockException.class, - () -> client().prepareIndex(indexName).setId("4").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get() + () -> prepareIndex(indexName).setId("4").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get() ); IndicesService indexServices = getInstanceFromNode(IndicesService.class); Index index = resolveIndex(indexName); @@ -161,7 +161,7 @@ public void testCloseFreezeAndOpen() throws Exception { } assertWarnings(TransportSearchAction.FROZEN_INDICES_DEPRECATION_MESSAGE.replace("{}", indexName)); } finally { - client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(pitId)).get(); + client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).get(); } } @@ -180,7 +180,7 @@ public void testSearchAndGetAPIsAreThrottled() throws IOException { String indexName = "index"; createIndex(indexName, Settings.builder().put("index.number_of_shards", 2).build(),
mapping); for (int i = 0; i < 10; i++) { - client().prepareIndex(indexName).setId("" + i).setSource("field", "foo bar baz").get(); + prepareIndex(indexName).setId("" + i).setSource("field", "foo bar baz").get(); } assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(indexName)).actionGet()); int numRequests = randomIntBetween(20, 50); @@ -217,9 +217,9 @@ public void testFreezeAndUnfreeze() { final IndexService originalIndexService = createIndex("index", Settings.builder().put("index.number_of_shards", 2).build()); assertThat(originalIndexService.getMetadata().getTimestampRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); - client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex("index").setId("3").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("index").setId("3").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); if (randomBoolean()) { // sometimes close it @@ -250,7 +250,7 @@ public void testFreezeAndUnfreeze() { assertThat(engine, Matchers.instanceOf(InternalEngine.class)); assertThat(indexService.getMetadata().getTimestampRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); } - client().prepareIndex("index").setId("4").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("index").setId("4").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); } private void assertIndexFrozen(String idx) { @@ -278,9 +278,9 @@ public void testDoubleFreeze() { public void testUnfreezeClosedIndices() { createIndex("idx", Settings.builder().put("index.number_of_shards", 1).build()); - client().prepareIndex("idx").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("idx").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); createIndex("idx-closed", Settings.builder().put("index.number_of_shards", 1).build()); - client().prepareIndex("idx-closed").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("idx-closed").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("idx")).actionGet()); assertAcked(indicesAdmin().prepareClose("idx-closed").get()); assertAcked( @@ -298,9 +298,9 @@ public void testUnfreezeClosedIndices() { public void testFreezePattern() { String indexName = "test-idx"; createIndex(indexName, Settings.builder().put("index.number_of_shards", 1).build()); - client().prepareIndex(indexName).setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(indexName).setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); createIndex("test-idx-1", Settings.builder().put("index.number_of_shards", 1).build()); - client().prepareIndex("test-idx-1").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test-idx-1").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(indexName)).actionGet()); assertIndexFrozen(indexName); @@ -322,8 +322,8 @@ public void testFreezePattern() { 
public void testCanMatch() throws IOException { createIndex("index"); - client().prepareIndex("index").setId("1").setSource("field", "2010-01-05T02:00").setRefreshPolicy(IMMEDIATE).execute().actionGet(); - client().prepareIndex("index").setId("2").setSource("field", "2010-01-06T02:00").setRefreshPolicy(IMMEDIATE).execute().actionGet(); + prepareIndex("index").setId("1").setSource("field", "2010-01-05T02:00").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("index").setId("2").setSource("field", "2010-01-06T02:00").setRefreshPolicy(IMMEDIATE).get(); { IndicesService indexServices = getInstanceFromNode(IndicesService.class); Index index = resolveIndex("index"); @@ -460,12 +460,12 @@ public void testCanMatch() throws IOException { public void testWriteToFrozenIndex() { createIndex("idx", Settings.builder().put("index.number_of_shards", 1).build()); - client().prepareIndex("idx").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("idx").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("idx")).actionGet()); assertIndexFrozen("idx"); expectThrows( ClusterBlockException.class, - () -> client().prepareIndex("idx").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get() + () -> prepareIndex("idx").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get() ); } @@ -508,7 +508,7 @@ public void testUnfreezeClosedIndex() { public void testFreezeIndexIncreasesIndexSettingsVersion() { final String index = "test"; createIndex(index, indexSettings(1, 0).build()); - client().prepareIndex(index).setSource("field", "value").execute().actionGet(); + prepareIndex(index).setSource("field", "value").get(); final long settingsVersion = clusterAdmin().prepareState().get().getState().metadata().index(index).getSettingsVersion(); @@ -548,7 +548,7 @@ public void testRecoveryState() { final long nbDocs = randomIntBetween(0, 50); for (long i = 0; i < nbDocs; i++) { - final DocWriteResponse indexResponse = client().prepareIndex(indexName).setId(Long.toString(i)).setSource("field", i).get(); + final DocWriteResponse indexResponse = prepareIndex(indexName).setId(Long.toString(i)).setSource("field", i).get(); assertThat(indexResponse.status(), is(RestStatus.CREATED)); } @@ -579,7 +579,7 @@ public void testTranslogStats() { final int nbDocs = randomIntBetween(0, 50); int uncommittedOps = 0; for (long i = 0; i < nbDocs; i++) { - final DocWriteResponse indexResponse = client().prepareIndex(indexName).setId(Long.toString(i)).setSource("field", i).get(); + final DocWriteResponse indexResponse = prepareIndex(indexName).setId(Long.toString(i)).setSource("field", i).get(); assertThat(indexResponse.status(), is(RestStatus.CREATED)); if (rarely()) { indicesAdmin().prepareFlush(indexName).get(); @@ -613,8 +613,8 @@ public void testTranslogStats() { public void testComputesTimestampRangeFromMilliseconds() { final int shardCount = between(1, 3); createIndex("index", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shardCount).build()); - client().prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, "2010-01-05T01:02:03.456Z").get(); - client().prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, "2010-01-06T02:03:04.567Z").get(); + prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, "2010-01-05T01:02:03.456Z").get(); + prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, "2010-01-06T02:03:04.567Z").get(); 
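Most of the mechanical churn in these frozen-index tests is the same substitution: client().prepareIndex(index) becomes prepareIndex(index). A minimal sketch of the kind of base-class convenience that makes the shorter form possible; the real helper lives in the shared Elasticsearch test framework, so the class name and abstract layout here are illustrative assumptions:

    import org.elasticsearch.action.index.IndexRequestBuilder;
    import org.elasticsearch.client.internal.Client;

    abstract class IndexingTestHelperSketch {
        protected abstract Client client();

        // The one-liner the refactored call sites rely on: same builder, shorter call.
        protected final IndexRequestBuilder prepareIndex(String index) {
            return client().prepareIndex(index);
        }
    }
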
assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("index")).actionGet()); @@ -645,8 +645,8 @@ public void testComputesTimestampRangeFromNanoseconds() throws IOException { final int shardCount = between(1, 3); createIndex("index", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shardCount).build(), mapping); - client().prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, "2010-01-05T01:02:03.456789012Z").get(); - client().prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, "2010-01-06T02:03:04.567890123Z").get(); + prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, "2010-01-05T01:02:03.456789012Z").get(); + prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, "2010-01-06T02:03:04.567890123Z").get(); assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("index")).actionGet()); diff --git a/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/frozen/FrozenEngineTests.java b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/frozen/FrozenEngineTests.java index 2dc1ada344f57..83532a8171947 100644 --- a/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/frozen/FrozenEngineTests.java +++ b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/frozen/FrozenEngineTests.java @@ -23,13 +23,12 @@ import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.store.Store; +import org.elasticsearch.indices.breaker.CircuitBreakerMetrics; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.hamcrest.Matchers; import java.io.IOException; -import java.lang.reflect.Method; -import java.lang.reflect.Modifier; import java.util.Collections; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; @@ -231,6 +230,7 @@ public void testSearchConcurrently() throws IOException, InterruptedException { null, globalCheckpoint::get, new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, defaultSettings.getSettings(), Collections.emptyList(), new ClusterSettings(defaultSettings.getNodeSettings(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) @@ -277,29 +277,6 @@ public void testSearchConcurrently() throws IOException, InterruptedException { } } - private static void checkOverrideMethods(Class<?> clazz) throws NoSuchMethodException, SecurityException { - final Class<?> superClazz = clazz.getSuperclass(); - for (Method m : superClazz.getMethods()) { - final int mods = m.getModifiers(); - if (Modifier.isStatic(mods) - || Modifier.isAbstract(mods) - || Modifier.isFinal(mods) - || m.isSynthetic() - || m.getName().equals("attributes") - || m.getName().equals("getStats")) { - continue; - } - // The point of these checks is to ensure that methods from the super class - // are overwritten to make sure we never miss a method from FilterLeafReader / FilterDirectoryReader - final Method subM = clazz.getMethod(m.getName(), m.getParameterTypes()); - if (subM.getDeclaringClass() == superClazz - && m.getDeclaringClass() != Object.class - && m.getDeclaringClass() == subM.getDeclaringClass()) { - fail(clazz + " doesn't override" + m + " although it has been declared by it's superclass"); - } - } - } - private class CountingRefreshListener implements ReferenceManager.RefreshListener { final AtomicInteger afterRefresh = new AtomicInteger(0);
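The checkOverrideMethods helper removed above encodes a reusable reflection pattern: walk every public method of a wrapper's superclass (here FilterLeafReader / FilterDirectoryReader) and fail when the subclass inherits one it was expected to override. A standalone sketch, assuming a plain AssertionError in place of the test framework's fail() and dropping the reader-specific name exclusions:

    import java.lang.reflect.Method;
    import java.lang.reflect.Modifier;

    final class OverrideCheckSketch {
        static void checkOverrideMethods(Class<?> clazz) throws NoSuchMethodException {
            final Class<?> superClazz = clazz.getSuperclass();
            for (Method m : superClazz.getMethods()) {
                final int mods = m.getModifiers();
                // static/abstract/final/synthetic methods cannot or need not be overridden
                if (Modifier.isStatic(mods) || Modifier.isAbstract(mods) || Modifier.isFinal(mods) || m.isSynthetic()) {
                    continue;
                }
                // getMethod resolves the most specific declaration; if that is still the
                // superclass's own declaration, the subclass never overrode the method.
                final Method subM = clazz.getMethod(m.getName(), m.getParameterTypes());
                if (subM.getDeclaringClass() == superClazz
                    && m.getDeclaringClass() != Object.class
                    && m.getDeclaringClass() == subM.getDeclaringClass()) {
                    throw new AssertionError(clazz + " does not override " + m + " although its superclass declares it");
                }
            }
        }
    }
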
diff --git a/x-pack/plugin/graph/src/internalClusterTest/java/org/elasticsearch/xpack/graph/test/GraphTests.java b/x-pack/plugin/graph/src/internalClusterTest/java/org/elasticsearch/xpack/graph/test/GraphTests.java index 9009ae0da90e3..25c22672cf81c 100644 --- a/x-pack/plugin/graph/src/internalClusterTest/java/org/elasticsearch/xpack/graph/test/GraphTests.java +++ b/x-pack/plugin/graph/src/internalClusterTest/java/org/elasticsearch/xpack/graph/test/GraphTests.java @@ -89,8 +89,7 @@ public void setUp() throws Exception { for (DocTemplate dt : socialNetTemplate) { for (int i = 0; i < dt.numDocs; i++) { // Supply a doc ID for deterministic routing of docs to shards - client().prepareIndex("test") - .setId("doc#" + numDocs) + prepareIndex("test").setId("doc#" + numDocs) .setSource("decade", dt.decade, "people", dt.people, "description", dt.description) .get(); numDocs++; @@ -99,7 +98,7 @@ public void setUp() throws Exception { indicesAdmin().prepareRefresh("test").get(); // Ensure single segment with no deletes. Hopefully solves test instability in // issue https://github.com/elastic/x-pack-elasticsearch/issues/918 - ForceMergeResponse actionGet = indicesAdmin().prepareForceMerge("test").setFlush(true).setMaxNumSegments(1).execute().actionGet(); + ForceMergeResponse actionGet = indicesAdmin().prepareForceMerge("test").setFlush(true).setMaxNumSegments(1).get(); indicesAdmin().prepareRefresh("test").get(); assertAllSuccessful(actionGet); for (IndexShardSegments seg : indicesAdmin().prepareSegments().get().getIndices().get("test")) { diff --git a/x-pack/plugin/identity-provider/build.gradle b/x-pack/plugin/identity-provider/build.gradle index 38ca5dfd344b8..dd085e62efa48 100644 --- a/x-pack/plugin/identity-provider/build.gradle +++ b/x-pack/plugin/identity-provider/build.gradle @@ -34,7 +34,7 @@ dependencies { api "org.opensaml:opensaml-storage-impl:${versions.opensaml}" api "net.shibboleth.utilities:java-support:8.4.0" api "com.google.code.findbugs:jsr305:3.0.2" - api "org.apache.santuario:xmlsec:2.3.2" + api "org.apache.santuario:xmlsec:2.3.4" api "io.dropwizard.metrics:metrics-core:4.1.4" api ( "org.cryptacular:cryptacular:1.2.5") { exclude group: 'org.bouncycastle' diff --git a/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/action/SamlIdentityProviderTests.java b/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/action/SamlIdentityProviderTests.java index e72d97d212119..76bf415fdcce5 100644 --- a/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/action/SamlIdentityProviderTests.java +++ b/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/action/SamlIdentityProviderTests.java @@ -276,17 +276,21 @@ public void testSpInitiatedSsoFailsForUserWithNoAccess() throws Exception { initRequest.setJsonEntity(Strings.format(""" {"entity_id":"%s", "acs":"%s","authn_state":%s} """, entityId, acsUrl, Strings.toString(authnStateBuilder))); - Response initResponse = getRestClient().performRequest(initRequest); - ObjectPath initResponseObject = ObjectPath.createFromResponse(initResponse); - assertThat(initResponseObject.evaluate("post_url").toString(), equalTo(acsUrl)); - final String body = initResponseObject.evaluate("saml_response").toString(); + ResponseException e = expectThrows(ResponseException.class, () -> getRestClient().performRequest(initRequest)); + Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), 
equalTo(403)); + ObjectPath initResponseObject = ObjectPath.createFromResponse(response); + assertThat(initResponseObject.evaluate("status"), equalTo(403)); + final String baseSamlResponseObjectPath = "error.saml_initiate_single_sign_on_response."; + assertThat(initResponseObject.evaluate(baseSamlResponseObjectPath + "post_url").toString(), equalTo(acsUrl)); + final String body = initResponseObject.evaluate(baseSamlResponseObjectPath + "saml_response").toString(); assertThat(body, containsString("<saml2p:Response")); assertThat(body, containsString("InResponseTo=\"" + expectedInResponeTo + "\"")); - Map<String, Object> sp = initResponseObject.evaluate("service_provider"); + Map<String, Object> sp = initResponseObject.evaluate(baseSamlResponseObjectPath + "service_provider"); assertThat(sp, hasKey("entity_id")); assertThat(sp.get("entity_id"), equalTo(entityId)); assertThat( - initResponseObject.evaluate("error"), + initResponseObject.evaluate(baseSamlResponseObjectPath + "error"), equalTo("User [" + SAMPLE_USER_NAME + "] is not permitted to access service [" + entityId + "]") ); } diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/SamlInitiateSingleSignOnResponse.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/SamlInitiateSingleSignOnResponse.java index d920b29de7bcd..a7cd9c606b3c6 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/SamlInitiateSingleSignOnResponse.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/SamlInitiateSingleSignOnResponse.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -72,4 +73,14 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(samlStatus); out.writeOptionalString(error); } + + public void toXContent(XContentBuilder builder) throws IOException { + builder.field("post_url", this.getPostUrl()); + builder.field("saml_response", this.getSamlResponse()); + builder.field("saml_status", this.getSamlStatus()); + builder.field("error", this.getError()); + builder.startObject("service_provider"); + builder.field("entity_id", this.getEntityId()); + builder.endObject(); + } } diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnAction.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnAction.java index a41569920ecf8..f2b9c20c79d61 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnAction.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnAction.java @@ -9,11 +9,12 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.util.concurrent.EsExecutors; import
org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; @@ -29,6 +30,7 @@ import org.elasticsearch.xpack.idp.saml.sp.SamlServiceProvider; import org.elasticsearch.xpack.idp.saml.support.SamlAuthenticationState; import org.elasticsearch.xpack.idp.saml.support.SamlFactory; +import org.elasticsearch.xpack.idp.saml.support.SamlInitiateSingleSignOnException; import org.opensaml.saml.saml2.core.Response; import org.opensaml.saml.saml2.core.StatusCode; @@ -80,47 +82,51 @@ protected void doExecute( false, ActionListener.wrap(sp -> { if (null == sp) { - final String message = "Service Provider with Entity ID [" - + request.getSpEntityId() - + "] and ACS [" - + request.getAssertionConsumerService() - + "] is not known to this Identity Provider"; - possiblyReplyWithSamlFailure( - authenticationState, - request.getSpEntityId(), - request.getAssertionConsumerService(), - StatusCode.RESPONDER, - new IllegalArgumentException(message), - listener + writeFailureResponse( + listener, + buildSamlInitiateSingleSignOnException( + authenticationState, + request.getSpEntityId(), + request.getAssertionConsumerService(), + StatusCode.RESPONDER, + RestStatus.BAD_REQUEST, + "Service Provider with Entity ID [{}] and ACS [{}] is not known to this Identity Provider", + request.getSpEntityId(), + request.getAssertionConsumerService() + ) + ); return; } final SecondaryAuthentication secondaryAuthentication = SecondaryAuthentication.readFromContext(securityContext); if (secondaryAuthentication == null) { - possiblyReplyWithSamlFailure( - authenticationState, - request.getSpEntityId(), - request.getAssertionConsumerService(), - StatusCode.REQUESTER, - new ElasticsearchSecurityException("Request is missing secondary authentication", RestStatus.FORBIDDEN), - listener + writeFailureResponse( + listener, + buildSamlInitiateSingleSignOnException( + authenticationState, + request.getSpEntityId(), + request.getAssertionConsumerService(), + StatusCode.REQUESTER, + RestStatus.FORBIDDEN, + "Request is missing secondary authentication" + ) ); return; } buildUserFromAuthentication(secondaryAuthentication, sp, ActionListener.wrap(user -> { if (user == null) { - possiblyReplyWithSamlFailure( - authenticationState, - request.getSpEntityId(), - request.getAssertionConsumerService(), - StatusCode.REQUESTER, - new ElasticsearchSecurityException( - "User [{}] is not permitted to access service [{}]", + writeFailureResponse( + listener, + buildSamlInitiateSingleSignOnException( + authenticationState, + request.getSpEntityId(), + request.getAssertionConsumerService(), + StatusCode.REQUESTER, RestStatus.FORBIDDEN, + "User [{}] is not permitted to access service [{}]", secondaryAuthentication.getUser().principal(), sp.getEntityId() - ), - listener + ) ); return; } @@ -144,23 +150,25 @@ protected void doExecute( listener.onFailure(e); } }, - e -> possiblyReplyWithSamlFailure( + e -> writeFailureResponse( + listener, + buildResponderSamlInitiateSingleSignOnException( + authenticationState, + request.getSpEntityId(), + request.getAssertionConsumerService(), + e + ) + ) + )); + }, + e -> writeFailureResponse( + listener, + buildResponderSamlInitiateSingleSignOnException( authenticationState, request.getSpEntityId(), request.getAssertionConsumerService(), - StatusCode.RESPONDER, - e, - listener + e ) - )); - }, - e -> possiblyReplyWithSamlFailure( - authenticationState, - request.getSpEntityId(), - request.getAssertionConsumerService(), - StatusCode.RESPONDER, - e, - listener ) ) ); @@ -194,15 +202,25 @@ private void 
buildUserFromAuthentication( }); } - private void possiblyReplyWithSamlFailure( - SamlAuthenticationState authenticationState, - String spEntityId, - String acsUrl, - String statusCode, - Exception e, - ActionListener<SamlInitiateSingleSignOnResponse> listener + private void writeFailureResponse( + final ActionListener<SamlInitiateSingleSignOnResponse> listener, + final SamlInitiateSingleSignOnException ex + ) { + logger.debug("Failed to generate a successful SAML response: ", ex); + listener.onFailure(ex); + } + + private SamlInitiateSingleSignOnException buildSamlInitiateSingleSignOnException( + final SamlAuthenticationState authenticationState, + final String spEntityId, + final String acsUrl, + final String statusCode, + final RestStatus restStatus, + final String messageFormatStr, + final Object... args ) { - logger.debug("Failed to generate a successful SAML response: ", e); + final SamlInitiateSingleSignOnException ex; + String exceptionMessage = LoggerMessageFormat.format(messageFormatStr, args); if (authenticationState != null) { final FailedAuthenticationResponseMessageBuilder builder = new FailedAuthenticationResponseMessageBuilder( samlFactory, @@ -210,11 +228,34 @@ private void possiblyReplyWithSamlFailure( identityProvider ).setInResponseTo(authenticationState.getAuthnRequestId()).setAcsUrl(acsUrl).setPrimaryStatusCode(statusCode); final Response response = builder.build(); - listener.onResponse( - new SamlInitiateSingleSignOnResponse(spEntityId, acsUrl, samlFactory.getXmlContent(response), statusCode, e.getMessage()) + ex = new SamlInitiateSingleSignOnException( + exceptionMessage, + restStatus, + new SamlInitiateSingleSignOnResponse(spEntityId, acsUrl, samlFactory.getXmlContent(response), statusCode, exceptionMessage) ); } else { - listener.onFailure(e); + ex = new SamlInitiateSingleSignOnException(exceptionMessage, restStatus); } + return ex; + } + + private SamlInitiateSingleSignOnException buildResponderSamlInitiateSingleSignOnException( + final SamlAuthenticationState authenticationState, + final String spEntityId, + final String acsUrl, + final Exception cause + ) { + final String exceptionMessage = cause.getMessage(); + final RestStatus restStatus = ExceptionsHelper.status(cause); + final SamlInitiateSingleSignOnException ex = buildSamlInitiateSingleSignOnException( + authenticationState, + spEntityId, + acsUrl, + StatusCode.RESPONDER, + restStatus, + exceptionMessage + ); + ex.initCause(cause); + return ex; } } diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/rest/action/RestSamlInitiateSingleSignOnAction.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/rest/action/RestSamlInitiateSingleSignOnAction.java index 509c1e06ec45e..3e4d57860fdae 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/rest/action/RestSamlInitiateSingleSignOnAction.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/rest/action/RestSamlInitiateSingleSignOnAction.java @@ -68,13 +68,7 @@ protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClien @Override public RestResponse buildResponse(SamlInitiateSingleSignOnResponse response, XContentBuilder builder) throws Exception { builder.startObject(); - builder.field("post_url", response.getPostUrl()); - builder.field("saml_response", response.getSamlResponse()); - builder.field("saml_status", response.getSamlStatus()); - builder.field("error", response.getError()); - builder.startObject("service_provider"); - builder.field("entity_id",
response.getEntityId()); - builder.endObject(); + response.toXContent(builder); builder.endObject(); return new RestResponse(RestStatus.OK, builder); } diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java index 2291061af3e98..202b52e0974d8 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java @@ -215,7 +215,7 @@ public void installIndexTemplate(ActionListener listener) { } private boolean isTemplateUpToDate(ClusterState state) { - return TemplateUtils.checkTemplateExistsAndIsUpToDate(TEMPLATE_NAME, TEMPLATE_META_VERSION_KEY, state, logger, null); + return TemplateUtils.checkTemplateExistsAndIsUpToDate(TEMPLATE_NAME, TEMPLATE_META_VERSION_KEY, state, logger); } public void deleteDocument(DocumentVersion version, WriteRequest.RefreshPolicy refreshPolicy, ActionListener listener) { diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/support/SamlInitiateSingleSignOnException.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/support/SamlInitiateSingleSignOnException.java new file mode 100644 index 0000000000000..ba983a84b5199 --- /dev/null +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/support/SamlInitiateSingleSignOnException.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.idp.saml.support; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.idp.action.SamlInitiateSingleSignOnResponse; + +import java.io.IOException; + +public class SamlInitiateSingleSignOnException extends ElasticsearchSecurityException { + + private SamlInitiateSingleSignOnResponse samlInitiateSingleSignOnResponse; + + public SamlInitiateSingleSignOnException( + String msg, + RestStatus status, + SamlInitiateSingleSignOnResponse samlInitiateSingleSignOnResponse + ) { + super(msg, status); + this.samlInitiateSingleSignOnResponse = samlInitiateSingleSignOnResponse; + } + + public SamlInitiateSingleSignOnException(String msg, RestStatus status) { + super(msg, status); + } + + @Override + protected void metadataToXContent(XContentBuilder builder, Params params) throws IOException { + if (this.samlInitiateSingleSignOnResponse != null) { + builder.startObject("saml_initiate_single_sign_on_response"); + this.samlInitiateSingleSignOnResponse.toXContent(builder); + builder.endObject(); + } + } + + public SamlInitiateSingleSignOnResponse getSamlInitiateSingleSignOnResponse() { + return samlInitiateSingleSignOnResponse; + } +} diff --git a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnActionTests.java b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnActionTests.java index 9436a4e1c39a9..3eb9096efce8d 100644 --- a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnActionTests.java +++ b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnActionTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.xpack.idp.saml.sp.WildcardServiceProviderResolver; import org.elasticsearch.xpack.idp.saml.support.SamlAuthenticationState; import org.elasticsearch.xpack.idp.saml.support.SamlFactory; +import org.elasticsearch.xpack.idp.saml.support.SamlInitiateSingleSignOnException; import org.elasticsearch.xpack.idp.saml.test.IdpSamlTestCase; import org.mockito.Mockito; import org.opensaml.saml.saml2.core.StatusCode; @@ -112,7 +113,9 @@ public void testGetResponseWithoutSecondaryAuthenticationInSpInitiatedFlow() thr final TransportSamlInitiateSingleSignOnAction action = setupTransportAction(false); action.doExecute(mock(Task.class), request, future); - final SamlInitiateSingleSignOnResponse response = future.get(); + final SamlInitiateSingleSignOnException ex = (SamlInitiateSingleSignOnException) expectThrows(Exception.class, future::get) + .getCause(); + final SamlInitiateSingleSignOnResponse response = ex.getSamlInitiateSingleSignOnResponse(); assertThat(response.getError(), equalTo("Request is missing secondary authentication")); assertThat(response.getSamlStatus(), equalTo(StatusCode.REQUESTER)); assertThat(response.getPostUrl(), equalTo("https://sp.some.org/saml/acs")); diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java index 3ca8c7302d6dd..2e61b6e978b61 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java +++ 
b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java @@ -80,6 +80,7 @@ public class DownsampleActionIT extends ESRestTestCase { }, "routing_path": ["metricset"], "mode": "time_series", + "look_ahead_time": "1m", "lifecycle.name": "%s" } }, @@ -301,7 +302,6 @@ public void testRollupIndexInTheHotPhaseAfterRollover() throws Exception { }); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100271") public void testTsdbDataStreams() throws Exception { // Create the ILM policy DateHistogramInterval fixedInterval = ConfigTestHelpers.randomInterval(); @@ -337,7 +337,10 @@ public void testTsdbDataStreams() throws Exception { rolloverMaxOneDocCondition(client(), dataStream); String rollupIndex = waitAndGetRollupIndexName(client(), backingIndexName, fixedInterval); - assertNotNull("Cannot retrieve rollup index name", rollupIndex); + if (rollupIndex == null) { + logger.warn("explain: {}", explainIndex(client(), backingIndexName)); + } + assertNotNull(String.format(Locale.ROOT, "Cannot retrieve rollup index for backing index [%s]", backingIndexName), rollupIndex); assertBusy(() -> assertTrue("Rollup index does not exist", indexExists(rollupIndex)), 30, TimeUnit.SECONDS); assertBusy(() -> assertFalse("Source index should have been deleted", indexExists(backingIndexName)), 30, TimeUnit.SECONDS); assertBusy(() -> { @@ -490,6 +493,111 @@ public void testDownsampleTwice() throws Exception { } } + public void testDownsampleTwiceSameInterval() throws Exception { + // Create the ILM policy + Request request = new Request("PUT", "_ilm/policy/" + policy); + request.setJsonEntity(""" + { + "policy": { + "phases": { + "warm": { + "actions": { + "downsample": { + "fixed_interval" : "5m" + } + } + }, + "cold": { + "min_age": "365d", + "actions": {} + } + } + } + } + """); + assertOK(client().performRequest(request)); + + // Create a template + Request createIndexTemplateRequest = new Request("POST", "/_index_template/" + dataStream); + createIndexTemplateRequest.setJsonEntity( + Strings.format(TEMPLATE, dataStream, "2006-01-08T23:40:53.384Z", "2021-01-08T23:40:53.384Z", policy) + ); + assertOK(client().performRequest(createIndexTemplateRequest)); + + index(client(), dataStream, true, null, "@timestamp", "2020-01-01T05:10:00Z", "volume", 11.0, "metricset", randomAlphaOfLength(5)); + + String firstBackingIndex = getBackingIndices(client(), dataStream).get(0); + logger.info("--> firstBackingIndex: {}", firstBackingIndex); + assertBusy( + () -> assertThat( + "index must wait in the " + CheckNotDataStreamWriteIndexStep.NAME + " until it is not the write index anymore", + explainIndex(client(), firstBackingIndex).get("step"), + is(CheckNotDataStreamWriteIndexStep.NAME) + ), + 30, + TimeUnit.SECONDS + ); + + // before we roll over, update the template so it no longer contains time boundaries (otherwise rollover is blocked because + // the index time boundaries would overlap after rollover) + Request updateIndexTemplateRequest = new Request("POST", "/_index_template/" + dataStream); + updateIndexTemplateRequest.setJsonEntity(Strings.format(TEMPLATE_NO_TIME_BOUNDARIES, dataStream, policy)); + assertOK(client().performRequest(updateIndexTemplateRequest)); + + // Manually roll over the original index so that it is no longer the write index of the data stream + rolloverMaxOneDocCondition(client(), dataStream); + + String downsampleIndexName = "downsample-5m-" + firstBackingIndex; + // wait for the downsample index to get to the end of the warm phase + assertBusy(() -> {
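+ // The warm phase's downsample action names its target index downsample-<fixed_interval>-<source index>, which is what + // downsampleIndexName above reconstructs; by the end of the phase it should have replaced the source backing index in the data stream: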
assertThat(indexExists(downsampleIndexName), is(true)); + assertThat(indexExists(firstBackingIndex), is(false)); + + assertThat(explainIndex(client(), downsampleIndexName).get("step"), is(PhaseCompleteStep.NAME)); + assertThat(explainIndex(client(), downsampleIndexName).get("phase"), is("warm")); + + var settings = getOnlyIndexSettings(client(), downsampleIndexName); + assertEquals(firstBackingIndex, settings.get(IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_NAME.getKey())); + assertEquals(firstBackingIndex, settings.get(IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME.getKey())); + assertEquals(DownsampleTaskStatus.SUCCESS.toString(), settings.get(IndexMetadata.INDEX_DOWNSAMPLE_STATUS.getKey())); + assertEquals(policy, settings.get(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey())); + }, 60, TimeUnit.SECONDS); + + // update the policy so that the downsample action now exists in cold but no longer in warm (this will make the already + // downsampled index attempt to go through the downsample action again once it reaches cold) + + Request updatePolicyRequest = new Request("PUT", "_ilm/policy/" + policy); + updatePolicyRequest.setJsonEntity(""" + { + "policy": { + "phases": { + "warm": { + "actions": { + } + }, + "cold": { + "min_age": "0ms", + "actions": { + "downsample": { + "fixed_interval" : "5m" + } + } + } + } + } + } + """); + assertOK(client().performRequest(updatePolicyRequest)); + + // the downsample index (already part of the data stream as we created it in the warm phase previously) should continue to exist and + // reach the cold/complete/complete step + assertBusy(() -> { + assertThat(indexExists(downsampleIndexName), is(true)); + assertThat(explainIndex(client(), downsampleIndexName).get("step"), is(PhaseCompleteStep.NAME)); + assertThat(explainIndex(client(), downsampleIndexName).get("phase"), is("cold")); + }, 60, TimeUnit.SECONDS); + } + /** * Gets the generated rollup index name for a given index by looking at newly created indices that match the rollup index name pattern * @@ -506,7 +614,7 @@ public String waitAndGetRollupIndexName(RestClient client, String originalIndexN } catch (IOException e) { return false; } - }, 60, TimeUnit.SECONDS); + }, 120, TimeUnit.SECONDS); // High timeout in case we're unlucky and end_time has been increased. logger.info("--> original index name is [{}], rollup index name is [{}]", originalIndexName, rollupIndexName[0]); return rollupIndexName[0]; } diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java index 668cc4121b7b5..b40528664275d 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java @@ -960,16 +960,12 @@ static void putComposableIndexTemplate( ) throws IOException { PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(name); request.indexTemplate( - new ComposableIndexTemplate( - patterns, - new Template(settings, mappings == null ? null : CompressedXContent.fromJSON(mappings), null, lifecycle), - null, - null, - null, - metadata, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(patterns) + .template(new Template(settings, mappings == null ?
null : CompressedXContent.fromJSON(mappings), null, lifecycle)) + .metadata(metadata) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); } diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeIT.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeIT.java index 795cc104e96b7..8ad3458ceca68 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeIT.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeIT.java @@ -87,21 +87,16 @@ public void testShrinkOnTiers() throws Exception { null ); - ComposableIndexTemplate template = new ComposableIndexTemplate( - Collections.singletonList(index), - t, - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList(index)) + .template(t) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(); client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("template").indexTemplate(template) ).actionGet(); - client().prepareIndex(index).setCreate(true).setId("1").setSource("@timestamp", "2020-09-09").get(); + prepareIndex(index).setCreate(true).setId("1").setSource("@timestamp", "2020-09-09").get(); assertBusy(() -> { ExplainLifecycleResponse explain = client().execute(ExplainLifecycleAction.INSTANCE, new ExplainLifecycleRequest().indices("*")) diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeWithCCRDisabledIT.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeWithCCRDisabledIT.java index d8cdcc27bc038..e10e4a466f7f3 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeWithCCRDisabledIT.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeWithCCRDisabledIT.java @@ -87,21 +87,16 @@ public void testShrinkOnTiers() throws Exception { null ); - ComposableIndexTemplate template = new ComposableIndexTemplate( - Collections.singletonList(index), - t, - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList(index)) + .template(t) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(); client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("template").indexTemplate(template) ).actionGet(); - client().prepareIndex(index).setCreate(true).setId("1").setSource("@timestamp", "2020-09-09").get(); + prepareIndex(index).setCreate(true).setId("1").setSource("@timestamp", "2020-09-09").get(); assertBusy(() -> { ExplainLifecycleResponse explain = client().execute(ExplainLifecycleAction.INSTANCE, new ExplainLifecycleRequest().indices("*")) diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java index 069771515d1b6..1f09fd7457187 100644 --- 
a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java @@ -183,8 +183,7 @@ public void testSingleNodeCluster() throws Exception { assertNotNull(indexLifecycleService.getScheduledJob()); assertBusy(() -> { LifecycleExecutionState lifecycleState = clusterAdmin().prepareState() - .execute() - .actionGet() + .get() .getState() .getMetadata() .index("test") @@ -415,8 +414,7 @@ public void testMasterDedicatedDataDedicated() throws Exception { assertBusy(() -> assertTrue(indexExists("test"))); assertBusy(() -> { LifecycleExecutionState lifecycleState = clusterAdmin().prepareState() - .execute() - .actionGet() + .get() .getState() .getMetadata() .index("test") diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingServiceTests.java index 6aa46dee54829..63c029316536f 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingServiceTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingServiceTests.java @@ -1395,77 +1395,82 @@ public void testMigrateComposableIndexTemplates() { String includeRoutingSetting = INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + nodeAttrName; String excludeRoutingSetting = INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + nodeAttrName; - ComposableIndexTemplate templateWithRequireRouting = new ComposableIndexTemplate( - List.of("test-*"), - new Template( - Settings.builder().put(requireRoutingSetting, "hot").put(LifecycleSettings.LIFECYCLE_NAME, "testLifecycle").build(), - null, - null - ), - List.of(), - randomLong(), - randomLong(), - null - ); + ComposableIndexTemplate templateWithRequireRouting = ComposableIndexTemplate.builder() + .indexPatterns(List.of("test-*")) + .template( + new Template( + Settings.builder().put(requireRoutingSetting, "hot").put(LifecycleSettings.LIFECYCLE_NAME, "testLifecycle").build(), + null, + null + ) + ) + .componentTemplates(List.of()) + .priority(randomLong()) + .version(randomLong()) + .build(); - ComposableIndexTemplate templateWithIncludeRouting = new ComposableIndexTemplate( - List.of("test-*"), - new Template( - Settings.builder().put(includeRoutingSetting, "hot").put(LifecycleSettings.LIFECYCLE_NAME, "testLifecycle").build(), - null, - null - ), - List.of(), - randomLong(), - randomLong(), - null - ); + ComposableIndexTemplate templateWithIncludeRouting = ComposableIndexTemplate.builder() + .indexPatterns(List.of("test-*")) + .template( + new Template( + Settings.builder().put(includeRoutingSetting, "hot").put(LifecycleSettings.LIFECYCLE_NAME, "testLifecycle").build(), + null, + null + ) + ) + .componentTemplates(List.of()) + .priority(randomLong()) + .version(randomLong()) + .build(); - ComposableIndexTemplate templateWithExcludeRouting = new ComposableIndexTemplate( - List.of("test-*"), - new Template( - Settings.builder().put(excludeRoutingSetting, "hot").put(LifecycleSettings.LIFECYCLE_NAME, "testLifecycle").build(), - null, - null - ), - List.of(), - randomLong(), - randomLong(), - null - ); + ComposableIndexTemplate templateWithExcludeRouting = ComposableIndexTemplate.builder() + .indexPatterns(List.of("test-*")) + .template( + new 
Template( + Settings.builder().put(excludeRoutingSetting, "hot").put(LifecycleSettings.LIFECYCLE_NAME, "testLifecycle").build(), + null, + null + ) + ) + .componentTemplates(List.of()) + .priority(randomLong()) + .version(randomLong()) + .build(); - ComposableIndexTemplate templateWithRequireAndIncludeRoutings = new ComposableIndexTemplate( - List.of("test-*"), - new Template( - Settings.builder() - .put(requireRoutingSetting, "hot") - .put(includeRoutingSetting, "rack1") - .put(LifecycleSettings.LIFECYCLE_NAME, "testLifecycle") - .build(), - null, - null - ), - List.of(), - randomLong(), - randomLong(), - null - ); + ComposableIndexTemplate templateWithRequireAndIncludeRoutings = ComposableIndexTemplate.builder() + .indexPatterns(List.of("test-*")) + .template( + new Template( + Settings.builder() + .put(requireRoutingSetting, "hot") + .put(includeRoutingSetting, "rack1") + .put(LifecycleSettings.LIFECYCLE_NAME, "testLifecycle") + .build(), + null, + null + ) + ) + .componentTemplates(List.of()) + .priority(randomLong()) + .version(randomLong()) + .build(); - ComposableIndexTemplate templateWithoutCustomRoutings = new ComposableIndexTemplate( - List.of("test-*"), - new Template( - Settings.builder() - .put(LifecycleSettings.LIFECYCLE_NAME, "testLifecycle") - .put(IndexSettings.LIFECYCLE_PARSE_ORIGINATION_DATE, true) - .build(), - null, - null - ), - List.of(), - randomLong(), - randomLong(), - null - ); + ComposableIndexTemplate templateWithoutCustomRoutings = ComposableIndexTemplate.builder() + .indexPatterns(List.of("test-*")) + .template( + new Template( + Settings.builder() + .put(LifecycleSettings.LIFECYCLE_NAME, "testLifecycle") + .put(IndexSettings.LIFECYCLE_PARSE_ORIGINATION_DATE, true) + .build(), + null, + null + ) + ) + .componentTemplates(List.of()) + .priority(randomLong()) + .version(randomLong()) + .build(); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) .metadata( @@ -1635,14 +1640,13 @@ public void testMigrateIndexAndComponentTemplates() { null ); - ComposableIndexTemplate composableTemplateWithRequireRouting = new ComposableIndexTemplate( - List.of("test-*"), - new Template(Settings.builder().put(requireRoutingSetting, "hot").build(), null, null), - List.of("component-template-without-custom-routing"), - randomLong(), - randomLong(), - null - ); + ComposableIndexTemplate composableTemplateWithRequireRouting = ComposableIndexTemplate.builder() + .indexPatterns(List.of("test-*")) + .template(new Template(Settings.builder().put(requireRoutingSetting, "hot").build(), null, null)) + .componentTemplates(List.of("component-template-without-custom-routing")) + .priority(randomLong()) + .version(randomLong()) + .build(); ComponentTemplate compTemplateWithRequireAndIncludeRoutings = new ComponentTemplate( new Template( diff --git a/x-pack/plugin/inference/build.gradle b/x-pack/plugin/inference/build.gradle index 98f71b66c67ef..e4f4de0027073 100644 --- a/x-pack/plugin/inference/build.gradle +++ b/x-pack/plugin/inference/build.gradle @@ -8,12 +8,16 @@ apply plugin: 'elasticsearch.internal-es-plugin' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - name 'inference' + name 'x-pack-inference' description 'Configuration and evaluation of inference models' classname 'org.elasticsearch.xpack.inference.InferencePlugin' extendedPlugins = ['x-pack-core'] } +base { + archivesName = 'x-pack-inference' +} + dependencies { implementation project(path: ':libs:elasticsearch-logging') compileOnly project(":server") diff --git 
a/x-pack/plugin/inference/qa/build.gradle b/x-pack/plugin/inference/qa/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/inference/qa/inference-service-tests/build.gradle b/x-pack/plugin/inference/qa/inference-service-tests/build.gradle new file mode 100644 index 0000000000000..f87f5cb8377c5 --- /dev/null +++ b/x-pack/plugin/inference/qa/inference-service-tests/build.gradle @@ -0,0 +1,11 @@ +apply plugin: 'elasticsearch.internal-java-rest-test' + +dependencies { + compileOnly project(':x-pack:plugin:core') + javaRestTestImplementation project(path: xpackModule('inference')) + clusterPlugins project(':x-pack:plugin:inference:qa:test-service-plugin') +} + +tasks.named("javaRestTest").configure { + usesDefaultDistribution() +} diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/MockInferenceServiceIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/MockInferenceServiceIT.java new file mode 100644 index 0000000000000..5ed11958fc64e --- /dev/null +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/MockInferenceServiceIT.java @@ -0,0 +1,181 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.ClassRule; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.emptyString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; + +public class MockInferenceServiceIT extends ESRestTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.security.enabled", "true") + .plugin("org.elasticsearch.xpack.inference.mock.TestInferenceServicePlugin") + .user("x_pack_rest_user", "x-pack-test-password") + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue("x_pack_rest_user", new SecureString("x-pack-test-password".toCharArray())); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + + static String mockServiceModelConfig() { + return Strings.format(""" + { + "service": "test_service", + "service_settings": { + "model": 
"my_model", + "api_key": "abc64" + }, + "task_settings": { + "temperature": 3 + } + } + """); + } + + @SuppressWarnings("unchecked") + public void testMockService() throws IOException { + String modelId = "test-mock"; + var putModel = putModel(modelId, mockServiceModelConfig(), TaskType.SPARSE_EMBEDDING); + var getModels = getModels(modelId, TaskType.SPARSE_EMBEDDING); + var model = ((List>) getModels.get("models")).get(0); + + for (var modelMap : List.of(putModel, model)) { + assertEquals(modelId, modelMap.get("model_id")); + assertEquals(TaskType.SPARSE_EMBEDDING, TaskType.fromString((String) modelMap.get("task_type"))); + assertEquals("test_service", modelMap.get("service")); + } + + // The response is randomly generated, the input can be anything + var inference = inferOnMockService(modelId, TaskType.SPARSE_EMBEDDING, List.of(randomAlphaOfLength(10))); + assertNonEmptyInferenceResults(inference, TaskType.SPARSE_EMBEDDING); + } + + @SuppressWarnings("unchecked") + public void testMockServiceWithMultipleInputs() throws IOException { + String modelId = "test-mock-with-multi-inputs"; + putModel(modelId, mockServiceModelConfig(), TaskType.SPARSE_EMBEDDING); + + // The response is randomly generated, the input can be anything + var inference = inferOnMockService( + modelId, + TaskType.SPARSE_EMBEDDING, + List.of(randomAlphaOfLength(5), randomAlphaOfLength(10), randomAlphaOfLength(15)) + ); + + var results = (List>) inference.get("result"); + assertThat(results, hasSize(3)); + assertNonEmptyInferenceResults(inference, TaskType.SPARSE_EMBEDDING); + } + + @SuppressWarnings("unchecked") + public void testMockService_DoesNotReturnSecretsInGetResponse() throws IOException { + String modelId = "test-mock"; + var putModel = putModel(modelId, mockServiceModelConfig(), TaskType.SPARSE_EMBEDDING); + var getModels = getModels(modelId, TaskType.SPARSE_EMBEDDING); + var model = ((List>) getModels.get("models")).get(0); + + var serviceSettings = (Map) model.get("service_settings"); + assertNull(serviceSettings.get("api_key")); + assertNotNull(serviceSettings.get("model")); + + var putServiceSettings = (Map) putModel.get("service_settings"); + assertNull(putServiceSettings.get("api_key")); + assertNotNull(putServiceSettings.get("model")); + } + + private Map putModel(String modelId, String modelConfig, TaskType taskType) throws IOException { + String endpoint = Strings.format("_inference/%s/%s", taskType, modelId); + var request = new Request("PUT", endpoint); + request.setJsonEntity(modelConfig); + var reponse = client().performRequest(request); + assertOkWithErrorMessage(reponse); + return entityAsMap(reponse); + } + + public Map getModels(String modelId, TaskType taskType) throws IOException { + var endpoint = Strings.format("_inference/%s/%s", taskType, modelId); + var request = new Request("GET", endpoint); + var reponse = client().performRequest(request); + assertOkWithErrorMessage(reponse); + return entityAsMap(reponse); + } + + private Map inferOnMockService(String modelId, TaskType taskType, List input) throws IOException { + var endpoint = Strings.format("_inference/%s/%s", taskType, modelId); + var request = new Request("POST", endpoint); + + var bodyBuilder = new StringBuilder("{\"input\": ["); + for (var in : input) { + bodyBuilder.append('"').append(in).append('"').append(','); + } + // remove last comma + bodyBuilder.deleteCharAt(bodyBuilder.length() - 1); + bodyBuilder.append("]}"); + + System.out.println("body_request:" + bodyBuilder); + request.setJsonEntity(bodyBuilder.toString()); + var 
response = client().performRequest(request); + assertOkWithErrorMessage(response); + return entityAsMap(response); + } + + @SuppressWarnings("unchecked") + protected void assertNonEmptyInferenceResults(Map<String, Object> resultMap, TaskType taskType) { + if (taskType == TaskType.SPARSE_EMBEDDING) { + var results = (List<String>) resultMap.get("result"); + assertThat(results, not(empty())); + for (String result : results) { + assertThat(result, is(not(emptyString()))); + } + } else { + fail("tests with task type [" + taskType + "] are not supported yet"); + } + } + + protected static void assertOkWithErrorMessage(Response response) throws IOException { + int statusCode = response.getStatusLine().getStatusCode(); + if (statusCode == 200 || statusCode == 201) { + return; + } + + String responseStr = EntityUtils.toString(response.getEntity()); + assertThat(responseStr, response.getStatusLine().getStatusCode(), anyOf(equalTo(200), equalTo(201))); + } +} diff --git a/x-pack/plugin/inference/qa/test-service-plugin/build.gradle b/x-pack/plugin/inference/qa/test-service-plugin/build.gradle new file mode 100644 index 0000000000000..9020589f74a0c --- /dev/null +++ b/x-pack/plugin/inference/qa/test-service-plugin/build.gradle @@ -0,0 +1,19 @@ + +apply plugin: 'elasticsearch.base-internal-es-plugin' +apply plugin: 'elasticsearch.internal-java-rest-test' + +esplugin { + name 'inference-service-test' + description 'A mock inference service' + classname 'org.elasticsearch.xpack.inference.mock.TestInferenceServicePlugin' +} + +dependencies { + compileOnly project(':x-pack:plugin:core') + compileOnly project(':x-pack:plugin:inference') + compileOnly project(':x-pack:plugin:ml') +} + +tasks.named("javaRestTest").configure { + usesDefaultDistribution() +} diff --git a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/TestInferenceServicePlugin.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestInferenceServicePlugin.java similarity index 78% rename from x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/TestInferenceServicePlugin.java rename to x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestInferenceServicePlugin.java index f9e6eef5ffcc7..4d8cb18e541ff 100644 --- a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/TestInferenceServicePlugin.java +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestInferenceServicePlugin.java @@ -5,7 +5,7 @@ * 2.0.
*/ -package org.elasticsearch.xpack.inference.integration; +package org.elasticsearch.xpack.inference.mock; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.TransportVersion; @@ -16,6 +16,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.inference.InferenceResults; import org.elasticsearch.inference.InferenceService; +import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; @@ -27,17 +28,13 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.ml.inference.results.TextExpansionResultsTests; -import org.elasticsearch.xpack.inference.services.MapParsingUtils; import java.io.IOException; +import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Set; -import static org.elasticsearch.xpack.inference.services.MapParsingUtils.removeFromMapOrThrowIfNull; -import static org.elasticsearch.xpack.inference.services.MapParsingUtils.throwIfNotEmptyMap; - public class TestInferenceServicePlugin extends Plugin implements InferenceServicePlugin { @Override @@ -97,11 +94,12 @@ public TransportVersion getMinimalSupportedVersion() { public abstract static class TestInferenceServiceBase implements InferenceService { + @SuppressWarnings("unchecked") private static Map getTaskSettingsMap(Map settings) { Map taskSettingsMap; // task settings are optional if (settings.containsKey(ModelConfigurations.TASK_SETTINGS)) { - taskSettingsMap = removeFromMapOrThrowIfNull(settings, ModelConfigurations.TASK_SETTINGS); + taskSettingsMap = (Map) settings.remove(ModelConfigurations.TASK_SETTINGS); } else { taskSettingsMap = Map.of(); } @@ -114,35 +112,33 @@ public TestInferenceServiceBase(InferenceServicePlugin.InferenceServiceFactoryCo } @Override + @SuppressWarnings("unchecked") public TestServiceModel parseRequestConfig( String modelId, TaskType taskType, Map config, Set platfromArchitectures ) { - Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); + var serviceSettingsMap = (Map) config.remove(ModelConfigurations.SERVICE_SETTINGS); var serviceSettings = TestServiceSettings.fromMap(serviceSettingsMap); var secretSettings = TestSecretSettings.fromMap(serviceSettingsMap); var taskSettingsMap = getTaskSettingsMap(config); var taskSettings = TestTaskSettings.fromMap(taskSettingsMap); - throwIfNotEmptyMap(config, name()); - throwIfNotEmptyMap(serviceSettingsMap, name()); - throwIfNotEmptyMap(taskSettingsMap, name()); - return new TestServiceModel(modelId, taskType, name(), serviceSettings, taskSettings, secretSettings); } @Override + @SuppressWarnings("unchecked") public TestServiceModel parsePersistedConfig( String modelId, TaskType taskType, Map config, Map secrets ) { - Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); - Map secretSettingsMap = removeFromMapOrThrowIfNull(secrets, ModelSecrets.SECRET_SETTINGS); + var serviceSettingsMap = (Map) config.remove(ModelConfigurations.SERVICE_SETTINGS); + var secretSettingsMap = (Map) secrets.remove(ModelSecrets.SECRET_SETTINGS); var serviceSettings = TestServiceSettings.fromMap(serviceSettingsMap); var secretSettings = TestSecretSettings.fromMap(secretSettingsMap); @@ -154,9 +150,21 @@ public TestServiceModel 
parsePersistedConfig( } @Override - public void infer(Model model, String input, Map taskSettings, ActionListener listener) { + public void infer( + Model model, + List input, + Map taskSettings, + ActionListener listener + ) { switch (model.getConfigurations().getTaskType()) { - case SPARSE_EMBEDDING -> listener.onResponse(TextExpansionResultsTests.createRandomResults(1, 10)); + case SPARSE_EMBEDDING -> { + var strings = new ArrayList(); + for (int i = 0; i < input.size(); i++) { + strings.add(Integer.toString(i)); + } + + listener.onResponse(new TestResults(strings)); + } default -> listener.onFailure( new ElasticsearchStatusException( TaskType.unsupportedTaskTypeErrorMsg(model.getConfigurations().getTaskType(), name()), @@ -212,12 +220,10 @@ public record TestServiceSettings(String model) implements ServiceSettings { public static TestServiceSettings fromMap(Map map) { ValidationException validationException = new ValidationException(); - String model = MapParsingUtils.removeAsType(map, "model", String.class); + String model = (String) map.remove("model"); if (model == null) { - validationException.addValidationError( - MapParsingUtils.missingSettingErrorMsg("model", ModelConfigurations.SERVICE_SETTINGS) - ); + validationException.addValidationError("missing model"); } if (validationException.validationErrors().isEmpty() == false) { @@ -260,7 +266,7 @@ public record TestTaskSettings(Integer temperature) implements TaskSettings { private static final String NAME = "test_task_settings"; public static TestTaskSettings fromMap(Map map) { - Integer temperature = MapParsingUtils.removeAsType(map, "temperature", Integer.class); + Integer temperature = (Integer) map.remove("temperature"); return new TestTaskSettings(temperature); } @@ -301,10 +307,10 @@ public record TestSecretSettings(String apiKey) implements SecretSettings { public static TestSecretSettings fromMap(Map map) { ValidationException validationException = new ValidationException(); - String apiKey = MapParsingUtils.removeAsType(map, "api_key", String.class); + String apiKey = (String) map.remove("api_key"); if (apiKey == null) { - validationException.addValidationError(MapParsingUtils.missingSettingErrorMsg("api_key", ModelSecrets.SECRET_SETTINGS)); + validationException.addValidationError("missing api_key"); } if (validationException.validationErrors().isEmpty() == false) { @@ -341,4 +347,55 @@ public TransportVersion getMinimalSupportedVersion() { return TransportVersion.current(); // fine for these tests but will not work for cluster upgrade tests } } + + private static class TestResults implements InferenceServiceResults, InferenceResults { + + private static final String RESULTS_FIELD = "result"; + private List result; + + TestResults(List result) { + this.result = result; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(RESULTS_FIELD, result); + return builder; + } + + @Override + public String getWriteableName() { + return "test_result"; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeStringCollection(result); + } + + @Override + public String getResultsField() { + return RESULTS_FIELD; + } + + @Override + public List transformToLegacyFormat() { + return List.of(this); + } + + @Override + public Map asMap() { + return Map.of("result", result); + } + + @Override + public Map asMap(String outputField) { + return Map.of(outputField, result); + } + + @Override + public Object predictedValue() { + return 
result; + } + } } diff --git a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/MockInferenceServiceIT.java b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/MockInferenceServiceIT.java deleted file mode 100644 index 0da0340084cba..0000000000000 --- a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/MockInferenceServiceIT.java +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.inference.integration; - -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.internal.Client; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.inference.ModelConfigurations; -import org.elasticsearch.inference.ModelSecrets; -import org.elasticsearch.inference.TaskType; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.SecuritySettingsSourceField; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.core.ml.inference.results.TextExpansionResults; -import org.elasticsearch.xpack.inference.InferencePlugin; -import org.elasticsearch.xpack.inference.action.GetInferenceModelAction; -import org.elasticsearch.xpack.inference.action.InferenceAction; -import org.elasticsearch.xpack.inference.action.PutInferenceModelAction; -import org.elasticsearch.xpack.inference.registry.ModelRegistry; -import org.junit.Before; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; -import java.util.function.Function; - -import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; -import static org.elasticsearch.xpack.inference.services.MapParsingUtils.removeFromMapOrThrowIfNull; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.not; - -public class MockInferenceServiceIT extends ESIntegTestCase { - - private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); - - private ModelRegistry modelRegistry; - - @Before - public void createComponents() { - modelRegistry = new ModelRegistry(client()); - } - - @Override - protected Collection> nodePlugins() { - return List.of(InferencePlugin.class, TestInferenceServicePlugin.class); - } - - @Override - protected Function getClientWrapper() { - final Map headers = Map.of( - "Authorization", - basicAuthHeaderValue("x_pack_rest_user", SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING) - ); - // we need to wrap node clients because we do not specify a user for nodes and all requests will use the system - // user. This is ok for internal n2n stuff but the test framework does other things like wiping indices, repositories, etc - // that the system user cannot do. 
so we wrap the node client with a user that can do these things since the client() calls - // return a node client - return client -> client.filterWithHeader(headers); - } - - public void testMockService() { - String modelId = "test-mock"; - ModelConfigurations putModel = putMockService(modelId, "test_service", TaskType.SPARSE_EMBEDDING); - ModelConfigurations readModel = getModel(modelId, TaskType.SPARSE_EMBEDDING); - assertModelsAreEqual(putModel, readModel); - - // The response is randomly generated, the input can be anything - inferOnMockService(modelId, TaskType.SPARSE_EMBEDDING, randomAlphaOfLength(10)); - } - - public void testMockInClusterService() { - String modelId = "test-mock-in-cluster"; - ModelConfigurations putModel = putMockService(modelId, "test_service_in_cluster_service", TaskType.SPARSE_EMBEDDING); - ModelConfigurations readModel = getModel(modelId, TaskType.SPARSE_EMBEDDING); - assertModelsAreEqual(putModel, readModel); - - // The response is randomly generated, the input can be anything - inferOnMockService(modelId, TaskType.SPARSE_EMBEDDING, randomAlphaOfLength(10)); - } - - public void testMockService_DoesNotReturnSecretsInGetResponse() throws IOException { - String modelId = "test-mock"; - putMockService(modelId, "test_service", TaskType.SPARSE_EMBEDDING); - ModelConfigurations readModel = getModel(modelId, TaskType.SPARSE_EMBEDDING); - - assertThat(readModel.getServiceSettings(), instanceOf(TestInferenceServicePlugin.TestServiceSettings.class)); - - var serviceSettings = (TestInferenceServicePlugin.TestServiceSettings) readModel.getServiceSettings(); - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).prettyPrint(); - serviceSettings.toXContent(builder, null); - String xContentResult = Strings.toString(builder); - - assertThat(xContentResult, is(""" - { - "model" : "my_model" - }""")); - } - - public void testGetUnparsedModelMap_ForTestServiceModel_ReturnsSecretsPopulated() { - String modelId = "test-unparsed"; - putMockService(modelId, "test_service", TaskType.SPARSE_EMBEDDING); - - var listener = new PlainActionFuture(); - modelRegistry.getUnparsedModelMap(modelId, listener); - - var modelConfig = listener.actionGet(TIMEOUT); - var secretsMap = removeFromMapOrThrowIfNull(modelConfig.secrets(), ModelSecrets.SECRET_SETTINGS); - var secrets = TestInferenceServicePlugin.TestSecretSettings.fromMap(secretsMap); - assertThat(secrets.apiKey(), is("abc64")); - } - - private ModelConfigurations putMockService(String modelId, String serviceName, TaskType taskType) { - String body = Strings.format(""" - { - "service": "%s", - "service_settings": { - "model": "my_model", - "api_key": "abc64" - }, - "task_settings": { - "temperature": 3 - } - } - """, serviceName); - var request = new PutInferenceModelAction.Request( - taskType.toString(), - modelId, - new BytesArray(body.getBytes(StandardCharsets.UTF_8)), - XContentType.JSON - ); - - var response = client().execute(PutInferenceModelAction.INSTANCE, request).actionGet(); - assertEquals(serviceName, response.getModel().getService()); - - assertThat(response.getModel().getServiceSettings(), instanceOf(TestInferenceServicePlugin.TestServiceSettings.class)); - var serviceSettings = (TestInferenceServicePlugin.TestServiceSettings) response.getModel().getServiceSettings(); - assertEquals("my_model", serviceSettings.model()); - - assertThat(response.getModel().getTaskSettings(), instanceOf(TestInferenceServicePlugin.TestTaskSettings.class)); - var taskSettings = (TestInferenceServicePlugin.TestTaskSettings) 
response.getModel().getTaskSettings(); - assertEquals(3, (int) taskSettings.temperature()); - - return response.getModel(); - } - - public ModelConfigurations getModel(String modelId, TaskType taskType) { - var response = client().execute(GetInferenceModelAction.INSTANCE, new GetInferenceModelAction.Request(modelId, taskType.toString())) - .actionGet(); - return response.getModel(); - } - - private void inferOnMockService(String modelId, TaskType taskType, String input) { - var response = client().execute(InferenceAction.INSTANCE, new InferenceAction.Request(taskType, modelId, input, Map.of())) - .actionGet(); - if (taskType == TaskType.SPARSE_EMBEDDING) { - assertThat(response.getResult(), instanceOf(TextExpansionResults.class)); - var teResult = (TextExpansionResults) response.getResult(); - assertThat(teResult.getWeightedTokens(), not(empty())); - } else { - fail("test with task type [" + taskType + "] are not supported yet"); - } - } - - private void assertModelsAreEqual(ModelConfigurations model1, ModelConfigurations model2) { - // The test can't rely on Model::equals as the specific subclass - // may be different. Model loses information about it's implemented - // subtype when it is streamed across the wire. - assertEquals(model1.getModelId(), model2.getModelId()); - assertEquals(model1.getService(), model2.getService()); - assertEquals(model1.getTaskType(), model2.getTaskType()); - - // TaskSettings and Service settings are named writables so - // the actual implementing class type is not lost when streamed \ - assertEquals(model1.getServiceSettings(), model2.getServiceSettings()); - assertEquals(model1.getTaskSettings(), model2.getTaskSettings()); - } -} diff --git a/x-pack/plugin/inference/src/main/java/module-info.java b/x-pack/plugin/inference/src/main/java/module-info.java index 2aec99a27c849..801b0a1cd755c 100644 --- a/x-pack/plugin/inference/src/main/java/module-info.java +++ b/x-pack/plugin/inference/src/main/java/module-info.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -module org.elasticsearch.xpack.inference { +module org.elasticsearch.inference { requires org.elasticsearch.base; requires org.elasticsearch.server; requires org.elasticsearch.xcontent; @@ -18,8 +18,10 @@ requires org.apache.httpcomponents.httpcore.nio; requires org.apache.lucene.core; - exports org.elasticsearch.xpack.inference.rest; exports org.elasticsearch.xpack.inference.action; exports org.elasticsearch.xpack.inference.registry; + exports org.elasticsearch.xpack.inference.rest; + exports org.elasticsearch.xpack.inference.results; + exports org.elasticsearch.xpack.inference.services; exports org.elasticsearch.xpack.inference; } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java index 42ad64b9c60a3..0ba7ca1d49150 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java @@ -9,13 +9,21 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.inference.EmptyTaskSettings; +import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.SecretSettings; import org.elasticsearch.inference.ServiceSettings; import org.elasticsearch.inference.TaskSettings; +import org.elasticsearch.xpack.inference.results.LegacyTextEmbeddingResults; +import org.elasticsearch.xpack.inference.results.SparseEmbeddingResults; +import org.elasticsearch.xpack.inference.results.TextEmbeddingResults; import org.elasticsearch.xpack.inference.services.elser.ElserMlNodeServiceSettings; import org.elasticsearch.xpack.inference.services.elser.ElserMlNodeTaskSettings; import org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserSecretSettings; import org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserServiceSettings; +import org.elasticsearch.xpack.inference.services.openai.OpenAiServiceSettings; +import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsTaskSettings; +import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; import java.util.ArrayList; import java.util.List; @@ -27,8 +35,24 @@ private InferenceNamedWriteablesProvider() {} public static List getNamedWriteables() { List namedWriteables = new ArrayList<>(); - // Empty default settings - namedWriteables.add(new NamedWriteableRegistry.Entry(EmptyTaskSettings.class, EmptyTaskSettings.NAME, EmptyTaskSettings::new)); + // Legacy inference results + namedWriteables.add( + new NamedWriteableRegistry.Entry(InferenceResults.class, LegacyTextEmbeddingResults.NAME, LegacyTextEmbeddingResults::new) + ); + + // Inference results + namedWriteables.add( + new NamedWriteableRegistry.Entry(InferenceServiceResults.class, SparseEmbeddingResults.NAME, SparseEmbeddingResults::new) + ); + namedWriteables.add( + new NamedWriteableRegistry.Entry(InferenceServiceResults.class, TextEmbeddingResults.NAME, TextEmbeddingResults::new) + ); + + // Empty default task settings + namedWriteables.add(new NamedWriteableRegistry.Entry(TaskSettings.class, EmptyTaskSettings.NAME, EmptyTaskSettings::new)); + + // Default secret settings + namedWriteables.add(new NamedWriteableRegistry.Entry(SecretSettings.class, 
DefaultSecretSettings.NAME, DefaultSecretSettings::new)); // ELSER config namedWriteables.add( @@ -50,6 +74,14 @@ public static List getNamedWriteables() { new NamedWriteableRegistry.Entry(SecretSettings.class, HuggingFaceElserSecretSettings.NAME, HuggingFaceElserSecretSettings::new) ); + // OpenAI + namedWriteables.add( + new NamedWriteableRegistry.Entry(ServiceSettings.class, OpenAiServiceSettings.NAME, OpenAiServiceSettings::new) + ); + namedWriteables.add( + new NamedWriteableRegistry.Entry(TaskSettings.class, OpenAiEmbeddingsTaskSettings.NAME, OpenAiEmbeddingsTaskSettings::new) + ); + return namedWriteables; } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index 393cbd0413e5f..476f19a286d53 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -40,6 +40,7 @@ import org.elasticsearch.xpack.inference.action.TransportPutInferenceModelAction; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; import org.elasticsearch.xpack.inference.external.http.HttpSettings; +import org.elasticsearch.xpack.inference.external.http.retry.RetrySettings; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.elasticsearch.xpack.inference.registry.ModelRegistry; @@ -47,8 +48,10 @@ import org.elasticsearch.xpack.inference.rest.RestGetInferenceModelAction; import org.elasticsearch.xpack.inference.rest.RestInferenceAction; import org.elasticsearch.xpack.inference.rest.RestPutInferenceModelAction; +import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.elser.ElserMlNodeService; import org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserService; +import org.elasticsearch.xpack.inference.services.openai.OpenAiService; import java.util.Collection; import java.util.List; @@ -61,10 +64,10 @@ public class InferencePlugin extends Plugin implements ActionPlugin, InferenceSe public static final String NAME = "inference"; public static final String UTILITY_THREAD_POOL_NAME = "inference_utility"; private final Settings settings; - private final SetOnce httpRequestSenderFactory = new SetOnce<>(); // We'll keep a reference to the http manager just in case the inference services don't get closed individually private final SetOnce httpManager = new SetOnce<>(); - private final SetOnce throttlerManager = new SetOnce<>(); + private final SetOnce httpFactory = new SetOnce<>(); + private final SetOnce serviceComponents = new SetOnce<>(); public InferencePlugin(Settings settings) { this.settings = settings; @@ -100,12 +103,19 @@ public List getRestHandlers( @Override public Collection createComponents(PluginServices services) { - throttlerManager.set(new ThrottlerManager(settings, services.threadPool(), services.clusterService())); + var throttlerManager = new ThrottlerManager(settings, services.threadPool(), services.clusterService()); + serviceComponents.set(new ServiceComponents(services.threadPool(), throttlerManager, settings)); - httpManager.set(HttpClientManager.create(settings, services.threadPool(), services.clusterService(), throttlerManager.get())); - httpRequestSenderFactory.set( - new 
HttpRequestSenderFactory(services.threadPool(), httpManager.get(), services.clusterService(), settings) + httpManager.set(HttpClientManager.create(settings, services.threadPool(), services.clusterService(), throttlerManager)); + + var httpRequestSenderFactory = new HttpRequestSenderFactory( + services.threadPool(), + httpManager.get(), + services.clusterService(), + settings ); + httpFactory.set(httpRequestSenderFactory); + ModelRegistry modelRegistry = new ModelRegistry(services.client()); return List.of(modelRegistry); } @@ -157,7 +167,8 @@ public List> getSettings() { HttpSettings.getSettings(), HttpClientManager.getSettings(), HttpRequestSenderFactory.HttpRequestSender.getSettings(), - ThrottlerManager.getSettings() + ThrottlerManager.getSettings(), + RetrySettings.getSettingsDefinitions() ).flatMap(Collection::stream).collect(Collectors.toList()); } @@ -173,7 +184,11 @@ public String getFeatureDescription() { @Override public List getInferenceServiceFactories() { - return List.of(ElserMlNodeService::new, context -> new HuggingFaceElserService(httpRequestSenderFactory, throttlerManager)); + return List.of( + ElserMlNodeService::new, + context -> new HuggingFaceElserService(httpFactory, serviceComponents), + context -> new OpenAiService(httpFactory, serviceComponents) + ); } @Override @@ -183,6 +198,9 @@ public List getInferenceServiceNamedWriteables() { @Override public void close() { - IOUtils.closeWhileHandlingException(httpManager.get(), throttlerManager.get()); + var serviceComponentsRef = serviceComponents.get(); + var throttlerToClose = serviceComponentsRef != null ? serviceComponentsRef.throttlerManager() : null; + + IOUtils.closeWhileHandlingException(httpManager.get(), throttlerToClose); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/GetInferenceModelAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/GetInferenceModelAction.java index 7e47d3d93e3ca..a9b1fb32a7471 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/GetInferenceModelAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/GetInferenceModelAction.java @@ -7,26 +7,33 @@ package org.elasticsearch.xpack.inference.action; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import java.util.Objects; -public class GetInferenceModelAction extends ActionType { +public class GetInferenceModelAction extends ActionType { public static final GetInferenceModelAction INSTANCE = new GetInferenceModelAction(); public static final String NAME = "cluster:admin/xpack/inference/get"; public GetInferenceModelAction() { - super(NAME, PutInferenceModelAction.Response::new); + super(NAME, GetInferenceModelAction.Response::new); } - public static class Request extends AcknowledgedRequest { + public static class Request extends AcknowledgedRequest { private final 
String modelId; private final TaskType taskType; @@ -75,4 +82,63 @@ public int hashCode() { return Objects.hash(modelId, taskType); } } + + public static class Response extends ActionResponse implements ToXContentObject { + + private final List models; + + public Response(List models) { + this.models = models; + } + + public Response(StreamInput in) throws IOException { + super(in); + if (in.getTransportVersion().onOrAfter(TransportVersions.ML_INFERENCE_GET_MULTIPLE_MODELS)) { + models = in.readCollectionAsList(ModelConfigurations::new); + } else { + models = new ArrayList<>(); + models.add(new ModelConfigurations(in)); + } + } + + public List getModels() { + return models; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (out.getTransportVersion().onOrAfter(TransportVersions.ML_INFERENCE_GET_MULTIPLE_MODELS)) { + out.writeCollection(models); + } else { + models.get(0).writeTo(out); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startArray("models"); + for (var model : models) { + if (model != null) { + model.toXContent(builder, params); + } + } + builder.endArray(); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GetInferenceModelAction.Response response = (GetInferenceModelAction.Response) o; + return Objects.equals(models, response.models); + } + + @Override + public int hashCode() { + return Objects.hash(models); + } + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/InferenceAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/InferenceAction.java index 7938c2abd8d99..a6d0818cf1ca4 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/InferenceAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/InferenceAction.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.inference.action; import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; @@ -15,6 +16,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.ObjectParser; @@ -22,8 +24,13 @@ import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.inference.results.TextExpansionResults; +import org.elasticsearch.xpack.inference.results.LegacyTextEmbeddingResults; +import org.elasticsearch.xpack.inference.results.SparseEmbeddingResults; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import java.util.Map; import java.util.Objects; @@ -44,7 +51,7 @@ public static class Request extends ActionRequest { static final ObjectParser PARSER = new ObjectParser<>(NAME, Request.Builder::new); static { // TODO timeout - 
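+ // The input field is now parsed as an array, e.g. {"input": ["first text", "second text"]} + // (previously only a single string was accepted)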
PARSER.declareString(Request.Builder::setInput, INPUT); + PARSER.declareStringArray(Request.Builder::setInput, INPUT); PARSER.declareObject(Request.Builder::setTaskSettings, (p, c) -> p.mapOrdered(), TASK_SETTINGS); } @@ -57,10 +64,10 @@ public static Request parseRequest(String modelId, String taskType, XContentParser private final TaskType taskType; private final String modelId; - private final String input; + private final List<String> input; private final Map<String, Object> taskSettings; - public Request(TaskType taskType, String modelId, String input, Map<String, Object> taskSettings) { + public Request(TaskType taskType, String modelId, List<String> input, Map<String, Object> taskSettings) { this.taskType = taskType; this.modelId = modelId; this.input = input; @@ -71,7 +78,11 @@ public Request(StreamInput in) throws IOException { super(in); this.taskType = TaskType.fromStream(in); this.modelId = in.readString(); - this.input = in.readString(); + if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_MULTIPLE_INPUTS)) { + this.input = in.readStringCollectionAsList(); + } else { + this.input = List.of(in.readString()); + } this.taskSettings = in.readMap(); } @@ -83,7 +94,7 @@ public String getModelId() { return modelId; } - public String getInput() { + public List<String> getInput() { return input; } @@ -98,6 +109,11 @@ public ActionRequestValidationException validate() { e.addValidationError("missing input"); return e; } + if (input.isEmpty()) { + var e = new ActionRequestValidationException(); + e.addValidationError("input array is empty"); + return e; + } return null; } @@ -106,7 +122,11 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); taskType.writeTo(out); out.writeString(modelId); - out.writeString(input); + if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_MULTIPLE_INPUTS)) { + out.writeStringCollection(input); + } else { + out.writeString(input.get(0)); + } out.writeGenericMap(taskSettings); } @@ -130,7 +150,7 @@ public static class Builder { private TaskType taskType; private String modelId; - private String input; + private List<String> input; private Map<String, Object> taskSettings = Map.of(); private Builder() {} @@ -150,7 +170,7 @@ public Builder setTaskType(String taskTypeStr) { return this; } - public Builder setInput(String input) { + public Builder setInput(List<String> input) { this.input = input; return this; } @@ -168,30 +188,94 @@ public Request build() { public static class Response extends ActionResponse implements ToXContentObject { - private final InferenceResults result; + private final InferenceServiceResults results; - public Response(InferenceResults result) { - this.result = result; + public Response(InferenceServiceResults results) { + this.results = results; } public Response(StreamInput in) throws IOException { super(in); - result = in.readNamedWriteable(InferenceResults.class); + if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_SERVICE_RESULTS_ADDED)) { + results = in.readNamedWriteable(InferenceServiceResults.class); + } else if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_MULTIPLE_INPUTS)) { + // This could be List<InferenceResults> aka List<TextExpansionResults> from ml plugin for + // hugging face elser and elser or the legacy format for openai + results = transformToServiceResults(in.readNamedWriteableCollectionAsList(InferenceResults.class)); + } else { + // It should only be InferenceResults aka TextExpansionResults from ml plugin for + // hugging face elser and elser + results = transformToServiceResults(List.of(in.readNamedWriteable(InferenceResults.class))); + } } - public InferenceResults getResult() { - return result; + @SuppressWarnings("deprecation") + static InferenceServiceResults transformToServiceResults(List<? extends InferenceResults> parsedResults) { + if (parsedResults.isEmpty()) { + throw new ElasticsearchStatusException( + "Failed to transform results to response format, expected a non-empty list, please remove and re-add the service", + RestStatus.INTERNAL_SERVER_ERROR + ); + } + + if (parsedResults.get(0) instanceof LegacyTextEmbeddingResults openaiResults) { + if (parsedResults.size() > 1) { + throw new ElasticsearchStatusException( + "Failed to transform results to response format, malformed text embedding result," + + " please remove and re-add the service", + RestStatus.INTERNAL_SERVER_ERROR + ); + } + + return openaiResults.transformToTextEmbeddingResults(); + } else if (parsedResults.get(0) instanceof TextExpansionResults) { + return transformToSparseEmbeddingResult(parsedResults); + } else { + throw new ElasticsearchStatusException( + "Failed to transform results to response format, unknown embedding type received," + + " please remove and re-add the service", + RestStatus.INTERNAL_SERVER_ERROR + ); + } + } + + private static SparseEmbeddingResults transformToSparseEmbeddingResult(List<? extends InferenceResults> parsedResults) { + List<TextExpansionResults> textExpansionResults = new ArrayList<>(parsedResults.size()); + + for (InferenceResults result : parsedResults) { + if (result instanceof TextExpansionResults textExpansion) { + textExpansionResults.add(textExpansion); + } else { + throw new ElasticsearchStatusException( + "Failed to transform results to response format, please remove and re-add the service", + RestStatus.INTERNAL_SERVER_ERROR + ); + } + } + + return SparseEmbeddingResults.of(textExpansionResults); + } + + public InferenceServiceResults getResults() { + return results; } @Override + public void writeTo(StreamOutput out) throws IOException { - out.writeNamedWriteable(result); + if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_SERVICE_RESULTS_ADDED)) { + out.writeNamedWriteable(results); + } else if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_MULTIPLE_INPUTS)) { + // This includes the legacy openai response format of List<LegacyTextEmbeddingResults> and hugging face elser and elser + out.writeNamedWriteableCollection(results.transformToLegacyFormat()); + } else { + out.writeNamedWriteable(results.transformToLegacyFormat().get(0)); + } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - result.toXContent(builder, params); + results.toXContent(builder, params); builder.endObject(); return builder; } @@ -201,12 +285,12 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Response response = (Response) o; - return Objects.equals(result, response.result); + return Objects.equals(results, response.results); } @Override public int hashCode() { - return Objects.hash(result); + return Objects.hash(results); } } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceModelAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceModelAction.java index 0d2eef5a7c675..90fe9667c33aa 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceModelAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceModelAction.java @@
-20,9 +20,11 @@ import org.elasticsearch.xpack.inference.UnparsedModel; import org.elasticsearch.xpack.inference.registry.ModelRegistry; +import java.util.List; + public class TransportGetInferenceModelAction extends HandledTransportAction< GetInferenceModelAction.Request, - PutInferenceModelAction.Response> { + GetInferenceModelAction.Response> { private final ModelRegistry modelRegistry; private final InferenceServiceRegistry serviceRegistry; @@ -49,7 +51,7 @@ public TransportGetInferenceModelAction( protected void doExecute( Task task, GetInferenceModelAction.Request request, - ActionListener<PutInferenceModelAction.Response> listener + ActionListener<GetInferenceModelAction.Response> listener ) { modelRegistry.getUnparsedModelMap(request.getModelId(), ActionListener.wrap(modelConfigMap -> { var unparsedModel = UnparsedModel.unparsedModelFromMap(modelConfigMap.config(), modelConfigMap.secrets()); @@ -67,7 +69,7 @@ protected void doExecute( } var model = service.get() .parsePersistedConfig(unparsedModel.modelId(), unparsedModel.taskType(), unparsedModel.settings(), unparsedModel.secrets()); - listener.onResponse(new PutInferenceModelAction.Response(model.getConfigurations())); + listener.onResponse(new GetInferenceModelAction.Response(List.of(model.getConfigurations()))); }, listener::onFailure)); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportInferenceAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportInferenceAction.java index 29909163d7b3b..7718739420cf1 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportInferenceAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportInferenceAction.java @@ -83,8 +83,8 @@ private void inferOnService( InferenceService service, ActionListener<InferenceAction.Response> listener ) { - service.infer(model, request.getInput(), request.getTaskSettings(), ActionListener.wrap(inferenceResult -> { - listener.onResponse(new InferenceAction.Response(inferenceResult)); + service.infer(model, request.getInput(), request.getTaskSettings(), ActionListener.wrap(inferenceResults -> { + listener.onResponse(new InferenceAction.Response(inferenceResults)); }, listener::onFailure)); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/ActionUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/ActionUtils.java new file mode 100644 index 0000000000000..856146fafcb45 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/ActionUtils.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.inference.external.action; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.rest.RestStatus; + +public class ActionUtils { + + public static ActionListener<InferenceServiceResults> wrapFailuresInElasticsearchException( + String errorMessage, + ActionListener<InferenceServiceResults> listener + ) { + return ActionListener.wrap(listener::onResponse, e -> { + var unwrappedException = ExceptionsHelper.unwrapCause(e); + + if (unwrappedException instanceof ElasticsearchException esException) { + listener.onFailure(esException); + } else { + listener.onFailure(createInternalServerError(unwrappedException, errorMessage)); + } + }); + } + + public static ElasticsearchStatusException createInternalServerError(Throwable e, String message) { + return new ElasticsearchStatusException(message, RestStatus.INTERNAL_SERVER_ERROR, e); + } + + private ActionUtils() {} +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/ExecutableAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/ExecutableAction.java index bc52a04ab7209..9991abf71fb12 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/ExecutableAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/ExecutableAction.java @@ -8,11 +8,13 @@ package org.elasticsearch.xpack.inference.external.action; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.inference.InferenceServiceResults; + +import java.util.List; /** * Defines an inference request to a 3rd party service. The success or failure response is communicated through the provided listener.
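 *
 * A usage sketch (illustrative only; the action instance and both handlers are assumptions, not part of this interface):
 * <pre>{@code
 * ExecutableAction action = ...; // e.g. built by an action creator for a specific service
 * action.execute(List.of("some input"), ActionListener.wrap(
 *     results -> handleResults(results), // hypothetical success handler
 *     e -> handleFailure(e)              // hypothetical failure handler
 * ));
 * }</pre>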
*/ public interface ExecutableAction { - void execute(String input, ActionListener<InferenceResults> listener); + void execute(List<String> input, ActionListener<InferenceServiceResults> listener); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceElserAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceElserAction.java index acc3ab57ce9eb..fb648e2aabcfd 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceElserAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceElserAction.java @@ -8,40 +8,46 @@ package org.elasticsearch.xpack.inference.external.action.huggingface; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.inference.InferenceResults; -import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.external.huggingface.HuggingFaceAccount; import org.elasticsearch.xpack.inference.external.huggingface.HuggingFaceClient; import org.elasticsearch.xpack.inference.external.request.huggingface.HuggingFaceElserRequest; import org.elasticsearch.xpack.inference.external.request.huggingface.HuggingFaceElserRequestEntity; -import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserModel; +import java.util.List; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.createInternalServerError; +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.wrapFailuresInElasticsearchException; + public class HuggingFaceElserAction implements ExecutableAction { private final HuggingFaceAccount account; private final HuggingFaceClient client; + private final String errorMessage; - public HuggingFaceElserAction(Sender sender, HuggingFaceElserModel model, ThrottlerManager throttlerManager) { - this.client = new HuggingFaceClient(sender, throttlerManager); + public HuggingFaceElserAction(Sender sender, HuggingFaceElserModel model, ServiceComponents serviceComponents) { + this.client = new HuggingFaceClient(sender, serviceComponents); this.account = new HuggingFaceAccount(model.getServiceSettings().uri(), model.getSecretSettings().apiKey()); + this.errorMessage = format("Failed to send ELSER Hugging Face request to [%s]", model.getServiceSettings().uri().toString()); } - public void execute(String input, ActionListener<InferenceResults> listener) { + @Override + public void execute(List<String> input, ActionListener<InferenceServiceResults> listener) { try { HuggingFaceElserRequest request = new HuggingFaceElserRequest(account, new HuggingFaceElserRequestEntity(input)); + ActionListener<InferenceServiceResults> wrappedListener = wrapFailuresInElasticsearchException(errorMessage, listener); - client.send(request, listener); + client.send(request, wrappedListener); } catch (ElasticsearchException e) { listener.onFailure(e); } catch (Exception e) { - listener.onFailure( - new ElasticsearchStatusException("Failed to send request ELSER Hugging
Face request", RestStatus.INTERNAL_SERVER_ERROR, e) - ); + listener.onFailure(createInternalServerError(e, errorMessage)); } } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreator.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreator.java new file mode 100644 index 0000000000000..6c423760d0b35 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreator.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.action.openai; + +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModel; + +import java.util.Map; +import java.util.Objects; + +/** + * Provides a way to construct an {@link ExecutableAction} using the visitor pattern based on the openai model type. + */ +public class OpenAiActionCreator implements OpenAiActionVisitor { + private final Sender sender; + private final ServiceComponents serviceComponents; + + public OpenAiActionCreator(Sender sender, ServiceComponents serviceComponents) { + this.sender = Objects.requireNonNull(sender); + this.serviceComponents = Objects.requireNonNull(serviceComponents); + } + + @Override + public ExecutableAction create(OpenAiEmbeddingsModel model, Map taskSettings) { + var overriddenModel = model.overrideWith(taskSettings); + + return new OpenAiEmbeddingsAction(sender, overriddenModel, serviceComponents); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionVisitor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionVisitor.java new file mode 100644 index 0000000000000..52d9f2e2132a7 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionVisitor.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.action.openai; + +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModel; + +import java.util.Map; + +public interface OpenAiActionVisitor { + ExecutableAction create(OpenAiEmbeddingsModel model, Map<String, Object> taskSettings); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsAction.java new file mode 100644 index 0000000000000..71e6bf98838fc --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsAction.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.action.openai; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.external.openai.OpenAiAccount; +import org.elasticsearch.xpack.inference.external.openai.OpenAiClient; +import org.elasticsearch.xpack.inference.external.request.openai.OpenAiEmbeddingsRequest; +import org.elasticsearch.xpack.inference.external.request.openai.OpenAiEmbeddingsRequestEntity; +import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModel; + +import java.net.URI; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.createInternalServerError; +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.wrapFailuresInElasticsearchException; + +public class OpenAiEmbeddingsAction implements ExecutableAction { + + private final OpenAiAccount account; + private final OpenAiClient client; + private final OpenAiEmbeddingsModel model; + private final String errorMessage; + + public OpenAiEmbeddingsAction(Sender sender, OpenAiEmbeddingsModel model, ServiceComponents serviceComponents) { + this.model = Objects.requireNonNull(model); + this.account = new OpenAiAccount( + this.model.getServiceSettings().uri(), + this.model.getServiceSettings().organizationId(), + this.model.getSecretSettings().apiKey() + ); + this.client = new OpenAiClient(Objects.requireNonNull(sender), Objects.requireNonNull(serviceComponents)); + this.errorMessage = getErrorMessage(this.model.getServiceSettings().uri()); + } + + private static String getErrorMessage(@Nullable URI uri) { + if (uri != null) { + return format("Failed to send OpenAI embeddings request to [%s]", uri.toString()); + } + + return "Failed to send OpenAI embeddings request"; + } + + @Override + public void execute(List<String> input, ActionListener<InferenceServiceResults> listener) { + try { + OpenAiEmbeddingsRequest request = new OpenAiEmbeddingsRequest( + account, + new OpenAiEmbeddingsRequestEntity(input, model.getTaskSettings().model(), model.getTaskSettings().user()) + ); + ActionListener<InferenceServiceResults> wrappedListener = wrapFailuresInElasticsearchException(errorMessage, listener); + + client.send(request, wrappedListener); + } catch (ElasticsearchException e) { + listener.onFailure(e); + } catch (Exception e) { + listener.onFailure(createInternalServerError(e, errorMessage)); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClient.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClient.java index 1dac8153da4f1..73d1fa1c32568 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClient.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClient.java @@ -30,6 +30,9 @@ import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; +/** + * Provides a wrapper around a {@link CloseableHttpAsyncClient} to move the responses to a separate thread for processing. + */ public class HttpClient implements Closeable { private static final Logger logger = LogManager.getLogger(HttpClient.class); @@ -92,7 +95,7 @@ public void completed(HttpResponse response) { @Override public void failed(Exception ex) { - throttlerManager.getThrottler().warn(logger, format("Request [%s] failed", request.getRequestLine()), ex); + throttlerManager.warn(logger, format("Request [%s] failed", request.getRequestLine()), ex); failUsingUtilityThread(ex, listener); } @@ -108,7 +111,7 @@ private void respondUsingUtilityThread(HttpResponse response, HttpUriRequest req try { listener.onResponse(HttpResult.create(settings.getMaxResponseSize(), response)); } catch (Exception e) { - throttlerManager.getThrottler().warn(logger, format("Failed to create http result for [%s]", request.getRequestLine()), e); + throttlerManager.warn(logger, format("Failed to create http result for [%s]", request.getRequestLine()), e); listener.onFailure(e); } }); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java index 494e0f7c60dff..7cc4a3cb24502 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java @@ -38,9 +38,8 @@ public class HttpClientManager implements Closeable { public static final Setting<Integer> MAX_CONNECTIONS = Setting.intSetting( "xpack.inference.http.max_connections", // TODO pick a reasonable value here - 20, - 1, - 1000, + 20, // default + 1, // min Setting.Property.NodeScope, Setting.Property.Dynamic ); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpResult.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpResult.java index 82256b51cf83e..6c79daa2dedc0 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpResult.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpResult.java @@ -43,4 +43,8 @@ private static byte[] limitBody(ByteSizeValue maxResponseSize, HttpResponse resp
Objects.requireNonNull(response); Objects.requireNonNull(body); } + + public boolean isBodyEmpty() { + return body().length == 0; + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpUtils.java new file mode 100644 index 0000000000000..b6dbc6d6f2911 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpUtils.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http; + +import org.apache.http.client.methods.HttpRequestBase; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; + +import static org.elasticsearch.core.Strings.format; + +public class HttpUtils { + + public static void checkForFailureStatusCode( + ThrottlerManager throttlerManager, + Logger logger, + HttpRequestBase request, + HttpResult result + ) { + if (result.response().getStatusLine().getStatusCode() >= 300) { + String message = getStatusCodeErrorMessage(request, result); + + throttlerManager.warn(logger, message); + + throw new IllegalStateException(message); + } + } + + private static String getStatusCodeErrorMessage(HttpRequestBase request, HttpResult result) { + int statusCode = result.response().getStatusLine().getStatusCode(); + + if (statusCode >= 400) { + return format( + "Received a failure status code for request [%s] status [%s]", + request.getRequestLine(), + result.response().getStatusLine().getStatusCode() + ); + } else if (statusCode >= 300) { + return format( + "Unhandled redirection for request [%s] status [%s]", + request.getRequestLine(), + result.response().getStatusLine().getStatusCode() + ); + } else { + return ""; + } + } + + public static void checkForEmptyBody(ThrottlerManager throttlerManager, Logger logger, HttpRequestBase request, HttpResult result) { + if (result.isBodyEmpty()) { + String message = format("Response body was empty for request [%s]", request.getRequestLine()); + throttlerManager.warn(logger, message); + throw new IllegalStateException(message); + } + } + + private HttpUtils() {} +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/IdleConnectionEvictor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/IdleConnectionEvictor.java index 295c9b7b17946..f826ce0bcae4d 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/IdleConnectionEvictor.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/IdleConnectionEvictor.java @@ -10,6 +10,7 @@ import org.apache.http.nio.conn.NHttpClientConnectionManager; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; @@ -45,7 +46,7 @@ public IdleConnectionEvictor( ThreadPool threadPool, NHttpClientConnectionManager connectionManager, TimeValue sleepTime, - TimeValue maxIdleTime + @Nullable TimeValue maxIdleTime ) { 
this.threadPool = Objects.requireNonNull(threadPool); this.connectionManager = Objects.requireNonNull(connectionManager); @@ -66,7 +67,7 @@ public synchronized void start() { private void startInternal() { logger.debug(() -> format("Idle connection evictor started with wait time: [%s] max idle: [%s]", sleepTime, maxIdleTime)); - Scheduler.Cancellable task = threadPool.scheduleWithFixedDelay(() -> { + cancellableTask.set(threadPool.scheduleWithFixedDelay(() -> { try { connectionManager.closeExpiredConnections(); if (maxIdleTime.get() != null) { @@ -75,9 +76,7 @@ private void startInternal() { } catch (Exception e) { logger.warn("HTTP connection eviction failed", e); } - }, sleepTime, threadPool.executor(UTILITY_THREAD_POOL_NAME)); - - cancellableTask.set(task); + }, sleepTime, threadPool.executor(UTILITY_THREAD_POOL_NAME))); } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/AlwaysRetryingResponseHandler.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/AlwaysRetryingResponseHandler.java new file mode 100644 index 0000000000000..49d0d768d9089 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/AlwaysRetryingResponseHandler.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.retry; + +import org.apache.http.client.methods.HttpRequestBase; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.core.CheckedFunction; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.http.HttpUtils.checkForEmptyBody; +import static org.elasticsearch.xpack.inference.external.http.HttpUtils.checkForFailureStatusCode; + +/** + * Provides a {@link ResponseHandler} which flags all errors as retryable. 
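+ *
+ * A construction sketch (the request-type label and the parse function target are illustrative assumptions):
+ * <pre>{@code
+ * ResponseHandler handler = new AlwaysRetryingResponseHandler(
+ *     "example request",
+ *     result -> SomeResponseEntity.fromResponse(result) // hypothetical parse function
+ * );
+ * }</pre>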
+ */ +public class AlwaysRetryingResponseHandler implements ResponseHandler { + protected final String requestType; + private final CheckedFunction<HttpResult, InferenceServiceResults, IOException> parseFunction; + + public AlwaysRetryingResponseHandler( + String requestType, + CheckedFunction<HttpResult, InferenceServiceResults, IOException> parseFunction + ) { + this.requestType = Objects.requireNonNull(requestType); + this.parseFunction = Objects.requireNonNull(parseFunction); + } + + public void validateResponse(ThrottlerManager throttlerManager, Logger logger, HttpRequestBase request, HttpResult result) + throws RetryException { + try { + checkForFailureStatusCode(throttlerManager, logger, request, result); + checkForEmptyBody(throttlerManager, logger, request, result); + } catch (Exception e) { + throw new RetryException(true, e); + } + } + + public String getRequestType() { + return requestType; + } + + @Override + public InferenceServiceResults parseResult(HttpResult result) throws RetryException { + try { + return parseFunction.apply(result); + } catch (Exception e) { + throw new RetryException(true, e); + } + } + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/ResponseHandler.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/ResponseHandler.java new file mode 100644 index 0000000000000..93921295bc677 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/ResponseHandler.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.retry; + +import org.apache.http.client.methods.HttpRequestBase; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; + +/** + * A contract for clients to specify behavior for handling http responses. Clients can pass this contract to the retry sender to parse + * the response and help with logging. + */ +public interface ResponseHandler { + + /** + * A method for checking the response from the 3rd party service. This could check the status code and that the response body + * is in the correct form. + * + * @param throttlerManager a throttler for the logs + * @param logger the logger to use for logging + * @param request the original request + * @param result the response from the server + * @throws RetryException if the response is invalid + */ + void validateResponse(ThrottlerManager throttlerManager, Logger logger, HttpRequestBase request, HttpResult result) + throws RetryException; + + /** + * A method for parsing the response from the server. + * @param result The wrapped response from the server. + * @return the parsed inference results + * @throws RetryException if a parsing error occurs + */ + InferenceServiceResults parseResult(HttpResult result) throws RetryException; + + /** + * A string to uniquely identify the type of request that is being handled. This allows loggers to clarify which type of request + * might have failed. + * + * @return a {@link String} indicating the request type that was sent (e.g. elser, elser hugging face etc) + */ + String getRequestType(); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/Retrier.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/Retrier.java new file mode 100644 index 0000000000000..4f97b6cc4ae47 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/Retrier.java @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.retry; + +import org.apache.http.client.methods.HttpRequestBase; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; + +public interface Retrier { + void send(HttpRequestBase request, ResponseHandler responseHandler, ActionListener<InferenceServiceResults> listener); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryException.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryException.java new file mode 100644 index 0000000000000..3fe8225927f06 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryException.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.retry; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchWrapperException; + +public class RetryException extends ElasticsearchException implements ElasticsearchWrapperException { + private final boolean shouldRetry; + + public RetryException(boolean shouldRetry, Throwable cause) { + super(cause); + this.shouldRetry = shouldRetry; + } + + public RetryException(boolean shouldRetry, String msg) { + super(msg); + this.shouldRetry = shouldRetry; + } + + public RetryException(boolean shouldRetry, String msg, Throwable cause) { + super(msg, cause); + this.shouldRetry = shouldRetry; + } + + public boolean shouldRetry() { + return shouldRetry; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetrySettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetrySettings.java new file mode 100644 index 0000000000000..040903a35ab08 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetrySettings.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.inference.external.http.retry; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; + +import java.util.List; + +public class RetrySettings { + + public static final Setting<TimeValue> RETRY_INITIAL_DELAY_SETTING = Setting.timeSetting( + "xpack.inference.http.retry.initial_delay", + TimeValue.timeValueSeconds(1), + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + public static final Setting<TimeValue> RETRY_MAX_DELAY_BOUND_SETTING = Setting.timeSetting( + "xpack.inference.http.retry.max_delay_bound", + TimeValue.timeValueSeconds(5), + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + public static final Setting<TimeValue> RETRY_TIMEOUT_SETTING = Setting.timeSetting( + "xpack.inference.http.retry.timeout", + TimeValue.timeValueSeconds(30), + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private final InternalSettings internalSettings; + + public RetrySettings(Settings settings) { + var initialDelay = RETRY_INITIAL_DELAY_SETTING.get(settings); + var maxDelayBound = RETRY_MAX_DELAY_BOUND_SETTING.get(settings); + var timeoutValue = RETRY_TIMEOUT_SETTING.get(settings); + this.internalSettings = new InternalSettings(initialDelay, maxDelayBound, timeoutValue); + } + + public record InternalSettings(TimeValue initialDelay, TimeValue maxDelayBound, TimeValue timeoutValue) {} + + public InternalSettings getSettings() { + return internalSettings; + } + + public static List<Setting<?>> getSettingsDefinitions() { + return List.of(RETRY_INITIAL_DELAY_SETTING, RETRY_MAX_DELAY_BOUND_SETTING, RETRY_TIMEOUT_SETTING); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java new file mode 100644 index 0000000000000..e91349cfbc2b7 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java @@ -0,0 +1,158 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.inference.external.http.retry; + +import org.apache.http.client.methods.HttpRequestBase; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.RetryableAction; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; + +import java.io.IOException; +import java.util.Objects; +import java.util.concurrent.Executor; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; + +public class RetryingHttpSender implements Retrier { + private final Sender sender; + private final ThrottlerManager throttlerManager; + private final Logger logger; + private final RetrySettings retrySettings; + private final ThreadPool threadPool; + private final Executor executor; + + public RetryingHttpSender( + Sender sender, + ThrottlerManager throttlerManager, + Logger logger, + RetrySettings retrySettings, + ThreadPool threadPool + ) { + this(sender, throttlerManager, logger, retrySettings, threadPool, threadPool.executor(UTILITY_THREAD_POOL_NAME)); + } + + // For testing only + RetryingHttpSender( + Sender sender, + ThrottlerManager throttlerManager, + Logger logger, + RetrySettings retrySettings, + ThreadPool threadPool, + Executor executor + ) { + this.sender = Objects.requireNonNull(sender); + this.throttlerManager = Objects.requireNonNull(throttlerManager); + this.logger = Objects.requireNonNull(logger); + this.retrySettings = Objects.requireNonNull(retrySettings); + this.threadPool = Objects.requireNonNull(threadPool); + this.executor = Objects.requireNonNull(executor); + } + + private class InternalRetrier extends RetryableAction<InferenceServiceResults> { + private final HttpRequestBase request; + private final ResponseHandler responseHandler; + + InternalRetrier(HttpRequestBase request, ResponseHandler responseHandler, ActionListener<InferenceServiceResults> listener) { + super( + logger, + threadPool, + retrySettings.getSettings().initialDelay(), + retrySettings.getSettings().maxDelayBound(), + retrySettings.getSettings().timeoutValue(), + listener, + executor + ); + this.request = request; + this.responseHandler = responseHandler; + } + + @Override + public void tryAction(ActionListener<InferenceServiceResults> listener) { + ActionListener<HttpResult> responseListener = ActionListener.wrap(result -> { + try { + responseHandler.validateResponse(throttlerManager, logger, request, result); + InferenceServiceResults inferenceResults = responseHandler.parseResult(result); + + listener.onResponse(inferenceResults); + } catch (Exception e) { + logException(request, result, responseHandler.getRequestType(), e); + listener.onFailure(e); + } + }, e -> { + logException(request, responseHandler.getRequestType(), e); + listener.onFailure(transformIfRetryable(e)); + }); + + sender.send(request, responseListener); + } + + @Override + public boolean shouldRetry(Exception e) { + if (e instanceof RetryException retry) { + return retry.shouldRetry(); + } + + return false; + } + + /** + * If the connection gets closed by the server, or because the connection's time to live is exceeded, we'll likely get a + * {@link org.apache.http.ConnectionClosedException}, which is a child of IOException. For now, + * we'll consider all IOExceptions retryable because something failed while we were trying to send the request. + * @param e the Exception received while sending the request + * @return a {@link RetryException} if this exception can be retried + */ + private Exception transformIfRetryable(Exception e) { + var exceptionToReturn = e; + if (e instanceof IOException) { + exceptionToReturn = new RetryException(true, e); + } + + return exceptionToReturn; + } + } + + @Override + public void send(HttpRequestBase request, ResponseHandler responseHandler, ActionListener<InferenceServiceResults> listener) { + InternalRetrier retrier = new InternalRetrier(request, responseHandler, listener); + retrier.run(); + } + + private void logException(HttpRequestBase request, String requestType, Exception exception) { + var causeException = ExceptionsHelper.unwrapCause(exception); + + throttlerManager.warn( + logger, + format("Failed while sending request [%s] of type [%s]", request.getRequestLine(), requestType), + causeException + ); + } + + private void logException(HttpRequestBase request, HttpResult result, String requestType, Exception exception) { + var causeException = ExceptionsHelper.unwrapCause(exception); + + throttlerManager.warn( + logger, + format( + "Failed to process the response for request [%s] of type [%s] with status [%s] [%s]", + request.getRequestLine(), + requestType, + result.response().getStatusLine().getStatusCode(), + result.response().getStatusLine().getReasonPhrase() + ), + causeException + ); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestExecutorService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestExecutorService.java index 328afb264c4ab..1e066410506bc 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestExecutorService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestExecutorService.java @@ -59,27 +59,41 @@ class HttpRequestExecutorService implements ExecutorService { private final HttpClientContext httpContext; private final HttpClient httpClient; private final ThreadPool threadPool; + private final CountDownLatch startupLatch; @SuppressForbidden(reason = "wraps a queue and handles errors appropriately") - HttpRequestExecutorService(String serviceName, HttpClient httpClient, ThreadPool threadPool) { - this(serviceName, httpClient, threadPool, new LinkedBlockingQueue<>()); + HttpRequestExecutorService(String serviceName, HttpClient httpClient, ThreadPool threadPool, @Nullable CountDownLatch startupLatch) { + this(serviceName, httpClient, threadPool, new LinkedBlockingQueue<>(), startupLatch); } @SuppressForbidden(reason = "wraps a queue and handles errors appropriately") - HttpRequestExecutorService(String serviceName, HttpClient httpClient, ThreadPool threadPool, int capacity) { - this(serviceName, httpClient, threadPool, new LinkedBlockingQueue<>(capacity)); + HttpRequestExecutorService( + String serviceName, + HttpClient httpClient, + ThreadPool threadPool, + int capacity, + @Nullable CountDownLatch startupLatch + ) { + this(serviceName, httpClient, threadPool, new LinkedBlockingQueue<>(capacity), startupLatch); } /** * This constructor should only be used directly for testing.
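 * For example, a test might supply its own queue and skip the startup latch (a minimal sketch; the names are illustrative):
 * <pre>{@code
 * var service = new HttpRequestExecutorService("test_service", httpClient, threadPool, new LinkedBlockingQueue<>(), null);
 * }</pre>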
*/ @SuppressForbidden(reason = "wraps a queue and handles errors appropriately") - HttpRequestExecutorService(String serviceName, HttpClient httpClient, ThreadPool threadPool, BlockingQueue queue) { + HttpRequestExecutorService( + String serviceName, + HttpClient httpClient, + ThreadPool threadPool, + BlockingQueue queue, + @Nullable CountDownLatch startupLatch + ) { this.serviceName = Objects.requireNonNull(serviceName); this.httpClient = Objects.requireNonNull(httpClient); this.threadPool = Objects.requireNonNull(threadPool); this.httpContext = HttpClientContext.create(); this.queue = queue; + this.startupLatch = startupLatch; } /** @@ -87,6 +101,8 @@ class HttpRequestExecutorService implements ExecutorService { */ public void start() { try { + signalStartInitiated(); + while (running.get()) { handleTasks(); } @@ -99,6 +115,12 @@ public void start() { } } + private void signalStartInitiated() { + if (startupLatch != null) { + startupLatch.countDown(); + } + } + /** * Protects the task retrieval logic from an unexpected exception. * diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactory.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactory.java index 40adc9c4a8bea..acc7a0b3f6077 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactory.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactory.java @@ -24,6 +24,8 @@ import java.io.IOException; import java.util.List; import java.util.Objects; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import static org.elasticsearch.core.Strings.format; @@ -50,7 +52,7 @@ public HttpRequestSenderFactory( this.settings = Objects.requireNonNull(settings); } - public HttpRequestSender createSender(String serviceName) { + public Sender createSender(String serviceName) { return new HttpRequestSender(serviceName, threadPool, httpClientManager, clusterService, settings); } @@ -60,6 +62,7 @@ public HttpRequestSender createSender(String serviceName) { */ public static final class HttpRequestSender implements Sender { private static final Logger logger = LogManager.getLogger(HttpRequestSender.class); + private static final TimeValue START_COMPLETED_WAIT_TIME = TimeValue.timeValueSeconds(5); /** * The maximum time a request can take. 
The timer starts once a request is enqueued and continues until a response is @@ -78,6 +81,7 @@ public static final class HttpRequestSender implements Sender { private final HttpRequestExecutorService service; private final AtomicBoolean started = new AtomicBoolean(false); private volatile TimeValue maxRequestTimeout; + private final CountDownLatch startCompleted = new CountDownLatch(2); private HttpRequestSender( String serviceName, @@ -88,7 +92,7 @@ private HttpRequestSender( ) { this.threadPool = Objects.requireNonNull(threadPool); this.manager = Objects.requireNonNull(httpClientManager); - service = new HttpRequestExecutorService(serviceName, manager.getHttpClient(), threadPool); + service = new HttpRequestExecutorService(serviceName, manager.getHttpClient(), threadPool, startCompleted); this.maxRequestTimeout = MAX_REQUEST_TIMEOUT.get(settings); addSettingsUpdateConsumers(clusterService); @@ -109,8 +113,11 @@ void setMaxRequestTimeout(TimeValue maxRequestTimeout) { */ public void start() { if (started.compareAndSet(false, true)) { + // The manager must be started before the executor service. That way we guarantee that the http client + // is ready prior to the service attempting to use the http client to send a request manager.start(); threadPool.executor(UTILITY_THREAD_POOL_NAME).execute(service::start); + startCompleted.countDown(); } } @@ -130,9 +137,20 @@ public void close() throws IOException { */ public void send(HttpRequestBase request, @Nullable TimeValue timeout, ActionListener listener) { assert started.get() : "call start() before sending a request"; + waitForStartToComplete(); service.send(request, timeout, listener); } + private void waitForStartToComplete() { + try { + if (startCompleted.await(START_COMPLETED_WAIT_TIME.getSeconds(), TimeUnit.SECONDS) == false) { + throw new IllegalStateException("Http sender startup did not complete in time"); + } + } catch (InterruptedException e) { + throw new IllegalStateException("Http sender interrupted while waiting for startup to complete"); + } + } + /** * Send a request at some point in the future. The timeout used is retrieved from the settings. 
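 *
 * A calling sketch (the request and both handlers are illustrative assumptions, not part of this class):
 * <pre>{@code
 * sender.start();
 * sender.send(new HttpGet("https://example.com"), ActionListener.wrap(
 *     httpResult -> handleResult(httpResult), // hypothetical success handler
 *     e -> handleFailure(e)                   // hypothetical failure handler
 * ));
 * }</pre>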
* @param request the http request to send @@ -140,6 +158,7 @@ public void send(HttpRequestBase request, @Nullable TimeValue timeout, ActionLis */ public void send(HttpRequestBase request, ActionListener listener) { assert started.get() : "call start() before sending a request"; + waitForStartToComplete(); service.send(request, maxRequestTimeout, listener); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTask.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTask.java index 82ef0bcc7bab3..5875126190e5d 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTask.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTask.java @@ -131,17 +131,22 @@ public boolean isCancelled() { }; } - private record Command(HttpClient httpClient, HttpUriRequest request, HttpClientContext context, ActionListener listener) - implements - Runnable { + private record Command( + HttpClient httpClient, + HttpUriRequest requestToSend, + HttpClientContext context, + ActionListener resultListener + ) implements Runnable { @Override public void run() { try { - httpClient.send(request, context, listener); + httpClient.send(requestToSend, context, resultListener); } catch (Exception e) { - logger.warn(format("Failed to send request [%s] via the http client", request.getRequestLine()), e); - listener.onFailure(new ElasticsearchException(format("Failed to send request [%s]", request.getRequestLine()), e)); + logger.warn(format("Failed to send request [%s] via the http client", requestToSend.getRequestLine()), e); + resultListener.onFailure( + new ElasticsearchException(format("Failed to send request [%s]", requestToSend.getRequestLine()), e) + ); } } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/huggingface/HuggingFaceClient.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/huggingface/HuggingFaceClient.java index ed6e5c200b367..f24a5529a4663 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/huggingface/HuggingFaceClient.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/huggingface/HuggingFaceClient.java @@ -7,46 +7,42 @@ package org.elasticsearch.xpack.inference.external.huggingface; -import org.apache.http.client.methods.HttpRequestBase; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.inference.InferenceResults; -import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.http.retry.AlwaysRetryingResponseHandler; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; +import org.elasticsearch.xpack.inference.external.http.retry.RetrySettings; +import org.elasticsearch.xpack.inference.external.http.retry.RetryingHttpSender; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.external.request.huggingface.HuggingFaceElserRequest; import org.elasticsearch.xpack.inference.external.response.huggingface.HuggingFaceElserResponseEntity; -import 
org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.elasticsearch.xpack.inference.services.ServiceComponents; import java.io.IOException; -import static org.elasticsearch.core.Strings.format; - public class HuggingFaceClient { private static final Logger logger = LogManager.getLogger(HuggingFaceClient.class); + private static final ResponseHandler ELSER_RESPONSE_HANDLER = createElserHandler(); + + private final RetryingHttpSender sender; + + public HuggingFaceClient(Sender sender, ServiceComponents serviceComponents) { + this.sender = new RetryingHttpSender( + sender, + serviceComponents.throttlerManager(), + logger, + new RetrySettings(serviceComponents.settings()), + serviceComponents.threadPool() + ); + } - private final ThrottlerManager throttlerManager; - - private final Sender sender; - - public HuggingFaceClient(Sender sender, ThrottlerManager throttlerManager) { - this.sender = sender; - this.throttlerManager = throttlerManager; + public void send(HuggingFaceElserRequest request, ActionListener listener) throws IOException { + this.sender.send(request.createRequest(), ELSER_RESPONSE_HANDLER, listener); } - public void send(HuggingFaceElserRequest request, ActionListener listener) throws IOException { - HttpRequestBase httpRequest = request.createRequest(); - ActionListener responseListener = ActionListener.wrap(response -> { - try { - listener.onResponse(HuggingFaceElserResponseEntity.fromResponse(response)); - } catch (Exception e) { - String msg = format("Failed to parse the Hugging Face ELSER response for request [%s]", httpRequest.getRequestLine()); - throttlerManager.getThrottler().warn(logger, msg, e); - listener.onFailure(new ElasticsearchException(msg, e)); - } - }, listener::onFailure); - - sender.send(httpRequest, responseListener); + private static ResponseHandler createElserHandler() { + return new AlwaysRetryingResponseHandler("elser hugging face", HuggingFaceElserResponseEntity::fromResponse); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiAccount.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiAccount.java new file mode 100644 index 0000000000000..a89032277ff8d --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiAccount.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.openai; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.core.Nullable; + +import java.net.URI; +import java.util.Objects; + +public record OpenAiAccount(@Nullable URI url, @Nullable String organizationId, SecureString apiKey) { + + public OpenAiAccount { + Objects.requireNonNull(apiKey); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClient.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClient.java new file mode 100644 index 0000000000000..af809f1be97f9 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClient.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.openai; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; +import org.elasticsearch.xpack.inference.external.http.retry.RetrySettings; +import org.elasticsearch.xpack.inference.external.http.retry.RetryingHttpSender; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.external.request.openai.OpenAiEmbeddingsRequest; +import org.elasticsearch.xpack.inference.external.response.openai.OpenAiEmbeddingsResponseEntity; +import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.elasticsearch.xpack.inference.services.openai.OpenAiResponseHandler; + +import java.io.IOException; + +public class OpenAiClient { + private static final Logger logger = LogManager.getLogger(OpenAiClient.class); + private static final ResponseHandler EMBEDDINGS_HANDLER = createEmbeddingsHandler(); + + private final RetryingHttpSender sender; + + public OpenAiClient(Sender sender, ServiceComponents serviceComponents) { + this.sender = new RetryingHttpSender( + sender, + serviceComponents.throttlerManager(), + logger, + new RetrySettings(serviceComponents.settings()), + serviceComponents.threadPool() + ); + } + + public void send(OpenAiEmbeddingsRequest request, ActionListener listener) throws IOException { + sender.send(request.createRequest(), EMBEDDINGS_HANDLER, listener); + } + + private static ResponseHandler createEmbeddingsHandler() { + return new OpenAiResponseHandler("openai text embedding", result -> OpenAiEmbeddingsResponseEntity.fromResponse(result)); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/RequestUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/RequestUtils.java new file mode 100644 index 0000000000000..355db7288dacc --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/RequestUtils.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request; + +import org.apache.http.Header; +import org.apache.http.HttpHeaders; +import org.apache.http.message.BasicHeader; +import org.elasticsearch.common.settings.SecureString; + +public class RequestUtils { + + public static Header createAuthBearerHeader(SecureString apiKey) { + return new BasicHeader(HttpHeaders.AUTHORIZATION, "Bearer " + apiKey.toString()); + } + + private RequestUtils() {} +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/huggingface/HuggingFaceElserRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/huggingface/HuggingFaceElserRequest.java index f896bba4ae063..563b0036bdb09 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/huggingface/HuggingFaceElserRequest.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/huggingface/HuggingFaceElserRequest.java @@ -7,12 +7,10 @@ package org.elasticsearch.xpack.inference.external.request.huggingface; -import org.apache.http.Header; import org.apache.http.HttpHeaders; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpRequestBase; import org.apache.http.entity.ByteArrayEntity; -import org.apache.http.message.BasicHeader; import org.elasticsearch.common.Strings; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.external.huggingface.HuggingFaceAccount; @@ -21,6 +19,8 @@ import java.nio.charset.StandardCharsets; import java.util.Objects; +import static org.elasticsearch.xpack.inference.external.request.RequestUtils.createAuthBearerHeader; + public class HuggingFaceElserRequest implements Request { private final HuggingFaceAccount account; @@ -37,12 +37,8 @@ public HttpRequestBase createRequest() { ByteArrayEntity byteEntity = new ByteArrayEntity(Strings.toString(entity).getBytes(StandardCharsets.UTF_8)); httpPost.setEntity(byteEntity); httpPost.setHeader(HttpHeaders.CONTENT_TYPE, XContentType.JSON.mediaTypeWithoutParameters()); - httpPost.setHeader(apiKeyHeader()); + httpPost.setHeader(createAuthBearerHeader(account.apiKey())); return httpPost; } - - private Header apiKeyHeader() { - return new BasicHeader(HttpHeaders.AUTHORIZATION, "Bearer " + account.apiKey().toString()); - } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/huggingface/HuggingFaceElserRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/huggingface/HuggingFaceElserRequestEntity.java index f21bee923ecab..10ba249f9da7d 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/huggingface/HuggingFaceElserRequestEntity.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/huggingface/HuggingFaceElserRequestEntity.java @@ -11,9 +11,10 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.List; import java.util.Objects; -public record HuggingFaceElserRequestEntity(String inputs) implements ToXContentObject { +public record HuggingFaceElserRequestEntity(List inputs) implements ToXContentObject { private static final String INPUTS_FIELD = "inputs"; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequest.java 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequest.java new file mode 100644 index 0000000000000..d195563227d65 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequest.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.openai; + +import org.apache.http.HttpHeaders; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpRequestBase; +import org.apache.http.client.utils.URIBuilder; +import org.apache.http.entity.ByteArrayEntity; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.openai.OpenAiAccount; +import org.elasticsearch.xpack.inference.external.request.Request; + +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.request.RequestUtils.createAuthBearerHeader; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.createOrgHeader; + +public class OpenAiEmbeddingsRequest implements Request { + + private final OpenAiAccount account; + private final OpenAiEmbeddingsRequestEntity entity; + + public OpenAiEmbeddingsRequest(OpenAiAccount account, OpenAiEmbeddingsRequestEntity entity) { + this.account = Objects.requireNonNull(account); + this.entity = Objects.requireNonNull(entity); + } + + public HttpRequestBase createRequest() { + try { + URI uriForRequest = account.url() == null ? buildDefaultUri() : account.url(); + + HttpPost httpPost = new HttpPost(uriForRequest); + + ByteArrayEntity byteEntity = new ByteArrayEntity(Strings.toString(entity).getBytes(StandardCharsets.UTF_8)); + httpPost.setEntity(byteEntity); + + httpPost.setHeader(HttpHeaders.CONTENT_TYPE, XContentType.JSON.mediaType()); + httpPost.setHeader(createAuthBearerHeader(account.apiKey())); + + var org = account.organizationId(); + if (org != null) { + httpPost.setHeader(createOrgHeader(org)); + } + + return httpPost; + } catch (URISyntaxException e) { + throw new ElasticsearchStatusException("Failed to construct OpenAI URL", RestStatus.INTERNAL_SERVER_ERROR, e); + } + } + + // default for testing + static URI buildDefaultUri() throws URISyntaxException { + return new URIBuilder().setScheme("https") + .setHost(OpenAiUtils.HOST) + .setPathSegments(OpenAiUtils.VERSION_1, OpenAiUtils.EMBEDDINGS_PATH) + .build(); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestEntity.java new file mode 100644 index 0000000000000..38c61e5590fa3 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestEntity.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.openai; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +public record OpenAiEmbeddingsRequestEntity(List input, String model, @Nullable String user) implements ToXContentObject { + + private static final String INPUT_FIELD = "input"; + private static final String MODEL_FIELD = "model"; + private static final String USER_FIELD = "user"; + + public OpenAiEmbeddingsRequestEntity { + Objects.requireNonNull(input); + Objects.requireNonNull(model); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(INPUT_FIELD, input); + builder.field(MODEL_FIELD, model); + + if (user != null) { + builder.field(USER_FIELD, user); + } + + builder.endObject(); + return builder; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiUtils.java new file mode 100644 index 0000000000000..a6479b3ecde25 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiUtils.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.openai; + +import org.apache.http.Header; +import org.apache.http.message.BasicHeader; + +public class OpenAiUtils { + public static final String HOST = "api.openai.com"; + public static final String VERSION_1 = "v1"; + public static final String EMBEDDINGS_PATH = "embeddings"; + public static final String ORGANIZATION_HEADER = "OpenAI-Organization"; + + public static Header createOrgHeader(String org) { + return new BasicHeader(ORGANIZATION_HEADER, org); + } + + private OpenAiUtils() {} +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntity.java index 2ac9eb44ed7fb..566ca9ff1351f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntity.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntity.java @@ -13,16 +13,14 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.core.ml.inference.results.TextExpansionResults; import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.results.SparseEmbeddingResults; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; -import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig.DEFAULT_RESULTS_FIELD; - public class HuggingFaceElserResponseEntity { /** @@ -56,7 +54,7 @@ public class HuggingFaceElserResponseEntity { * * */ - public static TextExpansionResults fromResponse(HttpResult response) throws IOException { + public static SparseEmbeddingResults fromResponse(HttpResult response) throws IOException { var parserConfig = XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE); try (XContentParser jsonParser = XContentFactory.xContent(XContentType.JSON).createParser(parserConfig, response.body())) { @@ -64,36 +62,36 @@ public static TextExpansionResults fromResponse(HttpResult response) throws IOEx jsonParser.nextToken(); } - List parsedResponse = XContentParserUtils.parseList( + List parsedEmbeddings = XContentParserUtils.parseList( jsonParser, HuggingFaceElserResponseEntity::parseExpansionResult ); - if (parsedResponse.isEmpty()) { - return new TextExpansionResults(DEFAULT_RESULTS_FIELD, Collections.emptyList(), false); + if (parsedEmbeddings.isEmpty()) { + return new SparseEmbeddingResults(Collections.emptyList()); } - // we only handle a single response right now so just grab the first one - return parsedResponse.get(0); + return new SparseEmbeddingResults(parsedEmbeddings); } } - private static TextExpansionResults parseExpansionResult(XContentParser parser) throws IOException { + private static SparseEmbeddingResults.Embedding parseExpansionResult(XContentParser parser) throws IOException { XContentParser.Token token = parser.currentToken(); XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); - List weightedTokens = new ArrayList<>(); + List weightedTokens = new ArrayList<>(); while ((token = parser.nextToken()) != 
XContentParser.Token.END_OBJECT) { XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); var floatToken = parser.nextToken(); XContentParserUtils.ensureExpectedToken(XContentParser.Token.VALUE_NUMBER, floatToken, parser); - weightedTokens.add(new TextExpansionResults.WeightedToken(parser.currentName(), parser.floatValue())); + weightedTokens.add(new SparseEmbeddingResults.WeightedToken(parser.currentName(), parser.floatValue())); } + // TODO how do we know if the tokens were truncated so we can set this appropriately? // This will depend on whether we handle the tokenization or hugging face - return new TextExpansionResults(DEFAULT_RESULTS_FIELD, weightedTokens, false); + return new SparseEmbeddingResults.Embedding(weightedTokens, false); } private HuggingFaceElserResponseEntity() {} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiEmbeddingsResponseEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiEmbeddingsResponseEntity.java new file mode 100644 index 0000000000000..60b568678987d --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiEmbeddingsResponseEntity.java @@ -0,0 +1,131 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.response.openai; + +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.results.TextEmbeddingResults; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.core.Strings.format; + +public class OpenAiEmbeddingsResponseEntity { + + /** + * Parses the OpenAI json response. + * For a request like: + * + *
+     * <pre>
+     *     <code>
+     *        {
+     *            "inputs": ["hello this is my name", "I wish I was there!"]
+     *        }
+     *     </code>
+     * </pre>
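+     *
+     * For context, a sketch only: the outgoing body that {@code OpenAiEmbeddingsRequestEntity}
+     * in this change would build for those same inputs looks like the following, where the
+     * model value is illustrative rather than mandated by this class:
+     *
+     * <pre>
+     *     <code>
+     *        {
+     *            "input": ["hello this is my name", "I wish I was there!"],
+     *            "model": "text-embedding-ada-002"
+     *        }
+     *     </code>
+     * </pre>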
    + * + * The response would look like: + * + *
+     * <pre>
+     * <code>
    +     * {
    +     *  "object": "list",
    +     *  "data": [
    +     *      {
    +     *          "object": "embedding",
    +     *          "embedding": [
    +     *              -0.009327292,
    +     *              .... (1536 floats total for ada-002)
    +     *              -0.0028842222,
    +     *          ],
    +     *          "index": 0
    +     *      },
    +     *      {
    +     *          "object": "embedding",
    +     *          "embedding": [ ... ],
    +     *          "index": 1
    +     *      }
    +     *  ],
    +     *  "model": "text-embedding-ada-002",
    +     *  "usage": {
    +     *      "prompt_tokens": 8,
    +     *      "total_tokens": 8
    +     *  }
    +     * }
+     * </code>
+     * </pre>
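+     *
+     * A minimal usage sketch, assuming an {@code HttpResult} whose body is the json above
+     * (names taken from this file and from {@code TextEmbeddingResults} in this change):
+     *
+     * <pre>
+     * <code>
+     * TextEmbeddingResults results = OpenAiEmbeddingsResponseEntity.fromResponse(httpResult);
+     * List&lt;TextEmbeddingResults.Embedding&gt; embeddings = results.embeddings();
+     * // two embeddings for the example response, one per entry in the "data" array
+     * </code>
+     * </pre>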
    + */ + public static TextEmbeddingResults fromResponse(HttpResult response) throws IOException { + var parserConfig = XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE); + + try (XContentParser jsonParser = XContentFactory.xContent(XContentType.JSON).createParser(parserConfig, response.body())) { + if (jsonParser.currentToken() == null) { + jsonParser.nextToken(); + } + + XContentParser.Token token = jsonParser.currentToken(); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, token, jsonParser); + + positionParserAtTokenAfterField(jsonParser, "data"); + + List embeddingList = XContentParserUtils.parseList( + jsonParser, + OpenAiEmbeddingsResponseEntity::parseEmbeddingObject + ); + + return new TextEmbeddingResults(embeddingList); + } + } + + /** + * Iterates over the tokens until it finds a field name token with the text matching the field requested. + * + * @throws IllegalStateException if the field cannot be found + */ + private static void positionParserAtTokenAfterField(XContentParser parser, String field) throws IOException { + XContentParser.Token token; + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME && parser.currentName().equals(field)) { + parser.nextToken(); + return; + } + } + + throw new IllegalStateException(format("Failed to find required field [%s] in OpenAI embeddings response", field)); + } + + private static TextEmbeddingResults.Embedding parseEmbeddingObject(XContentParser parser) throws IOException { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser); + + positionParserAtTokenAfterField(parser, "embedding"); + + List embeddingValues = XContentParserUtils.parseList(parser, OpenAiEmbeddingsResponseEntity::parseEmbeddingList); + + // the parser is currently sitting at an ARRAY_END so go to the next token + parser.nextToken(); + // if there are additional fields within this object, lets skip them, so we can begin parsing the next embedding array + parser.skipChildren(); + + return new TextEmbeddingResults.Embedding(embeddingValues); + } + + private static float parseEmbeddingList(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.VALUE_NUMBER, token, parser); + return parser.floatValue(); + } + + private OpenAiEmbeddingsResponseEntity() {} +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiErrorResponseEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiErrorResponseEntity.java new file mode 100644 index 0000000000000..10f42a8ec7d19 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiErrorResponseEntity.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.response.openai; + +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.http.HttpResult; + +import java.io.IOException; +import java.util.Map; + +public class OpenAiErrorResponseEntity { + + private final String errorMessage; + + private OpenAiErrorResponseEntity(String errorMessage) { + this.errorMessage = errorMessage; + } + + public String getErrorMessage() { + return errorMessage; + } + + /** + * An example error response for invalid auth would look like + * + * { + * "error": { + * "message": "You didn't provide an API key...", + * "type": "invalid_request_error", + * "param": null, + * "code": null + * } + * } + * + * + * + * @param response The error response + * @return An error entity if the response is JSON with the above structure + * or null if the response does not contain the error.message field + */ + @SuppressWarnings("unchecked") + public static OpenAiErrorResponseEntity fromResponse(HttpResult response) { + try ( + XContentParser jsonParser = XContentFactory.xContent(XContentType.JSON) + .createParser(XContentParserConfiguration.EMPTY, response.body()) + ) { + var responseMap = jsonParser.map(); + var error = (Map) responseMap.get("error"); + if (error != null) { + var message = (String) error.get("message"); + if (message != null) { + return new OpenAiErrorResponseEntity(message); + } + } + } catch (IOException e) { + // swallow the error + } + + return null; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/logging/Throttler.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/logging/Throttler.java index b1dee15a93bd7..0cf0e65eaba37 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/logging/Throttler.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/logging/Throttler.java @@ -87,25 +87,22 @@ public void setDurationToWait(TimeValue durationToWait) { this.durationToWait = Duration.ofMillis(durationToWait.millis()); } - public void warn(Logger logger, String message, Throwable e) { - Objects.requireNonNull(message); - Objects.requireNonNull(e); - - if (isRunning.get()) { - logHelper(message, msgToAppend -> logger.warn(message.concat(msgToAppend), e)); + public void execute(String message, Consumer consumer) { + if (isRunning.get() == false) { + return; } - } - private void logHelper(String message, Consumer executor) { LogExecutor logExecutor = logExecutors.compute(message, (key, value) -> { if (value == null) { - return new LogExecutor(clock, executor); + return new LogExecutor(clock, consumer); } - return value.compute(executor, durationToWait); + return value.compute(consumer, durationToWait); }); - logExecutor.log(); + // This executes an internal consumer that wraps the passed in one, it will either log the message passed here + // unchanged, do nothing if it is in the throttled period, or log this message + some text saying how many times it was repeated + logExecutor.log(message); } @Override @@ -119,41 +116,38 @@ private static class LogExecutor { private final long skippedLogCalls; private final Instant timeOfLastLogCall; private final Clock clock; - private final Runnable logRunner; - - LogExecutor(Clock clock, Consumer logAppendedMessage) { - skippedLogCalls = 
0; - timeOfLastLogCall = Instant.now(clock); - this.clock = clock; - // The first log message can log the original message without waiting - this.logRunner = () -> logAppendedMessage.accept(""); + private final Consumer consumer; + + LogExecutor(Clock clock, Consumer throttledConsumer) { + this(clock, 0, throttledConsumer); } - LogExecutor(Clock clock, long skippedLogCalls, Runnable logRunner) { + LogExecutor(Clock clock, long skippedLogCalls, Consumer consumer) { this.skippedLogCalls = skippedLogCalls; - timeOfLastLogCall = Instant.now(clock); - this.clock = clock; - this.logRunner = logRunner; + this.clock = Objects.requireNonNull(clock); + timeOfLastLogCall = Instant.now(this.clock); + this.consumer = Objects.requireNonNull(consumer); } - void log() { - this.logRunner.run(); + void log(String message) { + this.consumer.accept(message); } LogExecutor compute(Consumer executor, Duration durationToWait) { if (hasDurationExpired(durationToWait)) { - String msg = ""; + String messageToAppend = ""; if (this.skippedLogCalls == 1) { - msg = ", repeated 1 time"; + messageToAppend = ", repeated 1 time"; } else if (this.skippedLogCalls > 1) { - msg = format(", repeated %s times", this.skippedLogCalls); + messageToAppend = format(", repeated %s times", this.skippedLogCalls); } - String finalMsg = msg; - return new LogExecutor(this.clock, 0, () -> executor.accept(finalMsg)); + final String stringToAppend = messageToAppend; + return new LogExecutor(this.clock, 0, (message) -> executor.accept(message.concat(stringToAppend))); } - return new LogExecutor(this.clock, this.skippedLogCalls + 1, () -> {}); + // This creates a consumer that won't do anything because the original consumer is being throttled + return new LogExecutor(this.clock, this.skippedLogCalls + 1, (message) -> {}); } private boolean hasDurationExpired(Duration durationToWait) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/logging/ThrottlerManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/logging/ThrottlerManager.java index 6c38c341a0401..2a84494d6af21 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/logging/ThrottlerManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/logging/ThrottlerManager.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.inference.logging; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -78,10 +79,24 @@ void setResetInterval(TimeValue resetInterval) { throttler = new Throttler(loggerSettings.resetInterval(), loggerSettings.waitDuration(), threadPool); } - public Throttler getThrottler() { + // default for testing + Throttler getThrottler() { return throttler; } + public void warn(Logger logger, String message, Throwable e) { + Objects.requireNonNull(message); + Objects.requireNonNull(e); + + throttler.execute(message, messageToLog -> logger.warn(messageToLog, e)); + } + + public void warn(Logger logger, String message) { + Objects.requireNonNull(message); + + throttler.execute(message, logger::warn); + } + @Override public void close() { throttler.close(); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/results/LegacyTextEmbeddingResults.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/results/LegacyTextEmbeddingResults.java new file mode 100644 
index 0000000000000..b5d6b8483138a --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/results/LegacyTextEmbeddingResults.java @@ -0,0 +1,141 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.results; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Writes a text embedding result in the following json format + * { + * "text_embedding": [ + * { + * "embedding": [ + * 0.1 + * ] + * }, + * { + * "embedding": [ + * 0.2 + * ] + * } + * ] + * } + * + * This class represents the way that the {@link org.elasticsearch.xpack.inference.services.openai.OpenAiService} + * formatted the response for the embeddings type. This represents what was returned prior to the + * {@link org.elasticsearch.TransportVersions#INFERENCE_SERVICE_RESULTS_ADDED} version. + * @deprecated use {@link TextEmbeddingResults} instead + */ +@Deprecated +public record LegacyTextEmbeddingResults(List embeddings) implements InferenceResults { + public static final String NAME = "text_embedding_results"; + public static final String TEXT_EMBEDDING = TaskType.TEXT_EMBEDDING.toString(); + + public LegacyTextEmbeddingResults(StreamInput in) throws IOException { + this(in.readCollectionAsList(Embedding::new)); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray(TEXT_EMBEDDING); + for (Embedding embedding : embeddings) { + embedding.toXContent(builder, params); + } + builder.endArray(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(embeddings); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public String getResultsField() { + return TEXT_EMBEDDING; + } + + @Override + public Map asMap() { + Map map = new LinkedHashMap<>(); + map.put(getResultsField(), embeddings.stream().map(Embedding::asMap).collect(Collectors.toList())); + + return map; + } + + @Override + public Map asMap(String outputField) { + Map map = new LinkedHashMap<>(); + map.put(outputField, embeddings.stream().map(Embedding::asMap).collect(Collectors.toList())); + + return map; + } + + @Override + public Object predictedValue() { + throw new UnsupportedOperationException("[" + NAME + "] does not support a single predicted value"); + } + + public TextEmbeddingResults transformToTextEmbeddingResults() { + return new TextEmbeddingResults(this); + } + + public record Embedding(List values) implements Writeable, ToXContentObject { + public static final String EMBEDDING = "embedding"; + + public Embedding(StreamInput in) throws IOException { + this(in.readCollectionAsImmutableList(StreamInput::readFloat)); + } + + @Override + public void writeTo(StreamOutput out) throws 
IOException { + out.writeCollection(values, StreamOutput::writeFloat); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + builder.startArray(EMBEDDING); + for (Float value : values) { + builder.value(value); + } + builder.endArray(); + + builder.endObject(); + return builder; + } + + @Override + public String toString() { + return Strings.toString(this); + } + + public Map asMap() { + return Map.of(EMBEDDING, values); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/results/SparseEmbeddingResults.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/results/SparseEmbeddingResults.java new file mode 100644 index 0000000000000..0e0299a5e12fd --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/results/SparseEmbeddingResults.java @@ -0,0 +1,177 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.results; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.inference.results.TextExpansionResults; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig.DEFAULT_RESULTS_FIELD; + +public record SparseEmbeddingResults(List embeddings) implements InferenceServiceResults { + + public static final String NAME = "sparse_embedding_results"; + public static final String SPARSE_EMBEDDING = TaskType.SPARSE_EMBEDDING.toString(); + + public SparseEmbeddingResults(StreamInput in) throws IOException { + this(in.readCollectionAsList(Embedding::new)); + } + + public static SparseEmbeddingResults of(List results) { + List embeddings = new ArrayList<>(results.size()); + + for (InferenceResults result : results) { + if (result instanceof TextExpansionResults expansionResults) { + embeddings.add(Embedding.create(expansionResults.getWeightedTokens(), expansionResults.isTruncated())); + } else { + throw new IllegalArgumentException("Received invalid legacy inference result"); + } + } + + return new SparseEmbeddingResults(embeddings); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray(SPARSE_EMBEDDING); + + for (Embedding embedding : embeddings) { + embedding.toXContent(builder, params); + } + + builder.endArray(); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(embeddings); + } + + public Map asMap() { + Map map = new 
LinkedHashMap<>(); + var embeddingList = embeddings.stream().map(Embedding::asMap).toList(); + + map.put(SPARSE_EMBEDDING, embeddingList); + return map; + } + + @Override + public List transformToLegacyFormat() { + return embeddings.stream() + .map( + embedding -> new TextExpansionResults( + DEFAULT_RESULTS_FIELD, + embedding.tokens() + .stream() + .map(weightedToken -> new TextExpansionResults.WeightedToken(weightedToken.token, weightedToken.weight)) + .toList(), + embedding.isTruncated + ) + ) + .toList(); + } + + public record Embedding(List tokens, boolean isTruncated) implements Writeable, ToXContentObject { + + public static final String EMBEDDING = "embedding"; + public static final String IS_TRUNCATED = "is_truncated"; + + public Embedding(StreamInput in) throws IOException { + this(in.readCollectionAsList(WeightedToken::new), in.readBoolean()); + } + + public static Embedding create(List weightedTokens, boolean isTruncated) { + return new Embedding( + weightedTokens.stream().map(token -> new WeightedToken(token.token(), token.weight())).toList(), + isTruncated + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(tokens); + out.writeBoolean(isTruncated); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(IS_TRUNCATED, isTruncated); + builder.startObject(EMBEDDING); + + for (var weightedToken : tokens) { + weightedToken.toXContent(builder, params); + } + + builder.endObject(); + builder.endObject(); + return builder; + } + + public Map asMap() { + var embeddingMap = new LinkedHashMap( + tokens.stream().collect(Collectors.toMap(WeightedToken::token, WeightedToken::weight)) + ); + + return new LinkedHashMap<>(Map.of(IS_TRUNCATED, isTruncated, EMBEDDING, embeddingMap)); + } + + @Override + public String toString() { + return Strings.toString(this); + } + } + + public record WeightedToken(String token, float weight) implements Writeable, ToXContentFragment { + public WeightedToken(StreamInput in) throws IOException { + this(in.readString(), in.readFloat()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(token); + out.writeFloat(weight); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(token, weight); + return builder; + } + + public Map asMap() { + return Map.of(token, weight); + } + + @Override + public String toString() { + return Strings.toString(this); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/results/TextEmbeddingResults.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/results/TextEmbeddingResults.java new file mode 100644 index 0000000000000..74f94e1aea17d --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/results/TextEmbeddingResults.java @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.results; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Writes a text embedding result in the follow json format + * { + * "text_embedding": [ + * { + * "embedding": [ + * 0.1 + * ] + * }, + * { + * "embedding": [ + * 0.2 + * ] + * } + * ] + * } + */ +public record TextEmbeddingResults(List embeddings) implements InferenceServiceResults { + public static final String NAME = "text_embedding_service_results"; + public static final String TEXT_EMBEDDING = TaskType.TEXT_EMBEDDING.toString(); + + public TextEmbeddingResults(StreamInput in) throws IOException { + this(in.readCollectionAsList(Embedding::new)); + } + + @SuppressWarnings("deprecation") + TextEmbeddingResults(LegacyTextEmbeddingResults legacyTextEmbeddingResults) { + this( + legacyTextEmbeddingResults.embeddings() + .stream() + .map(embedding -> new Embedding(embedding.values())) + .collect(Collectors.toList()) + ); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray(TEXT_EMBEDDING); + for (Embedding embedding : embeddings) { + embedding.toXContent(builder, params); + } + builder.endArray(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(embeddings); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + @SuppressWarnings("deprecation") + public List transformToLegacyFormat() { + var legacyEmbedding = new LegacyTextEmbeddingResults( + embeddings.stream().map(embedding -> new LegacyTextEmbeddingResults.Embedding(embedding.values)).toList() + ); + + return List.of(legacyEmbedding); + } + + public Map asMap() { + Map map = new LinkedHashMap<>(); + map.put(TEXT_EMBEDDING, embeddings.stream().map(Embedding::asMap).collect(Collectors.toList())); + + return map; + } + + public record Embedding(List values) implements Writeable, ToXContentObject { + public static final String EMBEDDING = "embedding"; + + public Embedding(StreamInput in) throws IOException { + this(in.readCollectionAsImmutableList(StreamInput::readFloat)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(values, StreamOutput::writeFloat); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + builder.startArray(EMBEDDING); + for (Float value : values) { + builder.value(value); + } + builder.endArray(); + + builder.endObject(); + return builder; + } + + @Override + public String toString() { + return Strings.toString(this); + } + + public Map asMap() { + return Map.of(EMBEDDING, values); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/MapParsingUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/MapParsingUtils.java index 
0849e8fa53cf5..20bea7f1347b3 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/MapParsingUtils.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/MapParsingUtils.java @@ -8,10 +8,17 @@ package org.elasticsearch.xpack.inference.services; import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.core.Strings; import org.elasticsearch.rest.RestStatus; +import java.net.URI; +import java.net.URISyntaxException; import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.core.Strings.format; public class MapParsingUtils { /** @@ -54,8 +61,13 @@ public static Map removeFromMapOrThrowIfNull(Map return value; } + @SuppressWarnings("unchecked") + public static Map removeFromMap(Map sourceMap, String fieldName) { + return (Map) sourceMap.remove(fieldName); + } + public static void throwIfNotEmptyMap(Map settingsMap, String serviceName) { - if (settingsMap.isEmpty() == false) { + if (settingsMap != null && settingsMap.isEmpty() == false) { throw MapParsingUtils.unknownSettingsError(settingsMap, serviceName); } } @@ -74,11 +86,86 @@ public static String missingSettingErrorMsg(String settingName, String scope) { return Strings.format("[%s] does not contain the required setting [%s]", scope, settingName); } - public static String invalidUrlErrorMsg(String url, String settingName) { - return Strings.format("Invalid url [%s] received in setting [%s]", url, settingName); + public static String invalidUrlErrorMsg(String url, String settingName, String settingScope) { + return Strings.format("[%s] Invalid url [%s] received for field [%s]", settingScope, url, settingName); } public static String mustBeNonEmptyString(String settingName, String scope) { return Strings.format("[%s] Invalid value empty string. 
[%s] must be a non-empty string", scope, settingName); } + + // TODO improve URI validation logic + public static URI convertToUri(String url, String settingName, String settingScope, ValidationException validationException) { + try { + return createUri(url); + } catch (IllegalArgumentException ignored) { + validationException.addValidationError(MapParsingUtils.invalidUrlErrorMsg(url, settingName, settingScope)); + return null; + } + } + + public static URI createUri(String url) throws IllegalArgumentException { + Objects.requireNonNull(url); + + try { + return new URI(url); + } catch (URISyntaxException e) { + throw new IllegalArgumentException(format("unable to parse url [%s]", url), e); + } + } + + public static SecureString extractRequiredSecureString( + Map map, + String settingName, + String scope, + ValidationException validationException + ) { + String requiredField = extractRequiredString(map, settingName, scope, validationException); + + if (validationException.validationErrors().isEmpty() == false) { + return null; + } + + return new SecureString(Objects.requireNonNull(requiredField).toCharArray()); + } + + public static String extractRequiredString( + Map map, + String settingName, + String scope, + ValidationException validationException + ) { + String requiredField = MapParsingUtils.removeAsType(map, settingName, String.class); + + if (requiredField == null) { + validationException.addValidationError(MapParsingUtils.missingSettingErrorMsg(settingName, scope)); + } else if (requiredField.isEmpty()) { + validationException.addValidationError(MapParsingUtils.mustBeNonEmptyString(settingName, scope)); + } + + if (validationException.validationErrors().isEmpty() == false) { + return null; + } + + return requiredField; + } + + public static String extractOptionalString( + Map map, + String settingName, + String scope, + ValidationException validationException + ) { + String optionalField = MapParsingUtils.removeAsType(map, settingName, String.class); + + if (optionalField != null && optionalField.isEmpty()) { + validationException.addValidationError(MapParsingUtils.mustBeNonEmptyString(settingName, scope)); + } + + if (validationException.validationErrors().isEmpty() == false) { + return null; + } + + return optionalField; + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceComponents.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceComponents.java new file mode 100644 index 0000000000000..bff1ce70b6e13 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceComponents.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; + +/** + * A container for common components need at various levels of the inference services to instantiate their internals + */ +public record ServiceComponents(ThreadPool threadPool, ThrottlerManager throttlerManager, Settings settings) {} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeService.java index 57f5acbebd05b..f1fab447ec757 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeService.java @@ -13,8 +13,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.inference.InferenceResults; import org.elasticsearch.inference.InferenceService; +import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.TaskType; @@ -23,8 +23,8 @@ import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.action.InferTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; -import org.elasticsearch.xpack.core.ml.inference.results.TextExpansionResults; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextExpansionConfigUpdate; +import org.elasticsearch.xpack.inference.results.SparseEmbeddingResults; import java.io.IOException; import java.util.List; @@ -157,7 +157,7 @@ public void start(Model model, ActionListener listener) { } @Override - public void infer(Model model, String input, Map taskSettings, ActionListener listener) { + public void infer(Model model, List input, Map taskSettings, ActionListener listener) { // No task settings to override with requestTaskSettings if (model.getConfigurations().getTaskType() != TaskType.SPARSE_EMBEDDING) { @@ -173,12 +173,11 @@ public void infer(Model model, String input, Map taskSettings, A var request = InferTrainedModelDeploymentAction.Request.forTextInput( model.getConfigurations().getModelId(), TextExpansionConfigUpdate.EMPTY_UPDATE, - List.of(input), + input, TimeValue.timeValueSeconds(10) // TODO get timeout from request ); client.execute(InferTrainedModelDeploymentAction.INSTANCE, request, ActionListener.wrap(inferenceResult -> { - var textExpansionResult = (TextExpansionResults) inferenceResult.getResults().get(0); - listener.onResponse(textExpansionResult); + listener.onResponse(SparseEmbeddingResults.of(inferenceResult.getResults())); }, listener::onFailure)); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserSecretSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserSecretSettings.java index f80de0067437b..f2df48366f786 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserSecretSettings.java +++ 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserSecretSettings.java @@ -16,12 +16,13 @@ import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.SecretSettings; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.inference.services.MapParsingUtils; import java.io.IOException; import java.util.Map; import java.util.Objects; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.extractRequiredSecureString; + public record HuggingFaceElserSecretSettings(SecureString apiKey) implements SecretSettings { public static final String NAME = "hugging_face_elser_secret_settings"; @@ -29,21 +30,12 @@ public record HuggingFaceElserSecretSettings(SecureString apiKey) implements Sec public static HuggingFaceElserSecretSettings fromMap(Map map) { ValidationException validationException = new ValidationException(); - - String apiToken = MapParsingUtils.removeAsType(map, API_KEY, String.class); - - if (apiToken == null) { - validationException.addValidationError(MapParsingUtils.missingSettingErrorMsg(API_KEY, ModelSecrets.SECRET_SETTINGS)); - } else if (apiToken.isEmpty()) { - validationException.addValidationError(MapParsingUtils.mustBeNonEmptyString(API_KEY, ModelSecrets.SECRET_SETTINGS)); - } + SecureString secureApiToken = extractRequiredSecureString(map, API_KEY, ModelSecrets.SECRET_SETTINGS, validationException); if (validationException.validationErrors().isEmpty() == false) { throw validationException; } - SecureString secureApiToken = new SecureString(Objects.requireNonNull(apiToken).toCharArray()); - return new HuggingFaceElserSecretSettings(secureApiToken); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java index e25315b6bbaf0..3aaa122e93fe9 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java @@ -14,8 +14,8 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.core.IOUtils; -import org.elasticsearch.inference.InferenceResults; import org.elasticsearch.inference.InferenceService; +import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; @@ -24,9 +24,10 @@ import org.elasticsearch.xpack.inference.external.action.huggingface.HuggingFaceElserAction; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; import org.elasticsearch.xpack.inference.external.http.sender.Sender; -import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.elasticsearch.xpack.inference.services.ServiceComponents; import java.io.IOException; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -39,15 +40,12 @@ public class HuggingFaceElserService implements InferenceService { public static final String NAME = "hugging_face_elser"; private final SetOnce factory; - private final SetOnce throttlerManager; + private final SetOnce 
serviceComponents; private final AtomicReference sender = new AtomicReference<>(); - // This is initialized once which assumes that the settings will not change. To change the service, it - // should be deleted and then added again - private final AtomicReference action = new AtomicReference<>(); - public HuggingFaceElserService(SetOnce factory, SetOnce throttlerManager) { + public HuggingFaceElserService(SetOnce factory, SetOnce serviceComponents) { this.factory = Objects.requireNonNull(factory); - this.throttlerManager = Objects.requireNonNull(throttlerManager); + this.serviceComponents = Objects.requireNonNull(serviceComponents); } @Override @@ -90,7 +88,7 @@ public HuggingFaceElserModel parsePersistedConfig( } @Override - public void infer(Model model, String input, Map taskSettings, ActionListener listener) { + public void infer(Model model, List input, Map taskSettings, ActionListener listener) { if (model.getConfigurations().getTaskType() != TaskType.SPARSE_EMBEDDING) { listener.onFailure( new ElasticsearchStatusException( @@ -101,25 +99,23 @@ public void infer(Model model, String input, Map taskSettings, A return; } - try { - init(model); - } catch (Exception e) { - listener.onFailure(new ElasticsearchException("Failed to initialize service", e)); + if (model instanceof HuggingFaceElserModel == false) { + listener.onFailure(new ElasticsearchException("The internal model was invalid")); return; } - action.get().execute(input, listener); + init(); + + HuggingFaceElserModel huggingFaceElserModel = (HuggingFaceElserModel) model; + HuggingFaceElserAction action = new HuggingFaceElserAction(sender.get(), huggingFaceElserModel, serviceComponents.get()); + + action.execute(input, listener); } @Override public void start(Model model, ActionListener listener) { - try { - init(model); - sender.get().start(); - listener.onResponse(true); - } catch (Exception e) { - listener.onFailure(new ElasticsearchException("Failed to start service", e)); - } + init(); + listener.onResponse(true); } @Override @@ -127,20 +123,9 @@ public void close() throws IOException { IOUtils.closeWhileHandlingException(sender.get()); } - private void init(Model model) { - if (model instanceof HuggingFaceElserModel == false) { - throw new IllegalArgumentException("The internal model was invalid"); - } - + private void init() { sender.updateAndGet(current -> Objects.requireNonNullElseGet(current, () -> factory.get().createSender(name()))); - - HuggingFaceElserModel huggingFaceElserModel = (HuggingFaceElserModel) model; - action.updateAndGet( - current -> Objects.requireNonNullElseGet( - current, - () -> new HuggingFaceElserAction(sender.get(), huggingFaceElserModel, throttlerManager.get()) - ) - ); + sender.get().start(); } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserServiceSettings.java index 13f66562f6f83..4b8213909f66b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserServiceSettings.java @@ -7,8 +7,6 @@ package org.elasticsearch.xpack.inference.services.huggingface.elser; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import 
org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ValidationException; @@ -17,59 +15,36 @@ import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ServiceSettings; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.inference.services.MapParsingUtils; import java.io.IOException; import java.net.URI; -import java.net.URISyntaxException; import java.util.Map; import java.util.Objects; -import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.convertToUri; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.createUri; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.extractRequiredString; public record HuggingFaceElserServiceSettings(URI uri) implements ServiceSettings { public static final String NAME = "hugging_face_elser_service_settings"; - private static final Logger logger = LogManager.getLogger(HuggingFaceElserServiceSettings.class); static final String URL = "url"; public static HuggingFaceElserServiceSettings fromMap(Map map) { ValidationException validationException = new ValidationException(); - String parsedUrl = MapParsingUtils.removeAsType(map, URL, String.class); - URI uri = convertToUri(parsedUrl, validationException); - + String parsedUrl = extractRequiredString(map, URL, ModelConfigurations.SERVICE_SETTINGS, validationException); if (validationException.validationErrors().isEmpty() == false) { throw validationException; } - return new HuggingFaceElserServiceSettings(uri); - } - - private static URI convertToUri(String url, ValidationException validationException) { - if (url == null) { - validationException.addValidationError(MapParsingUtils.missingSettingErrorMsg(URL, ModelConfigurations.SERVICE_SETTINGS)); - return null; - } + URI uri = convertToUri(parsedUrl, URL, ModelConfigurations.SERVICE_SETTINGS, validationException); - try { - return createUri(url); - } catch (IllegalArgumentException ignored) { - validationException.addValidationError(MapParsingUtils.invalidUrlErrorMsg(url, ModelConfigurations.SERVICE_SETTINGS)); - return null; + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; } - } - - // TODO move this to a common location and potentially improve parsing errors - private static URI createUri(String url) throws IllegalArgumentException { - Objects.requireNonNull(url); - try { - return new URI(url); - } catch (URISyntaxException e) { - logger.info(format("Invalid URL received [%s]", url), e); - throw new IllegalArgumentException(format("unable to parse url [%s]", url), e); - } + return new HuggingFaceElserServiceSettings(uri); } public HuggingFaceElserServiceSettings { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiModel.java new file mode 100644 index 0000000000000..97823e3bc9079 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiModel.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
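The `HuggingFaceElserServiceSettings` hunk above folds the ad-hoc `convertToUri`/`createUri` helpers into the shared `MapParsingUtils`, so every settings class validates URLs the same way: problems accumulate in a `ValidationException` and are thrown in one batch instead of failing fast. A minimal standalone sketch of that accumulate-then-throw shape (the helper name and message formats below are illustrative stand-ins, not the real `MapParsingUtils` API):

```java
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class UrlSettingSketch {
    // Mirrors the shape of the shared convertToUri helper: record an error and
    // return null rather than throwing immediately, so callers can batch problems.
    static URI parseUrl(Map<String, Object> settings, String key, List<String> errors) {
        Object value = settings.remove(key); // settings maps are consumed as they are parsed
        if (value instanceof String s && s.isEmpty() == false) {
            try {
                return new URI(s);
            } catch (URISyntaxException e) {
                errors.add("[" + key + "] is not a valid url: [" + s + "]");
                return null;
            }
        }
        errors.add("[" + key + "] must be a non-empty string");
        return null;
    }

    public static void main(String[] args) {
        List<String> errors = new ArrayList<>();
        URI uri = parseUrl(new HashMap<>(Map.of("url", "https://example.com")), "url", errors);
        if (errors.isEmpty() == false) {
            // throw once with everything collected, like ValidationException does
            throw new IllegalArgumentException(String.join("; ", errors));
        }
        System.out.println(uri);
    }
}
```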
+ */ + +package org.elasticsearch.xpack.inference.services.openai; + +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.action.openai.OpenAiActionVisitor; + +import java.util.Map; + +public abstract class OpenAiModel extends Model { + + public OpenAiModel(ModelConfigurations configurations, ModelSecrets secrets) { + super(configurations, secrets); + } + + public abstract ExecutableAction accept(OpenAiActionVisitor creator, Map<String, Object> taskSettings); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiResponseHandler.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiResponseHandler.java new file mode 100644 index 0000000000000..b5b6b5df99862 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiResponseHandler.java @@ -0,0 +1,127 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.openai; + +import org.apache.http.RequestLine; +import org.apache.http.client.methods.HttpRequestBase; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.core.CheckedFunction; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; +import org.elasticsearch.xpack.inference.external.http.retry.RetryException; +import org.elasticsearch.xpack.inference.external.response.openai.OpenAiErrorResponseEntity; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.external.http.HttpUtils.checkForEmptyBody; + +public class OpenAiResponseHandler implements ResponseHandler { + + protected final String requestType; + private final CheckedFunction<HttpResult, InferenceServiceResults, IOException> parseFunction; + + public OpenAiResponseHandler(String requestType, CheckedFunction<HttpResult, InferenceServiceResults, IOException> parseFunction) { + this.requestType = Objects.requireNonNull(requestType); + this.parseFunction = Objects.requireNonNull(parseFunction); + } + + @Override + public void validateResponse(ThrottlerManager throttlerManager, Logger logger, HttpRequestBase request, HttpResult result) + throws RetryException { + checkForFailureStatusCode(request, result); + checkForEmptyBody(throttlerManager, logger, request, result); + } + + @Override + public InferenceServiceResults parseResult(HttpResult result) throws RetryException { + try { + return parseFunction.apply(result); + } catch (Exception e) { + throw new RetryException(true, e); + } + } + + @Override + public String getRequestType() { + return requestType; + } + + /** + * Validates the status code, throwing a RetryException if it is not in the range [200, 300). + * + * The OpenAI API error codes are documented at https://platform.openai.com/docs/guides/error-codes/api-errors + * @param request The http request + * @param result The http response and body + * @throws RetryException Thrown if the status code is {@code >= 300 or < 200 } + */ + static void checkForFailureStatusCode(HttpRequestBase request, HttpResult result) throws RetryException { + int statusCode = result.response().getStatusLine().getStatusCode(); + if (statusCode >= 200 && statusCode < 300) { + return; + } + + // handle error codes + if (statusCode >= 500) { + String errorMsg = buildErrorMessageWithResponse( + "Received a server error status code for request [%s] status [%s]", + request.getRequestLine(), + statusCode, + result + ); + throw new RetryException(false, errorMsg); + } else if (statusCode == 429) { + String errorMsg = buildErrorMessageWithResponse( + "Received a rate limit status code for request [%s] status [%s]", + request.getRequestLine(), + statusCode, + result + ); + throw new RetryException(false, errorMsg); // TODO back off and retry + } else if (statusCode == 401) { + String errorMsg = buildErrorMessageWithResponse( + "Received an authentication error status code for request [%s] status [%s]", + request.getRequestLine(), + statusCode, + result + ); + throw new RetryException(false, errorMsg); + } else if (statusCode >= 300 && statusCode < 400) { + String errorMsg = buildErrorMessageWithResponse( + "Unhandled redirection for request [%s] status [%s]", + request.getRequestLine(), + statusCode, + result + ); + throw new RetryException(false, errorMsg); + } else { + String errorMsg = buildErrorMessageWithResponse( + "Received an unsuccessful status code for request [%s] status [%s]", + request.getRequestLine(), + statusCode, + result + ); + throw new RetryException(false, errorMsg); + } + } + + static String buildErrorMessageWithResponse(String baseMessage, RequestLine requestLine, int statusCode, HttpResult response) { + var errorEntity = OpenAiErrorResponseEntity.fromResponse(response); + + if (errorEntity == null) { + return format(baseMessage, requestLine, statusCode); + } else { + var base = format(baseMessage, requestLine, statusCode); + return base + ". Error message: [" + errorEntity.getErrorMessage() + "]"; + } + + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java new file mode 100644 index 0000000000000..1d2d123432ab8 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java @@ -0,0 +1,166 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
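The `checkForFailureStatusCode` triage above maps each failure status class to a distinct error message but, for now, always constructs the exception as non-retryable (the 429 branch carries a TODO to back off and retry). A condensed standalone sketch of the same classification, with a plain record standing in for `RetryException`:

```java
// Sketch of the status-code triage implemented by checkForFailureStatusCode above.
// "retryable" stands in for RetryException's retry flag; the PR marks every
// failure branch non-retryable for now.
final class StatusTriageSketch {
    record Outcome(boolean retryable, String message) {}

    static Outcome triage(int statusCode) {
        if (statusCode >= 200 && statusCode < 300) {
            return new Outcome(false, "success");
        } else if (statusCode >= 500) {
            return new Outcome(false, "server error");
        } else if (statusCode == 429) {
            return new Outcome(false, "rate limited"); // TODO in the PR: back off and retry
        } else if (statusCode == 401) {
            return new Outcome(false, "authentication failure");
        } else if (statusCode >= 300 && statusCode < 400) {
            return new Outcome(false, "unhandled redirection");
        } else {
            return new Outcome(false, "unsuccessful");
        }
    }

    public static void main(String[] args) {
        System.out.println(triage(429).message()); // prints "rate limited"
    }
}
```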
+ */ + +package org.elasticsearch.xpack.inference.services.openai; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.inference.InferenceService; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.inference.external.action.openai.OpenAiActionCreator; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModel; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.removeFromMapOrThrowIfNull; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.throwIfNotEmptyMap; + +public class OpenAiService implements InferenceService { + public static final String NAME = "openai"; + + private final SetOnce factory; + private final SetOnce serviceComponents; + private final AtomicReference sender = new AtomicReference<>(); + + public OpenAiService(SetOnce factory, SetOnce serviceComponents) { + this.factory = Objects.requireNonNull(factory); + this.serviceComponents = Objects.requireNonNull(serviceComponents); + } + + @Override + public String name() { + return NAME; + } + + @Override + public OpenAiModel parseRequestConfig( + String modelId, + TaskType taskType, + Map config, + Set platformArchitectures + ) { + Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); + Map taskSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.TASK_SETTINGS); + + OpenAiModel model = createModel( + modelId, + taskType, + serviceSettingsMap, + taskSettingsMap, + serviceSettingsMap, + TaskType.unsupportedTaskTypeErrorMsg(taskType, NAME) + ); + + throwIfNotEmptyMap(config, NAME); + throwIfNotEmptyMap(serviceSettingsMap, NAME); + throwIfNotEmptyMap(taskSettingsMap, NAME); + + return model; + } + + private OpenAiModel createModel( + String modelId, + TaskType taskType, + Map serviceSettings, + Map taskSettings, + Map secretSettings, + String failureMessage + ) { + return switch (taskType) { + case TEXT_EMBEDDING -> new OpenAiEmbeddingsModel(modelId, taskType, NAME, serviceSettings, taskSettings, secretSettings); + default -> throw new ElasticsearchStatusException(failureMessage, RestStatus.BAD_REQUEST); + }; + } + + @Override + public OpenAiModel parsePersistedConfig(String modelId, TaskType taskType, Map config, Map secrets) { + Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); + Map taskSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.TASK_SETTINGS); + Map secretSettingsMap = removeFromMapOrThrowIfNull(secrets, ModelSecrets.SECRET_SETTINGS); + + 
OpenAiModel model = createModel( + modelId, + taskType, + serviceSettingsMap, + taskSettingsMap, + secretSettingsMap, + format("Failed to parse stored model [%s] for [%s] service, please delete and add the service again", modelId, NAME) + ); + + throwIfNotEmptyMap(config, NAME); + throwIfNotEmptyMap(secrets, NAME); + throwIfNotEmptyMap(serviceSettingsMap, NAME); + throwIfNotEmptyMap(taskSettingsMap, NAME); + throwIfNotEmptyMap(secretSettingsMap, NAME); + + return model; + } + + @Override + public void infer(Model model, List input, Map taskSettings, ActionListener listener) { + init(); + + if (model instanceof OpenAiModel == false) { + listener.onFailure( + new ElasticsearchStatusException( + format( + "The internal model was invalid, please delete the service [%s] with id [%s] and add it again.", + model.getConfigurations().getService(), + model.getConfigurations().getModelId() + ), + RestStatus.INTERNAL_SERVER_ERROR + ) + ); + return; + } + + OpenAiModel openAiModel = (OpenAiModel) model; + var actionCreator = new OpenAiActionCreator(sender.get(), serviceComponents.get()); + + var action = openAiModel.accept(actionCreator, taskSettings); + action.execute(input, listener); + } + + @Override + public void start(Model model, ActionListener listener) { + init(); + listener.onResponse(true); + } + + @Override + public void close() throws IOException { + IOUtils.closeWhileHandlingException(sender.get()); + } + + private void init() { + sender.updateAndGet(current -> Objects.requireNonNullElseGet(current, () -> factory.get().createSender(name()))); + sender.get().start(); + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ML_INFERENCE_OPENAI_ADDED; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceSettings.java new file mode 100644 index 0000000000000..adb947b01691e --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceSettings.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.openai; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.net.URI; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.convertToUri; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.createUri; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.extractOptionalString; + +/** + * Defines the base settings for interacting with OpenAI. + * @param uri an optional uri to override the openai url. This should only be used for testing. 
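`OpenAiService.init()` above creates its `Sender` lazily and idempotently through `AtomicReference.updateAndGet`, so `infer` and `start` can both call it safely. A small self-contained sketch of that pattern, with simplified stand-ins for the ES sender types (note that under a race, `updateAndGet` may build an extra sender that loses the CAS and is discarded unstarted):

```java
import java.util.Objects;
import java.util.concurrent.atomic.AtomicReference;

final class LazySenderSketch {
    interface Sender { void start(); }

    private final AtomicReference<Sender> sender = new AtomicReference<>();

    Sender initAndGet() {
        // requireNonNullElseGet only builds a new sender when none exists yet
        sender.updateAndGet(current -> Objects.requireNonNullElseGet(current, LazySenderSketch::createSender));
        Sender s = sender.get();
        s.start(); // assumed safe to call repeatedly, as in the PR
        return s;
    }

    private static Sender createSender() {
        return () -> System.out.println("started");
    }

    public static void main(String[] args) {
        var sketch = new LazySenderSketch();
        sketch.initAndGet();
        sketch.initAndGet(); // second call reuses the same sender
    }
}
```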
+ */ +public record OpenAiServiceSettings(@Nullable URI uri, @Nullable String organizationId) implements ServiceSettings { + + public static final String NAME = "openai_service_settings"; + + public static final String URL = "url"; + public static final String ORGANIZATION = "organization_id"; + + public static OpenAiServiceSettings fromMap(Map map) { + ValidationException validationException = new ValidationException(); + + String url = extractOptionalString(map, URL, ModelConfigurations.SERVICE_SETTINGS, validationException); + String organizationId = extractOptionalString(map, ORGANIZATION, ModelConfigurations.SERVICE_SETTINGS, validationException); + + // Throw if any of the settings were empty strings + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + // the url is optional and only for testing + if (url == null) { + return new OpenAiServiceSettings((URI) null, organizationId); + } + + URI uri = convertToUri(url, URL, ModelConfigurations.SERVICE_SETTINGS, validationException); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new OpenAiServiceSettings(uri, organizationId); + } + + public OpenAiServiceSettings(@Nullable String url, @Nullable String organizationId) { + this(createOptionalUri(url), organizationId); + } + + private static URI createOptionalUri(String url) { + if (url == null) { + return null; + } + + return createUri(url); + } + + public OpenAiServiceSettings(StreamInput in) throws IOException { + this(in.readOptionalString(), in.readOptionalString()); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + if (uri != null) { + builder.field(URL, uri.toString()); + } + + if (organizationId != null) { + builder.field(ORGANIZATION, organizationId); + } + + builder.endObject(); + return builder; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ML_INFERENCE_OPENAI_ADDED; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + var uriToWrite = uri != null ? uri.toString() : null; + out.writeOptionalString(uriToWrite); + out.writeOptionalString(organizationId); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModel.java new file mode 100644 index 0000000000000..210b84d8ca31e --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModel.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
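`writeTo` and the `StreamInput` constructor above round-trip two optional strings. Conceptually, an optional string on the wire is a presence flag followed by the value; a JDK-only sketch of that idea (this mirrors the shape of `writeOptionalString`/`readOptionalString`, not Elasticsearch's exact byte layout):

```java
import java.io.*;

final class OptionalStringWireSketch {
    static void writeOptionalString(DataOutputStream out, String value) throws IOException {
        out.writeBoolean(value != null); // presence flag first
        if (value != null) {
            out.writeUTF(value);
        }
    }

    static String readOptionalString(DataInputStream in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }

    public static void main(String[] args) throws IOException {
        var bytes = new ByteArrayOutputStream();
        try (var out = new DataOutputStream(bytes)) {
            writeOptionalString(out, null);      // uri may be null; it exists only for test overrides
            writeOptionalString(out, "org_123"); // organization id is optional too
        }
        try (var in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            System.out.println(readOptionalString(in)); // null
            System.out.println(readOptionalString(in)); // org_123
        }
    }
}
```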
+ */ + +package org.elasticsearch.xpack.inference.services.openai.embeddings; + +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.action.openai.OpenAiActionVisitor; +import org.elasticsearch.xpack.inference.services.openai.OpenAiModel; +import org.elasticsearch.xpack.inference.services.openai.OpenAiServiceSettings; +import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; + +import java.util.Map; + +public class OpenAiEmbeddingsModel extends OpenAiModel { + public OpenAiEmbeddingsModel( + String modelId, + TaskType taskType, + String service, + Map serviceSettings, + Map taskSettings, + Map secrets + ) { + this( + modelId, + taskType, + service, + OpenAiServiceSettings.fromMap(serviceSettings), + OpenAiEmbeddingsTaskSettings.fromMap(taskSettings), + DefaultSecretSettings.fromMap(secrets) + ); + } + + // Should only be used directly for testing + OpenAiEmbeddingsModel( + String modelId, + TaskType taskType, + String service, + OpenAiServiceSettings serviceSettings, + OpenAiEmbeddingsTaskSettings taskSettings, + DefaultSecretSettings secrets + ) { + super(new ModelConfigurations(modelId, taskType, service, serviceSettings, taskSettings), new ModelSecrets(secrets)); + } + + private OpenAiEmbeddingsModel(OpenAiEmbeddingsModel originalModel, OpenAiEmbeddingsTaskSettings taskSettings) { + super( + new ModelConfigurations( + originalModel.getConfigurations().getModelId(), + originalModel.getConfigurations().getTaskType(), + originalModel.getConfigurations().getService(), + originalModel.getServiceSettings(), + taskSettings + ), + new ModelSecrets(originalModel.getSecretSettings()) + ); + } + + @Override + public OpenAiServiceSettings getServiceSettings() { + return (OpenAiServiceSettings) super.getServiceSettings(); + } + + @Override + public OpenAiEmbeddingsTaskSettings getTaskSettings() { + return (OpenAiEmbeddingsTaskSettings) super.getTaskSettings(); + } + + @Override + public DefaultSecretSettings getSecretSettings() { + return (DefaultSecretSettings) super.getSecretSettings(); + } + + @Override + public ExecutableAction accept(OpenAiActionVisitor creator, Map taskSettings) { + return creator.create(this, taskSettings); + } + + public OpenAiEmbeddingsModel overrideWith(Map taskSettings) { + var requestTaskSettings = OpenAiEmbeddingsRequestTaskSettings.fromMap(taskSettings); + + return new OpenAiEmbeddingsModel(this, getTaskSettings().overrideWith(requestTaskSettings)); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettings.java new file mode 100644 index 0000000000000..4933717192266 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettings.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
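`overrideWith` above never mutates the persisted model; it builds a new one whose task settings are the persisted values with any request-supplied fields layered on top. A standalone sketch of that merge, using illustrative records in place of the ES classes:

```java
// Sketch of the override-with-copy pattern in OpenAiEmbeddingsModel.overrideWith:
// the persisted settings stay immutable and request-scoped values win when present.
final class OverrideSketch {
    record TaskSettings(String model, String user) {
        TaskSettings overrideWith(RequestTaskSettings request) {
            // null request fields fall back to the persisted value
            return new TaskSettings(
                request.model() == null ? model : request.model(),
                request.user() == null ? user : request.user()
            );
        }
    }

    record RequestTaskSettings(String model, String user) {}

    public static void main(String[] args) {
        var persisted = new TaskSettings("text-embedding-ada-002", null);
        var merged = persisted.overrideWith(new RequestTaskSettings(null, "override_user"));
        System.out.println(merged); // TaskSettings[model=text-embedding-ada-002, user=override_user]
    }
}
```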
+ */ + +package org.elasticsearch.xpack.inference.services.openai.embeddings; + +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.inference.ModelConfigurations; + +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.extractOptionalString; +import static org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsTaskSettings.MODEL; +import static org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsTaskSettings.USER; + +/** + * This class handles extracting OpenAI task settings from a request. The difference between this class and + * {@link OpenAiEmbeddingsTaskSettings} is that this class considers all fields as optional. It will not throw an error if a field + * is missing. This allows overriding persistent task settings. + * @param model the name of the model to use with this request + * @param user a unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse + */ +public record OpenAiEmbeddingsRequestTaskSettings(String model, String user) { + public static final OpenAiEmbeddingsRequestTaskSettings EMPTY_SETTINGS = new OpenAiEmbeddingsRequestTaskSettings(null, null); + + /** + * Extracts the task settings from a map. All settings are considered optional and the absence of a setting + * does not throw an error. + * @param map the settings received from a request + * @return a {@link OpenAiEmbeddingsRequestTaskSettings} + */ + public static OpenAiEmbeddingsRequestTaskSettings fromMap(Map map) { + if (map.isEmpty()) { + return OpenAiEmbeddingsRequestTaskSettings.EMPTY_SETTINGS; + } + + ValidationException validationException = new ValidationException(); + + String model = extractOptionalString(map, MODEL, ModelConfigurations.TASK_SETTINGS, validationException); + String user = extractOptionalString(map, USER, ModelConfigurations.TASK_SETTINGS, validationException); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new OpenAiEmbeddingsRequestTaskSettings(model, user); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java new file mode 100644 index 0000000000000..05781c03f9cb0 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
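The request-scoped settings above use `extractOptionalString` for every field, while the persisted `OpenAiEmbeddingsTaskSettings` in the next hunk requires `model` via `extractRequiredString`. A sketch contrasting the two modes with a single hypothetical helper (not the real `MapParsingUtils` signatures):

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class ExtractionSketch {
    static String extractString(Map<String, Object> map, String key, boolean required, List<String> errors) {
        Object value = map.remove(key);
        if (value == null) {
            if (required) {
                errors.add("[" + key + "] is required"); // persisted settings: absence is an error
            }
            return null; // request settings: absence is fine
        }
        if (value instanceof String s && s.isEmpty() == false) {
            return s;
        }
        errors.add("[" + key + "] must be a non-empty string");
        return null;
    }

    public static void main(String[] args) {
        var errors = new ArrayList<String>();
        var settings = new HashMap<String, Object>(Map.of("user", "jdoe"));
        extractString(settings, "model", true, errors);  // required and missing: error recorded
        String user = extractString(settings, "user", false, errors); // optional and present
        System.out.println(user + " " + errors); // jdoe [[model] is required]
    }
}
```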
+ */ + +package org.elasticsearch.xpack.inference.services.openai.embeddings; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.TaskSettings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.extractOptionalString; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.extractRequiredString; + +/** + * Defines the task settings for the openai service. + * + * @param model the id of the model to use in the requests to openai + * @param user an optional unique identifier representing the end-user, which can help OpenAI to monitor and detect abuse + * see the openai docs for more details + */ +public record OpenAiEmbeddingsTaskSettings(String model, @Nullable String user) implements TaskSettings { + + public static final String NAME = "openai_embeddings_task_settings"; + public static final String MODEL = "model"; + public static final String USER = "user"; + + public static OpenAiEmbeddingsTaskSettings fromMap(Map map) { + ValidationException validationException = new ValidationException(); + + String model = extractRequiredString(map, MODEL, ModelConfigurations.TASK_SETTINGS, validationException); + String user = extractOptionalString(map, USER, ModelConfigurations.TASK_SETTINGS, validationException); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new OpenAiEmbeddingsTaskSettings(model, user); + } + + public OpenAiEmbeddingsTaskSettings { + Objects.requireNonNull(model); + } + + public OpenAiEmbeddingsTaskSettings(StreamInput in) throws IOException { + this(in.readString(), in.readOptionalString()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MODEL, model); + if (user != null) { + builder.field(USER, user); + } + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ML_INFERENCE_OPENAI_ADDED; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(model); + out.writeOptionalString(user); + } + + public OpenAiEmbeddingsTaskSettings overrideWith(OpenAiEmbeddingsRequestTaskSettings requestSettings) { + var modelToUse = requestSettings.model() == null ? model : requestSettings.model(); + var userToUse = requestSettings.user() == null ? 
user : requestSettings.user(); + + return new OpenAiEmbeddingsTaskSettings(modelToUse, userToUse); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/settings/DefaultSecretSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/settings/DefaultSecretSettings.java new file mode 100644 index 0000000000000..3ad29d56a88be --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/settings/DefaultSecretSettings.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.settings; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SecretSettings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.extractRequiredSecureString; + +/** + * Contains secret settings that are common to all services. + * @param apiKey the key used to authenticate with the 3rd party service + */ +public record DefaultSecretSettings(SecureString apiKey) implements SecretSettings { + public static final String NAME = "default_secret_settings"; + + static final String API_KEY = "api_key"; + + public static DefaultSecretSettings fromMap(Map map) { + ValidationException validationException = new ValidationException(); + SecureString secureApiToken = extractRequiredSecureString(map, API_KEY, ModelSecrets.SECRET_SETTINGS, validationException); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new DefaultSecretSettings(secureApiToken); + } + + public DefaultSecretSettings { + Objects.requireNonNull(apiKey); + } + + public DefaultSecretSettings(StreamInput in) throws IOException { + this(in.readSecureString()); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(API_KEY, apiKey.toString()); + builder.endObject(); + return builder; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ML_INFERENCE_OPENAI_ADDED; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeSecureString(apiKey); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/GetInferenceModelResponseTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/GetInferenceModelResponseTests.java new file mode 100644 index 0000000000000..472e4123c52e6 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/GetInferenceModelResponseTests.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.action; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.inference.InferenceNamedWriteablesProvider; +import org.elasticsearch.xpack.inference.ModelConfigurationsTests; + +import java.io.IOException; +import java.util.ArrayList; + +public class GetInferenceModelResponseTests extends AbstractWireSerializingTestCase { + @Override + protected Writeable.Reader instanceReader() { + return GetInferenceModelAction.Response::new; + } + + @Override + protected GetInferenceModelAction.Response createTestInstance() { + int numModels = randomIntBetween(1, 5); + var modelConfigs = new ArrayList(); + for (int i = 0; i < numModels; i++) { + modelConfigs.add(ModelConfigurationsTests.createRandomInstance()); + } + return new GetInferenceModelAction.Response(modelConfigs); + } + + @Override + protected GetInferenceModelAction.Response mutateInstance(GetInferenceModelAction.Response instance) throws IOException { + var modifiedConfigs = new ArrayList<>(instance.getModels()); + modifiedConfigs.add(ModelConfigurationsTests.createRandomInstance()); + return new GetInferenceModelAction.Response(modifiedConfigs); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(InferenceNamedWriteablesProvider.getNamedWriteables()); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/InferenceActionRequestTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/InferenceActionRequestTests.java index 3e1bea0051656..d263cf8c776ea 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/InferenceActionRequestTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/InferenceActionRequestTests.java @@ -11,10 +11,14 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; +import java.util.ArrayList; import java.util.HashMap; +import static org.hamcrest.collection.IsIterableContainingInOrder.contains; + public class InferenceActionRequestTests extends AbstractWireSerializingTestCase { @Override @@ -27,11 +31,33 @@ protected InferenceAction.Request createTestInstance() { return new InferenceAction.Request( randomFrom(TaskType.values()), randomAlphaOfLength(6), - randomAlphaOfLength(8), + randomList(1, 5, () -> randomAlphaOfLength(8)), randomMap(0, 3, () -> new Tuple<>(randomAlphaOfLength(4), randomAlphaOfLength(4))) ); } + public void testParsing() throws IOException { + String singleInputRequest = """ + { + "input": "single text input" + } + """; + try (var parser = createParser(JsonXContent.jsonXContent, singleInputRequest)) { + var request = InferenceAction.Request.parseRequest("model_id", "sparse_embedding", parser); + assertThat(request.getInput(), contains("single text input")); + } + + String multiInputRequest = """ + { + "input": ["an array", "of", "inputs"] + } + """; + try (var parser = 
createParser(JsonXContent.jsonXContent, multiInputRequest)) { + var request = InferenceAction.Request.parseRequest("model_id", "sparse_embedding", parser); + assertThat(request.getInput(), contains("an array", "of", "inputs")); + } + } + @Override protected InferenceAction.Request mutateInstance(InferenceAction.Request instance) throws IOException { int select = randomIntBetween(0, 3); @@ -46,12 +72,11 @@ protected InferenceAction.Request mutateInstance(InferenceAction.Request instanc instance.getInput(), instance.getTaskSettings() ); - case 2 -> new InferenceAction.Request( - instance.getTaskType(), - instance.getModelId(), - instance.getInput() + "bar", - instance.getTaskSettings() - ); + case 2 -> { + var changedInputs = new ArrayList(instance.getInput()); + changedInputs.add("bar"); + yield new InferenceAction.Request(instance.getTaskType(), instance.getModelId(), changedInputs, instance.getTaskSettings()); + } case 3 -> { var taskSettings = new HashMap<>(instance.getTaskSettings()); if (taskSettings.isEmpty()) { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/InferenceActionResponseTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/InferenceActionResponseTests.java index 795923e56c6bb..515b6c268d0af 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/InferenceActionResponseTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/InferenceActionResponseTests.java @@ -7,18 +7,27 @@ package org.elasticsearch.xpack.inference.action; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; -import org.elasticsearch.xpack.core.ml.inference.results.TextExpansionResultsTests; import org.elasticsearch.xpack.inference.InferenceNamedWriteablesProvider; +import org.elasticsearch.xpack.inference.results.LegacyTextEmbeddingResultsTests; +import org.elasticsearch.xpack.inference.results.SparseEmbeddingResultsTests; +import org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests; import java.io.IOException; import java.util.ArrayList; import java.util.List; -public class InferenceActionResponseTests extends AbstractWireSerializingTestCase { +import static org.elasticsearch.TransportVersions.INFERENCE_MULTIPLE_INPUTS; +import static org.elasticsearch.TransportVersions.INFERENCE_SERVICE_RESULTS_ADDED; +import static org.elasticsearch.TransportVersions.ML_INFERENCE_OPENAI_ADDED; +import static org.elasticsearch.TransportVersions.ML_INFERENCE_TASK_SETTINGS_OPTIONAL_ADDED; +import static org.elasticsearch.xpack.inference.action.InferenceAction.Response.transformToServiceResults; + +public class InferenceActionResponseTests extends AbstractBWCWireSerializationTestCase { @Override protected NamedWriteableRegistry getNamedWriteableRegistry() { @@ -35,11 +44,77 @@ protected Writeable.Reader instanceReader() { @Override protected InferenceAction.Response createTestInstance() { - return new InferenceAction.Response(TextExpansionResultsTests.createRandomResults()); + var result = switch (randomIntBetween(0, 2)) { + case 0 -> TextEmbeddingResultsTests.createRandomResults(); + case 1 -> 
LegacyTextEmbeddingResultsTests.createRandomResults().transformToTextEmbeddingResults(); + default -> SparseEmbeddingResultsTests.createRandomResults(); + }; + + return new InferenceAction.Response(result); } @Override protected InferenceAction.Response mutateInstance(InferenceAction.Response instance) throws IOException { return null; } + + @Override + protected InferenceAction.Response mutateInstanceForVersion(InferenceAction.Response instance, TransportVersion version) { + if (version.before(INFERENCE_MULTIPLE_INPUTS)) { + var singleResultList = instance.getResults().transformToLegacyFormat().subList(0, 1); + return new InferenceAction.Response(transformToServiceResults(singleResultList)); + } + + return instance; + } + + public void testSerializesInferenceServiceResultsAddedVersion() throws IOException { + var instance = createTestInstance(); + var copy = copyWriteable(instance, getNamedWriteableRegistry(), instanceReader(), INFERENCE_SERVICE_RESULTS_ADDED); + assertOnBWCObject(copy, instance, INFERENCE_SERVICE_RESULTS_ADDED); + } + + public void testSerializesOpenAiAddedVersion_UsingLegacyTextEmbeddingResult() throws IOException { + var embeddingResults = LegacyTextEmbeddingResultsTests.createRandomResults().transformToTextEmbeddingResults(); + var instance = new InferenceAction.Response(embeddingResults); + var copy = copyWriteable(instance, getNamedWriteableRegistry(), instanceReader(), ML_INFERENCE_OPENAI_ADDED); + assertOnBWCObject(copy, instance, ML_INFERENCE_OPENAI_ADDED); + } + + public void testSerializesOpenAiAddedVersion_UsingSparseEmbeddingResult() throws IOException { + var embeddingResults = SparseEmbeddingResultsTests.createRandomResults(); + var instance = new InferenceAction.Response(embeddingResults); + var copy = copyWriteable(instance, getNamedWriteableRegistry(), instanceReader(), ML_INFERENCE_OPENAI_ADDED); + assertOnBWCObject(copy, instance, ML_INFERENCE_OPENAI_ADDED); + } + + public void testSerializesMultipleInputsVersion_UsingLegacyTextEmbeddingResult() throws IOException { + var embeddingResults = TextEmbeddingResultsTests.createRandomResults(); + var instance = new InferenceAction.Response(embeddingResults); + var copy = copyWriteable(instance, getNamedWriteableRegistry(), instanceReader(), INFERENCE_MULTIPLE_INPUTS); + assertOnBWCObject(copy, instance, INFERENCE_MULTIPLE_INPUTS); + } + + public void testSerializesMultipleInputsVersion_UsingSparseEmbeddingResult() throws IOException { + var embeddingResults = SparseEmbeddingResultsTests.createRandomResults(); + var instance = new InferenceAction.Response(embeddingResults); + var copy = copyWriteable(instance, getNamedWriteableRegistry(), instanceReader(), INFERENCE_MULTIPLE_INPUTS); + assertOnBWCObject(copy, instance, INFERENCE_MULTIPLE_INPUTS); + } + + // Technically we should never see a text embedding result in the transport version of this test because support + // for it wasn't added until openai + public void testSerializesSingleInputVersion_UsingLegacyTextEmbeddingResult() throws IOException { + var embeddingResults = TextEmbeddingResultsTests.createRandomResults(); + var instance = new InferenceAction.Response(embeddingResults); + var copy = copyWriteable(instance, getNamedWriteableRegistry(), instanceReader(), ML_INFERENCE_TASK_SETTINGS_OPTIONAL_ADDED); + assertOnBWCObject(copy, instance, ML_INFERENCE_TASK_SETTINGS_OPTIONAL_ADDED); + } + + public void testSerializesSingleVersion_UsingSparseEmbeddingResult() throws IOException { + var embeddingResults = 
SparseEmbeddingResultsTests.createRandomResults().transformToLegacyFormat().subList(0, 1); + var instance = new InferenceAction.Response(transformToServiceResults(embeddingResults)); + var copy = copyWriteable(instance, getNamedWriteableRegistry(), instanceReader(), ML_INFERENCE_TASK_SETTINGS_OPTIONAL_ADDED); + assertOnBWCObject(copy, instance, ML_INFERENCE_TASK_SETTINGS_OPTIONAL_ADDED); + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceElserActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceElserActionTests.java index 9809acf536c86..6e1c2d528c467 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceElserActionTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceElserActionTests.java @@ -9,11 +9,12 @@ import org.apache.http.HttpHeaders; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.http.MockResponse; @@ -21,9 +22,12 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.HttpResult; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.elasticsearch.xpack.inference.results.SparseEmbeddingResultsTests; +import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserModel; import org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserSecretSettings; import org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserServiceSettings; @@ -31,18 +35,23 @@ import org.junit.Before; import java.io.IOException; +import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig.DEFAULT_RESULTS_FIELD; +import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; import static org.elasticsearch.xpack.inference.external.http.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.external.http.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.results.SparseEmbeddingResultsTests.buildExpectation; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static 
org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; @@ -66,6 +75,7 @@ public void shutdown() throws IOException { webServer.close(); } + @SuppressWarnings("unchecked") public void testExecute_ReturnsSuccessfulResponse() throws IOException { var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); @@ -83,12 +93,16 @@ public void testExecute_ReturnsSuccessfulResponse() throws IOException { var action = createAction(getUrl(webServer), sender); - PlainActionFuture listener = new PlainActionFuture<>(); - action.execute("abc", listener); + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); - InferenceResults result = listener.actionGet(TIMEOUT); + var result = listener.actionGet(TIMEOUT); + + assertThat( + result.asMap(), + is(buildExpectation(List.of(new SparseEmbeddingResultsTests.EmbeddingExpectation(Map.of(".", 0.13315596f), false)))) + ); - assertThat(result.asMap(), is(Map.of(DEFAULT_RESULTS_FIELD, Map.of(".", 0.13315596f)))); assertThat(webServer.requests(), hasSize(1)); assertNull(webServer.requests().get(0).getUri().getQuery()); assertThat( @@ -99,7 +113,9 @@ public void testExecute_ReturnsSuccessfulResponse() throws IOException { var requestMap = entityAsMap(webServer.requests().get(0).getBody()); assertThat(requestMap.size(), is(1)); - assertThat(requestMap.get("inputs"), is("abc")); + assertThat(requestMap.get("inputs"), instanceOf(List.class)); + var inputList = (List) requestMap.get("inputs"); + assertThat(inputList, contains("abc")); } } @@ -110,32 +126,53 @@ public void testExecute_ThrowsURISyntaxException_ForInvalidUrl() throws IOExcept } } - public void testExecute_ThrowsElasticsearchException() { + public void testExecute_ThrowsElasticsearchException_WhenSenderThrows() { var sender = mock(Sender.class); doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any()); var action = createAction(getUrl(webServer), sender); - PlainActionFuture listener = new PlainActionFuture<>(); - action.execute("abc", listener); + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); assertThat(thrownException.getMessage(), is("failed")); } + public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled() { + var sender = mock(Sender.class); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onFailure(new IllegalStateException("failed")); + + return Void.TYPE; + }).when(sender).send(any(), any()); + + var action = createAction(getUrl(webServer), sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), is(format("Failed to send ELSER Hugging Face request to [%s]", getUrl(webServer)))); + } + public void testExecute_ThrowsException() { var sender = mock(Sender.class); doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any()); var action = createAction(getUrl(webServer), sender); - PlainActionFuture listener = new PlainActionFuture<>(); - action.execute("abc", listener); + 
PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); - assertThat(thrownException.getMessage(), is("Failed to send request ELSER Hugging Face request")); + assertThat(thrownException.getMessage(), is(format("Failed to send ELSER Hugging Face request to [%s]", getUrl(webServer)))); } private HuggingFaceElserAction createAction(String url, Sender sender) { @@ -147,6 +184,6 @@ private HuggingFaceElserAction createAction(String url, Sender sender) { new HuggingFaceElserSecretSettings(new SecureString("secret".toCharArray())) ); - return new HuggingFaceElserAction(sender, model, mock(ThrottlerManager.class)); + return new HuggingFaceElserAction(sender, model, new ServiceComponents(threadPool, mock(ThrottlerManager.class), Settings.EMPTY)); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreatorTests.java new file mode 100644 index 0000000000000..edd51d7aa6b4c --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreatorTests.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.action.openai; + +import org.apache.http.HttpHeaders; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; +import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; +import static org.elasticsearch.xpack.inference.external.http.Utils.inferenceUtilityPool; +import static org.elasticsearch.xpack.inference.external.http.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.ORGANIZATION_HEADER; +import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectation; +import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; +import static org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModelTests.createModel; +import static org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsRequestTaskSettingsTests.getRequestTaskSettingsMap; +import static org.hamcrest.Matchers.equalTo; +import 
static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; + +public class OpenAiActionCreatorTests extends ESTestCase { + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + private final MockWebServer webServer = new MockWebServer(); + private ThreadPool threadPool; + private HttpClientManager clientManager; + + @Before + public void init() throws Exception { + webServer.start(); + threadPool = createThreadPool(inferenceUtilityPool()); + clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mock(ThrottlerManager.class)); + } + + @After + public void shutdown() throws IOException { + clientManager.close(); + terminate(threadPool); + webServer.close(); + } + + public void testCreate_OpenAiEmbeddingsModel() throws IOException { + var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var model = createModel(getUrl(webServer), "org", "secret", "model", "user"); + var actionCreator = new OpenAiActionCreator(sender, createWithEmptySettings(threadPool)); + var overriddenTaskSettings = getRequestTaskSettingsMap(null, "overridden_user"); + var action = actionCreator.create(model, overriddenTaskSettings); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), is(buildExpectation(List.of(List.of(0.0123F, -0.0123F))))); + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertThat(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER), equalTo("org")); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(3)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("user"), is("overridden_user")); + } + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsActionTests.java new file mode 100644 index 0000000000000..eabaf02b377dd --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiEmbeddingsActionTests.java @@ -0,0 +1,229 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
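The test above drives a real `HttpRequestSenderFactory` sender against the ES `MockWebServer` fixture: enqueue a canned embeddings payload, run the action, then assert on the recorded request headers and body. The same canned-response pattern, sketched with only the JDK so it runs anywhere (the endpoint path, header, and body values below are illustrative):

```java
import com.sun.net.httpserver.HttpServer;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.http.*;

final class CannedResponseSketch {
    public static void main(String[] args) throws Exception {
        var server = HttpServer.create(new InetSocketAddress(0), 0);
        var json = "{\"data\":[{\"embedding\":[0.0123,-0.0123]}]}";
        server.createContext("/v1/embeddings", exchange -> {
            var body = json.getBytes();
            exchange.getResponseHeaders().add("Content-Type", "application/json");
            exchange.sendResponseHeaders(200, body.length);
            try (var out = exchange.getResponseBody()) {
                out.write(body); // canned response, like MockWebServer.enqueue
            }
        });
        server.start();
        try {
            var client = HttpClient.newHttpClient();
            var request = HttpRequest.newBuilder(
                URI.create("http://localhost:" + server.getAddress().getPort() + "/v1/embeddings")
            )
                .header("Authorization", "Bearer secret") // same header the action test asserts on
                .POST(HttpRequest.BodyPublishers.ofString("{\"input\":[\"abc\"],\"model\":\"model\",\"user\":\"user\"}"))
                .build();
            var response = client.send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println(response.body()); // the canned embedding payload
        } finally {
            server.stop(0);
        }
    }
}
```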
+ */ + +package org.elasticsearch.xpack.inference.external.action.openai; + +import org.apache.http.HttpHeaders; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; +import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; +import static org.elasticsearch.xpack.inference.external.http.Utils.inferenceUtilityPool; +import static org.elasticsearch.xpack.inference.external.http.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.ORGANIZATION_HEADER; +import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectation; +import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; +import static org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModelTests.createModel; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; + +public class OpenAiEmbeddingsActionTests extends ESTestCase { + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + private final MockWebServer webServer = new MockWebServer(); + private ThreadPool threadPool; + private HttpClientManager clientManager; + + @Before + public void init() throws Exception { + webServer.start(); + threadPool = createThreadPool(inferenceUtilityPool()); + clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mock(ThrottlerManager.class)); + } + + @After + public void shutdown() throws IOException { + clientManager.close(); + terminate(threadPool); + webServer.close(); + } + + public void testExecute_ReturnsSuccessfulResponse() throws IOException { + var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + 
"prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), is(buildExpectation(List.of(List.of(0.0123F, -0.0123F))))); + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertThat(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER), equalTo("org")); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(3)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("user"), is("user")); + } + } + + public void testExecute_ThrowsURISyntaxException_ForInvalidUrl() throws IOException { + try (var sender = mock(Sender.class)) { + var thrownException = expectThrows( + IllegalArgumentException.class, + () -> createAction("^^", "org", "secret", "model", "user", sender) + ); + assertThat(thrownException.getMessage(), is("unable to parse url [^^]")); + } + } + + public void testExecute_ThrowsElasticsearchException() { + var sender = mock(Sender.class); + doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any()); + + var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), is("failed")); + } + + public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled() { + var sender = mock(Sender.class); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onFailure(new IllegalStateException("failed")); + + return Void.TYPE; + }).when(sender).send(any(), any()); + + var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), is(format("Failed to send OpenAI embeddings request to [%s]", getUrl(webServer)))); + } + + public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled_WhenUrlIsNull() { + var sender = mock(Sender.class); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onFailure(new IllegalStateException("failed")); + + return Void.TYPE; + }).when(sender).send(any(), any()); + + var action = createAction(null, "org", "secret", "model", "user", sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> 
listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), is("Failed to send OpenAI embeddings request")); + } + + public void testExecute_ThrowsException() { + var sender = mock(Sender.class); + doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any()); + + var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), is(format("Failed to send OpenAI embeddings request to [%s]", getUrl(webServer)))); + } + + public void testExecute_ThrowsExceptionWithNullUrl() { + var sender = mock(Sender.class); + doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any()); + + var action = createAction(null, "org", "secret", "model", "user", sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(List.of("abc"), listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), is("Failed to send OpenAI embeddings request")); + } + + private OpenAiEmbeddingsAction createAction( + String url, + String org, + String apiKey, + String modelName, + @Nullable String user, + Sender sender + ) { + var model = createModel(url, org, apiKey, modelName, user); + + return new OpenAiEmbeddingsAction(sender, model, createWithEmptySettings(threadPool)); + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpUtilsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpUtilsTests.java new file mode 100644 index 0000000000000..affbd43958e29 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpUtilsTests.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http; + +import org.apache.http.HttpResponse; +import org.apache.http.StatusLine; +import org.apache.http.client.methods.HttpRequestBase; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.xpack.inference.external.http.HttpUtils.checkForEmptyBody; +import static org.elasticsearch.xpack.inference.external.http.HttpUtils.checkForFailureStatusCode; +import static org.elasticsearch.xpack.inference.logging.ThrottlerManagerTests.mockThrottlerManager; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class HttpUtilsTests extends ESTestCase { + public void testCheckForFailureStatusCode_ThrowsWhenStatusCodeIs300() { + var statusLine = mock(StatusLine.class); + when(statusLine.getStatusCode()).thenReturn(300); + + var httpResponse = mock(HttpResponse.class); + when(httpResponse.getStatusLine()).thenReturn(statusLine); + + var result = new HttpResult(httpResponse, new byte[0]); + + var thrownException = expectThrows( + IllegalStateException.class, + () -> checkForFailureStatusCode(mockThrottlerManager(), mock(Logger.class), mock(HttpRequestBase.class), result) + ); + + assertThat(thrownException.getMessage(), is("Unhandled redirection for request [null] status [300]")); + } + + public void testCheckForFailureStatusCode_DoesNotThrowWhenStatusCodeIs200() { + var statusLine = mock(StatusLine.class); + when(statusLine.getStatusCode()).thenReturn(200); + + var httpResponse = mock(HttpResponse.class); + when(httpResponse.getStatusLine()).thenReturn(statusLine); + + var result = new HttpResult(httpResponse, new byte[0]); + + checkForFailureStatusCode(mockThrottlerManager(), mock(Logger.class), mock(HttpRequestBase.class), result); + } + + public void testCheckForEmptyBody_DoesNotThrowWhenTheBodyIsNotEmpty() { + var httpResponse = mock(HttpResponse.class); + when(httpResponse.getStatusLine()).thenReturn(mock(StatusLine.class)); + + var result = new HttpResult(httpResponse, new byte[] { 'a' }); + + checkForEmptyBody(mockThrottlerManager(), mock(Logger.class), mock(HttpRequestBase.class), result); + } + + public void testCheckForEmptyBody_ThrowsWhenTheBodyIsEmpty() { + var httpResponse = mock(HttpResponse.class); + when(httpResponse.getStatusLine()).thenReturn(mock(StatusLine.class)); + + var result = new HttpResult(httpResponse, new byte[0]); + + var thrownException = expectThrows( + IllegalStateException.class, + () -> checkForEmptyBody(mockThrottlerManager(), mock(Logger.class), mock(HttpRequestBase.class), result) + ); + + assertThat(thrownException.getMessage(), is("Response body was empty for request [null]")); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/Utils.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/Utils.java index 22c36fe38a25c..b433306ec8261 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/Utils.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/Utils.java @@ -18,6 +18,7 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.http.retry.RetrySettings; import 
org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; @@ -47,7 +48,8 @@ public static ClusterService mockClusterService(Settings settings) { HttpSettings.getSettings(), HttpClientManager.getSettings(), HttpRequestSenderFactory.HttpRequestSender.getSettings(), - ThrottlerManager.getSettings() + ThrottlerManager.getSettings(), + RetrySettings.getSettingsDefinitions() ).flatMap(Collection::stream).collect(Collectors.toSet()); var cSettings = new ClusterSettings(settings, registeredSettings); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetrySettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetrySettingsTests.java new file mode 100644 index 0000000000000..940205a663337 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetrySettingsTests.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.retry; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.ESTestCase; + +public class RetrySettingsTests extends ESTestCase { + + /** + * Creates a {@link RetrySettings} object with initial delay of 1 millisecond, max delay bound of 1 millisecond, + * and timeout of 30 seconds + */ + public static RetrySettings createDefaultRetrySettings() { + return createRetrySettings(TimeValue.timeValueMillis(1), TimeValue.timeValueMillis(1), TimeValue.timeValueSeconds(30)); + } + + public static RetrySettings createRetrySettings(TimeValue initialDelay, TimeValue maxDelayBound, TimeValue timeout) { + var settings = buildSettingsWithRetryFields(initialDelay, maxDelayBound, timeout); + + return new RetrySettings(settings); + } + + public static Settings buildSettingsWithRetryFields(TimeValue initialDelay, TimeValue maxDelayBound, TimeValue timeout) { + return Settings.builder() + .put(RetrySettings.RETRY_INITIAL_DELAY_SETTING.getKey(), initialDelay) + .put(RetrySettings.RETRY_MAX_DELAY_BOUND_SETTING.getKey(), maxDelayBound) + .put(RetrySettings.RETRY_TIMEOUT_SETTING.getKey(), timeout) + .build(); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java new file mode 100644 index 0000000000000..98c0afe655421 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java @@ -0,0 +1,451 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http.retry; + +import org.apache.http.ConnectionClosedException; +import org.apache.http.HttpResponse; +import org.apache.http.StatusLine; +import org.apache.http.client.methods.HttpRequestBase; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.junit.Before; +import org.mockito.stubbing.Answer; + +import static org.elasticsearch.xpack.inference.external.http.retry.RetrySettingsTests.createDefaultRetrySettings; +import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class RetryingHttpSenderTests extends ESTestCase { + private static final TimeValue TIMEOUT = TimeValue.timeValueSeconds(30); + + private DeterministicTaskQueue taskQueue; + + @Before + public void init() throws Exception { + taskQueue = new DeterministicTaskQueue(); + } + + public void testSend_CallsSenderAgain_AfterValidateResponseThrowsAnException() { + var sender = mock(Sender.class); + var httpResponse = mockHttpResponse(); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onResponse(new HttpResult(httpResponse, new byte[0])); + + return Void.TYPE; + }).when(sender).send(any(), any()); + + var inferenceResults = mock(InferenceServiceResults.class); + Answer answer = (invocation) -> inferenceResults; + + var handler = mock(ResponseHandler.class); + doThrow(new RetryException(true, "failed")).doNothing().when(handler).validateResponse(any(), any(), any(), any()); + // Mockito.thenReturn() does not compile when returning a + // bounded wildcard list, so thenAnswer must be used instead.
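+ // validateResponse throws a retryable failure on the first call and succeeds on the second, so the request should be sent exactly twice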
+ when(handler.parseResult(any())).thenAnswer(answer); + + var retrier = new RetryingHttpSender( + sender, + mock(ThrottlerManager.class), + mock(Logger.class), + createDefaultRetrySettings(), + taskQueue.getThreadPool(), + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + + var listener = new PlainActionFuture(); + executeTasks(() -> retrier.send(mock(HttpRequestBase.class), handler, listener), 1); + + assertThat(listener.actionGet(TIMEOUT), is(inferenceResults)); + verify(sender, times(2)).send(any(), any()); + } + + public void testSend_CallsSenderAgain_WhenAFailureStatusCodeIsReturned() { + var statusLine = mock(StatusLine.class); + when(statusLine.getStatusCode()).thenReturn(300).thenReturn(200); + + var httpResponse = mock(HttpResponse.class); + when(httpResponse.getStatusLine()).thenReturn(statusLine); + + var sender = mock(Sender.class); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onResponse(new HttpResult(httpResponse, new byte[] { 'a' })); + + return Void.TYPE; + }).when(sender).send(any(), any()); + + var inferenceResults = mock(InferenceServiceResults.class); + + var handler = new AlwaysRetryingResponseHandler("test", result -> inferenceResults); + + var retrier = new RetryingHttpSender( + sender, + mock(ThrottlerManager.class), + mock(Logger.class), + createDefaultRetrySettings(), + taskQueue.getThreadPool(), + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + + var listener = new PlainActionFuture(); + executeTasks(() -> retrier.send(mock(HttpRequestBase.class), handler, listener), 1); + + assertThat(listener.actionGet(TIMEOUT), is(inferenceResults)); + verify(sender, times(2)).send(any(), any()); + } + + public void testSend_CallsSenderAgain_WhenParsingFailsOnce() { + var sender = mock(Sender.class); + var httpResponse = mockHttpResponse(); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onResponse(new HttpResult(httpResponse, new byte[] { 'a' })); + + return Void.TYPE; + }).when(sender).send(any(), any()); + + var inferenceResults = mock(InferenceServiceResults.class); + Answer answer = (invocation) -> inferenceResults; + + var handler = mock(ResponseHandler.class); + when(handler.parseResult(any())).thenThrow(new RetryException(true, "failed")).thenAnswer(answer); + + var retrier = new RetryingHttpSender( + sender, + mock(ThrottlerManager.class), + mock(Logger.class), + createDefaultRetrySettings(), + taskQueue.getThreadPool(), + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + + var listener = new PlainActionFuture(); + executeTasks(() -> retrier.send(mock(HttpRequestBase.class), handler, listener), 1); + + assertThat(listener.actionGet(TIMEOUT), is(inferenceResults)); + verify(sender, times(2)).send(any(), any()); + } + + public void testSend_DoesNotCallSenderAgain_WhenParsingFailsWithNonRetryableException() { + var sender = mock(Sender.class); + var httpResponse = mockHttpResponse(); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onResponse(new HttpResult(httpResponse, new byte[] { 'a' })); + + return Void.TYPE; + }).when(sender).send(any(), any()); + + var inferenceResults = mock(InferenceServiceResults.class); + Answer answer = (invocation) -> inferenceResults; + + var handler = mock(ResponseHandler.class); + when(handler.parseResult(any())).thenThrow(new 
IllegalStateException("failed")).thenAnswer(answer); + + var retrier = new RetryingHttpSender( + sender, + mock(ThrottlerManager.class), + mock(Logger.class), + createDefaultRetrySettings(), + taskQueue.getThreadPool(), + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + + var listener = new PlainActionFuture(); + executeTasks(() -> retrier.send(mock(HttpRequestBase.class), handler, listener), 0); + + var thrownException = expectThrows(IllegalStateException.class, () -> listener.actionGet(TIMEOUT)); + assertThat(thrownException.getMessage(), is("failed")); + + verify(sender, times(1)).send(any(), any()); + } + + public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnce() { + var sender = mock(Sender.class); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onFailure(new RetryException(true, "failed")); + + return Void.TYPE; + }).doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onResponse(new HttpResult(mock(HttpResponse.class), new byte[] { 'a' })); + + return Void.TYPE; + }).when(sender).send(any(), any()); + + var inferenceResults = mock(InferenceServiceResults.class); + Answer answer = (invocation) -> inferenceResults; + + var handler = mock(ResponseHandler.class); + when(handler.parseResult(any())).thenAnswer(answer); + + var retrier = new RetryingHttpSender( + sender, + mock(ThrottlerManager.class), + mock(Logger.class), + createDefaultRetrySettings(), + taskQueue.getThreadPool(), + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + + var listener = new PlainActionFuture(); + executeTasks(() -> retrier.send(mock(HttpRequestBase.class), handler, listener), 1); + + assertThat(listener.actionGet(TIMEOUT), is(inferenceResults)); + verify(sender, times(2)).send(any(), any()); + } + + public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnceWithConnectionClosedException() { + var sender = mock(Sender.class); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onFailure(new ConnectionClosedException("failed")); + + return Void.TYPE; + }).doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onResponse(new HttpResult(mock(HttpResponse.class), new byte[] { 'a' })); + + return Void.TYPE; + }).when(sender).send(any(), any()); + + var inferenceResults = mock(InferenceServiceResults.class); + Answer answer = (invocation) -> inferenceResults; + + var handler = mock(ResponseHandler.class); + when(handler.parseResult(any())).thenAnswer(answer); + + var retrier = new RetryingHttpSender( + sender, + mock(ThrottlerManager.class), + mock(Logger.class), + createDefaultRetrySettings(), + taskQueue.getThreadPool(), + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + + var listener = new PlainActionFuture(); + executeTasks(() -> retrier.send(mock(HttpRequestBase.class), handler, listener), 1); + + assertThat(listener.actionGet(TIMEOUT), is(inferenceResults)); + verify(sender, times(2)).send(any(), any()); + } + + public void testSend_ReturnsFailure_WhenValidateResponseThrowsAnException_AfterOneRetry() { + var httpResponse = mock(HttpResponse.class); + when(httpResponse.getStatusLine()).thenReturn(mock(StatusLine.class)); + + var sender = mock(Sender.class); + + doAnswer(invocation -> { + 
@SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onResponse(new HttpResult(httpResponse, new byte[0])); + + return Void.TYPE; + }).when(sender).send(any(), any()); + + var inferenceResults = mock(InferenceServiceResults.class); + Answer answer = (invocation) -> inferenceResults; + + var handler = mock(ResponseHandler.class); + doThrow(new RetryException(true, "failed")).doThrow(new IllegalStateException("failed again")) + .when(handler) + .validateResponse(any(), any(), any(), any()); + when(handler.parseResult(any())).thenAnswer(answer); + + var retrier = new RetryingHttpSender( + sender, + mock(ThrottlerManager.class), + mock(Logger.class), + createDefaultRetrySettings(), + taskQueue.getThreadPool(), + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + + var listener = new PlainActionFuture(); + executeTasks(() -> retrier.send(mock(HttpRequestBase.class), handler, listener), 1); + + var thrownException = expectThrows(IllegalStateException.class, () -> listener.actionGet(TIMEOUT)); + assertThat(thrownException.getMessage(), is("failed again")); + assertThat(thrownException.getSuppressed().length, is(1)); + assertThat(thrownException.getSuppressed()[0].getMessage(), is("failed")); + + verify(sender, times(2)).send(any(), any()); + } + + public void testSend_ReturnsFailure_WhenValidateResponseThrowsAnElasticsearchException_AfterOneRetry() { + var httpResponse = mock(HttpResponse.class); + when(httpResponse.getStatusLine()).thenReturn(mock(StatusLine.class)); + + var sender = mock(Sender.class); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onResponse(new HttpResult(httpResponse, new byte[0])); + + return Void.TYPE; + }).when(sender).send(any(), any()); + + var inferenceResults = mock(InferenceServiceResults.class); + Answer answer = (invocation) -> inferenceResults; + + var handler = mock(ResponseHandler.class); + doThrow(new RetryException(true, "failed")).doThrow(new RetryException(false, "failed again")) + .when(handler) + .validateResponse(any(), any(), any(), any()); + when(handler.parseResult(any())).thenAnswer(answer); + + var retrier = new RetryingHttpSender( + sender, + mock(ThrottlerManager.class), + mock(Logger.class), + createDefaultRetrySettings(), + taskQueue.getThreadPool(), + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + + var listener = new PlainActionFuture(); + executeTasks(() -> retrier.send(mock(HttpRequestBase.class), handler, listener), 1); + + var thrownException = expectThrows(RetryException.class, () -> listener.actionGet(TIMEOUT)); + assertThat(thrownException.getMessage(), is("failed again")); + assertThat(thrownException.getSuppressed().length, is(1)); + assertThat(thrownException.getSuppressed()[0].getMessage(), is("failed")); + verify(sender, times(2)).send(any(), any()); + } + + public void testSend_ReturnsFailure_WhenHttpResultsListenerCallsOnFailure_AfterOneRetry() { + var httpResponse = mock(HttpResponse.class); + when(httpResponse.getStatusLine()).thenReturn(mock(StatusLine.class)); + + var sender = mock(Sender.class); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onFailure(new RetryException(true, "failed")); + + return Void.TYPE; + }).doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onFailure(new 
RetryException(false, "failed again")); + + return Void.TYPE; + }).when(sender).send(any(), any()); + + var handler = mock(ResponseHandler.class); + + var retrier = new RetryingHttpSender( + sender, + mock(ThrottlerManager.class), + mock(Logger.class), + createDefaultRetrySettings(), + taskQueue.getThreadPool(), + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + + var listener = new PlainActionFuture(); + executeTasks(() -> retrier.send(mock(HttpRequestBase.class), handler, listener), 1); + + var thrownException = expectThrows(RetryException.class, () -> listener.actionGet(TIMEOUT)); + assertThat(thrownException.getMessage(), is("failed again")); + assertThat(thrownException.getSuppressed().length, is(1)); + assertThat(thrownException.getSuppressed()[0].getMessage(), is("failed")); + verify(sender, times(2)).send(any(), any()); + } + + public void testSend_ReturnsFailure_WhenHttpResultsListenerCallsOnFailure_WithNonRetryableException() { + var httpResponse = mock(HttpResponse.class); + when(httpResponse.getStatusLine()).thenReturn(mock(StatusLine.class)); + + var sender = mock(Sender.class); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onFailure(new IllegalStateException("failed")); + + return Void.TYPE; + }).when(sender).send(any(), any()); + + var handler = mock(ResponseHandler.class); + + var retrier = new RetryingHttpSender( + sender, + mock(ThrottlerManager.class), + mock(Logger.class), + createDefaultRetrySettings(), + taskQueue.getThreadPool(), + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + + var listener = new PlainActionFuture(); + executeTasks(() -> retrier.send(mock(HttpRequestBase.class), handler, listener), 0); + + var thrownException = expectThrows(IllegalStateException.class, () -> listener.actionGet(TIMEOUT)); + assertThat(thrownException.getMessage(), is("failed")); + assertThat(thrownException.getSuppressed().length, is(0)); + verify(sender, times(1)).send(any(), any()); + } + + private static HttpResponse mockHttpResponse() { + var statusLine = mock(StatusLine.class); + when(statusLine.getStatusCode()).thenReturn(200); + + var httpResponse = mock(HttpResponse.class); + when(httpResponse.getStatusLine()).thenReturn(statusLine); + + return httpResponse; + } + + private void executeTasks(Runnable runnable, int retries) { + taskQueue.scheduleNow(runnable); + // Execute the task scheduled from the line above + taskQueue.runAllRunnableTasks(); + + for (int i = 0; i < retries; i++) { + // set the timing correctly to get ready to run the next task + taskQueue.advanceTime(); + taskQueue.runAllRunnableTasks(); + } + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestExecutorServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestExecutorServiceTests.java index 2dd31144b3bc2..992f0d68bd920 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestExecutorServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestExecutorServiceTests.java @@ -54,20 +54,20 @@ public void shutdown() { } public void testQueueSize_IsEmpty() { - var service = new HttpRequestExecutorService(getTestName(), mock(HttpClient.class), threadPool); + var service = new HttpRequestExecutorService(getTestName(), mock(HttpClient.class), threadPool, null); 
assertThat(service.queueSize(), is(0)); } public void testQueueSize_IsOne() { - var service = new HttpRequestExecutorService(getTestName(), mock(HttpClient.class), threadPool); + var service = new HttpRequestExecutorService(getTestName(), mock(HttpClient.class), threadPool, null); service.send(mock(HttpRequestBase.class), null, new PlainActionFuture<>()); assertThat(service.queueSize(), is(1)); } public void testExecute_ThrowsUnsupported() { - var service = new HttpRequestExecutorService(getTestName(), mock(HttpClient.class), threadPool); + var service = new HttpRequestExecutorService(getTestName(), mock(HttpClient.class), threadPool, null); var noopTask = mock(RequestTask.class); var thrownException = expectThrows(UnsupportedOperationException.class, () -> service.execute(noopTask)); @@ -75,16 +75,18 @@ public void testExecute_ThrowsUnsupported() { } public void testIsTerminated_IsFalse() { - var service = new HttpRequestExecutorService(getTestName(), mock(HttpClient.class), threadPool); + var service = new HttpRequestExecutorService(getTestName(), mock(HttpClient.class), threadPool, null); assertFalse(service.isTerminated()); } - public void testIsTerminated_IsTrue() { - var service = new HttpRequestExecutorService(getTestName(), mock(HttpClient.class), threadPool); + public void testIsTerminated_IsTrue() throws InterruptedException { + var latch = new CountDownLatch(1); + var service = new HttpRequestExecutorService(getTestName(), mock(HttpClient.class), threadPool, latch); service.shutdown(); service.start(); + latch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); assertTrue(service.isTerminated()); } @@ -98,7 +100,7 @@ public void testIsTerminated_AfterStopFromSeparateThread() throws Exception { return Void.TYPE; }).when(mockHttpClient).send(any(), any(), any()); - var service = new HttpRequestExecutorService(getTestName(), mockHttpClient, threadPool); + var service = new HttpRequestExecutorService(getTestName(), mockHttpClient, threadPool, null); Future executorTermination = threadPool.generic().submit(() -> { try { @@ -127,7 +129,7 @@ public void testIsTerminated_AfterStopFromSeparateThread() throws Exception { } public void testSend_AfterShutdown_Throws() { - var service = new HttpRequestExecutorService("test_service", mock(HttpClient.class), threadPool); + var service = new HttpRequestExecutorService("test_service", mock(HttpClient.class), threadPool, null); service.shutdown(); @@ -143,7 +145,7 @@ public void testSend_AfterShutdown_Throws() { } public void testSend_Throws_WhenQueueIsFull() { - var service = new HttpRequestExecutorService("test_service", mock(HttpClient.class), threadPool, 1); + var service = new HttpRequestExecutorService("test_service", mock(HttpClient.class), threadPool, 1, null); service.send(mock(HttpRequestBase.class), null, new PlainActionFuture<>()); var listener = new PlainActionFuture(); @@ -160,7 +162,7 @@ public void testSend_Throws_WhenQueueIsFull() { public void testTaskThrowsError_CallsOnFailure() throws Exception { var httpClient = mock(HttpClient.class); - var service = new HttpRequestExecutorService(getTestName(), httpClient, threadPool); + var service = new HttpRequestExecutorService(getTestName(), httpClient, threadPool, null); doAnswer(invocation -> { service.shutdown(); @@ -180,7 +182,7 @@ public void testTaskThrowsError_CallsOnFailure() throws Exception { } public void testShutdown_AllowsMultipleCalls() { - var service = new HttpRequestExecutorService(getTestName(), mock(HttpClient.class), threadPool); + var service = new 
HttpRequestExecutorService(getTestName(), mock(HttpClient.class), threadPool, null); service.shutdown(); service.shutdown(); @@ -192,7 +194,7 @@ public void testShutdown_AllowsMultipleCalls() { } public void testSend_CallsOnFailure_WhenRequestTimesOut() { - var service = new HttpRequestExecutorService("test_service", mock(HttpClient.class), threadPool); + var service = new HttpRequestExecutorService("test_service", mock(HttpClient.class), threadPool, null); var listener = new PlainActionFuture(); service.send(mock(HttpRequestBase.class), TimeValue.timeValueNanos(1), listener); @@ -206,7 +208,7 @@ public void testSend_CallsOnFailure_WhenRequestTimesOut() { } public void testSend_NotifiesTasksOfShutdown() { - var service = new HttpRequestExecutorService("test_service", mock(HttpClient.class), threadPool); + var service = new HttpRequestExecutorService("test_service", mock(HttpClient.class), threadPool, null); var listener = new PlainActionFuture(); service.send(mock(HttpRequestBase.class), null, listener); @@ -228,7 +230,7 @@ public void testQueueTake_Throwing_DoesNotCauseServiceToTerminate() throws Inter BlockingQueue queue = mock(LinkedBlockingQueue.class); when(queue.take()).thenThrow(new ElasticsearchException("failed")).thenReturn(new ShutdownTask()); - var service = new HttpRequestExecutorService("test_service", mock(HttpClient.class), threadPool, queue); + var service = new HttpRequestExecutorService("test_service", mock(HttpClient.class), threadPool, queue, null); service.start(); @@ -241,7 +243,7 @@ public void testQueueTake_ThrowingInterruptedException_TerminatesService() throw BlockingQueue queue = mock(LinkedBlockingQueue.class); when(queue.take()).thenThrow(new InterruptedException("failed")); - var service = new HttpRequestExecutorService("test_service", mock(HttpClient.class), threadPool, queue); + var service = new HttpRequestExecutorService("test_service", mock(HttpClient.class), threadPool, queue, null); Future executorTermination = threadPool.generic().submit(() -> { try { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactoryTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactoryTests.java index 82c41794695fd..af4ac7cd59977 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactoryTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactoryTests.java @@ -39,6 +39,7 @@ import static org.elasticsearch.xpack.inference.external.http.Utils.mockClusterServiceEmpty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.any; @@ -117,7 +118,10 @@ public void testHttpRequestSender_Throws_WhenATimeoutOccurs() throws Exception { var senderFactory = new HttpRequestSenderFactory(threadPool, mockManager, mockClusterServiceEmpty(), Settings.EMPTY); try (var sender = senderFactory.createSender("test_service")) { - sender.setMaxRequestTimeout(TimeValue.timeValueNanos(1)); + assertThat(sender, instanceOf(HttpRequestSenderFactory.HttpRequestSender.class)); + // hack to get around the sender interface so we can set the timeout directly + var httpSender = 
(HttpRequestSenderFactory.HttpRequestSender) sender; + httpSender.setMaxRequestTimeout(TimeValue.timeValueNanos(1)); sender.start(); PlainActionFuture listener = new PlainActionFuture<>(); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/huggingface/HuggingFaceClientTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/huggingface/HuggingFaceClientTests.java index 0cc97ca38de80..65d665b71f8ee 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/huggingface/HuggingFaceClientTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/huggingface/HuggingFaceClientTests.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; @@ -21,24 +21,29 @@ import org.elasticsearch.xpack.inference.external.http.HttpClientManager; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.results.SparseEmbeddingResultsTests; +import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.junit.After; import org.junit.Before; import java.io.IOException; import java.net.URISyntaxException; +import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.core.Strings.format; -import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig.DEFAULT_RESULTS_FIELD; import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; import static org.elasticsearch.xpack.inference.external.http.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.external.http.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.external.http.retry.RetrySettingsTests.buildSettingsWithRetryFields; import static org.elasticsearch.xpack.inference.external.request.huggingface.HuggingFaceElserRequestTests.createRequest; import static org.elasticsearch.xpack.inference.logging.ThrottlerManagerTests.mockThrottlerManager; +import static org.elasticsearch.xpack.inference.results.SparseEmbeddingResultsTests.buildExpectation; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doThrow; @@ -64,6 +69,7 @@ public void shutdown() throws IOException { webServer.close(); } + @SuppressWarnings("unchecked") public void testSend_SuccessfulResponse() throws IOException, URISyntaxException { var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); @@ -79,14 +85,20 @@ public void testSend_SuccessfulResponse() throws IOException, URISyntaxException """; webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); - HuggingFaceClient 
huggingFaceClient = new HuggingFaceClient(sender, mockThrottlerManager()); + HuggingFaceClient huggingFaceClient = new HuggingFaceClient( + sender, + new ServiceComponents(threadPool, mockThrottlerManager(), Settings.EMPTY) + ); - PlainActionFuture listener = new PlainActionFuture<>(); + PlainActionFuture listener = new PlainActionFuture<>(); huggingFaceClient.send(createRequest(getUrl(webServer), "secret", "abc"), listener); - InferenceResults result = listener.actionGet(TIMEOUT); + var result = listener.actionGet(TIMEOUT); - assertThat(result.asMap(), is(Map.of(DEFAULT_RESULTS_FIELD, Map.of(".", 0.13315596f)))); + assertThat( + result.asMap(), + is(buildExpectation(List.of(new SparseEmbeddingResultsTests.EmbeddingExpectation(Map.of(".", 0.13315596f), false)))) + ); assertThat(webServer.requests(), hasSize(1)); assertNull(webServer.requests().get(0).getUri().getQuery()); @@ -98,10 +110,13 @@ public void testSend_SuccessfulResponse() throws IOException, URISyntaxException var requestMap = entityAsMap(webServer.requests().get(0).getBody()); assertThat(requestMap.size(), is(1)); - assertThat(requestMap.get("inputs"), is("abc")); + assertThat(requestMap.get("inputs"), instanceOf(List.class)); + var inputList = (List) requestMap.get("inputs"); + assertThat(inputList, contains("abc")); } } + @SuppressWarnings("unchecked") public void testSend_FailsFromInvalidResponseFormat() throws IOException, URISyntaxException { var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); @@ -125,15 +140,23 @@ public void testSend_FailsFromInvalidResponseFormat() throws IOException, URISyn """; webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); - HuggingFaceClient huggingFaceClient = new HuggingFaceClient(sender, mockThrottlerManager()); + HuggingFaceClient huggingFaceClient = new HuggingFaceClient( + sender, + new ServiceComponents( + threadPool, + mockThrottlerManager(), + // timeout as zero for no retries + buildSettingsWithRetryFields(TimeValue.timeValueMillis(1), TimeValue.timeValueMinutes(1), TimeValue.timeValueSeconds(0)) + ) + ); - PlainActionFuture listener = new PlainActionFuture<>(); + PlainActionFuture listener = new PlainActionFuture<>(); huggingFaceClient.send(createRequest(getUrl(webServer), "secret", "abc"), listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); assertThat( thrownException.getMessage(), - is(format("Failed to parse the Hugging Face ELSER response for request [POST %s HTTP/1.1]", getUrl(webServer))) + is("Failed to parse object: expecting token of type [VALUE_NUMBER] but found [START_ARRAY]") ); assertThat(webServer.requests(), hasSize(1)); @@ -146,21 +169,25 @@ public void testSend_FailsFromInvalidResponseFormat() throws IOException, URISyn var requestMap = entityAsMap(webServer.requests().get(0).getBody()); assertThat(requestMap.size(), is(1)); - assertThat(requestMap.get("inputs"), is("abc")); + assertThat(requestMap.get("inputs"), instanceOf(List.class)); + var inputList = (List) requestMap.get("inputs"); + assertThat(inputList, contains("abc")); } } - public void testSend_ThrowsException() { + public void testSend_ThrowsException() throws URISyntaxException, IOException { var sender = mock(Sender.class); doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any()); - HuggingFaceClient huggingFaceClient = new HuggingFaceClient(sender, mockThrottlerManager()); - PlainActionFuture listener = new 
PlainActionFuture<>(); - - var thrownException = expectThrows( - ElasticsearchException.class, - () -> huggingFaceClient.send(createRequest(getUrl(webServer), "secret", "abc"), listener) + HuggingFaceClient huggingFaceClient = new HuggingFaceClient( + sender, + new ServiceComponents(threadPool, mockThrottlerManager(), Settings.EMPTY) ); + PlainActionFuture listener = new PlainActionFuture<>(); + + huggingFaceClient.send(createRequest(getUrl(webServer), "secret", "abc"), listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); assertThat(thrownException.getMessage(), is("failed")); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClientTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClientTests.java new file mode 100644 index 0000000000000..b1c7317b5e22c --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/openai/OpenAiClientTests.java @@ -0,0 +1,291 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.openai; + +import org.apache.http.HttpHeaders; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; +import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; +import static org.elasticsearch.xpack.inference.external.http.Utils.inferenceUtilityPool; +import static org.elasticsearch.xpack.inference.external.http.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.external.http.retry.RetrySettingsTests.buildSettingsWithRetryFields; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiEmbeddingsRequestTests.createRequest; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.ORGANIZATION_HEADER; +import static org.elasticsearch.xpack.inference.logging.ThrottlerManagerTests.mockThrottlerManager; +import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectation; +import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; +import static org.hamcrest.Matchers.equalTo; +import static 
org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; + +public class OpenAiClientTests extends ESTestCase { + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + private final MockWebServer webServer = new MockWebServer(); + private ThreadPool threadPool; + private HttpClientManager clientManager; + + @Before + public void init() throws Exception { + webServer.start(); + threadPool = createThreadPool(inferenceUtilityPool()); + clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mockThrottlerManager()); + } + + @After + public void shutdown() throws IOException { + clientManager.close(); + terminate(threadPool); + webServer.close(); + } + + public void testSend_SuccessfulResponse() throws IOException, URISyntaxException { + var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + OpenAiClient openAiClient = new OpenAiClient(sender, createWithEmptySettings(threadPool)); + + PlainActionFuture listener = new PlainActionFuture<>(); + openAiClient.send(createRequest(getUrl(webServer), "org", "secret", "abc", "model", "user"), listener); + + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), is(buildExpectation(List.of(List.of(0.0123F, -0.0123F))))); + + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertThat(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER), equalTo("org")); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(3)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("user"), is("user")); + } + } + + public void testSend_SuccessfulResponse_WithoutUser() throws IOException, URISyntaxException { + var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + OpenAiClient openAiClient = new OpenAiClient(sender, createWithEmptySettings(threadPool)); + + PlainActionFuture listener = new PlainActionFuture<>(); + openAiClient.send(createRequest(getUrl(webServer), "org", "secret", "abc", "model", null), listener); + + var 
result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), is(buildExpectation(List.of(List.of(0.0123F, -0.0123F))))); + + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertThat(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER), equalTo("org")); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(2)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + } + } + + public void testSend_SuccessfulResponse_WithoutOrganization() throws IOException, URISyntaxException { + var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + OpenAiClient openAiClient = new OpenAiClient(sender, createWithEmptySettings(threadPool)); + + PlainActionFuture listener = new PlainActionFuture<>(); + openAiClient.send(createRequest(getUrl(webServer), null, "secret", "abc", "model", null), listener); + + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), is(buildExpectation(List.of(List.of(0.0123F, -0.0123F))))); + + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertNull(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER)); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(2)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + } + } + + public void testSend_FailsFromInvalidResponseFormat() throws IOException, URISyntaxException { + var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + + try (var sender = senderFactory.createSender("test_service")) { + sender.start(); + + String responseJson = """ + { + "object": "list", + "data_does_not_exist": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + OpenAiClient openAiClient = new OpenAiClient( + sender, + new ServiceComponents( + threadPool, + mockThrottlerManager(), + // timeout as zero for no retries + buildSettingsWithRetryFields(TimeValue.timeValueMillis(1), TimeValue.timeValueMinutes(1), TimeValue.timeValueSeconds(0)) + ) + ); + + PlainActionFuture listener = new PlainActionFuture<>(); + 
openAiClient.send(createRequest(getUrl(webServer), "org", "secret", "abc", "model", "user"), listener); + + var thrownException = expectThrows(IllegalStateException.class, () -> listener.actionGet(TIMEOUT)); + assertThat(thrownException.getMessage(), is(format("Failed to find required field [data] in OpenAI embeddings response"))); + + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertThat(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER), equalTo("org")); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(3)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("user"), is("user")); + } + } + + public void testSend_ThrowsException() throws URISyntaxException, IOException { + var sender = mock(Sender.class); + doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any()); + + OpenAiClient openAiClient = new OpenAiClient(sender, createWithEmptySettings(threadPool)); + PlainActionFuture listener = new PlainActionFuture<>(); + openAiClient.send(createRequest(getUrl(webServer), "org", "secret", "abc", "model", "user"), listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + assertThat(thrownException.getMessage(), is("failed")); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/RequestUtilsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/RequestUtilsTests.java new file mode 100644 index 0000000000000..b6690373ec097 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/RequestUtilsTests.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.xpack.inference.external.request.RequestUtils.createAuthBearerHeader; +import static org.hamcrest.Matchers.is; + +public class RequestUtilsTests extends ESTestCase { + public void testCreateAuthBearerHeader() { + var header = createAuthBearerHeader(new SecureString("abc".toCharArray())); + + assertThat(header.getName(), is("Authorization")); + assertThat(header.getValue(), is("Bearer abc")); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/huggingface/HuggingFaceElserRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/huggingface/HuggingFaceElserRequestEntityTests.java index b0977da234c18..06279e9c89da6 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/huggingface/HuggingFaceElserRequestEntityTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/huggingface/HuggingFaceElserRequestEntityTests.java @@ -14,21 +14,20 @@ import org.elasticsearch.xcontent.XContentType; import java.io.IOException; +import java.util.List; import static org.hamcrest.CoreMatchers.is; public class HuggingFaceElserRequestEntityTests extends ESTestCase { public void testXContent() throws IOException { - var entity = new HuggingFaceElserRequestEntity("abc"); + var entity = new HuggingFaceElserRequestEntity(List.of("abc")); - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).prettyPrint(); + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); entity.toXContent(builder, null); String xContentResult = Strings.toString(builder); assertThat(xContentResult, is(""" - { - "inputs" : "abc" - }""")); + {"inputs":["abc"]}""")); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/huggingface/HuggingFaceElserRequestTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/huggingface/HuggingFaceElserRequestTests.java index 717f5a7e2409d..2a8ce9a46e498 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/huggingface/HuggingFaceElserRequestTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/huggingface/HuggingFaceElserRequestTests.java @@ -17,12 +17,15 @@ import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; +import java.util.List; import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; public class HuggingFaceElserRequestTests extends ESTestCase { + @SuppressWarnings("unchecked") public void testCreateRequest() throws URISyntaxException, IOException { var huggingFaceRequest = createRequest("www.google.com", "secret", "abc"); var httpRequest = huggingFaceRequest.createRequest(); @@ -36,12 +39,14 @@ public void testCreateRequest() throws URISyntaxException, IOException { var requestMap = entityAsMap(httpPost.getEntity().getContent()); assertThat(requestMap.size(), is(1)); - assertThat(requestMap.get("inputs"), is("abc")); + assertThat(requestMap.get("inputs"), 
instanceOf(List.class)); + var inputList = (List) requestMap.get("inputs"); + assertThat(inputList, contains("abc")); } public static HuggingFaceElserRequest createRequest(String url, String apiKey, String input) throws URISyntaxException { var account = new HuggingFaceAccount(new URI(url), new SecureString(apiKey.toCharArray())); - var entity = new HuggingFaceElserRequestEntity(input); + var entity = new HuggingFaceElserRequestEntity(List.of(input)); return new HuggingFaceElserRequest(account, entity); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestEntityTests.java new file mode 100644 index 0000000000000..cedfd04192c0d --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestEntityTests.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.openai; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.List; + +import static org.hamcrest.CoreMatchers.is; + +public class OpenAiEmbeddingsRequestEntityTests extends ESTestCase { + + public void testXContent_WritesUserWhenDefined() throws IOException { + var entity = new OpenAiEmbeddingsRequestEntity(List.of("abc"), "model", "user"); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, is(""" + {"input":["abc"],"model":"model","user":"user"}""")); + } + + public void testXContent_DoesNotWriteUserWhenItIsNull() throws IOException { + var entity = new OpenAiEmbeddingsRequestEntity(List.of("abc"), "model", null); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, is(""" + {"input":["abc"],"model":"model"}""")); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestTests.java new file mode 100644 index 0000000000000..146601da86dbd --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiEmbeddingsRequestTests.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.openai; + +import org.apache.http.HttpHeaders; +import org.apache.http.client.methods.HttpPost; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.openai.OpenAiAccount; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.List; + +import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiEmbeddingsRequest.buildDefaultUri; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.ORGANIZATION_HEADER; +import static org.hamcrest.Matchers.aMapWithSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +public class OpenAiEmbeddingsRequestTests extends ESTestCase { + public void testCreateRequest_WithUrlOrganizationUserDefined() throws URISyntaxException, IOException { + var request = createRequest("www.google.com", "org", "secret", "abc", "model", "user"); + var httpRequest = request.createRequest(); + + assertThat(httpRequest, instanceOf(HttpPost.class)); + var httpPost = (HttpPost) httpRequest; + + assertThat(httpPost.getURI().toString(), is("www.google.com")); + assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType())); + assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is("Bearer secret")); + assertThat(httpPost.getLastHeader(ORGANIZATION_HEADER).getValue(), is("org")); + + var requestMap = entityAsMap(httpPost.getEntity().getContent()); + assertThat(requestMap, aMapWithSize(3)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("user"), is("user")); + } + + public void testCreateRequest_WithDefaultUrl() throws URISyntaxException, IOException { + var request = createRequest(null, "org", "secret", "abc", "model", "user"); + var httpRequest = request.createRequest(); + + assertThat(httpRequest, instanceOf(HttpPost.class)); + var httpPost = (HttpPost) httpRequest; + + assertThat(httpPost.getURI().toString(), is(buildDefaultUri().toString())); + assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType())); + assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is("Bearer secret")); + assertThat(httpPost.getLastHeader(ORGANIZATION_HEADER).getValue(), is("org")); + + var requestMap = entityAsMap(httpPost.getEntity().getContent()); + assertThat(requestMap, aMapWithSize(3)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("user"), is("user")); + } + + public void testCreateRequest_WithDefaultUrlAndWithoutUserOrganization() throws URISyntaxException, IOException { + var request = createRequest(null, null, "secret", "abc", "model", null); + var httpRequest = request.createRequest(); + + assertThat(httpRequest, instanceOf(HttpPost.class)); + var httpPost = (HttpPost) httpRequest; + + assertThat(httpPost.getURI().toString(), is(buildDefaultUri().toString())); + assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType())); + 
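+ // with no organization or user configured, the request should still carry the bearer Authorization header and omit the organization header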
assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is("Bearer secret")); + assertNull(httpPost.getLastHeader(ORGANIZATION_HEADER)); + + var requestMap = entityAsMap(httpPost.getEntity().getContent()); + assertThat(requestMap, aMapWithSize(2)); + assertThat(requestMap.get("input"), is(List.of("abc"))); + assertThat(requestMap.get("model"), is("model")); + } + + public static OpenAiEmbeddingsRequest createRequest( + @Nullable String url, + @Nullable String org, + String apiKey, + String input, + String model, + @Nullable String user + ) throws URISyntaxException { + var uri = url == null ? null : new URI(url); + + var account = new OpenAiAccount(uri, org, new SecureString(apiKey.toCharArray())); + var entity = new OpenAiEmbeddingsRequestEntity(List.of(input), model, user); + + return new OpenAiEmbeddingsRequest(account, entity); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntityTests.java index 8cfac1858ab50..ce94bfceed4fb 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntityTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntityTests.java @@ -12,14 +12,16 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentEOFException; import org.elasticsearch.xcontent.XContentParseException; -import org.elasticsearch.xpack.core.ml.inference.results.TextExpansionResults; import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.results.SparseEmbeddingResults; +import org.elasticsearch.xpack.inference.results.SparseEmbeddingResultsTests; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.util.List; import java.util.Map; -import java.util.stream.Collectors; +import static org.elasticsearch.xpack.inference.results.SparseEmbeddingResultsTests.buildExpectation; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.mock; @@ -34,18 +36,18 @@ public void testFromResponse_CreatesTextExpansionResults() throws IOException { } ]"""; - TextExpansionResults parsedResults = HuggingFaceElserResponseEntity.fromResponse( + SparseEmbeddingResults parsedResults = HuggingFaceElserResponseEntity.fromResponse( new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) ); - Map tokenWeightMap = parsedResults.getWeightedTokens() - .stream() - .collect(Collectors.toMap(TextExpansionResults.WeightedToken::token, TextExpansionResults.WeightedToken::weight)); - // the results get truncated because weighted token stores them as a float - assertThat(tokenWeightMap.size(), is(2)); - assertThat(tokenWeightMap.get("."), is(0.13315596f)); - assertThat(tokenWeightMap.get("the"), is(0.67472112f)); - assertFalse(parsedResults.isTruncated()); + assertThat( + parsedResults.asMap(), + is( + buildExpectation( + List.of(new SparseEmbeddingResultsTests.EmbeddingExpectation(Map.of(".", 0.13315596f, "the", 0.67472112f), false)) + ) + ) + ); } public void testFromResponse_CreatesTextExpansionResultsForFirstItem() throws IOException { @@ -61,18 +63,21 @@ public void 
testFromResponse_CreatesTextExpansionResultsForFirstItem() throws IO } ]"""; - TextExpansionResults parsedResults = HuggingFaceElserResponseEntity.fromResponse( + SparseEmbeddingResults parsedResults = HuggingFaceElserResponseEntity.fromResponse( new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) ); - Map tokenWeightMap = parsedResults.getWeightedTokens() - .stream() - .collect(Collectors.toMap(TextExpansionResults.WeightedToken::token, TextExpansionResults.WeightedToken::weight)); - // the results get truncated because weighted token stores them as a float - assertThat(tokenWeightMap.size(), is(2)); - assertThat(tokenWeightMap.get("."), is(0.13315596f)); - assertThat(tokenWeightMap.get("the"), is(0.67472112f)); - assertFalse(parsedResults.isTruncated()); + assertThat( + parsedResults.asMap(), + is( + buildExpectation( + List.of( + new SparseEmbeddingResultsTests.EmbeddingExpectation(Map.of(".", 0.13315596f, "the", 0.67472112f), false), + new SparseEmbeddingResultsTests.EmbeddingExpectation(Map.of("hi", 0.13315596f, "super", 0.67472112f), false) + ) + ) + ) + ); } public void testFails_NotAnArray() { @@ -117,7 +122,7 @@ public void testFails_ValueString() { ); } - public void testFails_ValueInt() throws IOException { + public void testFromResponse_CreatesResultsWithValueInt() throws IOException { String responseJson = """ [ { @@ -126,19 +131,17 @@ public void testFails_ValueInt() throws IOException { ] """; - TextExpansionResults parsedResults = HuggingFaceElserResponseEntity.fromResponse( + SparseEmbeddingResults parsedResults = HuggingFaceElserResponseEntity.fromResponse( new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) ); - Map tokenWeightMap = parsedResults.getWeightedTokens() - .stream() - .collect(Collectors.toMap(TextExpansionResults.WeightedToken::token, TextExpansionResults.WeightedToken::weight)); - assertThat(tokenWeightMap.size(), is(1)); - assertThat(tokenWeightMap.get("field"), is(1.0f)); - assertFalse(parsedResults.isTruncated()); + assertThat( + parsedResults.asMap(), + is(buildExpectation(List.of(new SparseEmbeddingResultsTests.EmbeddingExpectation(Map.of("field", 1.0f), false)))) + ); } - public void testFails_ValueLong() throws IOException { + public void testFromResponse_CreatesResultsWithValueLong() throws IOException { String responseJson = """ [ { @@ -147,16 +150,14 @@ public void testFails_ValueLong() throws IOException { ] """; - TextExpansionResults parsedResults = HuggingFaceElserResponseEntity.fromResponse( + SparseEmbeddingResults parsedResults = HuggingFaceElserResponseEntity.fromResponse( new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) ); - Map tokenWeightMap = parsedResults.getWeightedTokens() - .stream() - .collect(Collectors.toMap(TextExpansionResults.WeightedToken::token, TextExpansionResults.WeightedToken::weight)); - assertThat(tokenWeightMap.size(), is(1)); - assertThat(tokenWeightMap.get("field"), is(4.0294965E10F)); - assertFalse(parsedResults.isTruncated()); + assertThat( + parsedResults.asMap(), + is(buildExpectation(List.of(new SparseEmbeddingResultsTests.EmbeddingExpectation(Map.of("field", 4.0294965E10F), false)))) + ); } public void testFails_ValueObject() { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiEmbeddingsResponseEntityTests.java 
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiEmbeddingsResponseEntityTests.java new file mode 100644 index 0000000000000..a3ec162b05ec8 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiEmbeddingsResponseEntityTests.java @@ -0,0 +1,353 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.response.openai; + +import org.apache.http.HttpResponse; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.results.TextEmbeddingResults; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.List; + +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; + +public class OpenAiEmbeddingsResponseEntityTests extends ESTestCase { + public void testFromResponse_CreatesResultsForASingleItem() throws IOException { + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.014539449, + -0.015288644 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + + TextEmbeddingResults parsedResults = OpenAiEmbeddingsResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ); + + assertThat(parsedResults.embeddings(), is(List.of(new TextEmbeddingResults.Embedding(List.of(0.014539449F, -0.015288644F))))); + } + + public void testFromResponse_CreatesResultsForMultipleItems() throws IOException { + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.014539449, + -0.015288644 + ] + }, + { + "object": "embedding", + "index": 1, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + + TextEmbeddingResults parsedResults = OpenAiEmbeddingsResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ); + + assertThat( + parsedResults.embeddings(), + is( + List.of( + new TextEmbeddingResults.Embedding(List.of(0.014539449F, -0.015288644F)), + new TextEmbeddingResults.Embedding(List.of(0.0123F, -0.0123F)) + ) + ) + ); + } + + public void testFromResponse_FailsWhenDataFieldIsNotPresent() { + String responseJson = """ + { + "object": "list", + "not_data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.014539449, + -0.015288644 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + + var thrownException = expectThrows( + IllegalStateException.class, + () -> OpenAiEmbeddingsResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat(thrownException.getMessage(), is("Failed to find required field [data] in OpenAI embeddings response")); + } + + public void testFromResponse_FailsWhenDataFieldNotAnArray() { + String responseJson = """ + 
{ + "object": "list", + "data": { + "test": { + "object": "embedding", + "index": 0, + "embedding": [ + 0.014539449, + -0.015288644 + ] + } + }, + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + + var thrownException = expectThrows( + ParsingException.class, + () -> OpenAiEmbeddingsResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat( + thrownException.getMessage(), + is("Failed to parse object: expecting token of type [START_ARRAY] but found [START_OBJECT]") + ); + } + + public void testFromResponse_FailsWhenEmbeddingsDoesNotExist() { + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embeddingzzz": [ + 0.014539449, + -0.015288644 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + + var thrownException = expectThrows( + IllegalStateException.class, + () -> OpenAiEmbeddingsResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat(thrownException.getMessage(), is("Failed to find required field [embedding] in OpenAI embeddings response")); + } + + public void testFromResponse_FailsWhenEmbeddingValueIsAString() { + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + "abc" + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + + var thrownException = expectThrows( + ParsingException.class, + () -> OpenAiEmbeddingsResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat( + thrownException.getMessage(), + is("Failed to parse object: expecting token of type [VALUE_NUMBER] but found [VALUE_STRING]") + ); + } + + public void testFromResponse_SucceedsWhenEmbeddingValueIsInt() throws IOException { + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 1 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + + TextEmbeddingResults parsedResults = OpenAiEmbeddingsResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ); + + assertThat(parsedResults.embeddings(), is(List.of(new TextEmbeddingResults.Embedding(List.of(1.0F))))); + } + + public void testFromResponse_SucceedsWhenEmbeddingValueIsLong() throws IOException { + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 40294967295 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + + TextEmbeddingResults parsedResults = OpenAiEmbeddingsResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ); + + assertThat(parsedResults.embeddings(), is(List.of(new TextEmbeddingResults.Embedding(List.of(4.0294965E10F))))); + } + + public void testFromResponse_FailsWhenEmbeddingValueIsAnObject() { + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + {} + ] + } + ], + "model": 
"text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + + var thrownException = expectThrows( + ParsingException.class, + () -> OpenAiEmbeddingsResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat( + thrownException.getMessage(), + is("Failed to parse object: expecting token of type [VALUE_NUMBER] but found [START_OBJECT]") + ); + } + + public void testFromResponse_FailsWhenIsMissingFinalClosingBracket() { + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + {} + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + """; + + var thrownException = expectThrows( + ParsingException.class, + () -> OpenAiEmbeddingsResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat( + thrownException.getMessage(), + is("Failed to parse object: expecting token of type [VALUE_NUMBER] but found [START_OBJECT]") + ); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiErrorResponseEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiErrorResponseEntityTests.java new file mode 100644 index 0000000000000..4e3465e24c951 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/openai/OpenAiErrorResponseEntityTests.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.response.openai; + +import org.apache.http.HttpResponse; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.external.http.HttpResult; + +import java.nio.charset.StandardCharsets; + +import static org.mockito.Mockito.mock; + +public class OpenAiErrorResponseEntityTests extends ESTestCase { + public void testFromResponse() { + String responseJson = """ + { + "error": { + "message": "You didn't provide an API key", + "type": "invalid_request_error", + "param": null, + "code": null + } + } + """; + + OpenAiErrorResponseEntity errorMessage = OpenAiErrorResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ); + assertEquals("You didn't provide an API key", errorMessage.getErrorMessage()); + } + + public void testFromResponse_noMessage() { + String responseJson = """ + { + "error": { + "type": "invalid_request_error" + } + } + """; + + OpenAiErrorResponseEntity errorMessage = OpenAiErrorResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ); + assertNull(errorMessage); + } + + public void testFromResponse_noError() { + String responseJson = """ + { + "something": { + "not": "relevant" + } + } + """; + + OpenAiErrorResponseEntity errorMessage = OpenAiErrorResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ); + assertNull(errorMessage); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/logging/ThrottlerManagerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/logging/ThrottlerManagerTests.java index ba9e7851c9ad4..a9e85d0ffcb1d 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/logging/ThrottlerManagerTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/logging/ThrottlerManagerTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.inference.logging; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; @@ -22,6 +23,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.when; public class ThrottlerManagerTests extends ESTestCase { @@ -39,6 +41,31 @@ public void shutdown() { terminate(threadPool); } + public void testWarn_LogsOnlyOnce() { + var logger = mock(Logger.class); + + try (var throttler = new ThrottlerManager(Settings.EMPTY, threadPool, mockClusterServiceEmpty())) { + throttler.warn(logger, "test", new IllegalArgumentException("failed")); + + verify(logger, times(1)).warn(eq("test"), any(Throwable.class)); + + throttler.warn(logger, "test", new IllegalArgumentException("failed")); + verifyNoMoreInteractions(logger); + } + } + + public void testWarn_AllowsDifferentMessagesToBeLogged() { + var logger = mock(Logger.class); + + try (var throttler = new ThrottlerManager(Settings.EMPTY, threadPool, mockClusterServiceEmpty())) { + throttler.warn(logger, "test", new IllegalArgumentException("failed")); + verify(logger, times(1)).warn(eq("test"), any(Throwable.class)); + + throttler.warn(logger, "a different message", new IllegalArgumentException("failed")); + verify(logger, 
times(1)).warn(eq("a different message"), any(Throwable.class)); + } + } + public void testStartsNewThrottler_WhenResetIntervalIsChanged() { var mockThreadPool = mock(ThreadPool.class); when(mockThreadPool.scheduleWithFixedDelay(any(Runnable.class), any(), any())).thenReturn(mock(Scheduler.Cancellable.class)); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/logging/ThrottlerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/logging/ThrottlerTests.java index 27df66c54cd1c..d23f057a7a23e 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/logging/ThrottlerTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/logging/ThrottlerTests.java @@ -52,7 +52,7 @@ public void testWarn_LogsOnlyOnce() { var logger = mock(Logger.class); try ( - var throttled = new Throttler( + var throttler = new Throttler( TimeValue.timeValueDays(1), TimeValue.timeValueSeconds(10), Clock.fixed(Instant.now(), ZoneId.systemDefault()), @@ -60,11 +60,11 @@ public void testWarn_LogsOnlyOnce() { new ConcurrentHashMap<>() ) ) { - throttled.warn(logger, "test", new IllegalArgumentException("failed")); + throttler.execute("test", logger::warn); - verify(logger, times(1)).warn(eq("test"), any(Throwable.class)); + verify(logger, times(1)).warn(eq("test")); - throttled.warn(logger, "test", new IllegalArgumentException("failed")); + throttler.execute("test", logger::warn); verifyNoMoreInteractions(logger); } } @@ -77,7 +77,7 @@ public void testWarn_LogsOnce_ThenOnceAfterDuration() { var clock = mock(Clock.class); try ( - var throttled = new Throttler( + var throttler = new Throttler( TimeValue.timeValueDays(1), TimeValue.timeValueSeconds(10), clock, @@ -88,17 +88,17 @@ public void testWarn_LogsOnce_ThenOnceAfterDuration() { when(clock.instant()).thenReturn(now); // The first call is always logged - throttled.warn(logger, "test", new IllegalArgumentException("failed")); + throttler.execute("test", (message) -> logger.warn(message, new IllegalArgumentException("failed"))); verify(logger, times(1)).warn(eq("test"), any(Throwable.class)); when(clock.instant()).thenReturn(now.plus(Duration.ofMinutes(1))); // This call should be allowed because the clock thinks it's after the duration period - throttled.warn(logger, "test", new IllegalArgumentException("failed")); + throttler.execute("test", (message) -> logger.warn(message, new IllegalArgumentException("failed"))); verify(logger, times(2)).warn(eq("test"), any(Throwable.class)); when(clock.instant()).thenReturn(now); // This call should not be allowed because the clock doesn't think it's past the wait period - throttled.warn(logger, "test", new IllegalArgumentException("failed")); + throttler.execute("test", (message) -> logger.warn(message, new IllegalArgumentException("failed"))); verifyNoMoreInteractions(logger); } } @@ -109,7 +109,7 @@ public void testWarn_AllowsDifferentMessagesToBeLogged() { var clock = mock(Clock.class); try ( - var throttled = new Throttler( + var throttler = new Throttler( TimeValue.timeValueDays(1), TimeValue.timeValueSeconds(10), clock, @@ -117,10 +117,10 @@ public void testWarn_AllowsDifferentMessagesToBeLogged() { new ConcurrentHashMap<>() ) ) { - throttled.warn(logger, "test", new IllegalArgumentException("failed")); - verify(logger, times(1)).warn(eq("test"), any(Throwable.class)); + throttler.execute("test", logger::warn); + verify(logger, times(1)).warn(eq("test")); - throttled.warn(logger, "a different message", new 
IllegalArgumentException("failed")); + throttler.execute("a different message", (message) -> logger.warn(message, new IllegalArgumentException("failed"))); verify(logger, times(1)).warn(eq("a different message"), any(Throwable.class)); } } @@ -133,7 +133,7 @@ public void testWarn_LogsRepeated1Time() { var clock = mock(Clock.class); try ( - var throttled = new Throttler( + var throttler = new Throttler( TimeValue.timeValueDays(1), TimeValue.timeValueSeconds(10), clock, @@ -143,16 +143,16 @@ public void testWarn_LogsRepeated1Time() { ) { when(clock.instant()).thenReturn(now); // first message is allowed - throttled.warn(logger, "test", new IllegalArgumentException("failed")); - verify(logger, times(1)).warn(eq("test"), any(Throwable.class)); + throttler.execute("test", logger::warn); + verify(logger, times(1)).warn(eq("test")); when(clock.instant()).thenReturn(now); // don't allow this message because duration hasn't expired - throttled.warn(logger, "test", new IllegalArgumentException("failed")); - verify(logger, times(1)).warn(eq("test"), any(Throwable.class)); + throttler.execute("test", logger::warn); + verify(logger, times(1)).warn(eq("test")); when(clock.instant()).thenReturn(now.plus(Duration.ofMinutes(1))); // allow this message by faking expired duration - throttled.warn(logger, "test", new IllegalArgumentException("failed")); - verify(logger, times(1)).warn(eq("test, repeated 1 time"), any(Throwable.class)); + throttler.execute("test", logger::warn); + verify(logger, times(1)).warn(eq("test, repeated 1 time")); } } @@ -164,7 +164,7 @@ public void testWarn_LogsRepeated2Times() { var clock = mock(Clock.class); try ( - var throttled = new Throttler( + var throttler = new Throttler( TimeValue.timeValueDays(1), TimeValue.timeValueSeconds(10), clock, @@ -174,16 +174,16 @@ public void testWarn_LogsRepeated2Times() { ) { when(clock.instant()).thenReturn(now); // message allowed because it is the first one - throttled.warn(logger, "test", new IllegalArgumentException("failed")); + throttler.execute("test", (message) -> logger.warn(message, new IllegalArgumentException("failed"))); verify(logger, times(1)).warn(eq("test"), any(Throwable.class)); when(clock.instant()).thenReturn(now); // don't allow these messages because duration hasn't expired - throttled.warn(logger, "test", new IllegalArgumentException("failed")); - throttled.warn(logger, "test", new IllegalArgumentException("failed")); + throttler.execute("test", (message) -> logger.warn(message, new IllegalArgumentException("failed"))); + throttler.execute("test", (message) -> logger.warn(message, new IllegalArgumentException("failed"))); verify(logger, times(1)).warn(eq("test"), any(Throwable.class)); when(clock.instant()).thenReturn(now.plus(Duration.ofMinutes(1))); // allow this message by faking the duration completion - throttled.warn(logger, "test", new IllegalArgumentException("failed")); + throttler.execute("test", (message) -> logger.warn(message, new IllegalArgumentException("failed"))); verify(logger, times(1)).warn(eq("test, repeated 2 times"), any(Throwable.class)); } } @@ -214,7 +214,7 @@ public void testClose_DoesNotAllowLoggingAnyMore() { var clock = mock(Clock.class); - var throttled = new Throttler( + var throttler = new Throttler( TimeValue.timeValueDays(1), TimeValue.timeValueSeconds(10), clock, @@ -222,8 +222,8 @@ public void testClose_DoesNotAllowLoggingAnyMore() { new ConcurrentHashMap<>() ); - throttled.close(); - throttled.warn(logger, "test", new IllegalArgumentException("failed")); + throttler.close(); + 
throttler.execute("test", logger::warn); verifyNoMoreInteractions(logger); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/results/LegacyTextEmbeddingResultsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/results/LegacyTextEmbeddingResultsTests.java new file mode 100644 index 0000000000000..6553f1e7f8ae3 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/results/LegacyTextEmbeddingResultsTests.java @@ -0,0 +1,144 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.results; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.is; + +@SuppressWarnings("deprecation") +public class LegacyTextEmbeddingResultsTests extends AbstractWireSerializingTestCase { + public static LegacyTextEmbeddingResults createRandomResults() { + int embeddings = randomIntBetween(1, 10); + List embeddingResults = new ArrayList<>(embeddings); + + for (int i = 0; i < embeddings; i++) { + embeddingResults.add(createRandomEmbedding()); + } + + return new LegacyTextEmbeddingResults(embeddingResults); + } + + private static LegacyTextEmbeddingResults.Embedding createRandomEmbedding() { + int columns = randomIntBetween(1, 10); + List floats = new ArrayList<>(columns); + + for (int i = 0; i < columns; i++) { + floats.add(randomFloat()); + } + + return new LegacyTextEmbeddingResults.Embedding(floats); + } + + public void testToXContent_CreatesTheRightFormatForASingleEmbedding() throws IOException { + var entity = new LegacyTextEmbeddingResults(List.of(new LegacyTextEmbeddingResults.Embedding(List.of(0.1F)))); + + assertThat( + entity.asMap(), + is( + Map.of( + LegacyTextEmbeddingResults.TEXT_EMBEDDING, + List.of(Map.of(LegacyTextEmbeddingResults.Embedding.EMBEDDING, List.of(0.1F))) + ) + ) + ); + + String xContentResult = Strings.toString(entity, true, true); + assertThat(xContentResult, is(""" + { + "text_embedding" : [ + { + "embedding" : [ + 0.1 + ] + } + ] + }""")); + } + + public void testToXContent_CreatesTheRightFormatForMultipleEmbeddings() throws IOException { + var entity = new LegacyTextEmbeddingResults( + List.of(new LegacyTextEmbeddingResults.Embedding(List.of(0.1F)), new LegacyTextEmbeddingResults.Embedding(List.of(0.2F))) + + ); + + assertThat( + entity.asMap(), + is( + Map.of( + LegacyTextEmbeddingResults.TEXT_EMBEDDING, + List.of( + Map.of(LegacyTextEmbeddingResults.Embedding.EMBEDDING, List.of(0.1F)), + Map.of(LegacyTextEmbeddingResults.Embedding.EMBEDDING, List.of(0.2F)) + ) + ) + ) + ); + + String xContentResult = Strings.toString(entity, true, true); + assertThat(xContentResult, is(""" + { + "text_embedding" : [ + { + "embedding" : [ + 0.1 + ] + }, + { + "embedding" : [ + 0.2 + ] + } + ] + }""")); + } + + private static String toJsonString(ToXContentFragment entity) throws IOException 
{ + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).prettyPrint(); + builder.startObject(); + entity.toXContent(builder, null); + builder.endObject(); + + return Strings.toString(builder); + } + + @Override + protected Writeable.Reader instanceReader() { + return LegacyTextEmbeddingResults::new; + } + + @Override + protected LegacyTextEmbeddingResults createTestInstance() { + return createRandomResults(); + } + + @Override + protected LegacyTextEmbeddingResults mutateInstance(LegacyTextEmbeddingResults instance) throws IOException { + // if true we reduce the embeddings list by a random amount, if false we add an embedding to the list + if (randomBoolean()) { + // -1 to remove at least one item from the list + int end = randomInt(instance.embeddings().size() - 1); + return new LegacyTextEmbeddingResults(instance.embeddings().subList(0, end)); + } else { + List embeddings = new ArrayList<>(instance.embeddings()); + embeddings.add(createRandomEmbedding()); + return new LegacyTextEmbeddingResults(embeddings); + } + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/results/SparseEmbeddingResultsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/results/SparseEmbeddingResultsTests.java new file mode 100644 index 0000000000000..9ab33ef777445 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/results/SparseEmbeddingResultsTests.java @@ -0,0 +1,181 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.results; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.is; + +public class SparseEmbeddingResultsTests extends AbstractWireSerializingTestCase { + + public static SparseEmbeddingResults createRandomResults() { + int numEmbeddings = randomIntBetween(1, 10); + int numTokens = randomIntBetween(0, 20); + return createRandomResults(numEmbeddings, numTokens); + } + + public static SparseEmbeddingResults createRandomResults(int numEmbeddings, int numTokens) { + List embeddings = new ArrayList<>(numEmbeddings); + + for (int i = 0; i < numEmbeddings; i++) { + embeddings.add(createRandomEmbedding(numTokens)); + } + + return new SparseEmbeddingResults(embeddings); + } + + public static SparseEmbeddingResults createRandomResults(List input) { + List embeddings = new ArrayList<>(input.size()); + + for (String s : input) { + int numTokens = Strings.tokenizeToStringArray(s, " ").length; + embeddings.add(createRandomEmbedding(numTokens)); + } + + return new SparseEmbeddingResults(embeddings); + } + + private static SparseEmbeddingResults.Embedding createRandomEmbedding(int numTokens) { + List tokenList = new ArrayList<>(numTokens); + for (int i = 0; i < numTokens; i++) { + tokenList.add(new SparseEmbeddingResults.WeightedToken(Integer.toString(i), (float) randomDoubleBetween(0.0, 5.0, false))); + } + + return new SparseEmbeddingResults.Embedding(tokenList, randomBoolean()); + } + + @Override + protected Writeable.Reader instanceReader() { + return 
SparseEmbeddingResults::new; + } + + @Override + protected SparseEmbeddingResults createTestInstance() { + return createRandomResults(); + } + + @Override + protected SparseEmbeddingResults mutateInstance(SparseEmbeddingResults instance) throws IOException { + // if true we reduce the embeddings list by a random amount, if false we add an embedding to the list + if (randomBoolean()) { + // -1 to remove at least one item from the list + int end = randomInt(instance.embeddings().size() - 1); + return new SparseEmbeddingResults(instance.embeddings().subList(0, end)); + } else { + List embeddings = new ArrayList<>(instance.embeddings()); + embeddings.add(createRandomEmbedding(randomIntBetween(0, 20))); + return new SparseEmbeddingResults(embeddings); + } + } + + public void testToXContent_CreatesTheRightFormatForASingleEmbedding() throws IOException { + var entity = createSparseResult(List.of(createEmbedding(List.of(new SparseEmbeddingResults.WeightedToken("token", 0.1F)), false))); + assertThat(entity.asMap(), is(buildExpectation(List.of(new EmbeddingExpectation(Map.of("token", 0.1F), false))))); + String xContentResult = Strings.toString(entity, true, true); + assertThat(xContentResult, is(""" + { + "sparse_embedding" : [ + { + "is_truncated" : false, + "embedding" : { + "token" : 0.1 + } + } + ] + }""")); + } + + public void testToXContent_CreatesTheRightFormatForMultipleEmbeddings() throws IOException { + var entity = createSparseResult( + List.of( + new SparseEmbeddingResults.Embedding( + List.of( + new SparseEmbeddingResults.WeightedToken("token", 0.1F), + new SparseEmbeddingResults.WeightedToken("token2", 0.2F) + ), + false + ), + new SparseEmbeddingResults.Embedding( + List.of( + new SparseEmbeddingResults.WeightedToken("token3", 0.3F), + new SparseEmbeddingResults.WeightedToken("token4", 0.4F) + ), + false + ) + ) + ); + assertThat( + entity.asMap(), + is( + buildExpectation( + List.of( + new EmbeddingExpectation(Map.of("token", 0.1F, "token2", 0.2F), false), + new EmbeddingExpectation(Map.of("token3", 0.3F, "token4", 0.4F), false) + ) + ) + ) + ); + + String xContentResult = Strings.toString(entity, true, true); + assertThat(xContentResult, is(""" + { + "sparse_embedding" : [ + { + "is_truncated" : false, + "embedding" : { + "token" : 0.1, + "token2" : 0.2 + } + }, + { + "is_truncated" : false, + "embedding" : { + "token3" : 0.3, + "token4" : 0.4 + } + } + ] + }""")); + } + + public record EmbeddingExpectation(Map tokens, boolean isTruncated) {} + + public static Map buildExpectation(List embeddings) { + return Map.of( + SparseEmbeddingResults.SPARSE_EMBEDDING, + embeddings.stream() + .map( + embedding -> Map.of( + SparseEmbeddingResults.Embedding.EMBEDDING, + embedding.tokens, + SparseEmbeddingResults.Embedding.IS_TRUNCATED, + embedding.isTruncated + ) + ) + .toList() + ); + } + + public static SparseEmbeddingResults createSparseResult(List embeddings) { + return new SparseEmbeddingResults(embeddings); + } + + public static SparseEmbeddingResults.Embedding createEmbedding( + List tokensList, + boolean isTruncated + ) { + return new SparseEmbeddingResults.Embedding(tokensList, isTruncated); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/results/TextEmbeddingResultsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/results/TextEmbeddingResultsTests.java new file mode 100644 index 0000000000000..fabb6c3de0fbc --- /dev/null +++ 
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/results/TextEmbeddingResultsTests.java @@ -0,0 +1,132 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.results; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.is; + +public class TextEmbeddingResultsTests extends AbstractWireSerializingTestCase { + public static TextEmbeddingResults createRandomResults() { + int embeddings = randomIntBetween(1, 10); + List embeddingResults = new ArrayList<>(embeddings); + + for (int i = 0; i < embeddings; i++) { + embeddingResults.add(createRandomEmbedding()); + } + + return new TextEmbeddingResults(embeddingResults); + } + + private static TextEmbeddingResults.Embedding createRandomEmbedding() { + int columns = randomIntBetween(1, 10); + List floats = new ArrayList<>(columns); + + for (int i = 0; i < columns; i++) { + floats.add(randomFloat()); + } + + return new TextEmbeddingResults.Embedding(floats); + } + + public void testToXContent_CreatesTheRightFormatForASingleEmbedding() throws IOException { + var entity = new TextEmbeddingResults(List.of(new TextEmbeddingResults.Embedding(List.of(0.1F)))); + + assertThat( + entity.asMap(), + is(Map.of(TextEmbeddingResults.TEXT_EMBEDDING, List.of(Map.of(TextEmbeddingResults.Embedding.EMBEDDING, List.of(0.1F))))) + ); + + String xContentResult = Strings.toString(entity, true, true); + assertThat(xContentResult, is(""" + { + "text_embedding" : [ + { + "embedding" : [ + 0.1 + ] + } + ] + }""")); + } + + public void testToXContent_CreatesTheRightFormatForMultipleEmbeddings() throws IOException { + var entity = new TextEmbeddingResults( + List.of(new TextEmbeddingResults.Embedding(List.of(0.1F)), new TextEmbeddingResults.Embedding(List.of(0.2F))) + + ); + + assertThat( + entity.asMap(), + is( + Map.of( + TextEmbeddingResults.TEXT_EMBEDDING, + List.of( + Map.of(TextEmbeddingResults.Embedding.EMBEDDING, List.of(0.1F)), + Map.of(TextEmbeddingResults.Embedding.EMBEDDING, List.of(0.2F)) + ) + ) + ) + ); + + String xContentResult = Strings.toString(entity, true, true); + assertThat(xContentResult, is(""" + { + "text_embedding" : [ + { + "embedding" : [ + 0.1 + ] + }, + { + "embedding" : [ + 0.2 + ] + } + ] + }""")); + } + + @Override + protected Writeable.Reader instanceReader() { + return TextEmbeddingResults::new; + } + + @Override + protected TextEmbeddingResults createTestInstance() { + return createRandomResults(); + } + + @Override + protected TextEmbeddingResults mutateInstance(TextEmbeddingResults instance) throws IOException { + // if true we reduce the embeddings list by a random amount, if false we add an embedding to the list + if (randomBoolean()) { + // -1 to remove at least one item from the list + int end = randomInt(instance.embeddings().size() - 1); + return new TextEmbeddingResults(instance.embeddings().subList(0, end)); + } else { + List embeddings = new ArrayList<>(instance.embeddings()); + embeddings.add(createRandomEmbedding()); + return new TextEmbeddingResults(embeddings); + } + } + + public static Map 
buildExpectation(List<List<Float>> embeddings) { + return Map.of( + TextEmbeddingResults.TEXT_EMBEDDING, + embeddings.stream().map(embedding -> Map.of(TextEmbeddingResults.Embedding.EMBEDDING, embedding)).toList() + ); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/MapParsingUtilsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/MapParsingUtilsTests.java index 7b693b2ef4c0f..9ff23ea38541d 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/MapParsingUtilsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/MapParsingUtilsTests.java @@ -8,14 +8,21 @@ package org.elasticsearch.xpack.inference.services; import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.common.ValidationException; import org.elasticsearch.test.ESTestCase; import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.convertToUri; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.createUri; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.extractOptionalString; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.extractRequiredSecureString; +import static org.elasticsearch.xpack.inference.services.MapParsingUtils.extractRequiredString; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; public class MapParsingUtilsTests extends ESTestCase { @@ -88,4 +95,148 @@ public void testRemoveAsTypeMissingReturnsNull() { assertNull(MapParsingUtils.removeAsType(new HashMap<>(), "missing", Integer.class)); assertThat(map.entrySet(), hasSize(3)); } + + public void testConvertToUri_CreatesUri() { + var validation = new ValidationException(); + var uri = convertToUri("www.elastic.co", "name", "scope", validation); + + assertNotNull(uri); + assertTrue(validation.validationErrors().isEmpty()); + assertThat(uri.toString(), is("www.elastic.co")); + } + + public void testConvertToUri_ThrowsNullPointerException_WhenPassedNull() { + var validation = new ValidationException(); + expectThrows(NullPointerException.class, () -> convertToUri(null, "name", "scope", validation)); + + assertTrue(validation.validationErrors().isEmpty()); + } + + public void testConvertToUri_AddsValidationError_WhenUrlIsInvalid() { + var validation = new ValidationException(); + var uri = convertToUri("^^", "name", "scope", validation); + + assertNull(uri); + assertThat(validation.validationErrors().size(), is(1)); + assertThat(validation.validationErrors().get(0), is("[scope] Invalid url [^^] received for field [name]")); + } + + public void testCreateUri_CreatesUri() { + var uri = createUri("www.elastic.co"); + + assertNotNull(uri); + assertThat(uri.toString(), is("www.elastic.co")); + } + + public void testCreateUri_ThrowsException_WithInvalidUrl() { + var exception = expectThrows(IllegalArgumentException.class, () -> createUri("^^")); + + assertThat(exception.getMessage(), is("unable to parse url [^^]")); + } + + public void testCreateUri_ThrowsException_WithNullUrl() { + expectThrows(NullPointerException.class, () -> createUri(null)); + } + + public void testExtractRequiredSecureString_CreatesSecureString() { + var validation = new ValidationException(); + Map map = modifiableMap(Map.of("key", "value")); + var 
secureString = extractRequiredSecureString(map, "key", "scope", validation); + + assertTrue(validation.validationErrors().isEmpty()); + assertNotNull(secureString); + assertThat(secureString.toString(), is("value")); + assertTrue(map.isEmpty()); + } + + public void testExtractRequiredSecureString_AddsException_WhenFieldDoesNotExist() { + var validation = new ValidationException(); + Map map = modifiableMap(Map.of("key", "value")); + var secureString = extractRequiredSecureString(map, "abc", "scope", validation); + + assertNull(secureString); + assertFalse(validation.validationErrors().isEmpty()); + assertThat(map.size(), is(1)); + assertThat(validation.validationErrors().get(0), is("[scope] does not contain the required setting [abc]")); + } + + public void testExtractRequiredSecureString_AddsException_WhenFieldIsEmpty() { + var validation = new ValidationException(); + Map map = modifiableMap(Map.of("key", "")); + var createdString = extractOptionalString(map, "key", "scope", validation); + + assertNull(createdString); + assertFalse(validation.validationErrors().isEmpty()); + assertTrue(map.isEmpty()); + assertThat(validation.validationErrors().get(0), is("[scope] Invalid value empty string. [key] must be a non-empty string")); + } + + public void testExtractRequiredString_CreatesString() { + var validation = new ValidationException(); + Map map = modifiableMap(Map.of("key", "value")); + var createdString = extractRequiredString(map, "key", "scope", validation); + + assertTrue(validation.validationErrors().isEmpty()); + assertNotNull(createdString); + assertThat(createdString, is("value")); + assertTrue(map.isEmpty()); + } + + public void testExtractRequiredString_AddsException_WhenFieldDoesNotExist() { + var validation = new ValidationException(); + Map map = modifiableMap(Map.of("key", "value")); + var createdString = extractRequiredSecureString(map, "abc", "scope", validation); + + assertNull(createdString); + assertFalse(validation.validationErrors().isEmpty()); + assertThat(map.size(), is(1)); + assertThat(validation.validationErrors().get(0), is("[scope] does not contain the required setting [abc]")); + } + + public void testExtractRequiredString_AddsException_WhenFieldIsEmpty() { + var validation = new ValidationException(); + Map map = modifiableMap(Map.of("key", "")); + var createdString = extractOptionalString(map, "key", "scope", validation); + + assertNull(createdString); + assertFalse(validation.validationErrors().isEmpty()); + assertTrue(map.isEmpty()); + assertThat(validation.validationErrors().get(0), is("[scope] Invalid value empty string. 
[key] must be a non-empty string")); + } + + public void testExtractOptionalString_CreatesString() { + var validation = new ValidationException(); + Map map = modifiableMap(Map.of("key", "value")); + var createdString = extractOptionalString(map, "key", "scope", validation); + + assertTrue(validation.validationErrors().isEmpty()); + assertNotNull(createdString); + assertThat(createdString, is("value")); + assertTrue(map.isEmpty()); + } + + public void testExtractOptionalString_DoesNotAddException_WhenFieldDoesNotExist() { + var validation = new ValidationException(); + Map map = modifiableMap(Map.of("key", "value")); + var createdString = extractOptionalString(map, "abc", "scope", validation); + + assertNull(createdString); + assertTrue(validation.validationErrors().isEmpty()); + assertThat(map.size(), is(1)); + } + + public void testExtractOptionalString_AddsException_WhenFieldIsEmpty() { + var validation = new ValidationException(); + Map map = modifiableMap(Map.of("key", "")); + var createdString = extractOptionalString(map, "key", "scope", validation); + + assertNull(createdString); + assertFalse(validation.validationErrors().isEmpty()); + assertTrue(map.isEmpty()); + assertThat(validation.validationErrors().get(0), is("[scope] Invalid value empty string. [key] must be a non-empty string")); + } + + private static Map modifiableMap(Map aMap) { + return new HashMap<>(aMap); + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceComponentsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceComponentsTests.java new file mode 100644 index 0000000000000..8ce615ecbb060 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceComponentsTests.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; + +import static org.elasticsearch.xpack.inference.logging.ThrottlerManagerTests.mockThrottlerManager; + +public class ServiceComponentsTests extends ESTestCase { + public static ServiceComponents createWithEmptySettings(ThreadPool threadPool) { + return new ServiceComponents(threadPool, mockThrottlerManager(), Settings.EMPTY); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserServiceSettingsTests.java index 021904d7c2b67..525f701323511 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserServiceSettingsTests.java @@ -32,13 +32,33 @@ public void testFromMap() { assertThat(new HuggingFaceElserServiceSettings(url), is(serviceSettings)); } + public void testFromMap_EmptyUrl_ThrowsError() { + var thrownException = expectThrows( + ValidationException.class, + () -> HuggingFaceElserServiceSettings.fromMap(new HashMap<>(Map.of(HuggingFaceElserServiceSettings.URL, ""))) + ); + + assertThat( + thrownException.getMessage(), + containsString( + Strings.format( + "Validation Failed: 1: [service_settings] Invalid value empty string. [%s] must be a non-empty string;", + HuggingFaceElserServiceSettings.URL + ) + ) + ); + } + public void testFromMap_MissingUrl_ThrowsError() { var thrownException = expectThrows(ValidationException.class, () -> HuggingFaceElserServiceSettings.fromMap(new HashMap<>())); assertThat( thrownException.getMessage(), containsString( - Strings.format("[service_settings] does not contain the required setting [%s]", HuggingFaceElserServiceSettings.URL) + Strings.format( + "Validation Failed: 1: [service_settings] does not contain the required setting [%s];", + HuggingFaceElserServiceSettings.URL + ) ) ); } @@ -52,7 +72,13 @@ public void testFromMap_InvalidUrl_ThrowsError() { assertThat( thrownException.getMessage(), - containsString(Strings.format("Invalid url [%s] received in setting [service_settings]", url)) + is( + Strings.format( + "Validation Failed: 1: [service_settings] Invalid url [%s] received for field [%s];", + url, + HuggingFaceElserServiceSettings.URL + ) + ) ); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiResponseHandlerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiResponseHandlerTests.java new file mode 100644 index 0000000000000..cdef3914ec7c7 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiResponseHandlerTests.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.openai; + +import org.apache.http.HttpResponse; +import org.apache.http.StatusLine; +import org.apache.http.client.methods.HttpRequestBase; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.external.http.retry.RetryException; + +import static org.hamcrest.Matchers.containsString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class OpenAiResponseHandlerTests extends ESTestCase { + + public void testCheckForFailureStatusCode() { + var statusLine = mock(StatusLine.class); + when(statusLine.getStatusCode()).thenReturn(200).thenReturn(503).thenReturn(429).thenReturn(401).thenReturn(300).thenReturn(402); + + var httpResponse = mock(HttpResponse.class); + when(httpResponse.getStatusLine()).thenReturn(statusLine); + + var httpRequest = mock(HttpRequestBase.class); + + var httpResult = new HttpResult(httpResponse, new byte[] {}); + + // 200 ok + OpenAiResponseHandler.checkForFailureStatusCode(httpRequest, httpResult); + // 503 + var retryException = expectThrows( + RetryException.class, + () -> OpenAiResponseHandler.checkForFailureStatusCode(httpRequest, httpResult) + ); + assertFalse(retryException.shouldRetry()); + assertThat(retryException.getMessage(), containsString("Received a server error status code for request [null] status [503]")); + // 429 + retryException = expectThrows(RetryException.class, () -> OpenAiResponseHandler.checkForFailureStatusCode(httpRequest, httpResult)); + assertFalse(retryException.shouldRetry()); + assertThat(retryException.getMessage(), containsString("Received a rate limit status code for request [null] status [429]")); + // 401 + retryException = expectThrows(RetryException.class, () -> OpenAiResponseHandler.checkForFailureStatusCode(httpRequest, httpResult)); + assertFalse(retryException.shouldRetry()); + assertThat( + retryException.getMessage(), + containsString("Received a authentication error status code for request [null] status [401]") + ); + // 300 + retryException = expectThrows(RetryException.class, () -> OpenAiResponseHandler.checkForFailureStatusCode(httpRequest, httpResult)); + assertFalse(retryException.shouldRetry()); + assertThat(retryException.getMessage(), containsString("Unhandled redirection for request [null] status [300]")); + // 402 + retryException = expectThrows(RetryException.class, () -> OpenAiResponseHandler.checkForFailureStatusCode(httpRequest, httpResult)); + assertFalse(retryException.shouldRetry()); + assertThat(retryException.getMessage(), containsString("Received an unsuccessful status code for request [null] status [402]")); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceSettingsTests.java new file mode 100644 index 0000000000000..9fbcc3bec7a60 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceSettingsTests.java @@ -0,0 +1,141 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.openai; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +public class OpenAiServiceSettingsTests extends AbstractWireSerializingTestCase { + + public static OpenAiServiceSettings createRandomWithNonNullUrl() { + return new OpenAiServiceSettings(randomAlphaOfLength(15), randomAlphaOfLength(15)); + } + + /** + * The created settings can have a url set to null. + */ + public static OpenAiServiceSettings createRandom() { + var url = randomBoolean() ? randomAlphaOfLength(15) : null; + var organizationId = randomBoolean() ? randomAlphaOfLength(15) : null; + return new OpenAiServiceSettings(url, organizationId); + } + + public void testFromMap() { + var url = "https://www.abc.com"; + var org = "organization"; + var serviceSettings = OpenAiServiceSettings.fromMap( + new HashMap<>(Map.of(OpenAiServiceSettings.URL, url, OpenAiServiceSettings.ORGANIZATION, org)) + ); + + assertThat(serviceSettings, is(new OpenAiServiceSettings(url, org))); + } + + public void testFromMap_MissingUrl_DoesNotThrowException() { + var serviceSettings = OpenAiServiceSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceSettings.ORGANIZATION, "org"))); + assertNull(serviceSettings.uri()); + assertThat(serviceSettings.organizationId(), is("org")); + } + + public void testFromMap_EmptyUrl_ThrowsError() { + var thrownException = expectThrows( + ValidationException.class, + () -> OpenAiServiceSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceSettings.URL, ""))) + ); + + assertThat( + thrownException.getMessage(), + containsString( + Strings.format( + "Validation Failed: 1: [service_settings] Invalid value empty string. [%s] must be a non-empty string;", + OpenAiServiceSettings.URL + ) + ) + ); + } + + public void testFromMap_MissingOrganization_DoesNotThrowException() { + var serviceSettings = OpenAiServiceSettings.fromMap(new HashMap<>()); + assertNull(serviceSettings.uri()); + assertNull(serviceSettings.organizationId()); + } + + public void testFromMap_EmptyOrganization_ThrowsError() { + var thrownException = expectThrows( + ValidationException.class, + () -> OpenAiServiceSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceSettings.ORGANIZATION, ""))) + ); + + assertThat( + thrownException.getMessage(), + containsString( + Strings.format( + "Validation Failed: 1: [service_settings] Invalid value empty string. 
[%s] must be a non-empty string;", + OpenAiServiceSettings.ORGANIZATION + ) + ) + ); + } + + public void testFromMap_InvalidUrl_ThrowsError() { + var url = "https://www.abc^.com"; + var thrownException = expectThrows( + ValidationException.class, + () -> OpenAiServiceSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceSettings.URL, url))) + ); + + assertThat( + thrownException.getMessage(), + is( + Strings.format( + "Validation Failed: 1: [service_settings] Invalid url [%s] received for field [%s];", + url, + OpenAiServiceSettings.URL + ) + ) + ); + } + + @Override + protected Writeable.Reader instanceReader() { + return OpenAiServiceSettings::new; + } + + @Override + protected OpenAiServiceSettings createTestInstance() { + return createRandomWithNonNullUrl(); + } + + @Override + protected OpenAiServiceSettings mutateInstance(OpenAiServiceSettings instance) throws IOException { + return createRandomWithNonNullUrl(); + } + + public static Map getServiceSettingsMap(@Nullable String url, @Nullable String org) { + + var map = new HashMap(); + + if (url != null) { + map.put(OpenAiServiceSettings.URL, url); + } + + if (org != null) { + map.put(OpenAiServiceSettings.ORGANIZATION, org); + } + return map; + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java new file mode 100644 index 0000000000000..9cd7a4b4eed2c --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java @@ -0,0 +1,660 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.openai; + +import org.apache.http.HttpHeaders; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderFactory; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModel; +import org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModelTests; +import org.hamcrest.Matchers; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; +import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; +import static org.elasticsearch.xpack.inference.external.http.Utils.inferenceUtilityPool; +import static org.elasticsearch.xpack.inference.external.http.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.ORGANIZATION_HEADER; +import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectation; +import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; +import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceSettingsTests.getServiceSettingsMap; +import static org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsTaskSettingsTests.getTaskSettingsMap; +import static org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettingsTests.getSecretSettingsMap; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +public class OpenAiServiceTests extends ESTestCase { + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + private final MockWebServer webServer = new MockWebServer(); + private ThreadPool threadPool; + private HttpClientManager clientManager; + + @Before + public void init() throws Exception { + webServer.start(); + threadPool = 
createThreadPool(inferenceUtilityPool()); + clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mock(ThrottlerManager.class)); + } + + @After + public void shutdown() throws IOException { + clientManager.close(); + terminate(threadPool); + webServer.close(); + } + + public void testParseRequestConfig_CreatesAnOpenAiEmbeddingsModel() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var model = service.parseRequestConfig( + "id", + TaskType.TEXT_EMBEDDING, + getRequestConfigMap( + getServiceSettingsMap("url", "org"), + getTaskSettingsMap("model", "user"), + getSecretSettingsMap("secret") + ), + Set.of() + ); + + assertThat(model, instanceOf(OpenAiEmbeddingsModel.class)); + + var embeddingsModel = (OpenAiEmbeddingsModel) model; + assertThat(embeddingsModel.getServiceSettings().uri().toString(), is("url")); + assertThat(embeddingsModel.getServiceSettings().organizationId(), is("org")); + assertThat(embeddingsModel.getTaskSettings().model(), is("model")); + assertThat(embeddingsModel.getTaskSettings().user(), is("user")); + assertThat(embeddingsModel.getSecretSettings().apiKey().toString(), is("secret")); + } + } + + public void testParseRequestConfig_ThrowsUnsupportedModelType() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parseRequestConfig( + "id", + TaskType.SPARSE_EMBEDDING, + getRequestConfigMap( + getServiceSettingsMap("url", "org"), + getTaskSettingsMap("model", "user"), + getSecretSettingsMap("secret") + ), + Set.of() + ) + ); + + assertThat(thrownException.getMessage(), is("The [openai] service does not support task type [sparse_embedding]")); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInConfig() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var config = getRequestConfigMap( + getServiceSettingsMap("url", "org"), + getTaskSettingsMap("model", "user"), + getSecretSettingsMap("secret") + ); + config.put("extra_key", "value"); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parseRequestConfig("id", TaskType.TEXT_EMBEDDING, config, Set.of()) + ); + + assertThat( + thrownException.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [openai] service") + ); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInServiceSettingsMap() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var serviceSettings = getServiceSettingsMap("url", "org"); + serviceSettings.put("extra_key", "value"); + + var config = getRequestConfigMap(serviceSettings, getTaskSettingsMap("model", "user"), getSecretSettingsMap("secret")); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parseRequestConfig("id", TaskType.TEXT_EMBEDDING, config, Set.of()) + ); + + assertThat( + thrownException.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to 
the [openai] service") + ); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInTaskSettingsMap() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var taskSettingsMap = getTaskSettingsMap("model", "user"); + taskSettingsMap.put("extra_key", "value"); + + var config = getRequestConfigMap(getServiceSettingsMap("url", "org"), taskSettingsMap, getSecretSettingsMap("secret")); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parseRequestConfig("id", TaskType.TEXT_EMBEDDING, config, Set.of()) + ); + + assertThat( + thrownException.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [openai] service") + ); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInSecretSettingsMap() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var secretSettingsMap = getSecretSettingsMap("secret"); + secretSettingsMap.put("extra_key", "value"); + + var config = getRequestConfigMap(getServiceSettingsMap("url", "org"), getTaskSettingsMap("model", "user"), secretSettingsMap); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parseRequestConfig("id", TaskType.TEXT_EMBEDDING, config, Set.of()) + ); + + assertThat( + thrownException.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [openai] service") + ); + } + } + + public void testParseRequestConfig_CreatesAnOpenAiEmbeddingsModelWithoutUserUrlOrganization() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var model = service.parseRequestConfig( + "id", + TaskType.TEXT_EMBEDDING, + getRequestConfigMap(getServiceSettingsMap(null, null), getTaskSettingsMap("model", null), getSecretSettingsMap("secret")), + Set.of() + ); + + assertThat(model, instanceOf(OpenAiEmbeddingsModel.class)); + + var embeddingsModel = (OpenAiEmbeddingsModel) model; + assertNull(embeddingsModel.getServiceSettings().uri()); + assertNull(embeddingsModel.getServiceSettings().organizationId()); + assertThat(embeddingsModel.getTaskSettings().model(), is("model")); + assertNull(embeddingsModel.getTaskSettings().user()); + assertThat(embeddingsModel.getSecretSettings().apiKey().toString(), is("secret")); + } + } + + public void testParsePersistedConfig_CreatesAnOpenAiEmbeddingsModel() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var persistedConfig = getPersistedConfigMap( + getServiceSettingsMap("url", "org"), + getTaskSettingsMap("model", "user"), + getSecretSettingsMap("secret") + ); + + var model = service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config(), persistedConfig.secrets()); + + assertThat(model, instanceOf(OpenAiEmbeddingsModel.class)); + + var embeddingsModel = (OpenAiEmbeddingsModel) model; + assertThat(embeddingsModel.getServiceSettings().uri().toString(), is("url")); + assertThat(embeddingsModel.getServiceSettings().organizationId(), is("org")); + assertThat(embeddingsModel.getTaskSettings().model(), is("model")); 
+ assertThat(embeddingsModel.getTaskSettings().user(), is("user")); + assertThat(embeddingsModel.getSecretSettings().apiKey().toString(), is("secret")); + } + } + + public void testParsePersistedConfig_ThrowsErrorTryingToParseInvalidModel() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var persistedConfig = getPersistedConfigMap( + getServiceSettingsMap("url", "org"), + getTaskSettingsMap("model", "user"), + getSecretSettingsMap("secret") + ); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parsePersistedConfig("id", TaskType.SPARSE_EMBEDDING, persistedConfig.config(), persistedConfig.secrets()) + ); + + assertThat( + thrownException.getMessage(), + is("Failed to parse stored model [id] for [openai] service, please delete and add the service again") + ); + } + } + + public void testParsePersistedConfig_CreatesAnOpenAiEmbeddingsModelWithoutUserUrlOrganization() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var persistedConfig = getPersistedConfigMap( + getServiceSettingsMap(null, null), + getTaskSettingsMap("model", null), + getSecretSettingsMap("secret") + ); + + var model = service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config(), persistedConfig.secrets()); + + assertThat(model, instanceOf(OpenAiEmbeddingsModel.class)); + + var embeddingsModel = (OpenAiEmbeddingsModel) model; + assertNull(embeddingsModel.getServiceSettings().uri()); + assertNull(embeddingsModel.getServiceSettings().organizationId()); + assertThat(embeddingsModel.getTaskSettings().model(), is("model")); + assertNull(embeddingsModel.getTaskSettings().user()); + assertThat(embeddingsModel.getSecretSettings().apiKey().toString(), is("secret")); + } + } + + public void testParsePersistedConfig_ThrowsWhenAnExtraKeyExistsInConfig() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var persistedConfig = getPersistedConfigMap( + getServiceSettingsMap("url", "org"), + getTaskSettingsMap("model", "user"), + getSecretSettingsMap("secret") + ); + persistedConfig.config().put("extra_key", "value"); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config(), persistedConfig.secrets()) + ); + + assertThat( + thrownException.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [openai] service") + ); + } + } + + public void testParsePersistedConfig_ThrowsWhenAnExtraKeyExistsInSecretsSettings() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var secretSettingsMap = getSecretSettingsMap("secret"); + secretSettingsMap.put("extra_key", "value"); + + var persistedConfig = getPersistedConfigMap( + getServiceSettingsMap("url", "org"), + getTaskSettingsMap("model", "user"), + secretSettingsMap + ); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config(), 
persistedConfig.secrets()) + ); + + assertThat( + thrownException.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [openai] service") + ); + } + } + + public void testParsePersistedConfig_ThrowsWhenAnExtraKeyExistsInSecrets() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var persistedConfig = getPersistedConfigMap( + getServiceSettingsMap("url", "org"), + getTaskSettingsMap("model", "user"), + getSecretSettingsMap("secret") + ); + persistedConfig.secrets.put("extra_key", "value"); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config(), persistedConfig.secrets()) + ); + + assertThat( + thrownException.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [openai] service") + ); + } + } + + public void testParsePersistedConfig_ThrowsWhenAnExtraKeyExistsInServiceSettings() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var serviceSettingsMap = getServiceSettingsMap("url", "org"); + serviceSettingsMap.put("extra_key", "value"); + + var persistedConfig = getPersistedConfigMap( + serviceSettingsMap, + getTaskSettingsMap("model", "user"), + getSecretSettingsMap("secret") + ); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config(), persistedConfig.secrets()) + ); + + assertThat( + thrownException.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [openai] service") + ); + } + } + + public void testParsePersistedConfig_ThrowsWhenAnExtraKeyExistsInTaskSettings() throws IOException { + try ( + var service = new OpenAiService( + new SetOnce<>(mock(HttpRequestSenderFactory.class)), + new SetOnce<>(createWithEmptySettings(threadPool)) + ) + ) { + var taskSettingsMap = getTaskSettingsMap("model", "user"); + taskSettingsMap.put("extra_key", "value"); + + var persistedConfig = getPersistedConfigMap( + getServiceSettingsMap("url", "org"), + taskSettingsMap, + getSecretSettingsMap("secret") + ); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config(), persistedConfig.secrets()) + ); + + assertThat( + thrownException.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [openai] service") + ); + } + } + + public void testStart_InitializesTheSender() throws IOException { + var sender = mock(Sender.class); + + var factory = mock(HttpRequestSenderFactory.class); + when(factory.createSender(anyString())).thenReturn(sender); + + try (var service = new OpenAiService(new SetOnce<>(factory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + PlainActionFuture listener = new PlainActionFuture<>(); + service.start(mock(Model.class), listener); + + listener.actionGet(TIMEOUT); + verify(sender, times(1)).start(); + verify(factory, times(1)).createSender(anyString()); + } + + verify(sender, times(1)).close(); + verifyNoMoreInteractions(factory); + verifyNoMoreInteractions(sender); + } + + public void 
testStart_CallingStartTwiceKeepsSameSenderReference() throws IOException { + var sender = mock(Sender.class); + + var factory = mock(HttpRequestSenderFactory.class); + when(factory.createSender(anyString())).thenReturn(sender); + + try (var service = new OpenAiService(new SetOnce<>(factory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + PlainActionFuture listener = new PlainActionFuture<>(); + service.start(mock(Model.class), listener); + listener.actionGet(TIMEOUT); + + service.start(mock(Model.class), listener); + listener.actionGet(TIMEOUT); + + verify(factory, times(1)).createSender(anyString()); + verify(sender, times(2)).start(); + } + + verify(sender, times(1)).close(); + verifyNoMoreInteractions(factory); + verifyNoMoreInteractions(sender); + } + + public void testInfer_ThrowsErrorWhenModelIsNotOpenAiModel() throws IOException { + var sender = mock(Sender.class); + + var factory = mock(HttpRequestSenderFactory.class); + when(factory.createSender(anyString())).thenReturn(sender); + + var mockModel = getInvalidModel("model_id", "service_name"); + + try (var service = new OpenAiService(new SetOnce<>(factory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + PlainActionFuture listener = new PlainActionFuture<>(); + service.infer(mockModel, List.of(""), new HashMap<>(), listener); + + var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); + assertThat( + thrownException.getMessage(), + is("The internal model was invalid, please delete the service [service_name] with id [model_id] and add it again.") + ); + + verify(factory, times(1)).createSender(anyString()); + verify(sender, times(1)).start(); + } + + verify(sender, times(1)).close(); + verifyNoMoreInteractions(factory); + verifyNoMoreInteractions(sender); + } + + public void testInfer_SendsRequest() throws IOException { + var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY); + + try (var service = new OpenAiService(new SetOnce<>(senderFactory), new SetOnce<>(createWithEmptySettings(threadPool)))) { + + String responseJson = """ + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 0.0123, + -0.0123 + ] + } + ], + "model": "text-embedding-ada-002-v2", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var model = OpenAiEmbeddingsModelTests.createModel(getUrl(webServer), "org", "secret", "model", "user"); + PlainActionFuture listener = new PlainActionFuture<>(); + service.infer(model, List.of("abc"), new HashMap<>(), listener); + + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), Matchers.is(buildExpectation(List.of(List.of(0.0123F, -0.0123F))))); + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.AUTHORIZATION), equalTo("Bearer secret")); + assertThat(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER), equalTo("org")); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), Matchers.is(3)); + assertThat(requestMap.get("input"), Matchers.is(List.of("abc"))); + assertThat(requestMap.get("model"), Matchers.is("model")); + 
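// Sketch inferred from the surrounding assertions (not taken from the request builder itself):
// together with the "user" assertion that follows, the outbound request body is pinned to
//   { "input": ["abc"], "model": "model", "user": "user" }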
+            assertThat(requestMap.get("user"), Matchers.is("user"));
+        }
+    }
+
+    public void testInfer_UnauthorisedResponse() throws IOException {
+        var senderFactory = new HttpRequestSenderFactory(threadPool, clientManager, mockClusterServiceEmpty(), Settings.EMPTY);
+
+        try (var service = new OpenAiService(new SetOnce<>(senderFactory), new SetOnce<>(createWithEmptySettings(threadPool)))) {
+
+            String responseJson = """
+                {
+                    "error": {
+                        "message": "Incorrect API key provided:",
+                        "type": "invalid_request_error",
+                        "param": null,
+                        "code": "invalid_api_key"
+                    }
+                }
+                """;
+            webServer.enqueue(new MockResponse().setResponseCode(401).setBody(responseJson));
+
+            var model = OpenAiEmbeddingsModelTests.createModel(getUrl(webServer), "org", "secret", "model", "user");
+            PlainActionFuture<InferenceServiceResults> listener = new PlainActionFuture<>();
+            service.infer(model, List.of("abc"), new HashMap<>(), listener);
+
+            var error = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT));
+            assertThat(error.getMessage(), containsString("Received a authentication error status code for request"));
+            assertThat(error.getMessage(), containsString("Error message: [Incorrect API key provided:]"));
+            assertThat(webServer.requests(), hasSize(1));
+        }
+    }
+
+    private static Model getInvalidModel(String modelId, String serviceName) {
+        var mockConfigs = mock(ModelConfigurations.class);
+        when(mockConfigs.getModelId()).thenReturn(modelId);
+        when(mockConfigs.getService()).thenReturn(serviceName);
+
+        var mockModel = mock(Model.class);
+        when(mockModel.getConfigurations()).thenReturn(mockConfigs);
+
+        return mockModel;
+    }
+
+    private Map<String, Object> getRequestConfigMap(
+        Map<String, Object> serviceSettings,
+        Map<String, Object> taskSettings,
+        Map<String, Object> secretSettings
+    ) {
+        var builtServiceSettings = new HashMap<>();
+        builtServiceSettings.putAll(serviceSettings);
+        builtServiceSettings.putAll(secretSettings);
+
+        return new HashMap<>(
+            Map.of(ModelConfigurations.SERVICE_SETTINGS, builtServiceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)
+        );
+    }
+
+    private PersistedConfig getPersistedConfigMap(
+        Map<String, Object> serviceSettings,
+        Map<String, Object> taskSettings,
+        Map<String, Object> secretSettings
+    ) {
+
+        return new PersistedConfig(
+            new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)),
+            new HashMap<>(Map.of(ModelSecrets.SECRET_SETTINGS, secretSettings))
+        );
+    }
+
+    private record PersistedConfig(Map<String, Object> config, Map<String, Object> secrets) {}
+}
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModelTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModelTests.java
new file mode 100644
index 0000000000000..96ced66723f04
--- /dev/null
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsModelTests.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */ + +package org.elasticsearch.xpack.inference.services.openai.embeddings; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.services.openai.OpenAiServiceSettings; +import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; + +import static org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsRequestTaskSettingsTests.getRequestTaskSettingsMap; +import static org.hamcrest.Matchers.is; + +public class OpenAiEmbeddingsModelTests extends ESTestCase { + + public void testOverrideWith_OverridesUser() { + var model = createModel("url", "org", "api_key", "model_name", null); + var requestTaskSettingsMap = getRequestTaskSettingsMap(null, "user_override"); + + var overriddenModel = model.overrideWith(requestTaskSettingsMap); + + assertThat(overriddenModel, is(createModel("url", "org", "api_key", "model_name", "user_override"))); + } + + public static OpenAiEmbeddingsModel createModel( + String url, + @Nullable String org, + String apiKey, + String modelName, + @Nullable String user + ) { + return new OpenAiEmbeddingsModel( + "id", + TaskType.TEXT_EMBEDDING, + "service", + new OpenAiServiceSettings(url, org), + new OpenAiEmbeddingsTaskSettings(modelName, user), + new DefaultSecretSettings(new SecureString(apiKey.toCharArray())) + ); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettingsTests.java new file mode 100644 index 0000000000000..b76e9f9a6d5c6 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettingsTests.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.inference.services.openai.embeddings;
+
+import org.elasticsearch.core.Nullable;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.is;
+
+public class OpenAiEmbeddingsRequestTaskSettingsTests extends ESTestCase {
+    public void testFromMap_ReturnsEmptySettings_WhenTheMapIsEmpty() {
+        var settings = OpenAiEmbeddingsRequestTaskSettings.fromMap(new HashMap<>(Map.of()));
+
+        assertNull(settings.model());
+        assertNull(settings.user());
+    }
+
+    public void testFromMap_ReturnsEmptySettings_WhenTheMapDoesNotContainTheFields() {
+        var settings = OpenAiEmbeddingsRequestTaskSettings.fromMap(new HashMap<>(Map.of("key", "model")));
+
+        assertNull(settings.model());
+        assertNull(settings.user());
+    }
+
+    public void testFromMap_ReturnsEmptyModel_WhenTheMapDoesNotContainThatField() {
+        var settings = OpenAiEmbeddingsRequestTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.USER, "user")));
+
+        assertNull(settings.model());
+        assertThat(settings.user(), is("user"));
+    }
+
+    public void testFromMap_ReturnsEmptyUser_WhenTheMapDoesNotContainThatField() {
+        var settings = OpenAiEmbeddingsRequestTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.MODEL, "model")));
+
+        assertNull(settings.user());
+        assertThat(settings.model(), is("model"));
+    }
+
+    public static Map<String, Object> getRequestTaskSettingsMap(@Nullable String model, @Nullable String user) {
+        var map = new HashMap<String, Object>();
+
+        if (model != null) {
+            map.put(OpenAiEmbeddingsTaskSettings.MODEL, model);
+        }
+
+        if (user != null) {
+            map.put(OpenAiEmbeddingsTaskSettings.USER, user);
+        }
+
+        return map;
+    }
+}
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettingsTests.java
new file mode 100644
index 0000000000000..d33ec12016cad
--- /dev/null
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettingsTests.java
@@ -0,0 +1,128 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.inference.services.openai.embeddings;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.ValidationException;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.core.Nullable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.is;
+
+public class OpenAiEmbeddingsTaskSettingsTests extends AbstractWireSerializingTestCase<OpenAiEmbeddingsTaskSettings> {
+
+    public static OpenAiEmbeddingsTaskSettings createRandomWithUser() {
+        return new OpenAiEmbeddingsTaskSettings(randomAlphaOfLength(15), randomAlphaOfLength(15));
+    }
+
+    /**
+     * The created settings can have the user set to null.
+     */
+    public static OpenAiEmbeddingsTaskSettings createRandom() {
+        var user = randomBoolean() ? randomAlphaOfLength(15) : null;
+        return new OpenAiEmbeddingsTaskSettings(randomAlphaOfLength(15), user);
+    }
+
+    public void testFromMap_MissingModel_ThrowsException() {
+        var thrownException = expectThrows(
+            ValidationException.class,
+            () -> OpenAiEmbeddingsTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.USER, "user")))
+        );
+
+        assertThat(
+            thrownException.getMessage(),
+            is(
+                Strings.format(
+                    "Validation Failed: 1: [task_settings] does not contain the required setting [%s];",
+                    OpenAiEmbeddingsTaskSettings.MODEL
+                )
+            )
+        );
+    }
+
+    public void testFromMap_CreatesWithModelAndUser() {
+        var taskSettings = OpenAiEmbeddingsTaskSettings.fromMap(
+            new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.MODEL, "model", OpenAiEmbeddingsTaskSettings.USER, "user"))
+        );
+
+        assertThat(taskSettings.model(), is("model"));
+        assertThat(taskSettings.user(), is("user"));
+    }
+
+    public void testFromMap_MissingUser_DoesNotThrowException() {
+        var taskSettings = OpenAiEmbeddingsTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.MODEL, "model")));
+
+        assertThat(taskSettings.model(), is("model"));
+        assertNull(taskSettings.user());
+    }
+
+    public void testOverrideWith_KeepsOriginalValuesWhenOverridesAreNull() {
+        var taskSettings = OpenAiEmbeddingsTaskSettings.fromMap(
+            new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.MODEL, "model", OpenAiEmbeddingsTaskSettings.USER, "user"))
+        );
+
+        var overriddenTaskSettings = taskSettings.overrideWith(OpenAiEmbeddingsRequestTaskSettings.EMPTY_SETTINGS);
+        assertThat(overriddenTaskSettings, is(taskSettings));
+    }
+
+    public void testOverrideWith_UsesOverriddenSettings() {
+        var taskSettings = OpenAiEmbeddingsTaskSettings.fromMap(
+            new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.MODEL, "model", OpenAiEmbeddingsTaskSettings.USER, "user"))
+        );
+
+        var requestTaskSettings = OpenAiEmbeddingsRequestTaskSettings.fromMap(
+            new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.MODEL, "model2", OpenAiEmbeddingsTaskSettings.USER, "user2"))
+        );
+
+        var overriddenTaskSettings = taskSettings.overrideWith(requestTaskSettings);
+        assertThat(overriddenTaskSettings, is(new OpenAiEmbeddingsTaskSettings("model2", "user2")));
+    }
+
+    public void testOverrideWith_UsesOnlyNonNullModelSetting() {
+        var taskSettings = OpenAiEmbeddingsTaskSettings.fromMap(
+            new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.MODEL, "model", OpenAiEmbeddingsTaskSettings.USER, "user"))
+        );
+
+        var requestTaskSettings = OpenAiEmbeddingsRequestTaskSettings.fromMap(
+            new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.MODEL, "model2"))
+        );
+
+        var overriddenTaskSettings = taskSettings.overrideWith(requestTaskSettings);
+        assertThat(overriddenTaskSettings, is(new OpenAiEmbeddingsTaskSettings("model2", "user")));
+    }
+
+    @Override
+    protected Writeable.Reader<OpenAiEmbeddingsTaskSettings> instanceReader() {
+        return OpenAiEmbeddingsTaskSettings::new;
+    }
+
+    @Override
+    protected OpenAiEmbeddingsTaskSettings createTestInstance() {
+        return createRandomWithUser();
+    }
+
+    @Override
+    protected OpenAiEmbeddingsTaskSettings mutateInstance(OpenAiEmbeddingsTaskSettings instance) throws IOException {
+        return createRandomWithUser();
+    }
+
+    public static Map<String, Object> getTaskSettingsMap(String model, @Nullable String user) {
+        var map = new HashMap<String, Object>(Map.of(OpenAiEmbeddingsTaskSettings.MODEL, model));
+
+        if (user != null) {
+            map.put(OpenAiEmbeddingsTaskSettings.USER, user);
+        }
+
+        return map;
+    }
+}
diff --git
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/settings/DefaultSecretSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/settings/DefaultSecretSettingsTests.java new file mode 100644 index 0000000000000..2fd952fbbdda4 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/settings/DefaultSecretSettingsTests.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.settings; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +public class DefaultSecretSettingsTests extends AbstractWireSerializingTestCase { + + public static DefaultSecretSettings createRandom() { + return new DefaultSecretSettings(new SecureString(randomAlphaOfLength(15).toCharArray())); + } + + public void testFromMap() { + var apiKey = "abc"; + var serviceSettings = DefaultSecretSettings.fromMap(new HashMap<>(Map.of(DefaultSecretSettings.API_KEY, apiKey))); + + assertThat(new DefaultSecretSettings(new SecureString(apiKey.toCharArray())), is(serviceSettings)); + } + + public void testFromMap_MissingApiKey_ThrowsError() { + var thrownException = expectThrows(ValidationException.class, () -> DefaultSecretSettings.fromMap(new HashMap<>())); + + assertThat( + thrownException.getMessage(), + containsString(Strings.format("[secret_settings] does not contain the required setting [%s]", DefaultSecretSettings.API_KEY)) + ); + } + + public void testFromMap_EmptyApiKey_ThrowsError() { + var thrownException = expectThrows( + ValidationException.class, + () -> DefaultSecretSettings.fromMap(new HashMap<>(Map.of(DefaultSecretSettings.API_KEY, ""))) + ); + + assertThat( + thrownException.getMessage(), + containsString( + Strings.format( + "[secret_settings] Invalid value empty string. 
[%s] must be a non-empty string", + DefaultSecretSettings.API_KEY + ) + ) + ); + } + + @Override + protected Writeable.Reader instanceReader() { + return DefaultSecretSettings::new; + } + + @Override + protected DefaultSecretSettings createTestInstance() { + return createRandom(); + } + + @Override + protected DefaultSecretSettings mutateInstance(DefaultSecretSettings instance) throws IOException { + return createRandom(); + } + + public static Map getSecretSettingsMap(String apiKey) { + return new HashMap<>(Map.of(DefaultSecretSettings.API_KEY, apiKey)); + } +} diff --git a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java index 6c8462c9e4948..ebe25ea1da1d9 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java +++ b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java @@ -30,7 +30,6 @@ import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.plain.ConstantIndexFieldData; -import org.elasticsearch.index.mapper.BlockDocValuesReader; import org.elasticsearch.index.mapper.BlockLoader; import org.elasticsearch.index.mapper.ConstantFieldType; import org.elasticsearch.index.mapper.DocumentParserContext; @@ -137,45 +136,10 @@ public String familyTypeName() { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - // TODO build a constant block directly if (value == null) { - return BlockDocValuesReader.nulls(); + return BlockLoader.CONSTANT_NULLS; } - BytesRef bytes = new BytesRef(value); - return context -> new BlockDocValuesReader() { - private int docId; - - @Override - public int docID() { - return docId; - } - - @Override - public BlockLoader.BytesRefBuilder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.bytesRefs(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoader.Docs docs) { - try (BlockLoader.BytesRefBuilder builder = builder(factory, docs.count())) { - for (int i = 0; i < docs.count(); i++) { - builder.appendBytesRef(bytes); - } - return builder.build(); - } - } - - @Override - public void readValuesFromSingleDoc(int docId, BlockLoader.Builder builder) { - this.docId = docId; - ((BlockLoader.BytesRefBuilder) builder).appendBytesRef(bytes); - } - - @Override - public String toString() { - return "ConstantKeyword"; - } - }; + return BlockLoader.constantBytes(new BytesRef(value)); } @Override diff --git a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java index aaa28e28b72c9..1bd591a827059 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java +++ b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java @@ -16,8 +16,6 @@ import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.CheckedFunction; -import org.elasticsearch.index.mapper.BlockDocValuesReader; import org.elasticsearch.index.mapper.BlockLoader; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentParsingException; @@ -229,24 +227,7 @@ protected boolean allowsNullValues() { * for newly created indices that haven't received any documents that * contain the field. */ - public void testNullValueBlockLoaderReadValues() throws IOException { - testNullBlockLoader(blockReader -> (TestBlock) blockReader.readValues(TestBlock.FACTORY, TestBlock.docs(0))); - } - - /** - * Test loading blocks when there is no defined value. This is allowed - * for newly created indices that haven't received any documents that - * contain the field. - */ - public void testNullValueBlockLoaderReadValuesFromSingleDoc() throws IOException { - testNullBlockLoader(blockReader -> { - TestBlock block = (TestBlock) blockReader.builder(TestBlock.FACTORY, 1); - blockReader.readValuesFromSingleDoc(0, block); - return block; - }); - } - - private void testNullBlockLoader(CheckedFunction body) throws IOException { + public void testNullValueBlockLoader() throws IOException { MapperService mapper = createMapperService(syntheticSourceMapping(b -> { b.startObject("field"); b.field("type", "constant_keyword"); @@ -267,6 +248,11 @@ public SearchLookup lookup() { public Set sourcePaths(String name) { return mapper.mappingLookup().sourcePaths(name); } + + @Override + public String parentField(String field) { + throw new UnsupportedOperationException(); + } }); try (Directory directory = newDirectory()) { RandomIndexWriter iw = new RandomIndexWriter(random(), directory); @@ -274,7 +260,18 @@ public Set sourcePaths(String name) { iw.addDocument(doc); iw.close(); try (DirectoryReader reader = DirectoryReader.open(directory)) { - TestBlock block = body.apply(loader.reader(reader.leaves().get(0))); + TestBlock block = (TestBlock) loader.columnAtATimeReader(reader.leaves().get(0)) + .read(TestBlock.factory(reader.numDocs()), new BlockLoader.Docs() { + @Override + public int count() { + return 1; + } + + @Override + public int get(int i) { + return 0; + } + }); assertThat(block.get(0), nullValue()); } } diff --git a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java index 4ba4a1df72933..24ad009990b0b 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java +++ b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java @@ -116,8 +116,8 @@ public void testSearchIdleConstantKeywordMatchNoIndex() throws InterruptedExcept "type=keyword" ); - assertEquals(RestStatus.CREATED, client().prepareIndex(idleIndex).setSource("keyword", randomAlphaOfLength(10)).get().status()); - assertEquals(RestStatus.CREATED, client().prepareIndex(activeIndex).setSource("keyword", randomAlphaOfLength(10)).get().status()); + assertEquals(RestStatus.CREATED, prepareIndex(idleIndex).setSource("keyword", randomAlphaOfLength(10)).get().status()); + assertEquals(RestStatus.CREATED, prepareIndex(activeIndex).setSource("keyword", randomAlphaOfLength(10)).get().status()); 
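// These hunks assume the test framework now exposes a prepareIndex(String) convenience; a
// minimal sketch of what such a helper would wrap (an assumption, not shown in this diff):
//   protected IndexRequestBuilder prepareIndex(String index) {
//       return client().prepareIndex(index);
//   }
// so the replaced client().prepareIndex(index) calls keep identical behaviour.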
assertEquals(RestStatus.OK, indicesAdmin().prepareRefresh(idleIndex, activeIndex).get().getStatus()); waitUntil( @@ -185,8 +185,8 @@ public void testSearchIdleConstantKeywordMatchOneIndex() throws InterruptedExcep "type=keyword" ); - assertEquals(RestStatus.CREATED, client().prepareIndex(idleIndex).setSource("keyword", randomAlphaOfLength(10)).get().status()); - assertEquals(RestStatus.CREATED, client().prepareIndex(activeIndex).setSource("keyword", randomAlphaOfLength(10)).get().status()); + assertEquals(RestStatus.CREATED, prepareIndex(idleIndex).setSource("keyword", randomAlphaOfLength(10)).get().status()); + assertEquals(RestStatus.CREATED, prepareIndex(activeIndex).setSource("keyword", randomAlphaOfLength(10)).get().status()); assertEquals(RestStatus.OK, indicesAdmin().prepareRefresh(idleIndex, activeIndex).get().getStatus()); waitUntil( @@ -251,8 +251,8 @@ public void testSearchIdleConstantKeywordMatchTwoIndices() throws InterruptedExc "type=keyword" ); - assertEquals(RestStatus.CREATED, client().prepareIndex(idleIndex).setSource("keyword", randomAlphaOfLength(10)).get().status()); - assertEquals(RestStatus.CREATED, client().prepareIndex(activeIndex).setSource("keyword", randomAlphaOfLength(10)).get().status()); + assertEquals(RestStatus.CREATED, prepareIndex(idleIndex).setSource("keyword", randomAlphaOfLength(10)).get().status()); + assertEquals(RestStatus.CREATED, prepareIndex(activeIndex).setSource("keyword", randomAlphaOfLength(10)).get().status()); assertEquals(RestStatus.OK, indicesAdmin().prepareRefresh(idleIndex, activeIndex).get().getStatus()); waitUntil( @@ -310,8 +310,8 @@ public void testSearchIdleWildcardQueryMatchOneIndex() throws InterruptedExcepti "type=constant_keyword,value=test2_value" ); - assertEquals(RestStatus.CREATED, client().prepareIndex(idleIndex).setSource("keyword", "value").get().status()); - assertEquals(RestStatus.CREATED, client().prepareIndex(activeIndex).setSource("keyword", "value").get().status()); + assertEquals(RestStatus.CREATED, prepareIndex(idleIndex).setSource("keyword", "value").get().status()); + assertEquals(RestStatus.CREATED, prepareIndex(activeIndex).setSource("keyword", "value").get().status()); assertEquals(RestStatus.OK, indicesAdmin().prepareRefresh(idleIndex, activeIndex).get().getStatus()); waitUntil( diff --git a/x-pack/plugin/mapper-counted-keyword/build.gradle b/x-pack/plugin/mapper-counted-keyword/build.gradle new file mode 100644 index 0000000000000..f2e7152406962 --- /dev/null +++ b/x-pack/plugin/mapper-counted-keyword/build.gradle @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +apply plugin: 'elasticsearch.internal-es-plugin' + +esplugin { + name 'counted-keyword' + description 'Module for the counted-keyword field type, which allows to consider duplicates in an array of values of that type.' 
+ classname 'org.elasticsearch.xpack.countedkeyword.CountedKeywordMapperPlugin' + extendedPlugins = ['x-pack-core'] +} +base { + archivesName = 'x-pack-counted-keyword' +} + +dependencies { + compileOnly project(path: xpackModule('core')) +} diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java new file mode 100644 index 0000000000000..d04bb88325cc7 --- /dev/null +++ b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java @@ -0,0 +1,393 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.countedkeyword; + +import org.apache.lucene.document.BinaryDocValuesField; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.search.SortField; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.ByteArrayStreamInput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.index.fielddata.AbstractSortedSetDocValues; +import org.elasticsearch.index.fielddata.FieldData; +import org.elasticsearch.index.fielddata.FieldDataContext; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.LeafOrdinalsFieldData; +import org.elasticsearch.index.fielddata.plain.AbstractIndexOrdinalsFieldData; +import org.elasticsearch.index.fielddata.plain.AbstractLeafOrdinalsFieldData; +import org.elasticsearch.index.mapper.BinaryFieldMapper; +import org.elasticsearch.index.mapper.DocumentParserContext; +import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.KeywordFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.Mapper; +import org.elasticsearch.index.mapper.MapperBuilderContext; +import org.elasticsearch.index.mapper.SourceValueFetcher; +import org.elasticsearch.index.mapper.StringFieldType; +import org.elasticsearch.index.mapper.TextSearchInfo; +import org.elasticsearch.index.mapper.ValueFetcher; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.script.field.KeywordDocValuesField; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.MultiValueMode; +import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; +import org.elasticsearch.search.sort.BucketedSort; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; + +import static 
org.elasticsearch.common.lucene.Lucene.KEYWORD_ANALYZER;
+
+/**
+ * <p>A special field mapper for multi-valued keywords that may contain duplicate values. If the associated counted_terms
+ * aggregation is used, duplicates are considered in aggregation results. Consider the following values:</p>
+ *
+ * <ul>
+ *     <li>["a", "a", "b"]</li>
+ *     <li>["a", "b", "b"]</li>
+ * </ul>
+ *
+ * <p>While a regular keyword field and the corresponding terms aggregation deduplicate values and report a count of
+ * 2 for each key (one per document), a counted_terms aggregation on a counted_keyword field will consider
+ * the actual count and report a count of 3 for each key.</p>
+ *
+ * <p>Only regular source is supported; synthetic source won't work.</p>
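+ *
+ * <p>For illustration, a counted_terms aggregation over the two example documents above would return
+ * buckets along these lines (abbreviated response excerpt):</p>
+ * <pre>
+ * "buckets": [
+ *   { "key": "a", "doc_count": 3 },
+ *   { "key": "b", "doc_count": 3 }
+ * ]
+ * </pre>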

    + */ +public class CountedKeywordFieldMapper extends FieldMapper { + public static final String CONTENT_TYPE = "counted_keyword"; + public static final String COUNT_FIELD_NAME_SUFFIX = "_count"; + + public static final FieldType FIELD_TYPE; + + static { + FieldType ft = new FieldType(); + ft.setDocValuesType(DocValuesType.SORTED_SET); + ft.setTokenized(false); + ft.setOmitNorms(true); + ft.setIndexOptions(IndexOptions.DOCS); + ft.freeze(); + FIELD_TYPE = freezeAndDeduplicateFieldType(ft); + } + + private static class CountedKeywordFieldType extends StringFieldType { + + private final MappedFieldType countFieldType; + + CountedKeywordFieldType( + String name, + boolean isIndexed, + boolean isStored, + boolean hasDocValues, + TextSearchInfo textSearchInfo, + Map meta, + MappedFieldType countFieldType + ) { + super(name, isIndexed, isStored, hasDocValues, textSearchInfo, meta); + this.countFieldType = countFieldType; + } + + @Override + public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { + return SourceValueFetcher.identity(name(), context, format); + } + + @Override + public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext) { + failIfNoDocValues(); + + return (cache, breakerService) -> new AbstractIndexOrdinalsFieldData( + name(), + CoreValuesSourceType.KEYWORD, + cache, + breakerService, + (dv, n) -> new KeywordDocValuesField(FieldData.toString(dv), n) + ) { + + @Override + public LeafOrdinalsFieldData load(LeafReaderContext context) { + final SortedSetDocValues dvValues; + final BinaryDocValues dvCounts; + try { + dvValues = DocValues.getSortedSet(context.reader(), getFieldName()); + dvCounts = DocValues.getBinary(context.reader(), countFieldType.name()); + } catch (IOException e) { + throw new UncheckedIOException("Unable to load " + CONTENT_TYPE + " doc values", e); + } + + return new AbstractLeafOrdinalsFieldData(toScriptFieldFactory) { + + @Override + public SortedSetDocValues getOrdinalsValues() { + return new CountedKeywordSortedBinaryDocValues(dvValues, dvCounts); + } + + @Override + public long ramBytesUsed() { + return 0; // Unknown + } + + @Override + public void close() { + // nothing to close + } + }; + } + + @Override + public LeafOrdinalsFieldData loadDirect(LeafReaderContext context) { + return load(context); + } + + @Override + public SortField sortField( + Object missingValue, + MultiValueMode sortMode, + XFieldComparatorSource.Nested nested, + boolean reverse + ) { + throw new UnsupportedOperationException("can't sort on the [" + CONTENT_TYPE + "] field"); + } + + @Override + public BucketedSort newBucketedSort( + BigArrays bigArrays, + Object missingValue, + MultiValueMode sortMode, + XFieldComparatorSource.Nested nested, + SortOrder sortOrder, + DocValueFormat format, + int bucketSize, + BucketedSort.ExtraData extra + ) { + throw new IllegalArgumentException("can't sort on the [" + CONTENT_TYPE + "] field"); + } + }; + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + } + + static class CountedKeywordSortedBinaryDocValues extends AbstractSortedSetDocValues { + private final SortedSetDocValues dvValues; + private final BinaryDocValues dvCounts; + private int sumCount; + private Iterator ordsForThisDoc; + private final ByteArrayStreamInput scratch = new ByteArrayStreamInput(); + + CountedKeywordSortedBinaryDocValues(SortedSetDocValues dvValues, BinaryDocValues dvCounts) { + this.dvValues = dvValues; + this.dvCounts = dvCounts; + } + + @Override + public boolean advanceExact(int doc) throws 
IOException { + sumCount = 0; + if (dvValues.advanceExact(doc)) { + boolean exactMatch = dvCounts.advanceExact(doc); + assert exactMatch; + + BytesRef encodedValue = dvCounts.binaryValue(); + scratch.reset(encodedValue.bytes, encodedValue.offset, encodedValue.length); + int[] counts = scratch.readVIntArray(); + assert counts.length == dvValues.docValueCount(); + + List values = new ArrayList<>(); + for (int count : counts) { + this.sumCount += count; + long ord = dvValues.nextOrd(); + for (int j = 0; j < count; j++) { + values.add(ord); + } + } + this.ordsForThisDoc = values.iterator(); + return true; + } else { + ordsForThisDoc = null; + return false; + } + } + + @Override + public int docValueCount() { + return sumCount; + } + + @Override + public long nextOrd() { + if (ordsForThisDoc.hasNext()) { + return ordsForThisDoc.next(); + } else { + return NO_MORE_ORDS; + } + } + + @Override + public BytesRef lookupOrd(long ord) throws IOException { + return dvValues.lookupOrd(ord); + } + + @Override + public long getValueCount() { + return dvValues.getValueCount(); + } + + @Override + public TermsEnum termsEnum() throws IOException { + return dvValues.termsEnum(); + } + } + + public static class Builder extends FieldMapper.Builder { + private final Parameter> meta = Parameter.metaParam(); + + protected Builder(String name) { + super(name); + } + + @Override + protected Parameter[] getParameters() { + return new Parameter[] { meta }; + } + + @Override + public FieldMapper build(MapperBuilderContext context) { + + BinaryFieldMapper countFieldMapper = new BinaryFieldMapper.Builder(name + COUNT_FIELD_NAME_SUFFIX, true).build(context); + return new CountedKeywordFieldMapper( + name, + FIELD_TYPE, + new CountedKeywordFieldType( + context.buildFullName(name), + true, + false, + true, + new TextSearchInfo(FIELD_TYPE, null, KEYWORD_ANALYZER, KEYWORD_ANALYZER), + meta.getValue(), + countFieldMapper.fieldType() + ), + multiFieldsBuilder.build(this, context), + copyTo, + countFieldMapper + ); + } + } + + public static TypeParser PARSER = new TypeParser((n, c) -> new CountedKeywordFieldMapper.Builder(n)); + + private final FieldType fieldType; + private final BinaryFieldMapper countFieldMapper; + + protected CountedKeywordFieldMapper( + String simpleName, + FieldType fieldType, + MappedFieldType mappedFieldType, + MultiFields multiFields, + CopyTo copyTo, + BinaryFieldMapper countFieldMapper + ) { + super(simpleName, mappedFieldType, multiFields, copyTo); + this.fieldType = fieldType; + this.countFieldMapper = countFieldMapper; + } + + @Override + public boolean parsesArrayValue() { + return true; + } + + @Override + protected void parseCreateField(DocumentParserContext context) throws IOException { + XContentParser parser = context.parser(); + SortedMap values = new TreeMap<>(); + if (parser.currentToken() == XContentParser.Token.VALUE_NULL) { + return; + } + if (parser.currentToken() == XContentParser.Token.START_ARRAY) { + parseArray(context, values); + } else if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { + parseValue(parser, values); + } else { + throw new IllegalArgumentException("Encountered unexpected token [" + parser.currentToken() + "]."); + } + int i = 0; + int[] counts = new int[values.size()]; + for (Map.Entry value : values.entrySet()) { + context.doc().add(new KeywordFieldMapper.KeywordField(name(), new BytesRef(value.getKey()), fieldType)); + counts[i++] = value.getValue(); + } + BytesStreamOutput streamOutput = new BytesStreamOutput(); + 
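// The distinct terms were just added from a TreeMap, i.e. in sorted order, so counts[i] below lines
+ // up with the i-th per-document ordinal of this field's SortedSetDocValues. The read side
+ // (CountedKeywordSortedBinaryDocValues#advanceExact) relies on that alignment when it expands each
+ // ordinal counts[i] times.
+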
streamOutput.writeVIntArray(counts); + context.doc().add(new BinaryDocValuesField(countFieldMapper.name(), streamOutput.bytes().toBytesRef())); + } + + private void parseArray(DocumentParserContext context, SortedMap values) throws IOException { + XContentParser parser = context.parser(); + while (true) { + XContentParser.Token token = parser.nextToken(); + if (token == XContentParser.Token.END_ARRAY) { + return; + } + if (token == XContentParser.Token.VALUE_STRING) { + parseValue(parser, values); + } else if (token == XContentParser.Token.VALUE_NULL) { + // ignore null values + } else { + throw new IllegalArgumentException("Encountered unexpected token [" + token + "]."); + } + } + } + + private static void parseValue(XContentParser parser, SortedMap values) throws IOException { + String value = parser.text(); + if (values.containsKey(value) == false) { + values.put(value, 1); + } else { + values.put(value, values.get(value) + 1); + } + } + + @Override + public Iterator iterator() { + List mappers = new ArrayList<>(); + Iterator m = super.iterator(); + while (m.hasNext()) { + mappers.add(m.next()); + } + mappers.add(countFieldMapper); + return mappers.iterator(); + } + + @Override + public FieldMapper.Builder getMergeBuilder() { + return new Builder(simpleName()).init(this); + } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } +} diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordMapperPlugin.java b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordMapperPlugin.java new file mode 100644 index 0000000000000..62fb10be05f9d --- /dev/null +++ b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordMapperPlugin.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.countedkeyword; + +import org.elasticsearch.index.mapper.Mapper; +import org.elasticsearch.plugins.MapperPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.SearchPlugin; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +/** + *

<p>This plugin adds two associated features:</p>
+ *
+ * <ol>
+ *     <li>The mapping type counted_keyword that behaves like keyword except that it counts duplicate values.</li>
+ *     <li>The counted_terms aggregation that operates on fields mapped as counted_keyword and considers
+ *     duplicate values in the doc_count that it returns.</li>
+ * </ol>
+ *
+ * <p>Both features are considered a tech preview and are thus intentionally undocumented.</p>
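+ *
+ * <p>A minimal usage sketch (index and field names here are examples, not part of the plugin): map a field as
+ * "events": { "type": "counted_keyword" } and aggregate on it with:</p>
+ * <pre>
+ * "aggs": {
+ *   "event_counts": {
+ *     "counted_terms": { "field": "events", "size": 10 }
+ *   }
+ * }
+ * </pre>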

    + */ +public class CountedKeywordMapperPlugin extends Plugin implements MapperPlugin, SearchPlugin { + @Override + public Map getMappers() { + Map mappers = new LinkedHashMap<>(); + mappers.put(CountedKeywordFieldMapper.CONTENT_TYPE, CountedKeywordFieldMapper.PARSER); + return Collections.unmodifiableMap(mappers); + } + + @Override + public List getAggregations() { + List specs = new ArrayList<>(); + specs.add( + new SearchPlugin.AggregationSpec( + CountedTermsAggregationBuilder.NAME, + CountedTermsAggregationBuilder::new, + CountedTermsAggregationBuilder.PARSER + ).setAggregatorRegistrar(CountedTermsAggregationBuilder::registerAggregators) + ); + return List.copyOf(specs); + } +} diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilder.java b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilder.java new file mode 100644 index 0000000000000..72e3eb4efacf9 --- /dev/null +++ b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilder.java @@ -0,0 +1,160 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.countedkeyword; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator; +import org.elasticsearch.search.aggregations.support.AggregationContext; +import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; +import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; +import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; +import org.elasticsearch.search.aggregations.support.ValuesSourceType; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +public class CountedTermsAggregationBuilder extends ValuesSourceAggregationBuilder { + public static final String NAME = "counted_terms"; + static final ValuesSourceRegistry.RegistryKey REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey<>( + NAME, + CountedTermsAggregatorSupplier.class + ); + + public static final ParseField REQUIRED_SIZE_FIELD_NAME = new ParseField("size"); + + public static final ObjectParser PARSER = ObjectParser.fromBuilder( + NAME, + CountedTermsAggregationBuilder::new + ); + static { + ValuesSourceAggregationBuilder.declareFields(PARSER, true, true, false); + + PARSER.declareInt(CountedTermsAggregationBuilder::size, REQUIRED_SIZE_FIELD_NAME); + } + + // see TermsAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS + private TermsAggregator.BucketCountThresholds 
bucketCountThresholds = new TermsAggregator.BucketCountThresholds(1, 0, 10, -1); + + public CountedTermsAggregationBuilder(String name) { + super(name); + } + + protected CountedTermsAggregationBuilder( + ValuesSourceAggregationBuilder clone, + AggregatorFactories.Builder factoriesBuilder, + Map metadata + ) { + super(clone, factoriesBuilder, metadata); + } + + protected CountedTermsAggregationBuilder(StreamInput in) throws IOException { + super(in); + bucketCountThresholds = new TermsAggregator.BucketCountThresholds(in); + } + + public static void registerAggregators(ValuesSourceRegistry.Builder builder) { + CountedTermsAggregatorFactory.registerAggregators(builder); + } + + public CountedTermsAggregationBuilder size(int size) { + if (size <= 0) { + throw new IllegalArgumentException("[size] must be greater than 0. Found [" + size + "] in [" + name + "]"); + } + bucketCountThresholds.setRequiredSize(size); + return this; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.COUNTED_KEYWORD_ADDED; + } + + @Override + protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBuilder, Map metadata) { + return new CountedTermsAggregationBuilder(this, factoriesBuilder, metadata); + } + + @Override + public BucketCardinality bucketCardinality() { + return BucketCardinality.MANY; + } + + @Override + public String getType() { + return NAME; + } + + @Override + protected void innerWriteTo(StreamOutput out) throws IOException { + bucketCountThresholds.writeTo(out); + } + + @Override + protected ValuesSourceType defaultValueSourceType() { + return CoreValuesSourceType.KEYWORD; + } + + @Override + protected ValuesSourceAggregatorFactory innerBuild( + AggregationContext context, + ValuesSourceConfig config, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder + ) throws IOException { + CountedTermsAggregatorSupplier aggregatorSupplier = context.getValuesSourceRegistry().getAggregator(REGISTRY_KEY, config); + return new CountedTermsAggregatorFactory( + name, + config, + bucketCountThresholds, + context, + parent, + subFactoriesBuilder, + metadata, + aggregatorSupplier + ); + } + + @Override + protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + // expose only size in XContent as only size can be set externally + builder.field(REQUIRED_SIZE_FIELD_NAME.getPreferredName(), bucketCountThresholds.getRequiredSize()); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (super.equals(o) == false) { + return false; + } + CountedTermsAggregationBuilder that = (CountedTermsAggregationBuilder) o; + return Objects.equals(bucketCountThresholds, that.bucketCountThresholds); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), bucketCountThresholds); + } +} diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregator.java b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregator.java new file mode 100644 index 0000000000000..5e1b1e3624f00 --- /dev/null +++ b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregator.java @@ -0,0 +1,175 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.countedkeyword; + +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.AggregationExecutionContext; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.CardinalityUpperBound; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalOrder; +import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; +import org.elasticsearch.search.aggregations.bucket.terms.BucketPriorityQueue; +import org.elasticsearch.search.aggregations.bucket.terms.BytesKeyedBucketOrds; +import org.elasticsearch.search.aggregations.bucket.terms.InternalTerms; +import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator; +import org.elasticsearch.search.aggregations.support.AggregationContext; +import org.elasticsearch.search.aggregations.support.ValuesSource; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Map; +import java.util.function.BiConsumer; +import java.util.function.Supplier; + +import static java.util.Collections.emptyList; +import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS; +import static org.elasticsearch.search.aggregations.InternalOrder.isKeyOrder; + +class CountedTermsAggregator extends TermsAggregator { + private final BytesKeyedBucketOrds bucketOrds; + protected final ValuesSource.Bytes.WithOrdinals valuesSource; + + @SuppressWarnings("this-escape") + CountedTermsAggregator( + String name, + AggregatorFactories factories, + ValuesSource.Bytes.WithOrdinals valuesSource, + BucketOrder order, + DocValueFormat format, + BucketCountThresholds bucketCountThresholds, + AggregationContext context, + Aggregator parent, + CardinalityUpperBound cardinality, + Map metadata + ) throws IOException { + super(name, factories, context, parent, bucketCountThresholds, order, format, SubAggCollectionMode.DEPTH_FIRST, metadata); + this.valuesSource = valuesSource; + this.bucketOrds = BytesKeyedBucketOrds.build(context.bigArrays(), cardinality); + } + + @Override + public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, LeafBucketCollector sub) throws IOException { + SortedSetDocValues ords = valuesSource.ordinalsValues(aggCtx.getLeafReaderContext()); + return new LeafBucketCollectorBase(sub, ords) { + + @Override + public void collect(int doc, long owningBucketOrd) throws IOException { + if (ords.advanceExact(doc) == false) { + return; + } + for (long ord = ords.nextOrd(); ord != NO_MORE_ORDS; ord = ords.nextOrd()) { + long bucketOrdinal = bucketOrds.add(owningBucketOrd, ords.lookupOrd(ord)); + if (bucketOrdinal < 0) { // already seen + bucketOrdinal = -1 - bucketOrdinal; + collectExistingBucket(sub, doc, bucketOrdinal); + } else { + collectBucket(sub, doc, bucketOrdinal); + } + } + } + }; + } + + @Override + public InternalAggregation[] buildAggregations(long[] 
owningBucketOrds) throws IOException { + StringTerms.Bucket[][] topBucketsPerOrd = new StringTerms.Bucket[owningBucketOrds.length][]; + long[] otherDocCounts = new long[owningBucketOrds.length]; + for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { + int size = (int) Math.min(bucketOrds.size(), bucketCountThresholds.getShardSize()); + + // as users can't control sort order, in practice we'll always sort by doc count descending + BucketPriorityQueue ordered = new BucketPriorityQueue<>(size, partiallyBuiltBucketComparator); + StringTerms.Bucket spare = null; + BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); + Supplier emptyBucketBuilder = () -> new StringTerms.Bucket(new BytesRef(), 0, null, false, 0, format); + while (ordsEnum.next()) { + long docCount = bucketDocCount(ordsEnum.ord()); + otherDocCounts[ordIdx] += docCount; + if (spare == null) { + spare = emptyBucketBuilder.get(); + } + ordsEnum.readValue(spare.getTermBytes()); + spare.setDocCount(docCount); + spare.setBucketOrd(ordsEnum.ord()); + spare = ordered.insertWithOverflow(spare); + } + + topBucketsPerOrd[ordIdx] = new StringTerms.Bucket[ordered.size()]; + for (int i = ordered.size() - 1; i >= 0; --i) { + topBucketsPerOrd[ordIdx][i] = ordered.pop(); + otherDocCounts[ordIdx] -= topBucketsPerOrd[ordIdx][i].getDocCount(); + topBucketsPerOrd[ordIdx][i].setTermBytes(BytesRef.deepCopyOf(topBucketsPerOrd[ordIdx][i].getTermBytes())); + } + } + + buildSubAggsForAllBuckets(topBucketsPerOrd, InternalTerms.Bucket::getBucketOrd, InternalTerms.Bucket::setAggregations); + InternalAggregation[] result = new InternalAggregation[owningBucketOrds.length]; + for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { + final BucketOrder reduceOrder; + if (isKeyOrder(order) == false) { + reduceOrder = InternalOrder.key(true); + Arrays.sort(topBucketsPerOrd[ordIdx], reduceOrder.comparator()); + } else { + reduceOrder = order; + } + result[ordIdx] = new StringTerms( + name, + reduceOrder, + order, + bucketCountThresholds.getRequiredSize(), + bucketCountThresholds.getMinDocCount(), + metadata(), + format, + bucketCountThresholds.getShardSize(), + false, + otherDocCounts[ordIdx], + Arrays.asList(topBucketsPerOrd[ordIdx]), + null + ); + } + return result; + } + + @Override + public InternalAggregation buildEmptyAggregation() { + return new StringTerms( + name, + order, + order, + bucketCountThresholds.getRequiredSize(), + bucketCountThresholds.getMinDocCount(), + metadata(), + format, + bucketCountThresholds.getShardSize(), + false, + 0, + emptyList(), + 0L + ); + } + + @Override + public void collectDebugInfo(BiConsumer add) { + super.collectDebugInfo(add); + add.accept("total_buckets", bucketOrds.size()); + } + + @Override + protected void doClose() { + Releasables.close(bucketOrds); + } + +} diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorFactory.java b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorFactory.java new file mode 100644 index 0000000000000..3b8be76f14da8 --- /dev/null +++ b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorFactory.java @@ -0,0 +1,127 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.countedkeyword; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.CardinalityUpperBound; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.NonCollectingAggregator; +import org.elasticsearch.search.aggregations.bucket.BucketUtils; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator; +import org.elasticsearch.search.aggregations.bucket.terms.UnmappedTerms; +import org.elasticsearch.search.aggregations.support.AggregationContext; +import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; +import org.elasticsearch.search.aggregations.support.SamplingContext; +import org.elasticsearch.search.aggregations.support.ValuesSource; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; +import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; +import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +class CountedTermsAggregatorFactory extends ValuesSourceAggregatorFactory { + private final BucketOrder order = BucketOrder.count(false); + private final CountedTermsAggregatorSupplier supplier; + private final TermsAggregator.BucketCountThresholds bucketCountThresholds; + + static void registerAggregators(ValuesSourceRegistry.Builder builder) { + builder.register( + CountedTermsAggregationBuilder.REGISTRY_KEY, + List.of(CoreValuesSourceType.KEYWORD), + CountedTermsAggregatorFactory.bytesSupplier(), + true + ); + } + + /** + * This supplier is used for all the field types that should be aggregated as bytes/strings, + * including those that need global ordinals + */ + static CountedTermsAggregatorSupplier bytesSupplier() { + return (name, factories, valuesSourceConfig, order, bucketCountThresholds, context, parent, cardinality, metadata) -> { + + assert valuesSourceConfig.getValuesSource() instanceof ValuesSource.Bytes.WithOrdinals; + ValuesSource.Bytes.WithOrdinals ordinalsValuesSource = (ValuesSource.Bytes.WithOrdinals) valuesSourceConfig.getValuesSource(); + + return new CountedTermsAggregator( + name, + factories, + ordinalsValuesSource, + order, + valuesSourceConfig.format(), + bucketCountThresholds, + context, + parent, + cardinality, + metadata + ); + }; + } + + CountedTermsAggregatorFactory( + String name, + ValuesSourceConfig config, + TermsAggregator.BucketCountThresholds bucketCountThresholds, + AggregationContext context, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metadata, + CountedTermsAggregatorSupplier supplier + ) throws IOException { + super(name, config, context, parent, subFactoriesBuilder, metadata); + this.bucketCountThresholds = bucketCountThresholds; + this.supplier = supplier; + } + + @Override + protected Aggregator createUnmapped(Aggregator parent, Map metadata) throws IOException { + final InternalAggregation aggregation = new UnmappedTerms( + name, + order, + 
bucketCountThresholds.getRequiredSize(), + bucketCountThresholds.getMinDocCount(), + metadata + ); + return new NonCollectingAggregator(name, context, parent, factories, metadata) { + @Override + public InternalAggregation buildEmptyAggregation() { + return aggregation; + } + }; + } + + @Override + protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound cardinality, Map metadata) + throws IOException { + bucketCountThresholds.setShardSize(BucketUtils.suggestShardSideQueueSize(bucketCountThresholds.getRequiredSize())); + // If min_doc_count and shard_min_doc_count is provided, we do not support them being larger than 1 + // This is because we cannot be sure about their relative scale when sampled + if (getSamplingContext().map(SamplingContext::isSampled).orElse(false)) { + if (bucketCountThresholds.getMinDocCount() > 1 || bucketCountThresholds.getShardMinDocCount() > 1) { + throw new ElasticsearchStatusException( + "aggregation [{}] is within a sampling context; " + + "min_doc_count, provided [{}], and min_shard_doc_count, provided [{}], cannot be greater than 1", + RestStatus.BAD_REQUEST, + name(), + bucketCountThresholds.getMinDocCount(), + bucketCountThresholds.getShardMinDocCount() + ); + } + } + bucketCountThresholds.ensureValidity(); + + return supplier.build(name, factories, config, order, bucketCountThresholds, context, parent, cardinality, metadata); + } +} diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorSupplier.java b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorSupplier.java new file mode 100644 index 0000000000000..2817863f6b42c --- /dev/null +++ b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorSupplier.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.countedkeyword; + +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.CardinalityUpperBound; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator; +import org.elasticsearch.search.aggregations.support.AggregationContext; +import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; + +import java.io.IOException; +import java.util.Map; + +interface CountedTermsAggregatorSupplier { + Aggregator build( + String name, + AggregatorFactories factories, + ValuesSourceConfig valuesSourceConfig, + BucketOrder order, + TermsAggregator.BucketCountThresholds bucketCountThresholds, + AggregationContext context, + Aggregator parent, + CardinalityUpperBound cardinality, + Map metadata + ) throws IOException; +} diff --git a/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapperTests.java b/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapperTests.java new file mode 100644 index 0000000000000..1468ed456b132 --- /dev/null +++ b/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapperTests.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.countedkeyword; + +import org.apache.lucene.index.IndexableField; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.MapperTestCase; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.xcontent.XContentBuilder; +import org.junit.AssumptionViolatedException; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +public class CountedKeywordFieldMapperTests extends MapperTestCase { + @Override + protected Collection getPlugins() { + return Collections.singletonList(new CountedKeywordMapperPlugin()); + } + + @Override + protected void minimalMapping(XContentBuilder b) throws IOException { + b.field("type", CountedKeywordFieldMapper.CONTENT_TYPE); + } + + @Override + protected Object getSampleValueForDocument() { + return new String[] { "a", "a", "b", "c" }; + } + + @Override + protected Object getSampleValueForQuery() { + return "b"; + } + + @Override + protected boolean supportsIgnoreMalformed() { + return false; + } + + @Override + protected boolean supportsStoredFields() { + return false; + } + + @Override + protected void registerParameters(ParameterChecker checker) { + // Nothing to do + } + + @Override + protected Object generateRandomInputValue(MappedFieldType ft) { + return randomBoolean() ? 
null : randomAlphaOfLengthBetween(1, 10); + } + + @Override + protected SyntheticSourceSupport syntheticSourceSupport(boolean ignoreMalformed) { + throw new AssumptionViolatedException("not supported"); + } + + @Override + protected IngestScriptSupport ingestScriptSupport() { + throw new AssumptionViolatedException("not supported"); + } + + public void testDottedFieldNames() throws IOException { + DocumentMapper mapper = createDocumentMapper(mapping(b -> { + b.startObject("dotted.field"); + b.field("type", CountedKeywordFieldMapper.CONTENT_TYPE); + b.endObject(); + })); + ParsedDocument doc = mapper.parse(source(b -> b.field("dotted.field", "1234"))); + List fields = doc.rootDoc().getFields("dotted.field"); + assertEquals(1, fields.size()); + } +} diff --git a/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldTypeTests.java b/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldTypeTests.java new file mode 100644 index 0000000000000..c29e4513562fc --- /dev/null +++ b/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldTypeTests.java @@ -0,0 +1,176 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.countedkeyword; + +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.List; + +public class CountedKeywordFieldTypeTests extends ESTestCase { + public void testSingleValuedField() throws Exception { + SortedSetDocValues sd = new CollectionBasedSortedSetDocValues(List.of(new BytesRef("a"))); + BinaryDocValues bd = new CollectionBasedBinaryDocValues(List.of(toBytesRef(new int[] { 3 }))); + + CountedKeywordFieldMapper.CountedKeywordSortedBinaryDocValues dv = + new CountedKeywordFieldMapper.CountedKeywordSortedBinaryDocValues(sd, bd); + + assertTrue(dv.advanceExact(0)); + + assertEquals(3, dv.docValueCount()); + + assertOrdinal(dv, "a", 3); + } + + public void testMultiValuedField() throws Exception { + SortedSetDocValues sd = new CollectionBasedSortedSetDocValues(List.of(new BytesRef("a"), new BytesRef("b"))); + BinaryDocValues bd = new CollectionBasedBinaryDocValues(List.of(toBytesRef(new int[] { 1, 5 }))); + + CountedKeywordFieldMapper.CountedKeywordSortedBinaryDocValues dv = + new CountedKeywordFieldMapper.CountedKeywordSortedBinaryDocValues(sd, bd); + + assertTrue(dv.advanceExact(0)); + + assertEquals(6, dv.docValueCount()); + + assertOrdinal(dv, "a", 1); + assertOrdinal(dv, "b", 5); + } + + private void assertOrdinal(CountedKeywordFieldMapper.CountedKeywordSortedBinaryDocValues dv, String value, int times) + throws IOException { + for (int i = 0; i < times; i++) { + long ordinal = dv.nextOrd(); + assertNotEquals(DocIdSetIterator.NO_MORE_DOCS, ordinal); + assertEquals(new BytesRef(value), dv.lookupOrd(ordinal)); + } + } + + private BytesRef toBytesRef(int[] counts) throws IOException { + try (BytesStreamOutput streamOutput = new BytesStreamOutput()) { + streamOutput.writeVIntArray(counts); + 
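// Mirrors the count encoding written by CountedKeywordFieldMapper#parseCreateField: a single vint
+ // array with one count per distinct value, in the same order as the ordinals of the sorted-set
+ // doc values stub used by these tests.
+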
return streamOutput.bytes().toBytesRef(); + } + } + + private static class CollectionBasedSortedSetDocValues extends SortedSetDocValues { + private final List docValues; + + private final DocIdSetIterator disi; + + private long currentOrd = -1; + + private CollectionBasedSortedSetDocValues(List docValues) { + this.docValues = docValues; + this.disi = DocIdSetIterator.all(docValues.size()); + } + + @Override + public long nextOrd() { + currentOrd++; + if (currentOrd >= docValues.size()) { + return NO_MORE_ORDS; + } + return currentOrd; + } + + @Override + public int docValueCount() { + return docValues.size(); + } + + @Override + public BytesRef lookupOrd(long ord) throws IOException { + return docValues.get((int) ord); + } + + @Override + public long getValueCount() { + return docValues.size(); + } + + @Override + public boolean advanceExact(int target) throws IOException { + currentOrd = -1; + return disi.advance(target) == target; + } + + @Override + public int docID() { + return disi.docID(); + } + + @Override + public int nextDoc() throws IOException { + currentOrd = -1; + return disi.nextDoc(); + } + + @Override + public int advance(int target) throws IOException { + currentOrd = -1; + return disi.advance(target); + } + + @Override + public long cost() { + return disi.cost(); + } + } + + private static class CollectionBasedBinaryDocValues extends BinaryDocValues { + private final List docValues; + private final DocIdSetIterator disi; + + private int current = -1; + + private CollectionBasedBinaryDocValues(List docValues) { + this.docValues = docValues; + this.disi = DocIdSetIterator.all(docValues.size()); + } + + @Override + public BytesRef binaryValue() { + return docValues.get(current); + } + + @Override + public boolean advanceExact(int target) throws IOException { + current = target; + return disi.advance(target) == target; + } + + @Override + public int docID() { + return disi.docID(); + } + + @Override + public int nextDoc() throws IOException { + current = -1; + return disi.nextDoc(); + } + + @Override + public int advance(int target) throws IOException { + current = -1; + return disi.advance(target); + } + + @Override + public long cost() { + return disi.cost(); + } + } + +} diff --git a/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilderTests.java b/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilderTests.java new file mode 100644 index 0000000000000..ba266e82fecc8 --- /dev/null +++ b/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilderTests.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.countedkeyword; + +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.aggregations.BaseAggregationTestCase; + +import java.util.Collection; +import java.util.Collections; + +public class CountedTermsAggregationBuilderTests extends BaseAggregationTestCase { + @Override + protected Collection> getPlugins() { + return Collections.singletonList(CountedKeywordMapperPlugin.class); + } + + @Override + protected CountedTermsAggregationBuilder createTestAggregatorBuilder() { + return new CountedTermsAggregationBuilder(randomAlphaOfLengthBetween(1, 10)).field(randomAlphaOfLength(7)); + } +} diff --git a/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorTests.java b/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorTests.java new file mode 100644 index 0000000000000..02d629c7604ac --- /dev/null +++ b/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorTests.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.countedkeyword; + +import org.apache.lucene.index.IndexableField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.MapperBuilderContext; +import org.elasticsearch.index.mapper.MappingLookup; +import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.index.mapper.TestDocumentParserContext; +import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.bucket.terms.InternalTerms; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class CountedTermsAggregatorTests extends AggregatorTestCase { + @Override + protected List getSearchPlugins() { + return Collections.singletonList(new CountedKeywordMapperPlugin()); + } + + public void testAggregatesCountedKeywords() throws Exception { + FieldMapper mapper = new CountedKeywordFieldMapper.Builder("stacktraces").build(MapperBuilderContext.root(false, false)); + MappedFieldType fieldType = mapper.fieldType(); + + CountedTermsAggregationBuilder aggregationBuilder = new CountedTermsAggregationBuilder("st").field("stacktraces"); + testCase(iw -> { + iw.addDocument(doc(mapper, "a", null, "a", "b")); + iw.addDocument(doc(mapper, "b", "c", "d")); + iw.addDocument(doc(mapper, new String[] { null })); + + }, (InternalTerms result) -> { + // note how any nulls are ignored + Map expectedBuckets = Map.of("a", 2L, "b", 2L, "c", 1L, "d", 1L); + assertEquals("Bucket count does not match", expectedBuckets.size(), result.getBuckets().size()); + + Set seenUniqueKeys = new HashSet<>(); + for 
(InternalTerms.Bucket bucket : result.getBuckets()) { + String k = bucket.getKeyAsString(); + assertTrue("Unexpected bucket key [" + k + "]", expectedBuckets.containsKey(k)); + assertEquals(expectedBuckets.get(k).longValue(), bucket.getDocCount()); + seenUniqueKeys.add(k); + } + // ensure no duplicate keys + assertEquals("Every bucket key must be unique", expectedBuckets.size(), seenUniqueKeys.size()); + assertTrue(AggregationInspectionHelper.hasValue(result)); + }, new AggTestConfig(aggregationBuilder, fieldType)); + } + + private List doc(FieldMapper mapper, String... values) { + // quote regular strings but keep null values unquoted so they are not treated as regular strings + List quotedValues = Arrays.stream(values).map(v -> v != null ? "\"" + v + "\"" : v).toList(); + String source = "[" + Strings.collectionToCommaDelimitedString(quotedValues) + "]"; + try { + XContentParser parser = createParser(JsonXContent.jsonXContent, source); + // move to first token + parser.nextToken(); + TestDocumentParserContext ctx = new TestDocumentParserContext( + MappingLookup.EMPTY, + new SourceToParse("test", new BytesArray(source), XContentType.JSON) + ) { + @Override + public XContentParser parser() { + return parser; + } + }; + mapper.parse(ctx); + return ctx.doc().getFields(); + } catch (IOException e) { + throw new AssertionError(e); + } + } +} diff --git a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java index 90c055f3e77bb..97ffd50d5b8c3 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java +++ b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java @@ -319,12 +319,12 @@ public Query rangeQuery( public BlockLoader blockLoader(BlockLoaderContext blContext) { if (indexMode == IndexMode.TIME_SERIES && metricType == TimeSeriesParams.MetricType.COUNTER) { // Counters are not supported by ESQL so we load them in null - return BlockDocValuesReader.nulls(); + return BlockLoader.CONSTANT_NULLS; } if (hasDocValues()) { - return BlockDocValuesReader.longs(name()); + return new BlockDocValuesReader.LongsBlockLoader(name()); } - return BlockSourceReader.longs(new SourceValueFetcher(blContext.sourcePaths(name()), nullValueFormatted) { + return new BlockSourceReader.LongsBlockLoader(new SourceValueFetcher(blContext.sourcePaths(name()), nullValueFormatted) { @Override protected Object parseSourceValue(Object value) { if (value.equals("")) { diff --git a/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongTests.java b/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongTests.java index 0b0af8f8f9acf..3407a71f23265 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongTests.java +++ b/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongTests.java @@ -78,8 +78,8 @@ public void setupSuiteScopeCluster() throws Exception { prepareCreate("idx-sort").setMapping("ul_field", "type=unsigned_long").setSettings(sortSettings).get(); List builders = new ArrayList<>(); for (int i = 0; i < numDocs; i++) { - builders.add(client().prepareIndex("idx").setSource(jsonBuilder().startObject().field("ul_field", 
values[i]).endObject())); - builders.add(client().prepareIndex("idx-sort").setSource(jsonBuilder().startObject().field("ul_field", values[i]).endObject())); + builders.add(prepareIndex("idx").setSource(jsonBuilder().startObject().field("ul_field", values[i]).endObject())); + builders.add(prepareIndex("idx-sort").setSource(jsonBuilder().startObject().field("ul_field", values[i]).endObject())); } indexRandom(true, builders); diff --git a/x-pack/plugin/mapper-version/src/internalClusterTest/java/org/elasticsearch/xpack/versionfield/VersionFieldIT.java b/x-pack/plugin/mapper-version/src/internalClusterTest/java/org/elasticsearch/xpack/versionfield/VersionFieldIT.java index 98f09a1b6fcae..0dc7ca8006f8a 100644 --- a/x-pack/plugin/mapper-version/src/internalClusterTest/java/org/elasticsearch/xpack/versionfield/VersionFieldIT.java +++ b/x-pack/plugin/mapper-version/src/internalClusterTest/java/org/elasticsearch/xpack/versionfield/VersionFieldIT.java @@ -59,14 +59,11 @@ public void testTermsAggregation() throws Exception { .get(); ensureGreen(); - client().prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("version", "1.0").endObject()).get(); - client().prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("version", "1.3.0").endObject()).get(); - client().prepareIndex(indexName) - .setId("3") - .setSource(jsonBuilder().startObject().field("version", "2.1.0-alpha").endObject()) - .get(); - client().prepareIndex(indexName).setId("4").setSource(jsonBuilder().startObject().field("version", "2.1.0").endObject()).get(); - client().prepareIndex(indexName).setId("5").setSource(jsonBuilder().startObject().field("version", "3.11.5").endObject()).get(); + prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("version", "1.0").endObject()).get(); + prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("version", "1.3.0").endObject()).get(); + prepareIndex(indexName).setId("3").setSource(jsonBuilder().startObject().field("version", "2.1.0-alpha").endObject()).get(); + prepareIndex(indexName).setId("4").setSource(jsonBuilder().startObject().field("version", "2.1.0").endObject()).get(); + prepareIndex(indexName).setId("5").setSource(jsonBuilder().startObject().field("version", "3.11.5").endObject()).get(); indicesAdmin().prepareRefresh().get(); // terms aggs @@ -104,14 +101,11 @@ public void testTermsEnum() throws Exception { .get(); ensureGreen(); - client().prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("version", "1.0").endObject()).get(); - client().prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("version", "1.3.0").endObject()).get(); - client().prepareIndex(indexName) - .setId("3") - .setSource(jsonBuilder().startObject().field("version", "2.1.0-alpha").endObject()) - .get(); - client().prepareIndex(indexName).setId("4").setSource(jsonBuilder().startObject().field("version", "2.1.0").endObject()).get(); - client().prepareIndex(indexName).setId("5").setSource(jsonBuilder().startObject().field("version", "3.11.5").endObject()).get(); + prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("version", "1.0").endObject()).get(); + prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("version", "1.3.0").endObject()).get(); + prepareIndex(indexName).setId("3").setSource(jsonBuilder().startObject().field("version", "2.1.0-alpha").endObject()).get(); + 
diff --git a/x-pack/plugin/mapper-version/src/internalClusterTest/java/org/elasticsearch/xpack/versionfield/VersionFieldIT.java b/x-pack/plugin/mapper-version/src/internalClusterTest/java/org/elasticsearch/xpack/versionfield/VersionFieldIT.java
index 98f09a1b6fcae..0dc7ca8006f8a 100644
--- a/x-pack/plugin/mapper-version/src/internalClusterTest/java/org/elasticsearch/xpack/versionfield/VersionFieldIT.java
+++ b/x-pack/plugin/mapper-version/src/internalClusterTest/java/org/elasticsearch/xpack/versionfield/VersionFieldIT.java
@@ -59,14 +59,11 @@ public void testTermsAggregation() throws Exception {
             .get();
         ensureGreen();
 
-        client().prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("version", "1.0").endObject()).get();
-        client().prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("version", "1.3.0").endObject()).get();
-        client().prepareIndex(indexName)
-            .setId("3")
-            .setSource(jsonBuilder().startObject().field("version", "2.1.0-alpha").endObject())
-            .get();
-        client().prepareIndex(indexName).setId("4").setSource(jsonBuilder().startObject().field("version", "2.1.0").endObject()).get();
-        client().prepareIndex(indexName).setId("5").setSource(jsonBuilder().startObject().field("version", "3.11.5").endObject()).get();
+        prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("version", "1.0").endObject()).get();
+        prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("version", "1.3.0").endObject()).get();
+        prepareIndex(indexName).setId("3").setSource(jsonBuilder().startObject().field("version", "2.1.0-alpha").endObject()).get();
+        prepareIndex(indexName).setId("4").setSource(jsonBuilder().startObject().field("version", "2.1.0").endObject()).get();
+        prepareIndex(indexName).setId("5").setSource(jsonBuilder().startObject().field("version", "3.11.5").endObject()).get();
         indicesAdmin().prepareRefresh().get();
 
         // terms aggs
@@ -104,14 +101,11 @@ public void testTermsEnum() throws Exception {
             .get();
         ensureGreen();
 
-        client().prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("version", "1.0").endObject()).get();
-        client().prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("version", "1.3.0").endObject()).get();
-        client().prepareIndex(indexName)
-            .setId("3")
-            .setSource(jsonBuilder().startObject().field("version", "2.1.0-alpha").endObject())
-            .get();
-        client().prepareIndex(indexName).setId("4").setSource(jsonBuilder().startObject().field("version", "2.1.0").endObject()).get();
-        client().prepareIndex(indexName).setId("5").setSource(jsonBuilder().startObject().field("version", "3.11.5").endObject()).get();
+        prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("version", "1.0").endObject()).get();
+        prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("version", "1.3.0").endObject()).get();
+        prepareIndex(indexName).setId("3").setSource(jsonBuilder().startObject().field("version", "2.1.0-alpha").endObject()).get();
+        prepareIndex(indexName).setId("4").setSource(jsonBuilder().startObject().field("version", "2.1.0").endObject()).get();
+        prepareIndex(indexName).setId("5").setSource(jsonBuilder().startObject().field("version", "3.11.5").endObject()).get();
         indicesAdmin().prepareRefresh().get();
 
         {
diff --git a/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java b/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java
index f4fb83fd9a91c..1ed63bb17e201 100644
--- a/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java
+++ b/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java
@@ -295,7 +295,7 @@ protected BytesRef indexedValueForSearch(Object value) {
     @Override
     public BlockLoader blockLoader(BlockLoaderContext blContext) {
         failIfNoDocValues();
-        return BlockDocValuesReader.bytesRefsFromOrds(name());
+        return new BlockDocValuesReader.BytesRefsFromOrdsBlockLoader(name());
     }
 
     @Override
diff --git a/x-pack/plugin/mapper-version/src/test/java/org/elasticsearch/xpack/versionfield/VersionStringFieldTests.java b/x-pack/plugin/mapper-version/src/test/java/org/elasticsearch/xpack/versionfield/VersionStringFieldTests.java
index 82d81e0c24f09..038f2b54965ce 100644
--- a/x-pack/plugin/mapper-version/src/test/java/org/elasticsearch/xpack/versionfield/VersionStringFieldTests.java
+++ b/x-pack/plugin/mapper-version/src/test/java/org/elasticsearch/xpack/versionfield/VersionStringFieldTests.java
@@ -40,18 +40,12 @@ public String setUpIndex(String indexName) throws IOException {
         createIndex(indexName, Settings.builder().put("index.number_of_shards", 1).build(), "_doc", "version", "type=version");
         ensureGreen(indexName);
 
-        client().prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("version", "11.1.0").endObject()).get();
-        client().prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("version", "1.0.0").endObject()).get();
-        client().prepareIndex(indexName)
-            .setId("3")
-            .setSource(jsonBuilder().startObject().field("version", "1.3.0+build.1234567").endObject())
-            .get();
-        client().prepareIndex(indexName)
-            .setId("4")
-            .setSource(jsonBuilder().startObject().field("version", "2.1.0-alpha.beta").endObject())
-            .get();
-        client().prepareIndex(indexName).setId("5").setSource(jsonBuilder().startObject().field("version", "2.1.0").endObject()).get();
-        client().prepareIndex(indexName).setId("6").setSource(jsonBuilder().startObject().field("version", "21.11.0").endObject()).get();
+        prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("version", "11.1.0").endObject()).get();
+        prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("version", "1.0.0").endObject()).get();
+        prepareIndex(indexName).setId("3").setSource(jsonBuilder().startObject().field("version", "1.3.0+build.1234567").endObject()).get();
+        prepareIndex(indexName).setId("4").setSource(jsonBuilder().startObject().field("version", "2.1.0-alpha.beta").endObject()).get();
+        prepareIndex(indexName).setId("5").setSource(jsonBuilder().startObject().field("version", "2.1.0").endObject()).get();
+        prepareIndex(indexName).setId("6").setSource(jsonBuilder().startObject().field("version", "21.11.0").endObject()).get();
         client().admin().indices().prepareRefresh(indexName).get();
         return indexName;
     }
@@ -168,8 +162,8 @@ public void testPrefixQuery() throws IOException {
     public void testSort() throws IOException {
         String indexName = setUpIndex("test");
         // also adding some invalid versions that should be sorted after legal ones
-        client().prepareIndex(indexName).setSource(jsonBuilder().startObject().field("version", "1.2.3alpha").endObject()).get();
-        client().prepareIndex(indexName).setSource(jsonBuilder().startObject().field("version", "1.3.567#12").endObject()).get();
+        prepareIndex(indexName).setSource(jsonBuilder().startObject().field("version", "1.2.3alpha").endObject()).get();
+        prepareIndex(indexName).setSource(jsonBuilder().startObject().field("version", "1.3.567#12").endObject()).get();
         client().admin().indices().prepareRefresh(indexName).get();
 
         // sort based on version field
@@ -206,20 +200,13 @@ public void testRegexQuery() throws Exception {
         createIndex(indexName, Settings.builder().put("index.number_of_shards", 1).build(), "_doc", "version", "type=version");
         ensureGreen(indexName);
 
-        client().prepareIndex(indexName)
-            .setId("1")
+        prepareIndex(indexName).setId("1")
             .setSource(jsonBuilder().startObject().field("version", "1.0.0alpha2.1.0-rc.1").endObject())
             .get();
-        client().prepareIndex(indexName)
-            .setId("2")
-            .setSource(jsonBuilder().startObject().field("version", "1.3.0+build.1234567").endObject())
-            .get();
-        client().prepareIndex(indexName)
-            .setId("3")
-            .setSource(jsonBuilder().startObject().field("version", "2.1.0-alpha.beta").endObject())
-            .get();
-        client().prepareIndex(indexName).setId("4").setSource(jsonBuilder().startObject().field("version", "2.1.0").endObject()).get();
-        client().prepareIndex(indexName).setId("5").setSource(jsonBuilder().startObject().field("version", "2.33.0").endObject()).get();
+        prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("version", "1.3.0+build.1234567").endObject()).get();
+        prepareIndex(indexName).setId("3").setSource(jsonBuilder().startObject().field("version", "2.1.0-alpha.beta").endObject()).get();
+        prepareIndex(indexName).setId("4").setSource(jsonBuilder().startObject().field("version", "2.1.0").endObject()).get();
+        prepareIndex(indexName).setId("5").setSource(jsonBuilder().startObject().field("version", "2.33.0").endObject()).get();
         client().admin().indices().prepareRefresh(indexName).get();
 
         SearchResponse response = client().prepareSearch(indexName).setQuery(QueryBuilders.regexpQuery("version", "2.*0")).get();
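[Editor's note] The two hunks above only reshape indexing boilerplate; the behaviour under test is unchanged. Note the retained comment in testSort: invalid values such as "1.2.3alpha" and "1.3.567#12" are accepted at index time and are expected to sort after all valid versions. The regex probe kept in the context lines, reflowed here for readability, is:

    SearchResponse response = client().prepareSearch(indexName)
        .setQuery(QueryBuilders.regexpQuery("version", "2.*0"))
        .get();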
@@ -259,21 +246,14 @@ public void testFuzzyQuery() throws Exception {
         createIndex(indexName, Settings.builder().put("index.number_of_shards", 1).build(), "_doc", "version", "type=version");
         ensureGreen(indexName);
 
-        client().prepareIndex(indexName)
-            .setId("1")
+        prepareIndex(indexName).setId("1")
            .setSource(jsonBuilder().startObject().field("version", "1.0.0-alpha.2.1.0-rc.1").endObject())
            .get();
-        client().prepareIndex(indexName)
-            .setId("2")
-            .setSource(jsonBuilder().startObject().field("version", "1.3.0+build.1234567").endObject())
-            .get();
-        client().prepareIndex(indexName)
-            .setId("3")
-            .setSource(jsonBuilder().startObject().field("version", "2.1.0-alpha.beta").endObject())
-            .get();
-        client().prepareIndex(indexName).setId("4").setSource(jsonBuilder().startObject().field("version", "2.1.0").endObject()).get();
-        client().prepareIndex(indexName).setId("5").setSource(jsonBuilder().startObject().field("version", "2.33.0").endObject()).get();
-        client().prepareIndex(indexName).setId("6").setSource(jsonBuilder().startObject().field("version", "2.a3.0").endObject()).get();
+        prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("version", "1.3.0+build.1234567").endObject()).get();
+        prepareIndex(indexName).setId("3").setSource(jsonBuilder().startObject().field("version", "2.1.0-alpha.beta").endObject()).get();
+        prepareIndex(indexName).setId("4").setSource(jsonBuilder().startObject().field("version", "2.1.0").endObject()).get();
+        prepareIndex(indexName).setId("5").setSource(jsonBuilder().startObject().field("version", "2.33.0").endObject()).get();
+        prepareIndex(indexName).setId("6").setSource(jsonBuilder().startObject().field("version", "2.a3.0").endObject()).get();
         client().admin().indices().prepareRefresh(indexName).get();
 
         SearchResponse response = client().prepareSearch(indexName).setQuery(QueryBuilders.fuzzyQuery("version", "2.3.0")).get();
@@ -306,7 +286,7 @@ public void testWildcardQuery() throws Exception {
             "3.1.1+b",
             "3.1.123"
         )) {
-            client().prepareIndex(indexName).setSource(jsonBuilder().startObject().field("version", version).endObject()).get();
+            prepareIndex(indexName).setSource(jsonBuilder().startObject().field("version", version).endObject()).get();
         }
         client().admin().indices().prepareRefresh(indexName).get();
 
@@ -354,16 +334,10 @@ public void testStoreMalformed() throws Exception {
         createIndex(indexName, Settings.builder().put("index.number_of_shards", 1).build(), "_doc", "version", "type=version");
         ensureGreen(indexName);
 
-        client().prepareIndex(indexName)
-            .setId("1")
-            .setSource(jsonBuilder().startObject().field("version", "1.invalid.0").endObject())
-            .get();
-        client().prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("version", "2.2.0").endObject()).get();
-        client().prepareIndex(indexName)
-            .setId("3")
-            .setSource(jsonBuilder().startObject().field("version", "2.2.0-badchar!").endObject())
-            .get();
-        client().prepareIndex(indexName).setId("4").setSource(jsonBuilder().startObject().field("version", "").endObject()).get();
+        prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("version", "1.invalid.0").endObject()).get();
+        prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("version", "2.2.0").endObject()).get();
+        prepareIndex(indexName).setId("3").setSource(jsonBuilder().startObject().field("version", "2.2.0-badchar!").endObject()).get();
+        prepareIndex(indexName).setId("4").setSource(jsonBuilder().startObject().field("version", "").endObject()).get();
         client().admin().indices().prepareRefresh(indexName).get();
 
         SearchResponse response = client().prepareSearch(indexName).addDocValueField("version").get();
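[Editor's note] testStoreMalformed above keeps its premise intact: malformed versions ("1.invalid.0", "2.2.0-badchar!", and the empty string) index successfully, and the retained context line reads them back as doc values:

    // condensed from the retained context line; stored values come back via the doc-value field
    SearchResponse response = client().prepareSearch(indexName).addDocValueField("version").get();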
@@ -426,14 +400,11 @@ public void testAggs() throws Exception {
         createIndex(indexName, Settings.builder().put("index.number_of_shards", 1).build(), "_doc", "version", "type=version");
         ensureGreen(indexName);
 
-        client().prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("version", "1.0").endObject()).get();
-        client().prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("version", "1.3.0").endObject()).get();
-        client().prepareIndex(indexName)
-            .setId("3")
-            .setSource(jsonBuilder().startObject().field("version", "2.1.0-alpha").endObject())
-            .get();
-        client().prepareIndex(indexName).setId("4").setSource(jsonBuilder().startObject().field("version", "2.1.0").endObject()).get();
-        client().prepareIndex(indexName).setId("5").setSource(jsonBuilder().startObject().field("version", "3.11.5").endObject()).get();
+        prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("version", "1.0").endObject()).get();
+        prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("version", "1.3.0").endObject()).get();
+        prepareIndex(indexName).setId("3").setSource(jsonBuilder().startObject().field("version", "2.1.0-alpha").endObject()).get();
+        prepareIndex(indexName).setId("4").setSource(jsonBuilder().startObject().field("version", "2.1.0").endObject()).get();
+        prepareIndex(indexName).setId("5").setSource(jsonBuilder().startObject().field("version", "3.11.5").endObject()).get();
         client().admin().indices().prepareRefresh(indexName).get();
 
         // terms aggs
@@ -469,16 +440,9 @@ public void testMultiValues() throws Exception {
         createIndex(indexName, Settings.builder().put("index.number_of_shards", 1).build(), "_doc", "version", "type=version");
         ensureGreen(indexName);
 
-        client().prepareIndex(indexName)
-            .setId("1")
-            .setSource(jsonBuilder().startObject().array("version", "1.0.0", "3.0.0").endObject())
-            .get();
-        client().prepareIndex(indexName)
-            .setId("2")
-            .setSource(jsonBuilder().startObject().array("version", "2.0.0", "4.alpha.0").endObject())
-            .get();
-        client().prepareIndex(indexName)
-            .setId("3")
+        prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().array("version", "1.0.0", "3.0.0").endObject()).get();
+        prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().array("version", "2.0.0", "4.alpha.0").endObject()).get();
+        prepareIndex(indexName).setId("3")
             .setSource(jsonBuilder().startObject().array("version", "2.1.0", "2.2.0", "5.99.0").endObject())
             .get();
         client().admin().indices().prepareRefresh(indexName).get();
diff --git a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportGetTrainedModelPackageConfigAction.java b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportGetTrainedModelPackageConfigAction.java
index 41db25881185f..6cdeb93d1e07d 100644
--- a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportGetTrainedModelPackageConfigAction.java
+++ b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportGetTrainedModelPackageConfigAction.java
@@ -90,7 +90,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A
 
         if (packagedModelId.equals(packageConfig.getPackagedModelId()) == false) {
             // the package is somehow broken
-            listener.onFailure(new ElasticsearchStatusException("Invalid package", RestStatus.INTERNAL_SERVER_ERROR));
+            listener.onFailure(new ElasticsearchStatusException("Invalid package name", RestStatus.INTERNAL_SERVER_ERROR));
             return;
         }
diff --git a/x-pack/plugin/ml/build.gradle b/x-pack/plugin/ml/build.gradle
index f66d49b7880f5..2373dc4d54c99 100644
--- a/x-pack/plugin/ml/build.gradle
+++ b/x-pack/plugin/ml/build.gradle
@@ -86,6 +86,7 @@ dependencies {
   testImplementation project(':modules:reindex')
   testImplementation project(':modules:analysis-common')
   testImplementation project(':modules:mapper-extras')
+  testImplementation project(':modules:lang-mustache') // This should not be here
   testImplementation(testArtifact(project(xpackModule('security'))))
   testImplementation project(path: xpackModule('wildcard'))
@@ -103,6 +104,7 @@ dependencies {
     changing = true
   }
   testImplementation 'org.ini4j:ini4j:0.5.2'
+  testImplementation "com.google.jimfs:jimfs:${versions.jimfs}"
 }
 
 artifacts {
@@ -118,19 +120,16 @@ tasks.register("extractNativeLicenses", Copy) {
   }
   include 'platform/licenses/**'
 }
 
-project.afterEvaluate {
-  // Add an extra licenses directory to the combined notices
-  tasks.named('generateNotice').configure {
-    dependsOn "extractNativeLicenses"
-    licensesDir new File("${project.buildDir}/extractedNativeLicenses/platform/licenses")
-    outputs.upToDateWhen {
-      extractNativeLicenses.didWork
-    }
-  }
+
+// Add an extra licenses directory to the combined notices
+tasks.named('generateNotice').configure {
+  dependsOn "extractNativeLicenses"
+  inputs.dir("${project.buildDir}/extractedNativeLicenses/platform/licenses")
+  licenseDirs.add(tasks.named("extractNativeLicenses").map {new File(it.destinationDir, "platform/licenses") })
 }
 
 tasks.named("dependencyLicenses").configure {
   mapping from: /lucene-.*/, to: 'lucene'
 }
-addQaCheckDependencies(project)
\ No newline at end of file
+addQaCheckDependencies(project)
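[Editor's note] The MlRescorerIT hunks below track the rename of the rescorer section from "inference" to "learn_to_rank". Instead of overriding feature_extractors per request through inference_config, the model definition now carries a mustache-templated extractor ("{{keyword}}", presumably the reason lang-mustache was added to the test classpath above), and requests pass only parameter values. Mirroring the request shape used in LearnToRankRescorerIT further down, a rescored search looks roughly like this (sketch; index name and model id come from the test fixtures):

    Request request = new Request("GET", "store/_search");
    request.setJsonEntity("""
        {
          "query": { "match": { "product": { "query": "TV" } } },
          "rescore": {
            "window_size": 10,
            "learn_to_rank": {
              "model_id": "basic-ltr-model",
              "params": { "keyword": "TV" }
            }
          }
        }""");
    Response response = client().performRequest(request);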
"rescore": { + "query": { + "match": { "product": { "query": "TV" } } + }, + "rescore": { "window_size": 10, - "inference": { + "learn_to_rank": { "model_id": "basic-ltr-model", - "inference_config": { - "learn_to_rank": { - "feature_extractors":[ - {"query_extractor": {"feature_name": "product_bm25", "query": {"term": {"product": "TV"}}}} - ] - } - } - } + "params": { "keyword": "TV" } + } } - }"""); Map response = responseAsMap(searchResponse); @@ -235,20 +237,13 @@ public void testLtrSimpleDFS() throws Exception { searchResponse = searchDfs(""" { - "rescore": { + "rescore": { "window_size": 10, - "inference": { + "learn_to_rank": { "model_id": "basic-ltr-model", - "inference_config": { - "learn_to_rank": { - "feature_extractors":[ - {"query_extractor": {"feature_name": "product_bm25", "query": {"term": {"product": "TV"}}}} - ] - } - } - } + "params": { "keyword": "TV" } + } } - }"""); response = responseAsMap(searchResponse); @@ -262,16 +257,16 @@ public void testLtrSimpleDFS() throws Exception { @SuppressWarnings("unchecked") public void testLtrSimpleEmpty() throws Exception { Response searchResponse = search(""" - { "query": { - "term": { "product": "computer"} - }, - "rescore": { + { + "query": { + "term": { "product": "computer" } + }, + "rescore": { "window_size": 10, - "inference": { + "learn_to_rank": { "model_id": "basic-ltr-model" - } + } } - }"""); Map response = responseAsMap(searchResponse); @@ -281,16 +276,16 @@ public void testLtrSimpleEmpty() throws Exception { @SuppressWarnings("unchecked") public void testLtrEmptyDFS() throws Exception { Response searchResponse = searchDfs(""" - { "query": { - "match": { "product": { "query": "computer"}} - }, - "rescore": { + { + "query": { + "match": { "product": { "query": "computer"} } + }, + "rescore": { "window_size": 10, - "inference": { + "learn_to_rank": { "model_id": "basic-ltr-model" - } + } } - }"""); Map response = responseAsMap(searchResponse); @@ -300,30 +295,31 @@ public void testLtrEmptyDFS() throws Exception { @SuppressWarnings("unchecked") public void testLtrCanMatch() throws Exception { Response searchResponse = searchCanMatch(""" - { "query": { - "match": { "product": { "query": "TV"}} - }, - "rescore": { + { + "query": { + "match": { "product": { "query": "TV" } } + }, + "rescore": { "window_size": 10, - "inference": { + "learn_to_rank": { "model_id": "basic-ltr-model" - } + } } - }""", false); Map response = responseAsMap(searchResponse); assertThat(response.toString(), (List) XContentMapValues.extractValue("hits.hits._score", response), contains(20.0, 20.0)); searchResponse = searchCanMatch(""" - { "query": { - "match": { "product": { "query": "TV"}} - }, - "rescore": { + { + "query": { + "match": { "product": { "query": "TV" } } + }, + "rescore": { "window_size": 10, - "inference": { + "learn_to_rank": { "model_id": "basic-ltr-model" - } + } } }""", true); @@ -363,5 +359,4 @@ private void putRegressionModel(String modelId, String body) throws IOException model.setJsonEntity(body); assertThat(client().performRequest(model).getStatusLine().getStatusCode(), equalTo(200)); } - } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ClassificationEvaluationIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ClassificationEvaluationIT.java index 3265e30a0a64c..c42f2b0fcda6f 100644 --- 
a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ClassificationEvaluationIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ClassificationEvaluationIT.java @@ -117,8 +117,7 @@ public void testEvaluate_AllMetrics_KeywordField_CaseSensitivity() { String actualField = "fieldA"; String predictedField = "fieldB"; client().admin().indices().prepareCreate(indexName).setMapping(actualField, "type=keyword", predictedField, "type=keyword").get(); - client().prepareIndex(indexName) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + prepareIndex(indexName).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .setSource(actualField, "crocodile", predictedField, "cRoCoDiLe") .get(); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ClassificationIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ClassificationIT.java index b3852f218d199..b610ac60dab00 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ClassificationIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ClassificationIT.java @@ -838,8 +838,7 @@ public void testDeleteExpiredData_RemovesUnusedState() throws Exception { // Delete the config straight from the config index DeleteResponse deleteResponse = client().prepareDelete(".ml-config", DataFrameAnalyticsConfig.documentId(jobId)) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .execute() - .actionGet(); + .get(); assertThat(deleteResponse.status(), equalTo(RestStatus.OK)); // Now calling the _delete_expired_data API should remove unused state diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java index f9e8ea6fb4ed9..cf73b5a4a7544 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java @@ -125,10 +125,10 @@ public void testDeleteExpiredDataNoThrottle() throws Exception { public void testDeleteExpiredDataActionDeletesEmptyStateIndices() throws Exception { client().admin().indices().prepareCreate(".ml-state").get(); client().admin().indices().prepareCreate(".ml-state-000001").get(); - client().prepareIndex(".ml-state-000001").setSource("field_1", "value_1").get(); + prepareIndex(".ml-state-000001").setSource("field_1", "value_1").get(); client().admin().indices().prepareCreate(".ml-state-000003").get(); client().admin().indices().prepareCreate(".ml-state-000005").get(); - client().prepareIndex(".ml-state-000005").setSource("field_5", "value_5").get(); + prepareIndex(".ml-state-000005").setSource("field_5", "value_5").get(); client().admin().indices().prepareCreate(".ml-state-000007").addAlias(new Alias(".ml-state-write").isHidden(true)).get(); refresh(); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/InferenceIngestInputConfigIT.java 
b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/InferenceIngestInputConfigIT.java index a3e5c3993398e..d71273e7f8119 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/InferenceIngestInputConfigIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/InferenceIngestInputConfigIT.java @@ -7,8 +7,6 @@ package org.elasticsearch.xpack.ml.integration; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; import org.elasticsearch.core.Strings; import org.elasticsearch.xpack.core.ml.utils.MapHelper; @@ -118,16 +116,4 @@ private static String pipelineDefinition(String modelId, String inputOutput) { ] }""", modelId, inputOutput); } - - private Response simulatePipeline(String pipelineDef, String docs) throws IOException { - String simulate = Strings.format(""" - { - "pipeline": %s, - "docs": %s - }""", pipelineDef, docs); - - Request request = new Request("POST", "_ingest/pipeline/_simulate?error_trace=true"); - request.setJsonEntity(simulate); - return client().performRequest(request); - } } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java index fab4eec92d981..79ac65e8b14be 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java @@ -271,8 +271,7 @@ protected void assertThatNumberOfAnnotationsIsEqualTo(int expectedNumberOfAnnota // Refresh the annotations index so that recently indexed annotation docs are visible. 
indicesAdmin().prepareRefresh(AnnotationIndex.LATEST_INDEX_NAME) .setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN) - .execute() - .actionGet(); + .get(); SearchRequest searchRequest = new SearchRequest(AnnotationIndex.READ_ALIAS_NAME).indicesOptions( IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java index 34d50216ae325..209f12c2e90ce 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java @@ -13,9 +13,9 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; import org.elasticsearch.action.datastreams.CreateDataStreamAction; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterModule; @@ -311,12 +311,12 @@ protected static List fetchAllAuditMessages(String jobId) throws Excepti RefreshResponse refreshResponse = client().execute(RefreshAction.INSTANCE, refreshRequest).actionGet(); assertThat(refreshResponse.getStatus().getStatus(), anyOf(equalTo(200), equalTo(201))); - SearchRequest searchRequest = new SearchRequestBuilder(client(), SearchAction.INSTANCE).setIndices( + SearchRequest searchRequest = new SearchRequestBuilder(client(), TransportSearchAction.TYPE).setIndices( NotificationsIndex.NOTIFICATIONS_INDEX ).addSort("timestamp", SortOrder.ASC).setQuery(QueryBuilders.termQuery("job_id", jobId)).setSize(100).request(); List messages = new ArrayList<>(); assertResponse( - client().execute(SearchAction.INSTANCE, searchRequest), + client().execute(TransportSearchAction.TYPE, searchRequest), searchResponse -> Arrays.stream(searchResponse.getHits().getHits()) .map(hit -> (String) hit.getSourceAsMap().get("message")) .forEach(messages::add) @@ -440,16 +440,11 @@ protected static void createDataStreamAndTemplate(String dataStreamName, String client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request(dataStreamName + "_template").indexTemplate( - new ComposableIndexTemplate( - Collections.singletonList(dataStreamName), - new Template(null, new CompressedXContent(mapping), null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList(dataStreamName)) + .template(new Template(null, new CompressedXContent(mapping), null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ) ).actionGet(); client().execute(CreateDataStreamAction.INSTANCE, new CreateDataStreamAction.Request(dataStreamName)).actionGet(); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelSnapshotRetentionIT.java 
b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelSnapshotRetentionIT.java index bc4d9e82ef831..11ab23bf665bd 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelSnapshotRetentionIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelSnapshotRetentionIT.java @@ -13,8 +13,8 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.master.MasterNodeRequest; @@ -190,7 +190,7 @@ private List getAvailableModelStateDocIds() throws Exception { private List getDocIdsFromSearch(SearchRequest searchRequest) throws Exception { List docIds = new ArrayList<>(); - assertResponse(client().execute(SearchAction.INSTANCE, searchRequest), searchResponse -> { + assertResponse(client().execute(TransportSearchAction.TYPE, searchRequest), searchResponse -> { assertThat(searchResponse.getHits(), notNullValue()); for (SearchHit searchHit : searchResponse.getHits().getHits()) { docIds.add(searchHit.getId()); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java index a3106eac4ab22..34ef0baecccc5 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java @@ -31,7 +31,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.xpack.ml.integration.InferenceIngestIT.putPipeline; import static org.elasticsearch.xpack.ml.integration.InferenceIngestIT.simulateRequest; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; @@ -635,7 +634,7 @@ public void testInferencePipelineAgainstUnallocatedModel() throws IOException { ) ); - client().performRequest(putPipeline("my_pipeline", """ + putPipeline("my_pipeline", """ {"processors": [ { "inference": { @@ -643,7 +642,7 @@ public void testInferencePipelineAgainstUnallocatedModel() throws IOException { } } ] - }""")); + }"""); Request request = new Request("PUT", "undeployed_model_index/_doc/1?pipeline=my_pipeline&refresh=true"); request.setJsonEntity(""" @@ -717,7 +716,7 @@ public void testStopUsedDeploymentByIngestProcessor() throws IOException { putVocabulary(List.of("these", "are", "my", "words"), modelId); startDeployment(modelId); - client().performRequest(putPipeline("my_pipeline", Strings.format(""" + putPipeline("my_pipeline", Strings.format(""" { "processors": [ { @@ -726,7 +725,7 @@ public void testStopUsedDeploymentByIngestProcessor() throws IOException { } } ] - }""", modelId))); + }""", modelId)); ResponseException ex = expectThrows(ResponseException.class, () -> stopDeployment(modelId)); 
assertThat(ex.getResponse().getStatusLine().getStatusCode(), equalTo(409)); assertThat( @@ -749,7 +748,7 @@ public void testStopWithModelAliasUsedDeploymentByIngestProcessor() throws IOExc startDeployment(modelId); client().performRequest(new Request("PUT", Strings.format("_ml/trained_models/%s/model_aliases/%s", modelId, modelAlias))); - client().performRequest(putPipeline("my_pipeline", Strings.format(""" + putPipeline("my_pipeline", Strings.format(""" { "processors": [ { @@ -758,7 +757,7 @@ public void testStopWithModelAliasUsedDeploymentByIngestProcessor() throws IOExc } } ] - }""", modelAlias))); + }""", modelAlias)); ResponseException ex = expectThrows(ResponseException.class, () -> stopDeployment(modelId)); assertThat(ex.getResponse().getStatusLine().getStatusCode(), equalTo(409)); assertThat( diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelRestTestCase.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelRestTestCase.java index b278f9fe9e466..c785ae96c5c16 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelRestTestCase.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelRestTestCase.java @@ -357,6 +357,24 @@ protected void forceMergeIndex(String index) throws IOException { assertOkWithErrorMessage(client().performRequest(request)); } + protected void putPipeline(String pipelineId, String pipelineDefinition) throws IOException { + Request request = new Request("PUT", "_ingest/pipeline/" + pipelineId); + request.setJsonEntity(pipelineDefinition); + assertOkWithErrorMessage(client().performRequest(request)); + } + + protected Response simulatePipeline(String pipelineDef, String docs) throws IOException { + String simulate = Strings.format(""" + { + "pipeline": %s, + "docs": %s + }""", pipelineDef, docs); + + Request request = new Request("POST", "_ingest/pipeline/_simulate?error_trace=true"); + request.setJsonEntity(simulate); + return client().performRequest(request); + } + @SuppressWarnings("unchecked") protected int getAllocationCount(String modelId) throws IOException { Response response = getTrainedModelStats(modelId); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java index 953c224eada30..ed7cfad8bf195 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java @@ -436,8 +436,7 @@ public void testDeleteExpiredData_RemovesUnusedState() throws Exception { // Delete the config straight from the config index DeleteResponse deleteResponse = client().prepareDelete(".ml-config", DataFrameAnalyticsConfig.documentId(jobId)) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .execute() - .actionGet(); + .get(); assertThat(deleteResponse.status(), equalTo(RestStatus.OK)); // Now calling the _delete_expired_data API should remove unused state diff --git 
a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java index 77a5d7c4f8a32..948544dc95bf8 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java @@ -307,7 +307,7 @@ private void putTrainedModelIngestPipeline(String pipelineId) throws Exception { } private void indexDocForInference(String pipelineId) { - client().prepareIndex("foo").setPipeline(pipelineId).setSource("{\"text\": \"this is some plain text.\"}", XContentType.JSON).get(); + prepareIndex("foo").setPipeline(pipelineId).setSource("{\"text\": \"this is some plain text.\"}", XContentType.JSON).get(); } } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TextEmbeddingQueryIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TextEmbeddingQueryIT.java index c3b738f66127a..8e425ea071879 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TextEmbeddingQueryIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TextEmbeddingQueryIT.java @@ -283,7 +283,7 @@ public void testHybridSearch() throws IOException { } } - public void testSearchWithMissingModel() throws IOException { + public void testSearchWithMissingModel() { String modelId = "missing-model"; String indexName = modelId + "-index"; @@ -291,6 +291,120 @@ public void testSearchWithMissingModel() throws IOException { assertThat(e.getMessage(), containsString("Could not find trained model [missing-model]")); } + @SuppressWarnings("unchecked") + public void testModelWithPrefixStrings() throws IOException { + String modelId = "model-with-prefix-strings"; + String ingestPrefix = "passage: "; + String searchPrefix = "query: "; + + createTextEmbeddingModelWithPrefixString(modelId, searchPrefix, ingestPrefix); + putModelDefinition(modelId, BASE_64_ENCODED_MODEL, RAW_MODEL_SIZE); + putVocabulary( + List.of( + "these", + "are", + "my", + "words", + "the", + "washing", + "machine", + "is", + "leaking", + "octopus", + "comforter", + "smells", + ingestPrefix, + searchPrefix + ), + modelId + ); + startDeployment(modelId); + + String pipelineDefinition = Strings.format(""" + { + "processors": [ + { + "inference": { + "model_id": "%s", + "input_output": { + "input_field": "source_text", + "output_field": "embedding" + }, + "inference_config": { + "text_embedding": { + } + } + } + } + ] + } + """, modelId); + + String docSource = """ + [ + {"_source": { + "source_text": "the washing machine is leaking"}} + ] + """; + + // At ingest the prefix is automatically added + var simulateResponse = simulatePipeline(pipelineDefinition, docSource); + var simulateResponseMap = entityAsMap(simulateResponse); + var simulatedDocs = (List>) simulateResponseMap.get("docs"); + List pipelineEmbedding = (List) MapHelper.dig("doc._source.embedding", simulatedDocs.get(0)); + assertNotNull(simulateResponseMap.toString(), pipelineEmbedding); + + // Create the embedding for the same input text used in + // simulate pipeline ingest. 
Here the ingest prefix is + // manually added, the resulting embeddings should be + // the same. + var inferenceResponse = infer(ingestPrefix + "the washing machine is leaking", modelId); + Map inferenceResult = ((List>) entityAsMap(inferenceResponse).get("inference_results")).get(0); + List inferenceEmbedding = (List) inferenceResult.get("predicted_value"); + assertNotNull(inferenceResult.toString(), inferenceEmbedding); + // embeddings are exactly equal + assertEquals(inferenceEmbedding, pipelineEmbedding); + + // Now check the search prefix + List inputs = List.of( + searchPrefix + "my words", + "the machine is leaking", + "washing machine", + "these are my words", + "the octopus comforter smells" + ); + List filters = List.of("foo", "bar", "baz", "foo", "bar"); + List> embeddings = new ArrayList<>(); + + // Generate the text embeddings via the inference API + // then index them for search + for (var input : inputs) { + Response inference = infer(input, modelId); + List> responseMap = (List>) entityAsMap(inference).get("inference_results"); + List embedding = (List) responseMap.get(0).get("predicted_value"); + embeddings.add(embedding); + } + + // index dense vectors + String indexName = modelId + "_index"; + createVectorSearchIndex(indexName); + bulkIndexDocs(inputs, filters, embeddings, indexName); + forceMergeIndex(indexName); + + // the input "my words" should be prefixed with searchPrefix + var textEmbeddingSearchResponse = textEmbeddingSearch(indexName, "my words", modelId, "embedding"); + assertOkWithErrorMessage(textEmbeddingSearchResponse); + + Map responseMap = responseAsMap(textEmbeddingSearchResponse); + List> hits = (List>) MapHelper.dig("hits.hits", responseMap); + Map topHit = hits.get(0); + String sourceText = (String) MapHelper.dig("_source.source_text", topHit); + // The top hit should have the search prefix + assertEquals(searchPrefix + "my words", sourceText); + List foundEmbedding = (List) MapHelper.dig("_source.embedding", topHit); + assertEquals(embeddings.get(0), foundEmbedding); + } + protected Response textEmbeddingSearch(String index, String modelText, String modelId, String denseVectorFieldName) throws IOException { Request request = new Request("GET", index + "/_search?error_trace=true"); @@ -390,4 +504,27 @@ private void bulkIndexDocs(List sourceText, List filters, List expectedScores) throws IOException { - assertThat((List) XContentMapValues.extractValue("hits.hits._score", responseAsMap(response)), equalTo(expectedScores)); - } -} diff --git a/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/LearnToRankRescorerIT.java b/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/LearnToRankRescorerIT.java new file mode 100644 index 0000000000000..d246f070f0b8d --- /dev/null +++ b/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/LearnToRankRescorerIT.java @@ -0,0 +1,278 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class LearnToRankRescorerIT extends InferenceTestCase { + + private static final String MODEL_ID = "ltr-model"; + private static final String INDEX_NAME = "store"; + + @Before + public void setupModelAndData() throws IOException { + putRegressionModel(MODEL_ID, """ + { + "description": "super complex model for tests", + "input": {"field_names": ["cost", "product"]}, + "inference_config": { + "learn_to_rank": { + "feature_extractors": [ + { + "query_extractor": { + "feature_name": "two", + "query": {"script_score": {"query": {"match_all":{}}, "script": {"source": "return 2.0;"}}} + } + }, + { + "query_extractor": { + "feature_name": "product_bm25", + "query": {"term": {"product": "{{keyword}}"}} + } + } + ] + } + }, + "definition": { + "preprocessors" : [{ + "one_hot_encoding": { + "field": "product", + "hot_map": { + "TV": "type_tv", + "VCR": "type_vcr", + "Laptop": "type_laptop" + } + } + }], + "trained_model": { + "ensemble": { + "feature_names": ["cost", "type_tv", "type_vcr", "type_laptop", "two", "product_bm25"], + "target_type": "regression", + "trained_models": [ + { + "tree": { + "feature_names": ["cost"], + "tree_structure": [ + { + "node_index": 0, + "split_feature": 0, + "split_gain": 12, + "threshold": 400, + "decision_type": "lte", + "default_left": true, + "left_child": 1, + "right_child": 2 + }, + { + "node_index": 1, + "leaf_value": 5.0 + }, + { + "node_index": 2, + "leaf_value": 2.0 + } + ], + "target_type": "regression" + } + }, + { + "tree": { + "feature_names": [ + "type_tv" + ], + "tree_structure": [ + { + "node_index": 0, + "split_feature": 0, + "split_gain": 12, + "threshold": 1, + "decision_type": "lt", + "default_left": true, + "left_child": 1, + "right_child": 2 + }, + { + "node_index": 1, + "leaf_value": 1.0 + }, + { + "node_index": 2, + "leaf_value": 12.0 + } + ], + "target_type": "regression" + } + }, + { + "tree": { + "feature_names": [ + "two" + ], + "tree_structure": [ + { + "node_index": 0, + "split_feature": 0, + "split_gain": 12, + "threshold": 1, + "decision_type": "lt", + "default_left": true, + "left_child": 1, + "right_child": 2 + }, + { + "node_index": 1, + "leaf_value": 1.0 + }, + { + "node_index": 2, + "leaf_value": 2.0 + } + ], + "target_type": "regression" + } + }, + { + "tree": { + "feature_names": [ + "product_bm25" + ], + "tree_structure": [ + { + "node_index": 0, + "split_feature": 0, + "split_gain": 12, + "threshold": 1, + "decision_type": "lt", + "default_left": true, + "left_child": 1, + "right_child": 2 + }, + { + "node_index": 1, + "leaf_value": 1.0 + }, + { + "node_index": 2, + "leaf_value": 4.0 + } + ], + "target_type": "regression" + } + } + ] + } + } + } + } + """); + createIndex(INDEX_NAME, Settings.EMPTY, """ + "properties":{ + "product":{"type": "keyword"}, + "cost":{"type": "integer"} + }"""); + indexData("{ \"product\": \"TV\", \"cost\": 300}"); + indexData("{ \"product\": \"TV\", \"cost\": 400}"); + indexData("{ \"product\": \"TV\", \"cost\": 600}"); + indexData("{ \"product\": \"VCR\", \"cost\": 15}"); + indexData("{ \"product\": \"VCR\", \"cost\": 350}"); + indexData("{ \"product\": \"VCR\", \"cost\": 580}"); + indexData("{ 
\"product\": \"Laptop\", \"cost\": 100}"); + indexData("{ \"product\": \"Laptop\", \"cost\": 300}"); + indexData("{ \"product\": \"Laptop\", \"cost\": 500}"); + adminClient().performRequest(new Request("POST", INDEX_NAME + "/_refresh")); + } + + public void testLearnToRankRescore() throws Exception { + Request request = new Request("GET", "store/_search?size=3&error_trace"); + request.setJsonEntity(""" + { + "rescore": { + "window_size": 10, + "learn_to_rank": { "model_id": "ltr-model" } + } + }"""); + assertHitScores(client().performRequest(request), List.of(20.0, 20.0, 17.0)); + request.setJsonEntity(""" + { + "query": { "term": { "product": "Laptop" } }, + "rescore": { + "window_size": 10, + "learn_to_rank": { + "model_id": "ltr-model", + "params": { + "keyword": "Laptop" + } + } + } + }"""); + assertHitScores(client().performRequest(request), List.of(12.0, 12.0, 9.0)); + request.setJsonEntity(""" + { + "query": {"term": { "product": "Laptop" } }, + "rescore": { + "window_size": 10, + "learn_to_rank": { "model_id": "ltr-model"} + } + }"""); + assertHitScores(client().performRequest(request), List.of(9.0, 9.0, 6.0)); + } + + public void testLearnToRankRescoreSmallWindow() throws Exception { + Request request = new Request("GET", "store/_search?size=5"); + request.setJsonEntity(""" + { + "rescore": { + "window_size": 2, + "learn_to_rank": { "model_id": "ltr-model" } + } + }"""); + assertHitScores(client().performRequest(request), List.of(20.0, 20.0, 1.0, 1.0, 1.0)); + } + + public void testLearnToRankRescorerWithChainedRescorers() throws IOException { + Request request = new Request("GET", "store/_search?size=5"); + request.setJsonEntity(""" + { + "rescore": [ + { + "window_size": 4, + "query": { "rescore_query" : { "script_score": { "query": { "match_all": {} }, "script": { "source": "return 4" } } } } + }, + { + "window_size": 3, + "learn_to_rank": { "model_id": "ltr-model" } + }, + { + "window_size": 2, + "query": { "rescore_query": { "script_score": { "query": { "match_all": {} }, "script": { "source": "return 20"} } } } + } + ] + }"""); + assertHitScores(client().performRequest(request), List.of(40.0, 40.0, 17.0, 5.0, 1.0)); + } + + private void indexData(String data) throws IOException { + Request request = new Request("POST", INDEX_NAME + "/_doc"); + request.setJsonEntity(data); + client().performRequest(request); + } + + @SuppressWarnings("unchecked") + private static void assertHitScores(Response response, List expectedScores) throws IOException { + assertThat((List) XContentMapValues.extractValue("hits.hits._score", responseAsMap(response)), equalTo(expectedScores)); + } +} diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java index 01a9c166ff0e4..81ae60ecfa9ae 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java @@ -99,7 +99,7 @@ public void testMachineLearningPutJobActionRestricted() { // test that license restricted apis do not work ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> { - PlainActionFuture listener = PlainActionFuture.newFuture(); + PlainActionFuture listener = new PlainActionFuture<>(); client().execute(PutJobAction.INSTANCE, new PutJobAction.Request(createJob(jobId)), listener); 
listener.actionGet(); }); @@ -112,7 +112,7 @@ public void testMachineLearningPutJobActionRestricted() { enableLicensing(mode); assertMLAllowed(true); // test that license restricted apis do now work - PlainActionFuture listener = PlainActionFuture.newFuture(); + PlainActionFuture listener = new PlainActionFuture<>(); client().execute(PutJobAction.INSTANCE, new PutJobAction.Request(createJob(jobId)), listener); PutJobAction.Response response = listener.actionGet(); assertNotNull(response); @@ -122,7 +122,7 @@ public void testMachineLearningOpenJobActionRestricted() throws Exception { String jobId = "testmachinelearningopenjobactionrestricted"; assertMLAllowed(true); // test that license restricted apis do now work - PlainActionFuture putJobListener = PlainActionFuture.newFuture(); + PlainActionFuture putJobListener = new PlainActionFuture<>(); client().execute(PutJobAction.INSTANCE, new PutJobAction.Request(createJob(jobId)), putJobListener); PutJobAction.Response response = putJobListener.actionGet(); assertNotNull(response); @@ -133,7 +133,7 @@ public void testMachineLearningOpenJobActionRestricted() throws Exception { assertMLAllowed(false); // test that license restricted apis do not work ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> { - PlainActionFuture listener = PlainActionFuture.newFuture(); + PlainActionFuture listener = new PlainActionFuture<>(); client().execute(OpenJobAction.INSTANCE, new OpenJobAction.Request(jobId), listener); listener.actionGet(); }); @@ -153,7 +153,7 @@ public void testMachineLearningOpenJobActionRestricted() throws Exception { }); // test that license restricted apis do now work - PlainActionFuture listener = PlainActionFuture.newFuture(); + PlainActionFuture listener = new PlainActionFuture<>(); client().execute(OpenJobAction.INSTANCE, new OpenJobAction.Request(jobId), listener); NodeAcknowledgedResponse response2 = listener.actionGet(); assertNotNull(response2); @@ -164,7 +164,7 @@ public void testMachineLearningPutDatafeedActionRestricted() { String datafeedId = jobId + "-datafeed"; assertMLAllowed(true); // test that license restricted apis do now work - PlainActionFuture putJobListener = PlainActionFuture.newFuture(); + PlainActionFuture putJobListener = new PlainActionFuture<>(); client().execute(PutJobAction.INSTANCE, new PutJobAction.Request(createJob(jobId)), putJobListener); PutJobAction.Response putJobResponse = putJobListener.actionGet(); assertNotNull(putJobResponse); @@ -175,7 +175,7 @@ public void testMachineLearningPutDatafeedActionRestricted() { assertMLAllowed(false); // test that license restricted apis do not work ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> { - PlainActionFuture listener = PlainActionFuture.newFuture(); + PlainActionFuture listener = new PlainActionFuture<>(); client().execute( PutDatafeedAction.INSTANCE, new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, Collections.singletonList(jobId))), @@ -192,7 +192,7 @@ public void testMachineLearningPutDatafeedActionRestricted() { enableLicensing(mode); assertMLAllowed(true); // test that license restricted apis do now work - PlainActionFuture listener = PlainActionFuture.newFuture(); + PlainActionFuture listener = new PlainActionFuture<>(); client().execute( PutDatafeedAction.INSTANCE, new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, Collections.singletonList(jobId))), @@ -211,12 +211,12 @@ public void testAutoCloseJobWithDatafeed() throws Exception { 
{"_doc":{"properties":{"time":{"type":"date"}}}}""").get(); // put job - PlainActionFuture putJobListener = PlainActionFuture.newFuture(); + PlainActionFuture putJobListener = new PlainActionFuture<>(); client().execute(PutJobAction.INSTANCE, new PutJobAction.Request(createJob(jobId)), putJobListener); PutJobAction.Response putJobResponse = putJobListener.actionGet(); assertNotNull(putJobResponse); // put datafeed - PlainActionFuture putDatafeedListener = PlainActionFuture.newFuture(); + PlainActionFuture putDatafeedListener = new PlainActionFuture<>(); client().execute( PutDatafeedAction.INSTANCE, new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, Collections.singletonList(datafeedIndex))), @@ -225,12 +225,12 @@ public void testAutoCloseJobWithDatafeed() throws Exception { PutDatafeedAction.Response putDatafeedResponse = putDatafeedListener.actionGet(); assertNotNull(putDatafeedResponse); // open job - PlainActionFuture openJobListener = PlainActionFuture.newFuture(); + PlainActionFuture openJobListener = new PlainActionFuture<>(); client().execute(OpenJobAction.INSTANCE, new OpenJobAction.Request(jobId), openJobListener); NodeAcknowledgedResponse openJobResponse = openJobListener.actionGet(); assertNotNull(openJobResponse); // start datafeed - PlainActionFuture listener = PlainActionFuture.newFuture(); + PlainActionFuture listener = new PlainActionFuture<>(); client().execute(StartDatafeedAction.INSTANCE, new StartDatafeedAction.Request(datafeedId, 0L), listener); listener.actionGet(); @@ -260,12 +260,12 @@ public void testAutoCloseJobWithDatafeed() throws Exception { assertMLAllowed(true); // open job - PlainActionFuture openJobListener2 = PlainActionFuture.newFuture(); + PlainActionFuture openJobListener2 = new PlainActionFuture<>(); client().execute(OpenJobAction.INSTANCE, new OpenJobAction.Request(jobId), openJobListener2); NodeAcknowledgedResponse openJobResponse3 = openJobListener2.actionGet(); assertNotNull(openJobResponse3); // start datafeed - PlainActionFuture listener2 = PlainActionFuture.newFuture(); + PlainActionFuture listener2 = new PlainActionFuture<>(); client().execute(StartDatafeedAction.INSTANCE, new StartDatafeedAction.Request(datafeedId, 0L), listener2); listener2.actionGet(); @@ -310,11 +310,11 @@ public void testMachineLearningStartDatafeedActionRestricted() throws Exception prepareCreate(datafeedIndex).setMapping(""" {"_doc":{"properties":{"time":{"type":"date"}}}}""").get(); // test that license restricted apis do now work - PlainActionFuture putJobListener = PlainActionFuture.newFuture(); + PlainActionFuture putJobListener = new PlainActionFuture<>(); client().execute(PutJobAction.INSTANCE, new PutJobAction.Request(createJob(jobId)), putJobListener); PutJobAction.Response putJobResponse = putJobListener.actionGet(); assertNotNull(putJobResponse); - PlainActionFuture putDatafeedListener = PlainActionFuture.newFuture(); + PlainActionFuture putDatafeedListener = new PlainActionFuture<>(); client().execute( PutDatafeedAction.INSTANCE, new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, Collections.singletonList(datafeedIndex))), @@ -322,7 +322,7 @@ public void testMachineLearningStartDatafeedActionRestricted() throws Exception ); PutDatafeedAction.Response putDatafeedResponse = putDatafeedListener.actionGet(); assertNotNull(putDatafeedResponse); - PlainActionFuture openJobListener = PlainActionFuture.newFuture(); + PlainActionFuture openJobListener = new PlainActionFuture<>(); client().execute(OpenJobAction.INSTANCE, new 
         NodeAcknowledgedResponse openJobResponse = openJobListener.actionGet();
         assertNotNull(openJobResponse);
@@ -343,7 +343,7 @@ public void testMachineLearningStartDatafeedActionRestricted() throws Exception
 
         // test that license restricted apis do not work
         ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> {
-            PlainActionFuture<NodeAcknowledgedResponse> listener = PlainActionFuture.newFuture();
+            PlainActionFuture<NodeAcknowledgedResponse> listener = new PlainActionFuture<>();
             client().execute(StartDatafeedAction.INSTANCE, new StartDatafeedAction.Request(datafeedId, 0L), listener);
             listener.actionGet();
         });
@@ -357,12 +357,12 @@ public void testMachineLearningStartDatafeedActionRestricted() throws Exception
         assertMLAllowed(true);
         // test that license restricted apis do now work
         // re-open job now that the license is valid again
-        PlainActionFuture<NodeAcknowledgedResponse> openJobListener2 = PlainActionFuture.newFuture();
+        PlainActionFuture<NodeAcknowledgedResponse> openJobListener2 = new PlainActionFuture<>();
         client().execute(OpenJobAction.INSTANCE, new OpenJobAction.Request(jobId), openJobListener2);
         NodeAcknowledgedResponse openJobResponse3 = openJobListener2.actionGet();
         assertNotNull(openJobResponse3);
 
-        PlainActionFuture<NodeAcknowledgedResponse> listener = PlainActionFuture.newFuture();
+        PlainActionFuture<NodeAcknowledgedResponse> listener = new PlainActionFuture<>();
         client().execute(StartDatafeedAction.INSTANCE, new StartDatafeedAction.Request(datafeedId, 0L), listener);
         NodeAcknowledgedResponse response = listener.actionGet();
         assertNotNull(response);
@@ -376,11 +376,11 @@ public void testMachineLearningStopDatafeedActionNotRestricted() throws Exceptio
         prepareCreate(datafeedIndex).setMapping("""
             {"_doc":{"properties":{"time":{"type":"date"}}}}""").get();
         // test that license restricted apis do now work
-        PlainActionFuture<PutJobAction.Response> putJobListener = PlainActionFuture.newFuture();
+        PlainActionFuture<PutJobAction.Response> putJobListener = new PlainActionFuture<>();
         client().execute(PutJobAction.INSTANCE, new PutJobAction.Request(createJob(jobId)), putJobListener);
         PutJobAction.Response putJobResponse = putJobListener.actionGet();
         assertNotNull(putJobResponse);
-        PlainActionFuture<PutDatafeedAction.Response> putDatafeedListener = PlainActionFuture.newFuture();
+        PlainActionFuture<PutDatafeedAction.Response> putDatafeedListener = new PlainActionFuture<>();
         client().execute(
             PutDatafeedAction.INSTANCE,
             new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, Collections.singletonList(datafeedIndex))),
@@ -388,11 +388,11 @@ public void testMachineLearningStopDatafeedActionNotRestricted() throws Exceptio
         );
         PutDatafeedAction.Response putDatafeedResponse = putDatafeedListener.actionGet();
         assertNotNull(putDatafeedResponse);
-        PlainActionFuture<NodeAcknowledgedResponse> openJobListener = PlainActionFuture.newFuture();
+        PlainActionFuture<NodeAcknowledgedResponse> openJobListener = new PlainActionFuture<>();
         client().execute(OpenJobAction.INSTANCE, new OpenJobAction.Request(jobId), openJobListener);
         NodeAcknowledgedResponse openJobResponse = openJobListener.actionGet();
         assertNotNull(openJobResponse);
-        PlainActionFuture<NodeAcknowledgedResponse> startDatafeedListener = PlainActionFuture.newFuture();
+        PlainActionFuture<NodeAcknowledgedResponse> startDatafeedListener = new PlainActionFuture<>();
         client().execute(StartDatafeedAction.INSTANCE, new StartDatafeedAction.Request(datafeedId, 0L), startDatafeedListener);
         NodeAcknowledgedResponse startDatafeedResponse = startDatafeedListener.actionGet();
         assertNotNull(startDatafeedResponse);
@@ -404,7 +404,7 @@ public void testMachineLearningStopDatafeedActionNotRestricted() throws Exceptio
             enableLicensing(randomValidLicenseType());
         }
 
-        PlainActionFuture<StopDatafeedAction.Response> listener = PlainActionFuture.newFuture();
+        PlainActionFuture<StopDatafeedAction.Response> listener = new PlainActionFuture<>();
         client().execute(StopDatafeedAction.INSTANCE, new StopDatafeedAction.Request(datafeedId), listener);
         if (invalidLicense) {
             // the stop datafeed due to invalid license happens async, so check if the datafeed turns into stopped state:
@@ -433,11 +433,11 @@ public void testMachineLearningCloseJobActionNotRestricted() throws Exception {
         String jobId = "testmachinelearningclosejobactionnotrestricted";
         assertMLAllowed(true);
         // test that license restricted apis do now work
-        PlainActionFuture<PutJobAction.Response> putJobListener = PlainActionFuture.newFuture();
+        PlainActionFuture<PutJobAction.Response> putJobListener = new PlainActionFuture<>();
         client().execute(PutJobAction.INSTANCE, new PutJobAction.Request(createJob(jobId)), putJobListener);
         PutJobAction.Response putJobResponse = putJobListener.actionGet();
         assertNotNull(putJobResponse);
-        PlainActionFuture<NodeAcknowledgedResponse> openJobListener = PlainActionFuture.newFuture();
+        PlainActionFuture<NodeAcknowledgedResponse> openJobListener = new PlainActionFuture<>();
         client().execute(OpenJobAction.INSTANCE, new OpenJobAction.Request(jobId), openJobListener);
         NodeAcknowledgedResponse openJobResponse = openJobListener.actionGet();
         assertNotNull(openJobResponse);
@@ -449,7 +449,7 @@ public void testMachineLearningCloseJobActionNotRestricted() throws Exception {
             enableLicensing(randomValidLicenseType());
         }
 
-        PlainActionFuture<CloseJobAction.Response> listener = PlainActionFuture.newFuture();
+        PlainActionFuture<CloseJobAction.Response> listener = new PlainActionFuture<>();
         CloseJobAction.Request request = new CloseJobAction.Request(jobId);
         request.setCloseTimeout(TimeValue.timeValueSeconds(20));
         if (invalidLicense) {
@@ -469,7 +469,7 @@ public void testMachineLearningDeleteJobActionNotRestricted() {
         String jobId = "testmachinelearningclosejobactionnotrestricted";
         assertMLAllowed(true);
         // test that license restricted apis do now work
-        PlainActionFuture<PutJobAction.Response> putJobListener = PlainActionFuture.newFuture();
+        PlainActionFuture<PutJobAction.Response> putJobListener = new PlainActionFuture<>();
         client().execute(PutJobAction.INSTANCE, new PutJobAction.Request(createJob(jobId)), putJobListener);
         PutJobAction.Response putJobResponse = putJobListener.actionGet();
         assertNotNull(putJobResponse);
@@ -478,7 +478,7 @@ public void testMachineLearningDeleteJobActionNotRestricted() {
         License.OperationMode mode = randomLicenseType();
         enableLicensing(mode);
 
-        PlainActionFuture<AcknowledgedResponse> listener = PlainActionFuture.newFuture();
+        PlainActionFuture<AcknowledgedResponse> listener = new PlainActionFuture<>();
         client().execute(DeleteJobAction.INSTANCE, new DeleteJobAction.Request(jobId), listener);
         listener.actionGet();
     }
@@ -488,11 +488,11 @@ public void testMachineLearningDeleteDatafeedActionNotRestricted() {
         String datafeedId = jobId + "-datafeed";
         assertMLAllowed(true);
         // test that license restricted apis do now work
-        PlainActionFuture<PutJobAction.Response> putJobListener = PlainActionFuture.newFuture();
+        PlainActionFuture<PutJobAction.Response> putJobListener = new PlainActionFuture<>();
         client().execute(PutJobAction.INSTANCE, new PutJobAction.Request(createJob(jobId)), putJobListener);
         PutJobAction.Response putJobResponse = putJobListener.actionGet();
         assertNotNull(putJobResponse);
-        PlainActionFuture<PutDatafeedAction.Response> putDatafeedListener = PlainActionFuture.newFuture();
+        PlainActionFuture<PutDatafeedAction.Response> putDatafeedListener = new PlainActionFuture<>();
         client().execute(
             PutDatafeedAction.INSTANCE,
             new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, Collections.singletonList(jobId))),
@@ -505,7 +505,7 @@ public void testMachineLearningDeleteDatafeedActionNotRestricted() {
         License.OperationMode mode = randomLicenseType();
         enableLicensing(mode);
 
-        PlainActionFuture<AcknowledgedResponse> listener = PlainActionFuture.newFuture();
+        PlainActionFuture<AcknowledgedResponse> listener = new PlainActionFuture<>();
         client().execute(DeleteDatafeedAction.INSTANCE, new DeleteDatafeedAction.Request(datafeedId), listener);
         listener.actionGet();
     }
@@ -527,7 +527,7 @@ public void testMachineLearningCreateInferenceProcessorRestricted() {
             }]}
             """;
         // Creating a pipeline should work
-        PlainActionFuture<AcknowledgedResponse> putPipelineListener = PlainActionFuture.newFuture();
+        PlainActionFuture<AcknowledgedResponse> putPipelineListener = new PlainActionFuture<>();
         client().execute(
             PutPipelineAction.INSTANCE,
             new PutPipelineRequest(
@@ -540,11 +540,7 @@ public void testMachineLearningCreateInferenceProcessorRestricted() {
         AcknowledgedResponse putPipelineResponse = putPipelineListener.actionGet();
         assertTrue(putPipelineResponse.isAcknowledged());
 
-        client().prepareIndex("infer_license_test")
-            .setPipeline("test_infer_license_pipeline")
-            .setSource("{}", XContentType.JSON)
-            .execute()
-            .actionGet();
+        prepareIndex("infer_license_test").setPipeline("test_infer_license_pipeline").setSource("{}", XContentType.JSON).get();
 
         String simulateSource = Strings.format("""
             {
@@ -557,7 +553,7 @@ public void testMachineLearningCreateInferenceProcessorRestricted() {
                   "col4": 10
                 }}]
             }""", pipeline);
-        PlainActionFuture<SimulatePipelineResponse> simulatePipelineListener = PlainActionFuture.newFuture();
+        PlainActionFuture<SimulatePipelineResponse> simulatePipelineListener = new PlainActionFuture<>();
         client().execute(
             SimulatePipelineAction.INSTANCE,
             new SimulatePipelineRequest(new BytesArray(simulateSource.getBytes(StandardCharsets.UTF_8)), XContentType.JSON),
@@ -573,17 +569,13 @@ public void testMachineLearningCreateInferenceProcessorRestricted() {
 
         // Inference against the previous pipeline should still work
         try {
-            client().prepareIndex("infer_license_test")
-                .setPipeline("test_infer_license_pipeline")
-                .setSource("{}", XContentType.JSON)
-                .execute()
-                .actionGet();
+            prepareIndex("infer_license_test").setPipeline("test_infer_license_pipeline").setSource("{}", XContentType.JSON).get();
         } catch (ElasticsearchSecurityException ex) {
             fail(ex.getMessage());
         }
 
         // Creating a new pipeline with an inference processor should work
-        putPipelineListener = PlainActionFuture.newFuture();
+        putPipelineListener = new PlainActionFuture<>();
         client().execute(
             PutPipelineAction.INSTANCE,
             new PutPipelineRequest(
@@ -598,11 +590,7 @@ public void testMachineLearningCreateInferenceProcessorRestricted() {
 
         // Inference against the new pipeline should fail since it has never previously succeeded
         ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> {
-            client().prepareIndex("infer_license_test")
-                .setPipeline("test_infer_license_pipeline_again")
-                .setSource("{}", XContentType.JSON)
-                .execute()
-                .actionGet();
+            prepareIndex("infer_license_test").setPipeline("test_infer_license_pipeline_again").setSource("{}", XContentType.JSON).get();
         });
         assertThat(e.status(), is(RestStatus.FORBIDDEN));
         assertThat(e.getMessage(), containsString("non-compliant"));
@@ -621,7 +609,7 @@ public void testMachineLearningCreateInferenceProcessorRestricted() {
         enableLicensing(mode);
         assertMLAllowed(true);
         // test that license restricted apis do now work
-        PlainActionFuture<AcknowledgedResponse> putPipelineListenerNewLicense = PlainActionFuture.newFuture();
+        PlainActionFuture<AcknowledgedResponse> putPipelineListenerNewLicense = new PlainActionFuture<>();
         client().execute(
             PutPipelineAction.INSTANCE,
             new PutPipelineRequest(
@@ -634,7 +622,7 @@ public void testMachineLearningCreateInferenceProcessorRestricted() {
         AcknowledgedResponse putPipelineResponseNewLicense = putPipelineListenerNewLicense.actionGet();
         assertTrue(putPipelineResponseNewLicense.isAcknowledged());
 
-        PlainActionFuture<SimulatePipelineResponse> simulatePipelineListenerNewLicense = PlainActionFuture.newFuture();
+        PlainActionFuture<SimulatePipelineResponse> simulatePipelineListenerNewLicense = new PlainActionFuture<>();
         client().execute(
             SimulatePipelineAction.INSTANCE,
             new SimulatePipelineRequest(new BytesArray(simulateSource.getBytes(StandardCharsets.UTF_8)), XContentType.JSON),
@@ -645,16 +633,8 @@ public void testMachineLearningCreateInferenceProcessorRestricted() {
 
         // both ingest pipelines should work
 
-        client().prepareIndex("infer_license_test")
-            .setPipeline("test_infer_license_pipeline")
-            .setSource("{}", XContentType.JSON)
-            .execute()
-            .actionGet();
-        client().prepareIndex("infer_license_test")
-            .setPipeline("test_infer_license_pipeline_again")
-            .setSource("{}", XContentType.JSON)
-            .execute()
-            .actionGet();
+        prepareIndex("infer_license_test").setPipeline("test_infer_license_pipeline").setSource("{}", XContentType.JSON).get();
+        prepareIndex("infer_license_test").setPipeline("test_infer_license_pipeline_again").setSource("{}", XContentType.JSON).get();
     }
 
     public void testMachineLearningInferModelRestricted() {
@@ -662,7 +642,7 @@ public void testMachineLearningInferModelRestricted() {
         assertMLAllowed(true);
         putInferenceModel(modelId);
 
-        PlainActionFuture<InferModelAction.Response> inferModelSuccess = PlainActionFuture.newFuture();
+        PlainActionFuture<InferModelAction.Response> inferModelSuccess = new PlainActionFuture<>();
         client().execute(
             InferModelAction.INSTANCE,
             InferModelAction.Request.forIngestDocs(
@@ -701,7 +681,7 @@ public void testMachineLearningInferModelRestricted() {
         assertThat(e.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), hasItem(XPackField.MACHINE_LEARNING));
 
         // Inferring with previously Licensed == true should pass, but indicate license issues
-        inferModelSuccess = PlainActionFuture.newFuture();
+        inferModelSuccess = new PlainActionFuture<>();
         client().execute(
             InferModelAction.INSTANCE,
             InferModelAction.Request.forIngestDocs(
@@ -722,7 +702,7 @@ public void testMachineLearningInferModelRestricted() {
         enableLicensing(mode);
         assertMLAllowed(true);
 
-        PlainActionFuture<InferModelAction.Response> listener = PlainActionFuture.newFuture();
+        PlainActionFuture<InferModelAction.Response> listener = new PlainActionFuture<>();
         client().execute(
             InferModelAction.INSTANCE,
             InferModelAction.Request.forIngestDocs(
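Note on the hunks above: they are one mechanical swap repeated many times, replacing the deprecated static factory `PlainActionFuture.newFuture()` with a plain constructor call. A minimal sketch of the before/after, with an illustrative response type:

    // before: PlainActionFuture<PutJobAction.Response> f = PlainActionFuture.newFuture();
    PlainActionFuture<PutJobAction.Response> f = new PlainActionFuture<>();
    client().execute(PutJobAction.INSTANCE, new PutJobAction.Request(createJob("j1")), f);
    PutJobAction.Response r = f.actionGet(); // blocks the test thread until the action completes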
diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnnotationIndexIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnnotationIndexIT.java
index 3f2ca0703bbdc..9daf353b11380 100644
--- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnnotationIndexIT.java
+++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnnotationIndexIT.java
@@ -323,8 +323,7 @@ private boolean annotationsIndexExists(String expectedName) {
         GetIndexResponse getIndexResponse = indicesAdmin().prepareGetIndex()
             .setIndices(AnnotationIndex.LATEST_INDEX_NAME)
             .setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN)
-            .execute()
-            .actionGet();
+            .get();
         return Arrays.asList(getIndexResponse.getIndices()).contains(expectedName);
     }
 
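The `.execute().actionGet()` chains collapsed in this and the following files rely on `get()` being shorthand for the same thing on a request builder, roughly:

    // the two forms behave identically for an ActionRequestBuilder
    GetIndexResponse viaFuture = builder.execute().actionGet(); // old: obtain the future, then block
    GetIndexResponse direct = builder.get();                    // new: block directly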
diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnomalyJobCRUDIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnomalyJobCRUDIT.java
index 4a091891de0a2..35d99b97f99c9 100644
--- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnomalyJobCRUDIT.java
+++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnomalyJobCRUDIT.java
@@ -114,8 +114,7 @@ public void testUpdateModelMemoryLimitOnceEstablished() {
     public void testCreateWithExistingCategorizerDocs() {
         String jobId = "job-id-with-existing-docs";
         testCreateWithExistingDocs(
-            client().prepareIndex(".ml-state-000001")
-                .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+            prepareIndex(".ml-state-000001").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
                 .setId(jobId + "_categorizer_state#1")
                 .setSource("{}", XContentType.JSON)
                 .request(),
@@ -126,8 +125,7 @@ public void testCreateWithExistingCategorizerDocs() {
     public void testCreateWithExistingQuantilesDocs() {
         String jobId = "job-id-with-existing-docs";
         testCreateWithExistingDocs(
-            client().prepareIndex(".ml-state-000001")
-                .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+            prepareIndex(".ml-state-000001").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
                 .setId(jobId + "_quantiles")
                 .setSource("{}", XContentType.JSON)
                 .request(),
@@ -138,8 +136,7 @@ public void testCreateWithExistingQuantilesDocs() {
     public void testCreateWithExistingResultsDocs() {
         String jobId = "job-id-with-existing-docs";
         testCreateWithExistingDocs(
-            client().prepareIndex(".ml-anomalies-shared")
-                .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+            prepareIndex(".ml-anomalies-shared").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
                 .setId(jobId + "_1464739200000_1")
                 .setSource("{\"job_id\": \"" + jobId + "\"}", XContentType.JSON)
                 .request(),
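Similarly, the `client().prepareIndex(...)` call sites become a bare `prepareIndex(...)`, presumably a convenience inherited from the integration-test base class. A hypothetical sketch of what that helper amounts to (assumed for illustration, not quoted from the PR):

    // hypothetical shape of the test-base helper; it simply forwards to the cluster client
    protected IndexRequestBuilder prepareIndex(String index) {
        return client().prepareIndex(index);
    }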
diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java
index 51161d51c8a41..c24c1c1becb18 100644
--- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java
+++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java
@@ -777,8 +777,7 @@ private List<Annotation> getAnnotations() throws Exception {
         // Refresh the annotations index so that recently indexed annotation docs are visible.
         indicesAdmin().prepareRefresh(AnnotationIndex.LATEST_INDEX_NAME)
             .setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_HIDDEN_FORBID_CLOSED)
-            .execute()
-            .actionGet();
+            .get();
 
         SearchRequest searchRequest = new SearchRequest(AnnotationIndex.READ_ALIAS_NAME);
         List<Annotation> annotations = new ArrayList<>();
diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java
index 2d16f90ad4221..822f8df35949e 100644
--- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java
+++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java
@@ -45,6 +45,8 @@
 import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase;
 
 import java.io.IOException;
+import java.time.Duration;
+import java.time.temporal.ChronoUnit;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
@@ -74,6 +76,7 @@ public void testFailOverBasics() throws Exception {
         OpenJobAction.Request openJobRequest = new OpenJobAction.Request(job.getId());
         client().execute(OpenJobAction.INSTANCE, openJobRequest).actionGet();
         awaitJobOpenedAndAssigned(job.getId(), null);
+        assertRecentLastTaskStateChangeTime(MlTasks.jobTaskId(job.getId()), Duration.of(10, ChronoUnit.SECONDS), null);
 
         setMlIndicesDelayedNodeLeftTimeoutToZero();
 
@@ -81,11 +84,13 @@ public void testFailOverBasics() throws Exception {
         internalCluster().stopRandomDataNode();
         ensureStableCluster(3);
         awaitJobOpenedAndAssigned(job.getId(), null);
+        assertRecentLastTaskStateChangeTime(MlTasks.jobTaskId(job.getId()), Duration.of(10, ChronoUnit.SECONDS), null);
 
         ensureGreen(); // replicas must be assigned, otherwise we could lose a whole index
         internalCluster().stopRandomDataNode();
         ensureStableCluster(2);
         awaitJobOpenedAndAssigned(job.getId(), null);
+        assertRecentLastTaskStateChangeTime(MlTasks.jobTaskId(job.getId()), Duration.of(10, ChronoUnit.SECONDS), null);
     }
 
     @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/82591")
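`assertRecentLastTaskStateChangeTime` pairs with the `Instant.now()` argument that the task-state constructors gain later in this diff; presumably it bounds how stale the recorded transition may be. A sketch under that assumption (the helper body and accessor names here are illustrative, not the actual implementation):

    // illustrative only: assert the task's recorded state change is within `max` of now
    void assertRecentLastTaskStateChangeTime(String taskId, Duration max) {
        Instant lastChange = getTaskState(taskId).getLastStateChangeTime(); // assumed accessor
        Duration age = Duration.between(lastChange, Instant.now());
        assertTrue("last state change " + age + " ago exceeds " + max, age.compareTo(max) <= 0);
    }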
diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/CategorizeTextDistributedIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/CategorizeTextDistributedIT.java
index b6cc40ba63f3b..bed5b1cb1bbfa 100644
--- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/CategorizeTextDistributedIT.java
+++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/CategorizeTextDistributedIT.java
@@ -57,25 +57,29 @@ public void testDistributedCategorizeText() {
         for (int i = 0; i < 10; ++i) {
             BulkRequestBuilder bulkRequestBuilder = client().prepareBulk();
             for (int j = 0; j < 250; ++j) {
-                IndexRequestBuilder indexRequestBuilder = client().prepareIndex(indexName)
-                    .setSource(Map.of("message", "Aug 29, 2019 2:02:51 PM org.apache.coyote.http11.Http11BaseProtocol destroy"));
+                IndexRequestBuilder indexRequestBuilder = prepareIndex(indexName).setSource(
+                    Map.of("message", "Aug 29, 2019 2:02:51 PM org.apache.coyote.http11.Http11BaseProtocol destroy")
+                );
                 bulkRequestBuilder.add(indexRequestBuilder);
-                indexRequestBuilder = client().prepareIndex(indexName)
-                    .setSource(Map.of("message", "Aug 29, 2019 2:02:51 PM org.apache.coyote.http11.Http11BaseProtocol init"));
+                indexRequestBuilder = prepareIndex(indexName).setSource(
+                    Map.of("message", "Aug 29, 2019 2:02:51 PM org.apache.coyote.http11.Http11BaseProtocol init")
+                );
                 bulkRequestBuilder.add(indexRequestBuilder);
-                indexRequestBuilder = client().prepareIndex(indexName)
-                    .setSource(Map.of("message", "Aug 29, 2019 2:02:51 PM org.apache.coyote.http11.Http11BaseProtocol start"));
+                indexRequestBuilder = prepareIndex(indexName).setSource(
+                    Map.of("message", "Aug 29, 2019 2:02:51 PM org.apache.coyote.http11.Http11BaseProtocol start")
+                );
                 bulkRequestBuilder.add(indexRequestBuilder);
-                indexRequestBuilder = client().prepareIndex(indexName)
-                    .setSource(Map.of("message", "Aug 29, 2019 2:02:51 PM org.apache.coyote.http11.Http11BaseProtocol stop"));
+                indexRequestBuilder = prepareIndex(indexName).setSource(
+                    Map.of("message", "Aug 29, 2019 2:02:51 PM org.apache.coyote.http11.Http11BaseProtocol stop")
+                );
                 bulkRequestBuilder.add(indexRequestBuilder);
             }
-            bulkRequestBuilder.execute().actionGet();
+            bulkRequestBuilder.get();
         }
-        client().admin().indices().prepareRefresh(indexName).execute().actionGet();
+        client().admin().indices().prepareRefresh(indexName).get();
 
         // Confirm the theory that all 3 nodes will have a shard on
-        IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats(indexName).execute().actionGet();
+        IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats(indexName).get();
         Set<String> nodesWithShards = Arrays.stream(indicesStatsResponse.getShards())
             .map(ShardStats::getShardRouting)
             .map(ShardRouting::currentNodeId)
diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/ChunkedTrainedModelRestorerIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/ChunkedTrainedModelRestorerIT.java
index db5c01634936b..fd577cfd44c73 100644
--- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/ChunkedTrainedModelRestorerIT.java
+++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/ChunkedTrainedModelRestorerIT.java
@@ -202,8 +202,7 @@ private void putModelDefinitions(List<TrainedModelDefinitionDoc> docs, String in
         BulkRequestBuilder bulkRequestBuilder = client().prepareBulk();
         for (TrainedModelDefinitionDoc doc : docs) {
             try (XContentBuilder xContentBuilder = doc.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS)) {
-                IndexRequestBuilder indexRequestBuilder = client().prepareIndex(index)
-                    .setSource(xContentBuilder)
+                IndexRequestBuilder indexRequestBuilder = prepareIndex(index).setSource(xContentBuilder)
                     .setId(TrainedModelDefinitionDoc.docId(doc.getModelId(), startingDocNum++));
 
                 bulkRequestBuilder.add(indexRequestBuilder);
diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DataFrameAnalyticsConfigProviderIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DataFrameAnalyticsConfigProviderIT.java
index 8ae6a3bfb9c7e..3b84c5d86c00c 100644
--- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DataFrameAnalyticsConfigProviderIT.java
+++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DataFrameAnalyticsConfigProviderIT.java
@@ -32,6 +32,7 @@
 import org.elasticsearch.xpack.ml.notifications.DataFrameAnalyticsAuditor;
 import org.junit.Before;
 
+import java.time.Instant;
 import java.util.Collections;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicReference;
@@ -383,7 +384,7 @@ private static ClusterState clusterStateWithRunningAnalyticsTask(String analytic
         );
         builder.updateTaskState(
             MlTasks.dataFrameAnalyticsTaskId(analyticsId),
-            new DataFrameAnalyticsTaskState(analyticsState, builder.getLastAllocationId(), null)
+            new DataFrameAnalyticsTaskState(analyticsState, builder.getLastAllocationId(), null, Instant.now())
         );
 
         PersistentTasksCustomMetadata tasks = builder.build();
diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DatafeedCcsIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DatafeedCcsIT.java
index 19325a55f75d1..a5a4103d0cb7a 100644
--- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DatafeedCcsIT.java
+++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DatafeedCcsIT.java
@@ -190,8 +190,7 @@ private boolean doesLocalAuditMessageExist(String message) {
         try {
             SearchResponse response = client(LOCAL_CLUSTER).prepareSearch(".ml-notifications*")
                 .setQuery(new MatchPhraseQueryBuilder("message", message))
-                .execute()
-                .actionGet();
+                .get();
             return response.getHits().getTotalHits().value > 0;
         } catch (ElasticsearchException e) {
             return false;
diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/IndexLayoutIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/IndexLayoutIT.java
index 6ae040f9ba756..db88cb5dc266e 100644
--- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/IndexLayoutIT.java
+++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/IndexLayoutIT.java
@@ -9,6 +9,7 @@
 import org.elasticsearch.client.internal.OriginSettingClient;
 import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.xpack.core.ml.MlTasks;
 import org.elasticsearch.xpack.core.ml.action.CloseJobAction;
 import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction;
 import org.elasticsearch.xpack.core.ml.action.OpenJobAction;
@@ -21,6 +22,8 @@
 import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex;
 import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase;
 
+import java.time.Duration;
+import java.time.temporal.ChronoUnit;
 import java.util.Collections;
 
 import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN;
@@ -44,6 +47,7 @@ public void testCrudOnTwoJobsInSharedIndex() throws Exception {
                 .actionGet();
             assertEquals(statsResponse.getResponse().results().get(0).getState(), JobState.OPENED);
         });
+        assertRecentLastTaskStateChangeTime(MlTasks.jobTaskId(jobId), Duration.of(10, ChronoUnit.SECONDS), null);
         assertBusy(() -> {
             GetJobsStatsAction.Response statsResponse = client().execute(
                 GetJobsStatsAction.INSTANCE,
@@ -51,6 +55,7 @@ public void testCrudOnTwoJobsInSharedIndex() throws Exception {
             ).actionGet();
             assertEquals(statsResponse.getResponse().results().get(0).getState(), JobState.OPENED);
         });
+        assertRecentLastTaskStateChangeTime(MlTasks.jobTaskId(jobId2), Duration.of(10, ChronoUnit.SECONDS), null);
 
         OriginSettingClient client = new OriginSettingClient(client(), ML_ORIGIN);
         assertThat(
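Both ML task-state classes in this PR gain a fourth constructor argument recording when the transition happened, so construction now looks like:

    // the trailing Instant records when the task entered this state
    DataFrameAnalyticsTaskState started = new DataFrameAnalyticsTaskState(
        DataFrameAnalyticsState.STARTED,
        task.getAllocationId(),
        null,          // reason
        Instant.now()  // last state change time (new argument)
    );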
diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java
index fb639e55fe306..891779e28439b 100644
--- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java
+++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java
@@ -792,8 +792,7 @@ public void testGetSnapshots() {
                 .build()
         );
         // Add a snapshot WITHOUT a min version.
-        client().prepareIndex(AnomalyDetectorsIndex.jobResultsAliasedName("other_job"))
-            .setId(ModelSnapshot.documentId("other_job", "11"))
+        prepareIndex(AnomalyDetectorsIndex.jobResultsAliasedName("other_job")).setId(ModelSnapshot.documentId("other_job", "11"))
             .setSource("""
                 {"job_id":"other_job","snapshot_id":"11", "snapshot_doc_count":1,"retain":false}""", XContentType.JSON)
             .get();
@@ -1040,7 +1039,7 @@ private void indexScheduledEvents(List<ScheduledEvent> events) throws IOExceptio
                 bulkRequest.add(indexRequest);
             }
         }
-        BulkResponse response = bulkRequest.execute().actionGet();
+        BulkResponse response = bulkRequest.get();
         if (response.hasFailures()) {
             throw new IllegalStateException(Strings.toString(response));
         }
@@ -1065,7 +1064,7 @@ private void indexFilters(List<MlFilter> filters) throws IOException {
                 bulkRequest.add(indexRequest);
             }
         }
-        bulkRequest.execute().actionGet();
+        bulkRequest.get();
     }
 
     private void indexModelSizeStats(ModelSizeStats modelSizeStats) {
@@ -1115,6 +1114,6 @@ private void indexCalendars(List<Calendar> calendars) throws IOException {
                 bulkRequest.add(indexRequest);
             }
         }
-        bulkRequest.execute().actionGet();
+        bulkRequest.get();
     }
 }
diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobsAndModelsIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobsAndModelsIT.java
index 54890c65f7576..1458b9ccf693c 100644
--- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobsAndModelsIT.java
+++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobsAndModelsIT.java
@@ -15,6 +15,7 @@
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.json.JsonXContent;
+import org.elasticsearch.xpack.core.ml.MlTasks;
 import org.elasticsearch.xpack.core.ml.action.CloseJobAction;
 import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction;
 import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsStatsAction;
@@ -36,6 +37,8 @@
 import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelDefinitionDoc;
 import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase;
 
+import java.time.Duration;
+import java.time.temporal.ChronoUnit;
 import java.util.List;
 import java.util.Set;
 
@@ -231,6 +234,8 @@ public void testCluster_GivenAnomalyDetectionJobAndTrainedModelDeployment_Should
             assertThat(jobStats.getNode(), is(not(equalTo(modelStats.getDeploymentStats().getNodeStats().get(0).getNode()))));
         });
 
+        assertRecentLastTaskStateChangeTime(MlTasks.jobTaskId(jobId), Duration.of(10, ChronoUnit.SECONDS), null);
+
         // Clean up
         client().execute(CloseJobAction.INSTANCE, new CloseJobAction.Request(jobId).setForce(true)).actionGet();
         client().execute(StopTrainedModelDeploymentAction.INSTANCE, new StopTrainedModelDeploymentAction.Request(model.getModelId()))
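The same `.get()` shorthand applies to `BulkRequestBuilder`, and the existing fail-fast check still runs on the response:

    BulkResponse response = bulkRequest.get(); // was bulkRequest.execute().actionGet()
    if (response.hasFailures()) {
        // surface per-item failures rather than silently dropping them
        throw new IllegalStateException(Strings.toString(response));
    }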
diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlAutoUpdateServiceIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlAutoUpdateServiceIT.java
index 743f018a425a3..dc0c090900393 100644
--- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlAutoUpdateServiceIT.java
+++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlAutoUpdateServiceIT.java
@@ -74,8 +74,7 @@ public void createComponents() throws Exception {
     public void testAutomaticModelUpdate() throws Exception {
         ensureGreen("_all");
         IndexNameExpressionResolver indexNameExpressionResolver = TestIndexNameExpressionResolver.newInstance();
-        client().prepareIndex(MlConfigIndex.indexName())
-            .setId(DatafeedConfig.documentId("farequote-datafeed-with-old-agg"))
+        prepareIndex(MlConfigIndex.indexName()).setId(DatafeedConfig.documentId("farequote-datafeed-with-old-agg"))
             .setSource(AGG_WITH_OLD_DATE_HISTOGRAM_INTERVAL, XContentType.JSON)
             .get();
         AtomicReference<DatafeedConfig> getConfigHolder = new AtomicReference<>();
diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/PyTorchStateStreamerIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/PyTorchStateStreamerIT.java
index db5e22b686c04..a8fd66bb04413 100644
--- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/PyTorchStateStreamerIT.java
+++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/PyTorchStateStreamerIT.java
@@ -103,8 +103,7 @@ private void putModelDefinition(List<TrainedModelDefinitionDoc> docs) throws IOE
         for (int i = 0; i < docs.size(); i++) {
             TrainedModelDefinitionDoc doc = docs.get(i);
             try (XContentBuilder xContentBuilder = doc.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS)) {
-                IndexRequestBuilder indexRequestBuilder = client().prepareIndex(InferenceIndexConstants.LATEST_INDEX_NAME)
-                    .setSource(xContentBuilder)
+                IndexRequestBuilder indexRequestBuilder = prepareIndex(InferenceIndexConstants.LATEST_INDEX_NAME).setSource(xContentBuilder)
                     .setId(TrainedModelDefinitionDoc.docId(doc.getModelId(), i));
 
                 bulkRequestBuilder.add(indexRequestBuilder);
diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureLicenseTrackingIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureLicenseTrackingIT.java
index 4ad4dfdbaccf8..f7bf94e0479e8 100644
--- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureLicenseTrackingIT.java
+++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureLicenseTrackingIT.java
@@ -118,6 +118,7 @@ public void testFeatureTrackingAnomalyJob() throws Exception {
         });
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102381")
     public void testFeatureTrackingInferenceModelPipeline() throws Exception {
         String modelId = "test-load-models-classification-license-tracking";
         Map<String, String> oneHotEncoding = new HashMap<>();
diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TrainedModelProviderIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TrainedModelProviderIT.java
index 1656970f17158..51f6243778517 100644
--- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TrainedModelProviderIT.java
+++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TrainedModelProviderIT.java
@@ -322,8 +322,7 @@ public void testGetTruncatedModelDeprecatedDefinition() throws Exception {
         ) {
             AtomicReference putDocHolder = new AtomicReference<>();
             blockingCall(
-                listener -> client().prepareIndex(InferenceIndexConstants.LATEST_INDEX_NAME)
-                    .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+                listener -> prepareIndex(InferenceIndexConstants.LATEST_INDEX_NAME).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
                     .setSource(xContentBuilder)
                     .setId(TrainedModelDefinitionDoc.docId(modelId, 0))
                     .execute(listener),
@@ -372,8 +371,7 @@ public void testGetTruncatedModelDefinition() throws Exception {
             TrainedModelDefinitionDoc doc = docBuilders.get(i).build();
             try (XContentBuilder xContentBuilder = doc.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS)) {
-                IndexRequestBuilder indexRequestBuilder = client().prepareIndex(InferenceIndexConstants.LATEST_INDEX_NAME)
-                    .setSource(xContentBuilder)
+                IndexRequestBuilder indexRequestBuilder = prepareIndex(InferenceIndexConstants.LATEST_INDEX_NAME).setSource(xContentBuilder)
                     .setId(TrainedModelDefinitionDoc.docId(modelId, i));
 
                 bulkRequestBuilder.add(indexRequestBuilder);
@@ -413,8 +411,7 @@ public void testGetTrainedModelForInference() throws InterruptedException, IOExc
         for (int i = 0; i < docBuilders.size(); i++) {
             TrainedModelDefinitionDoc doc = docBuilders.get(i).build();
             try (XContentBuilder xContentBuilder = doc.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS)) {
-                IndexRequestBuilder indexRequestBuilder = client().prepareIndex(InferenceIndexConstants.LATEST_INDEX_NAME)
-                    .setSource(xContentBuilder)
+                IndexRequestBuilder indexRequestBuilder = prepareIndex(InferenceIndexConstants.LATEST_INDEX_NAME).setSource(xContentBuilder)
                     .setId(TrainedModelDefinitionDoc.docId(modelId, i));
 
                 bulkRequestBuilder.add(indexRequestBuilder);
diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/UnusedStatsRemoverIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/UnusedStatsRemoverIT.java
index 8a278681523fe..4c8382047e796 100644
--- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/UnusedStatsRemoverIT.java
+++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/UnusedStatsRemoverIT.java
@@ -65,7 +65,7 @@ public void createComponents() {
 
     public void testRemoveUnusedStats() throws Exception {
 
-        client().prepareIndex("foo").setId("some-empty-doc").setSource("{}", XContentType.JSON).get();
+        prepareIndex("foo").setId("some-empty-doc").setSource("{}", XContentType.JSON).get();
 
         PutDataFrameAnalyticsAction.Request request = new PutDataFrameAnalyticsAction.Request(
             new DataFrameAnalyticsConfig.Builder().setId("analytics-with-stats")
diff --git a/x-pack/plugin/ml/src/main/java/module-info.java b/x-pack/plugin/ml/src/main/java/module-info.java
index 52dee889d15fc..a73c9bdfa32b4 100644
--- a/x-pack/plugin/ml/src/main/java/module-info.java
+++ b/x-pack/plugin/ml/src/main/java/module-info.java
@@ -33,7 +33,6 @@
     provides org.elasticsearch.painless.spi.PainlessExtension with org.elasticsearch.xpack.ml.MachineLearningPainlessExtension;
     provides org.elasticsearch.xpack.autoscaling.AutoscalingExtension with org.elasticsearch.xpack.ml.autoscaling.MlAutoscalingExtension;
-    provides org.elasticsearch.features.FeatureSpecification with org.elasticsearch.xpack.ml.MlFeatures;
 
     exports org.elasticsearch.xpack.ml;
     exports org.elasticsearch.xpack.ml.action;
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java
index b4b8084b4b328..3320a51009257 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java
@@ -44,7 +44,6 @@
 import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.env.Environment;
-import org.elasticsearch.features.NodeFeature;
 import org.elasticsearch.index.analysis.CharFilterFactory;
 import org.elasticsearch.index.analysis.TokenizerFactory;
 import org.elasticsearch.index.query.QueryBuilder;
@@ -186,12 +185,12 @@
 import org.elasticsearch.xpack.core.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider;
 import org.elasticsearch.xpack.core.ml.dataframe.stats.AnalysisStatsNamedWriteablesProvider;
 import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider;
-import org.elasticsearch.xpack.core.ml.inference.MlLTRNamedXContentProvider;
 import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants;
 import org.elasticsearch.xpack.core.ml.job.config.JobTaskState;
 import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex;
 import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeTaskParams;
 import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeTaskState;
+import org.elasticsearch.xpack.core.ml.ltr.MlLTRNamedXContentProvider;
 import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
 import org.elasticsearch.xpack.core.template.TemplateUtils;
 import org.elasticsearch.xpack.ml.action.TransportAuditMlNotificationAction;
@@ -323,13 +322,14 @@
 import org.elasticsearch.xpack.ml.inference.deployment.DeploymentManager;
 import org.elasticsearch.xpack.ml.inference.ingest.InferenceProcessor;
 import org.elasticsearch.xpack.ml.inference.loadingservice.ModelLoadingService;
+import org.elasticsearch.xpack.ml.inference.ltr.InferenceRescorerFeature;
+import org.elasticsearch.xpack.ml.inference.ltr.LearnToRankRescorerBuilder;
+import org.elasticsearch.xpack.ml.inference.ltr.LearnToRankService;
 import org.elasticsearch.xpack.ml.inference.modelsize.MlModelSizeNamedXContentProvider;
 import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider;
 import org.elasticsearch.xpack.ml.inference.pytorch.process.BlackHolePyTorchProcess;
 import org.elasticsearch.xpack.ml.inference.pytorch.process.NativePyTorchProcessFactory;
 import org.elasticsearch.xpack.ml.inference.pytorch.process.PyTorchProcessFactory;
-import org.elasticsearch.xpack.ml.inference.rescorer.InferenceRescorerBuilder;
-import org.elasticsearch.xpack.ml.inference.rescorer.InferenceRescorerFeature;
 import org.elasticsearch.xpack.ml.job.JobManager;
 import org.elasticsearch.xpack.ml.job.JobManagerHolder;
 import org.elasticsearch.xpack.ml.job.NodeLoadDetector;
@@ -486,8 +486,6 @@ public class MachineLearning extends Plugin
 
     public static final String TRAINED_MODEL_CIRCUIT_BREAKER_NAME = "model_inference";
 
-    public static final NodeFeature STATE_RESET_FALLBACK_ON_DISABLED = new NodeFeature("ml.state_reset_fallback_on_disabled");
-
     private static final long DEFAULT_MODEL_CIRCUIT_BREAKER_LIMIT = (long) ((0.50) * JvmInfo.jvmInfo().getMem().getHeapMax().getBytes());
     private static final double DEFAULT_MODEL_CIRCUIT_BREAKER_OVERHEAD = 1.0D;
@@ -656,6 +654,23 @@ public void loadExtensions(ExtensionLoader loader) {
         Property.NodeScope
     );
 
+    // The next two settings currently only have an effect in serverless. They can be set as overrides to
+    // trigger a scale up of the ML tier so that it could accommodate the dummy entity in addition to
+    // whatever the standard autoscaling formula thinks is necessary.
+    public static final Setting<ByteSizeValue> DUMMY_ENTITY_MEMORY = Setting.memorySizeSetting(
+        "xpack.ml.dummy_entity_memory",
+        ByteSizeValue.ZERO,
+        Setting.Property.Dynamic,
+        Setting.Property.NodeScope
+    );
+    public static final Setting<Integer> DUMMY_ENTITY_PROCESSORS = Setting.intSetting(
+        "xpack.ml.dummy_entity_processors",
+        0,
+        0,
+        Setting.Property.Dynamic,
+        Setting.Property.NodeScope
+    );
+
     public static final Setting<TimeValue> PROCESS_CONNECT_TIMEOUT = Setting.timeSetting(
         "xpack.ml.process_connect_timeout",
         TimeValue.timeValueSeconds(10),
@@ -745,6 +760,7 @@ public void loadExtensions(ExtensionLoader loader) {
     private final SetOnce<MlLifeCycleService> mlLifeCycleService = new SetOnce<>();
     private final SetOnce<CircuitBreaker> inferenceModelBreaker = new SetOnce<>();
     private final SetOnce<ModelLoadingService> modelLoadingService = new SetOnce<>();
+    private final SetOnce<LearnToRankService> learnToRankService = new SetOnce<>();
     private final SetOnce<MlAutoscalingDeciderService> mlAutoscalingDeciderService = new SetOnce<>();
     private final SetOnce<DeploymentManager> deploymentManager = new SetOnce<>();
     private final SetOnce<TrainedModelAssignmentClusterService> trainedModelAllocationClusterServiceSetOnce = new SetOnce<>();
@@ -785,7 +801,9 @@ public List<Setting<?>> getSettings() {
             NIGHTLY_MAINTENANCE_REQUESTS_PER_SECOND,
             MachineLearningField.USE_AUTO_MACHINE_MEMORY_PERCENT,
             MAX_ML_NODE_SIZE,
-            DELAYED_DATA_CHECK_FREQ
+            DELAYED_DATA_CHECK_FREQ,
+            DUMMY_ENTITY_MEMORY,
+            DUMMY_ENTITY_PROCESSORS
         );
     }
 
@@ -870,9 +888,9 @@ public List<RescorerSpec<?>> getRescorers() {
         // Inference rescorer requires access to the model loading service
         return List.of(
             new RescorerSpec<>(
-                InferenceRescorerBuilder.NAME,
-                in -> new InferenceRescorerBuilder(in, modelLoadingService::get),
-                parser -> InferenceRescorerBuilder.fromXContent(parser, modelLoadingService::get)
+                LearnToRankRescorerBuilder.NAME,
+                in -> new LearnToRankRescorerBuilder(in, learnToRankService.get()),
+                parser -> LearnToRankRescorerBuilder.fromXContent(parser, learnToRankService.get())
             )
         );
     }
@@ -1100,6 +1118,11 @@ public Collection<?> createComponents(PluginServices services) {
             getLicenseState()
         );
         this.modelLoadingService.set(modelLoadingService);
+
+        this.learnToRankService.set(
+            new LearnToRankService(modelLoadingService, trainedModelProvider, services.scriptService(), services.xContentRegistry())
+        );
+
         this.deploymentManager.set(
             new DeploymentManager(client, xContentRegistry, threadPool, pyTorchProcessFactory, getMaxModelDeploymentsPerNode())
        );
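The comment on the two new settings explains their purpose; reading them back follows the usual dynamic-setting pattern (the `clusterSettings` handle here is assumed for illustration):

    // zero means "no dummy entity"; non-zero values ask autoscaling to reserve extra ML capacity
    ByteSizeValue dummyMemory = clusterSettings.get(MachineLearning.DUMMY_ENTITY_MEMORY);
    int dummyProcessors = clusterSettings.get(MachineLearning.DUMMY_ENTITY_PROCESSORS);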
job (api)", Instant.now()); jobTask.updatePersistentTaskState(taskState, ActionListener.wrap(task -> { // we need to fork because we are now on a network threadpool and closeJob method may take a while to complete: threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(new AbstractRunnable() { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java index 11ad47c00ebd1..5aa85a6331c22 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java @@ -13,9 +13,9 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.bulk.BulkItemResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -116,7 +116,7 @@ protected void doExecute(Task task, DeleteForecastAction.Request request, Action .query(query); SearchRequest searchRequest = new SearchRequest(AnomalyDetectorsIndex.jobResultsAliasedName(jobId)).source(source); - executeAsyncWithOrigin(client, ML_ORIGIN, SearchAction.INSTANCE, searchRequest, forecastStatsHandler); + executeAsyncWithOrigin(client, ML_ORIGIN, TransportSearchAction.TYPE, searchRequest, forecastStatsHandler); } static List extractForecastIds(SearchHit[] forecastsToDelete, JobState jobState, String jobId) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportEvaluateDataFrameAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportEvaluateDataFrameAction.java index 8fb4be69b6764..4336489ce5d24 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportEvaluateDataFrameAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportEvaluateDataFrameAction.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.ml.action; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.internal.Client; @@ -146,7 +146,7 @@ private TypedChainTaskExecutor.ChainTask nextTask() { SearchRequest searchRequest = new SearchRequest(request.getIndices()).source(searchSourceBuilder); useSecondaryAuthIfAvailable( securityContext, - () -> client.execute(SearchAction.INSTANCE, searchRequest, ActionListener.wrap(searchResponse -> { + () -> client.execute(TransportSearchAction.TYPE, searchRequest, ActionListener.wrap(searchResponse -> { evaluation.process(searchResponse); if (evaluation.hasAllResults() == false) { add(nextTask()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java 
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java
index c0c4a5cb5b3b8..38da82124e77f 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java
@@ -11,10 +11,10 @@
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.FailedNodeException;
 import org.elasticsearch.action.TaskOperationFailure;
-import org.elasticsearch.action.search.MultiSearchAction;
 import org.elasticsearch.action.search.MultiSearchRequest;
 import org.elasticsearch.action.search.MultiSearchResponse;
 import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.TransportMultiSearchAction;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.action.support.tasks.TransportTasksAction;
@@ -267,7 +267,7 @@ private void searchStats(DataFrameAnalyticsConfig config, TaskId parentTaskId, A
         executeAsyncWithOrigin(
             client,
             ML_ORIGIN,
-            MultiSearchAction.INSTANCE,
+            TransportMultiSearchAction.TYPE,
             multiSearchRequest,
             ActionListener.wrap(multiSearchResponse -> {
                 MultiSearchResponse.Item[] itemResponses = multiSearchResponse.getResponses();
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsAction.java
index 1c535326b3296..3c9ba3700dc8e 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsAction.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsAction.java
@@ -11,10 +11,11 @@
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters;
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
 import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction;
-import org.elasticsearch.action.search.SearchAction;
 import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.TransportSearchAction;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.HandledTransportAction;
 import org.elasticsearch.client.internal.Client;
@@ -348,7 +349,7 @@ private void definitionLengths(List<String> modelIds, TaskId parentTaskId, Actio
             .request();
         searchRequest.setParentTask(parentTaskId);
 
-        executeAsyncWithOrigin(client, ML_ORIGIN, SearchAction.INSTANCE, searchRequest, ActionListener.wrap(searchResponse -> {
+        executeAsyncWithOrigin(client, ML_ORIGIN, TransportSearchAction.TYPE, searchRequest, ActionListener.wrap(searchResponse -> {
             Map<String, Long> totalDefinitionLengthByModelId = new HashMap<>();
             for (SearchHit hit : searchResponse.getHits().getHits()) {
                 DocumentField modelIdField = hit.field(TrainedModelConfig.MODEL_ID.getPreferredName());
@@ -389,7 +390,8 @@ static Map inferenceIngestStatsByModelId(
     static NodesStatsRequest nodeStatsRequest(ClusterState state, TaskId parentTaskId) {
         String[] ingestNodes = state.nodes().getIngestNodes().keySet().toArray(String[]::new);
         NodesStatsRequest nodesStatsRequest = new NodesStatsRequest(ingestNodes).clear()
-            .addMetric(NodesStatsRequest.Metric.INGEST.metricName());
+            .addMetric(NodesStatsRequestParameters.Metric.INGEST.metricName());
+        nodesStatsRequest.setIncludeShardsStats(false);
         nodesStatsRequest.setParentTask(parentTaskId);
         return nodesStatsRequest;
     }
@@ -491,6 +493,4 @@ IngestStats.Stats build() {
         }
     }
 
-    private record ModelAndDeployment(String modelId, String deploymentId) {}
-
 }
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInferTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInferTrainedModelDeploymentAction.java
index 5217d7ed1c181..65d630ebf1d6e 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInferTrainedModelDeploymentAction.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInferTrainedModelDeploymentAction.java
@@ -107,6 +107,7 @@ protected void taskOperation(
                 request.getUpdate(),
                 request.isHighPriority(),
                 request.getInferenceTimeout(),
+                request.getPrefixType(),
                 actionTask,
                 orderedListener(count, results, slot++, nlpInputs.size(), listener)
             );
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java
index d414a013a0e8c..3cf0189c28df2 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java
@@ -286,6 +286,7 @@ private void inferAgainstAllocatedModel(
             );
         }
         deploymentRequest.setHighPriority(request.isHighPriority());
+        deploymentRequest.setPrefixType(request.getPrefixType());
         deploymentRequest.setNodes(node.v1());
         deploymentRequest.setParentTask(parentTaskId);
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlInfoAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlInfoAction.java
index cd7d4258855eb..beab594e199ff 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlInfoAction.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlInfoAction.java
@@ -155,7 +155,7 @@ private Map<String, Object> limits() {
                 clusterSettings.get(MachineLearning.ALLOCATED_PROCESSORS_SCALE)
             );
             if (singleNodeProcessors.count() > 0) {
-                limits.put("max_single_ml_node_processors", singleNodeProcessors.roundDown());
+                limits.put("max_single_ml_node_processors", singleNodeProcessors.roundUp());
             }
             Processors totalMlProcessors = MlProcessors.getTotalMlNodeProcessors(
                 nodes,
@@ -163,8 +163,8 @@ private Map<String, Object> limits() {
             );
             if (totalMlProcessors.count() > 0) {
                 int potentialExtraProcessors = Math.max(0, clusterSettings.get(MachineLearning.MAX_LAZY_ML_NODES) - mlNodes.size())
-                    * singleNodeProcessors.roundDown();
-                limits.put("total_ml_processors", totalMlProcessors.roundDown() + potentialExtraProcessors);
+                    * singleNodeProcessors.roundUp();
+                limits.put("total_ml_processors", totalMlProcessors.roundUp() + potentialExtraProcessors);
             }
         }
         return limits;
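The `roundDown()` to `roundUp()` switch only matters for fractional processor counts; assuming the `Processors.of` factory, the difference is:

    Processors p = Processors.of(2.5);
    // p.roundDown() == 2  (previously reported, under-counting fractional CPUs)
    // p.roundUp()   == 3  (now surfaced in the ml info limits)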
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java
index 5d4c809c9c83c..5dd671962569a 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java
@@ -132,6 +132,10 @@ private void previewDatafeed(
         PreviewDatafeedAction.Request request,
         ActionListener<PreviewDatafeedAction.Response> listener
     ) {
+        final QueryBuilder extraFilters = request.getStartTime().isPresent() || request.getEndTime().isPresent()
+            ? null
+            : QueryBuilders.boolQuery().mustNot(QueryBuilders.termsQuery(DataTierFieldMapper.NAME, "data_frozen", "data_cold"));
+
         DatafeedConfig.Builder previewDatafeedBuilder = buildPreviewDatafeed(datafeedConfig);
         useSecondaryAuthIfAvailable(securityContext, () -> {
             previewDatafeedBuilder.setHeaders(
@@ -144,6 +148,7 @@ private void previewDatafeed(
                 DataExtractorFactory.create(
                     new ParentTaskAssigningClient(client, parentTaskId),
                     previewDatafeedConfig,
+                    extraFilters,
                     job,
                     xContentRegistry,
                     // Fake DatafeedTimingStatsReporter that does not have access to results index
@@ -158,9 +163,7 @@ private void previewDatafeed(
                     final long start = request.getStartTime().orElse(0);
                     final long end = request.getEndTime()
                         .orElse(isDateNanos ? DateUtils.MAX_NANOSECOND_INSTANT.toEpochMilli() : Long.MAX_VALUE);
-                    DataExtractor dataExtractor = request.getStartTime().isPresent() || request.getEndTime().isPresent()
-                        ? dataExtractorFactory.newExtractor(start, end)
-                        : dataExtractorFactory.newExtractor(start, end, hotOnly);
+                    DataExtractor dataExtractor = dataExtractorFactory.newExtractor(start, end);
                     threadPool.executor(UTILITY_THREAD_POOL_NAME).execute(() -> previewDatafeed(dataExtractor, l));
                 })
             )
@@ -226,7 +229,7 @@ static void previewDatafeed(DataExtractor dataExtractor, ActionListener
             if (searchResponse.getHits().getTotalHits().value > 0) {
@@ -788,7 +789,8 @@ private void executeTask(DataFrameAnalyticsTask task) {
         DataFrameAnalyticsTaskState startedState = new DataFrameAnalyticsTaskState(
             DataFrameAnalyticsState.STARTED,
             task.getAllocationId(),
-            null
+            null,
+            Instant.now()
         );
         task.updatePersistentTaskState(
             startedState,
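The preview-datafeed hunks above drop the special hot-only extractor: when the caller gives no time range, an extra filter is passed instead, which skips cold and frozen tiers. `DataTierFieldMapper.NAME` is the `_tier` metadata field, so the clause amounts to:

    // only built when neither start nor end time is present on the request
    QueryBuilder tierFilter = QueryBuilders.boolQuery()
        .mustNot(QueryBuilders.termsQuery("_tier", "data_frozen", "data_cold"));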
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregator.java @@ -91,7 +91,8 @@ public InternalAggregation doReduce(Aggregations aggregations, AggregationReduce ChangeType changeType = changePValue(bucketValues, candidatePoints, P_VALUE_THRESHOLD); if (changeType.pValue() > P_VALUE_THRESHOLD) { try { - changeType = maxDeviationKdePValue(bucketValues, P_VALUE_THRESHOLD); + SpikeAndDipDetector detect = new SpikeAndDipDetector(bucketValues.getValues()); + changeType = detect.at(P_VALUE_THRESHOLD); } catch (NotStrictlyPositiveException nspe) { logger.debug("failure calculating spikes", nspe); } @@ -106,42 +107,6 @@ public InternalAggregation doReduce(Aggregations aggregations, AggregationReduce return new InternalChangePointAggregation(name(), metadata(), changePointBucket, changeType); } - static ChangeType maxDeviationKdePValue(MlAggsHelper.DoubleBucketValues bucketValues, double pValueThreshold) { - double[] timeWindow = bucketValues.getValues(); - double variance = RunningStats.from(timeWindow, i -> 1.0).variance(); - if (variance == 0.0) { - return new ChangeType.Stationary(); - } - int minIndex = 0; - double minValue = Double.MAX_VALUE; - int maxIndex = 0; - double maxValue = -Double.MAX_VALUE; - for (int i = 0; i < timeWindow.length; i++) { - if (timeWindow[i] < minValue) { - minValue = timeWindow[i]; - minIndex = i; - } - if (timeWindow[i] > maxValue) { - maxValue = timeWindow[i]; - maxIndex = i; - } else if (timeWindow[i] == maxValue) { - maxIndex = i; - } - } - KDE dist = new KDE(timeWindow, minIndex, maxIndex); - KDE.ValueAndMagnitude cdf = dist.cdf(minValue); - KDE.ValueAndMagnitude sf = dist.sf(maxValue); - - if (cdf.isMoreSignificant(sf, timeWindow.length) && cdf.significance(timeWindow.length) * 2 < pValueThreshold) { - return new ChangeType.Dip(cdf.significance(timeWindow.length) * 2, bucketValues.getBucketIndex(minIndex)); - } - if (sf.significance(timeWindow.length) * 2 < pValueThreshold) { - return new ChangeType.Spike(sf.significance(timeWindow.length) * 2, bucketValues.getBucketIndex(maxIndex)); - } - return new ChangeType.Stationary(); - - } - static ChangeType changePValue( MlAggsHelper.DoubleBucketValues bucketValues, Tuple candidateChangePointsAndStep, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/KDE.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/KDE.java index 5937f8cca8727..1a0342981c1d3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/KDE.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/KDE.java @@ -7,16 +7,10 @@ package org.elasticsearch.xpack.ml.aggs.changepoint; -import org.apache.commons.math3.distribution.NormalDistribution; import org.apache.commons.math3.special.Erf; import org.apache.commons.math3.util.FastMath; -import org.elasticsearch.common.Randomness; import java.util.Arrays; -import java.util.List; -import java.util.stream.Collectors; -import java.util.stream.DoubleStream; -import java.util.stream.IntStream; import static org.apache.commons.math3.stat.StatUtils.variance; @@ -27,44 +21,6 @@ final class KDE { private static final double SQRT2 = FastMath.sqrt(2.0); private static final double ESTIMATOR_EPS = 1e-10; - /** - * Fit KDE choosing bandwidth by maximum likelihood cross validation. 
- * @param orderedValues the provided values, sorted - * @return the maximum likelihood bandwidth - */ - private static double maxLikelihoodBandwidth(double[] orderedValues) { - int step = Math.max((int) (orderedValues.length / 10.0 + 0.5), 2); - IntStream.Builder trainingIndicesBuilder = IntStream.builder(); - IntStream.Builder testIndicesBuilder = IntStream.builder(); - for (int i = 0; i < orderedValues.length; i += step) { - int adjStep = Math.min(i + step, orderedValues.length) - i; - List indices = IntStream.range(i, i + adjStep).boxed().collect(Collectors.toList()); - Randomness.shuffle(indices); - int n = Math.min(adjStep / 2, 4); - indices.stream().limit(n).forEach(trainingIndicesBuilder::add); - indices.stream().skip(n).forEach(testIndicesBuilder::add); - } - int[] trainingIndices = trainingIndicesBuilder.build().toArray(); - int[] testIndices = testIndicesBuilder.build().toArray(); - Arrays.sort(trainingIndices); - Arrays.sort(testIndices); - double[] xTrain = IntStream.of(trainingIndices).mapToDouble(i -> orderedValues[i]).toArray(); - double maxLogLikelihood = -Double.MAX_VALUE; - double result = 0; - for (int i = 0; i < 20; ++i) { - double bandwidth = 0.02 * (i + 1) * (orderedValues[orderedValues.length - 1] - orderedValues[0]); - double logBandwidth = Math.log(bandwidth); - double logLikelihood = IntStream.of(testIndices) - .mapToDouble(j -> logLikelihood(xTrain, bandwidth, logBandwidth, orderedValues[j])) - .sum(); - if (logLikelihood >= maxLogLikelihood) { - maxLogLikelihood = logLikelihood; - result = bandwidth; - } - } - return result; - } - private static int lowerBound(double[] xs, double x) { int retVal = Arrays.binarySearch(xs, x); if (retVal < 0) { @@ -73,84 +29,96 @@ private static int lowerBound(double[] xs, double x) { return retVal; } - private static double logLikelihood(double[] xs, double bandwidth, double logBandwidth, double x) { - int a = lowerBound(xs, x - 3.0 * bandwidth); - int b = lowerBound(xs, x + 3.0 * bandwidth); - double[] logPdfs = IntStream.range(Math.max(Math.min(a, b - 1), 0), Math.min(Math.max(b, a + 1), xs.length)).mapToDouble(i -> { - double y = (x - xs[i]) / bandwidth; - return -0.5 * y * y - logBandwidth; - }).toArray(); - double maxLogPdf = DoubleStream.of(logPdfs).max().orElseThrow(); - double result = DoubleStream.of(logPdfs).map(logPdf -> Math.exp(logPdf - maxLogPdf)).sum(); - return Math.log(result) + maxLogPdf; + private interface IntervalComplementFunction { + double value(int a, int b); + } + + private interface KernelFunction { + double value(double centre, double x); + } + + private ValueAndMagnitude evaluate(IntervalComplementFunction complement, KernelFunction kernel, double x) { + if (bandwidth == 0.0 || orderedValues.length == 0) { + return new ValueAndMagnitude(1.0, 0.0); + } + int a = Math.min(lowerBound(orderedValues, x - 3.0 * bandwidth), orderedValues.length - 1); + int b = Math.max(lowerBound(orderedValues, x + 3.0 * bandwidth), a + 1); + // Account for all the values outside the interval [a, b) using the kernel complement. 
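+ // For the cdf the complement is (a, b) -> a: the a kernels centred more than 3 bandwidths below x each contribute a cdf of ~1 at x, while kernels above the window contribute ~0 and are dropped. + // For the sf the complement is (a, b) -> n - b: the kernels centred more than 3 bandwidths above x each contribute an sf of ~1, and those below contribute ~0.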
+ double kdfx = complement.value(a, b); + double diff = Double.MAX_VALUE; + for (int i = a; i < b; i++) { + double centre = orderedValues[i]; + kdfx += kernel.value(centre, x); + diff = Math.min(Math.abs(centre - x), diff); + } + return new ValueAndMagnitude(kdfx / orderedValues.length, diff); } private final double[] orderedValues; private final double bandwidth; - KDE(double[] values, int minIndex, int maxIndex) { - int excluded = (int) (0.025 * ((double) values.length) + 0.5); - double[] orderedValues = new double[values.length]; - int j = 0; - for (int i = 0; i < values.length; i++) { - if ((i >= minIndex - excluded && i <= minIndex + excluded) || (i >= maxIndex - excluded && i <= maxIndex + excluded)) { - continue; + // This uses Silverman's rule of thumb for the bandwidth and chooses it to be proportional + // to the standard deviation divided by the 5th root of the sample count. The constant of + // proportionality is supplied as the smoothing parameter. + // + // A value of 1.06 is recommended by Silverman, which is optimal for Gaussian data with + // a Gaussian kernel. This tends to oversmooth from a minimum (M)ISE perspective on many + // distributions. However, we actually prefer oversmoothing for our use case. + // + // Note that orderedValues must be ordered ascending; the array is stored by reference, not copied. + KDE(double[] orderedValues, double smoothing) { + + for (int i = 1; i < orderedValues.length; i++) { + if (orderedValues[i - 1] > orderedValues[i]) { + throw new IllegalArgumentException("Values must be ordered ascending, got [" + Arrays.toString(orderedValues) + "]."); } - orderedValues[j++] = values[i]; } - this.orderedValues = Arrays.copyOf(orderedValues, j); - Arrays.sort(this.orderedValues); - double var = variance(this.orderedValues); - this.bandwidth = var > 0 ? maxLikelihoodBandwidth(this.orderedValues) : 0.01 * (values[maxIndex] - values[minIndex]); + + this.orderedValues = orderedValues; + bandwidth = smoothing * Math.pow(orderedValues.length, -0.2) * Math.sqrt(variance(orderedValues)); } ValueAndMagnitude cdf(double x) { - int a = lowerBound(orderedValues, x - 4.0 * bandwidth); - int b = lowerBound(orderedValues, x + 4.0 * bandwidth); - double cdf = 0.0; - double diff = Double.MAX_VALUE; - for (int i = a; i < Math.min(Math.max(b, a + 1), orderedValues.length); i++) { - cdf += new NormalDistribution(orderedValues[i], bandwidth).cumulativeProbability(x); - diff = Math.min(Math.abs(orderedValues[i] - x), diff); - } - cdf /= orderedValues.length; - return new ValueAndMagnitude(cdf, diff); + return evaluate((a, b) -> a, (centre, x_) -> normCdf(centre, x_), x); } ValueAndMagnitude sf(double x) { - int a = lowerBound(orderedValues, x - 4.0 * bandwidth); - int b = lowerBound(orderedValues, x + 4.0 * bandwidth); - double sf = 0.0; - double diff = Double.MAX_VALUE; - for (int i = Math.max(Math.min(a, b - 1), 0); i < b; i++) { - sf += normSf(orderedValues[i], bandwidth, x); - diff = Math.min(Math.abs(orderedValues[i] - x), diff); + return evaluate((a, b) -> orderedValues.length - b, (centre, x_) -> normSf(centre, x_), x); + } + + double normCdf(double mean, double x) { + final double dev = x - mean; + if (Math.abs(dev) > 40.0 * bandwidth) { + return dev > 0 ?
1.0d : 0.0d; } - sf /= orderedValues.length; - return new ValueAndMagnitude(sf, diff); + // We use the fact that erf(-x) = -erf(x) and substitute for erfc(x) = 1 - erf(x) + return 0.5 * Erf.erfc(-dev / (bandwidth * SQRT2)); } - static double normSf(double mean, double standardDeviation, double x) { + double normSf(double mean, double x) { final double dev = x - mean; - if (Math.abs(dev) > 40 * standardDeviation) { + if (Math.abs(dev) > 40.0 * bandwidth) { return dev > 0 ? 0.0d : 1.0d; } - return 0.5 * Erf.erfc(dev / (standardDeviation * SQRT2)); + return 0.5 * Erf.erfc(dev / (bandwidth * SQRT2)); + } + + int size() { + return orderedValues.length; + } + + double[] data() { + return orderedValues; } record ValueAndMagnitude(double value, double magnitude) { - boolean isMoreSignificant(ValueAndMagnitude o, int numberOfTestedValues) { - int c = Double.compare(significance(numberOfTestedValues), o.significance(numberOfTestedValues)); - if (c != 0) { - return c < 0; - } else { - return magnitude > o.magnitude; - } + boolean isMoreSignificant(ValueAndMagnitude o) { + int c = Double.compare(value, o.value()); + return c != 0 ? (c < 0) : (magnitude > o.magnitude); } - double significance(int numberOfTestedValues) { + double pValue(int numberOfTestedValues) { return value > ESTIMATOR_EPS ? 1 - Math.pow(1 - value, numberOfTestedValues) : numberOfTestedValues * value; } } - } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/SpikeAndDipDetector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/SpikeAndDipDetector.java new file mode 100644 index 0000000000000..8bbd793637ab3 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/SpikeAndDipDetector.java @@ -0,0 +1,178 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.aggs.changepoint; + +import java.util.Arrays; + +/** + * Detects spikes and dips in a time series. + */ +final class SpikeAndDipDetector { + + private record SpikeOrDip(int index, int startExcluded, int endExcluded) { + double value(double[] values) { + return values[index]; + } + + boolean includes(int i) { + return i >= startExcluded && i < endExcluded; + } + } + + private int argmax(double[] values, int start, int end, boolean negate) { + int argmax = start; + double max = negate ? -values[start] : values[start]; + for (int i = start + 1; i < end; i++) { + double value = negate ? -values[i] : values[i]; + if (value > max) { + argmax = i; + max = value; + } + } + return argmax; + } + + private double sum(double[] values, int start, int end, boolean negate) { + double sum = 0.0; + for (int i = start; i < end; i++) { + sum += values[i]; + } + return negate ? -sum : sum; + } + + private SpikeOrDip findSpikeOrDip(double[] values, int extent, boolean negate) { + + extent = Math.min(extent, values.length - 1); + + final int argmax = argmax(values, 0, values.length, negate); + + // Find the maximum average interval of width extent which includes argmax.
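+ // Only starts in [argmax + 1 - extent, argmax] are considered, so the window always contains argmax. + // Every window has the same width, so comparing (possibly negated) window sums is equivalent to comparing window means.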
+ int maxStart = Math.max(0, argmax + 1 - extent); + int maxEnd = Math.min(maxStart + extent, values.length); + double maxSum = sum(values, maxStart, maxEnd, negate); + for (int start = maxStart + 1; start <= argmax; start++) { + if (start + extent >= values.length) { + break; + } + double average = sum(values, start, start + extent, negate); + if (average > maxSum) { + maxStart = start; + maxSum = average; + } + } + + return new SpikeOrDip(argmax, maxStart, maxStart + extent); + } + + private interface ExcludedPredicate { + boolean exclude(int index); + } + + private double[] removeIf(ExcludedPredicate should, double[] values) { + int numKept = 0; + for (int i = 0; i < values.length; i++) { + if (should.exclude(i) == false) { + numKept++; + } + } + double[] newValues = new double[numKept]; + int j = 0; + for (int i = 0; i < values.length; i++) { + if (should.exclude(i) == false) { + newValues[j++] = values[i]; + } + } + return newValues; + } + + private final int numValues; + private final int dipIndex; + private final int spikeIndex; + private final double dipValue; + private final double spikeValue; + private final KDE spikeTestKDE; + private final KDE dipTestKDE; + + SpikeAndDipDetector(double[] values) { + + numValues = values.length; + + if (values.length < 4) { + dipIndex = -1; + spikeIndex = -1; + dipValue = Double.NaN; + spikeValue = Double.NaN; + spikeTestKDE = null; + dipTestKDE = null; + return; + } + + // Usually roughly 10% of values. + int spikeLength = Math.max((int) (0.1 * values.length + 0.5), 2) - 1; + + SpikeOrDip dip = findSpikeOrDip(values, spikeLength, true); + SpikeOrDip spike = findSpikeOrDip(values, spikeLength, false); + + dipIndex = dip.index(); + spikeIndex = spike.index(); + dipValue = dip.value(values); + spikeValue = spike.value(values); + + double[] dipKDEValues = removeIf((i) -> (dip.includes(i) || i == spike.index()), values); + double[] spikeKDEValues = removeIf((i) -> (spike.includes(i) || i == dip.index()), values); + Arrays.sort(dipKDEValues); + Arrays.sort(spikeKDEValues); + + // We purposely over smooth to only surface visually significant spikes and dips. + dipTestKDE = new KDE(dipKDEValues, 1.36); + spikeTestKDE = new KDE(spikeKDEValues, 1.36); + } + + ChangeType at(double pValueThreshold) { + if (dipIndex == -1 || spikeIndex == -1) { + return new ChangeType.Indeterminable( + "not enough buckets to check for dip or spike. 
Requires at least [4]; found [" + numValues + "]" + ); + } + + KDE.ValueAndMagnitude dipLeftTailTest = dipTestKDE.cdf(dipValue); + KDE.ValueAndMagnitude spikeRightTailTest = spikeTestKDE.sf(spikeValue); + double dipPValue = dipLeftTailTest.pValue(numValues); + double spikePValue = spikeRightTailTest.pValue(numValues); + + if (dipPValue < pValueThreshold && spikePValue < pValueThreshold) { + if (dipLeftTailTest.isMoreSignificant(spikeRightTailTest)) { + return new ChangeType.Dip(dipPValue, dipIndex); + } + return new ChangeType.Spike(spikePValue, spikeIndex); + } + if (dipPValue < pValueThreshold) { + return new ChangeType.Dip(dipPValue, dipIndex); + } + if (spikePValue < pValueThreshold) { + return new ChangeType.Spike(spikePValue, spikeIndex); + } + return new ChangeType.Stationary(); + } + + double dipValue() { + return dipValue; + } + + double spikeValue() { + return spikeValue; + } + + KDE spikeTestKDE() { + return spikeTestKDE; + } + + KDE dipTestKDE() { + return dipTestKDE; + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingResourceTracker.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingResourceTracker.java index ac6f3914b8f40..ad5a07387bc74 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingResourceTracker.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingResourceTracker.java @@ -41,6 +41,8 @@ import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.core.Tuple.tuple; +import static org.elasticsearch.xpack.ml.MachineLearning.DUMMY_ENTITY_MEMORY; +import static org.elasticsearch.xpack.ml.MachineLearning.DUMMY_ENTITY_PROCESSORS; import static org.elasticsearch.xpack.ml.MachineLearning.MAX_OPEN_JOBS_PER_NODE; import static org.elasticsearch.xpack.ml.job.JobNodeSelector.AWAITING_LAZY_ASSIGNMENT; @@ -58,7 +60,13 @@ static MlJobRequirements of(long memory, int processors, int jobs) { static MlJobRequirements of(long memory, int processors) { return new MlJobRequirements(memory, processors, 1); } - }; + } + + record MlDummyAutoscalingEntity(long memory, int processors) { + static MlDummyAutoscalingEntity of(long memory, int processors) { + return new MlDummyAutoscalingEntity(memory, processors); + } + } private MlAutoscalingResourceTracker() {} @@ -82,9 +90,15 @@ public static void getMlAutoscalingStats( : 0L; int processorsAvailableFirstNode = (firstMlNode != null) ?
MlProcessors.get(clusterState.nodes().get(firstMlNode), clusterSettings.get(MachineLearning.ALLOCATED_PROCESSORS_SCALE)) - .roundDown() + .roundUp() : 0; + MlDummyAutoscalingEntity mlDummyAutoscalingEntity = new MlDummyAutoscalingEntity( + // Treat a ByteSizeValue of -1 as 0, since 0 is the default dummy entity size + Math.max(0L, DUMMY_ENTITY_MEMORY.get(settings).getBytes()), + DUMMY_ENTITY_PROCESSORS.get(settings) + ); + // TODO: MAX_LOW_PRIORITY_MODELS_PER_NODE not checked yet int maxOpenJobsPerNode = MAX_OPEN_JOBS_PER_NODE.get(settings); @@ -95,6 +109,7 @@ public static void getMlAutoscalingStats( modelMemoryAvailableFirstNode, processorsAvailableFirstNode, maxOpenJobsPerNode, + mlDummyAutoscalingEntity, listener ); } @@ -106,6 +121,7 @@ static void getMemoryAndProcessors( long perNodeAvailableModelMemoryInBytes, int perNodeAvailableProcessors, int maxOpenJobsPerNode, + MlDummyAutoscalingEntity dummyAutoscalingEntity, ActionListener listener ) { Map> perNodeModelMemoryInBytes = new HashMap<>(); @@ -262,6 +278,23 @@ static void getMemoryAndProcessors( minNodes = Math.min(3, Math.max(minNodes, numberOfAllocations)); } + // dummy autoscaling entity + if (dummyEntityFitsOnLeastLoadedNode( + perNodeModelMemoryInBytes, + perNodeAvailableModelMemoryInBytes, + perNodeAvailableProcessors, + dummyAutoscalingEntity + ) == false) { + logger.info( + "Scaling up due to dummy entity: dummyEntityMemory: [{}], dummyEntityProcessors: [{}]", + dummyAutoscalingEntity.memory, + dummyAutoscalingEntity.processors + ); + + modelMemoryBytesSum += dummyAutoscalingEntity.memory; + processorsSum += dummyAutoscalingEntity.processors; + } + // check for downscaling long removeNodeMemoryInBytes = 0; @@ -282,7 +315,8 @@ static void getMemoryAndProcessors( perNodeModelMemoryInBytes, perNodeAvailableModelMemoryInBytes, perNodeAvailableProcessors, - maxOpenJobsPerNode + maxOpenJobsPerNode, + dummyAutoscalingEntity ))) { removeNodeMemoryInBytes = perNodeMemoryInBytes; } @@ -304,6 +338,73 @@ static void getMemoryAndProcessors( ); } + /** + * Check if the dummy autoscaling entity task can be added by placing + * the task on the least loaded node. + * + * If there exists a node that can accommodate the dummy entity then return true (nothing to do), + * else return false, in which case the caller adds the dummy entity's memory and processor + * requirements to the scale-up totals. + * + * We perform the calculation by identifying the least loaded node in terms of memory + * and determining if the addition of the dummy entity's memory and processor requirements could + * be accommodated on it. + * + * If the calculation returns false the caller treats the dummy entity like a single trained model + * job that is already assigned, i.e. it increments modelMemoryBytesSum and processorsSum appropriately. + * + * @param perNodeJobRequirements per-node lists of job requirements + * @param perNodeMemoryInBytes total model memory available on every node + * @param perNodeProcessors total processors on every node + * @param dummyAutoscalingEntity "dummy" entity requirements used to potentially trigger a scaling event + * @return true if the dummy entity can be accommodated, false if not + */ + static boolean dummyEntityFitsOnLeastLoadedNode( + Map> perNodeJobRequirements, // total up requirements...
+ long perNodeMemoryInBytes, + int perNodeProcessors, + MlDummyAutoscalingEntity dummyAutoscalingEntity + ) { + + if (dummyAutoscalingEntity.processors == 0 && dummyAutoscalingEntity.memory == 0L) { + return true; + } + + if (perNodeJobRequirements.size() < 1) { + return false; + } + + // Note: we check least loaded based _only_ on memory... + Optional leastLoadedNodeRequirements = perNodeJobRequirements.values() + .stream() + .map( + value -> value.stream() + .reduce( + MlJobRequirements.of(0L, 0, 0), + (subtotal, element) -> MlJobRequirements.of( + subtotal.memory + element.memory, + subtotal.processors + element.processors, + subtotal.jobs + element.jobs + ) + ) + ) + .min(Comparator.comparingLong(value -> value.memory)); + + assert (leastLoadedNodeRequirements.isPresent()); + assert leastLoadedNodeRequirements.get().memory >= 0L; + assert leastLoadedNodeRequirements.get().processors >= 0; + + // Check if the dummy entity could be accommodated + if (leastLoadedNodeRequirements.get().memory + dummyAutoscalingEntity.memory > perNodeMemoryInBytes) { + return false; + } + + if (leastLoadedNodeRequirements.get().processors + dummyAutoscalingEntity.processors > perNodeProcessors) { + return false; + } + + return true; + } + /** * Return some autoscaling stats that tell the autoscaler not to change anything, but without making it think an error has occurred. */ @@ -340,7 +441,8 @@ static boolean checkIfOneNodeCouldBeRemoved( Map> perNodeJobRequirements, long perNodeMemoryInBytes, int perNodeProcessors, - int maxOpenJobsPerNode + int maxOpenJobsPerNode, + MlDummyAutoscalingEntity dummyAutoscalingEntity ) { if (perNodeJobRequirements.size() <= 1) { return false; @@ -378,6 +480,10 @@ static boolean checkIfOneNodeCouldBeRemoved( String candidateNode = leastLoadedNodeAndMemoryUsage.get().getKey(); List candidateJobRequirements = perNodeJobRequirements.get(candidateNode); + if (dummyAutoscalingEntity.memory > 0L || dummyAutoscalingEntity.processors > 0) { + candidateJobRequirements = new ArrayList<>(candidateJobRequirements); + candidateJobRequirements.add(MlJobRequirements.of(dummyAutoscalingEntity.memory, dummyAutoscalingEntity.processors)); + } perNodeMlJobRequirementSum.remove(candidateNode); // if all jobs fit on other nodes, we can scale down one node diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedConfigAutoUpdater.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedConfigAutoUpdater.java index 138eff9f49d01..330327dc31a46 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedConfigAutoUpdater.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedConfigAutoUpdater.java @@ -74,7 +74,7 @@ public String getName() { @Override public void runUpdate() { - PlainActionFuture> getdatafeeds = PlainActionFuture.newFuture(); + PlainActionFuture> getdatafeeds = new PlainActionFuture<>(); provider.expandDatafeedConfigs("_all", true, null, getdatafeeds); List datafeedConfigBuilders = getdatafeeds.actionGet(); List updates = datafeedConfigBuilders.stream() @@ -96,7 +96,7 @@ public void runUpdate() { List failures = new ArrayList<>(); for (DatafeedUpdate update : updates) { - PlainActionFuture updateDatafeeds = PlainActionFuture.newFuture(); + PlainActionFuture updateDatafeeds = new PlainActionFuture<>(); provider.updateDatefeedConfig( update.getId(), update, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java index bdc143c7dde4c..999d85b6dd549 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java @@ -10,7 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; @@ -130,9 +130,9 @@ public void putDatafeed( ActionListener getRollupIndexCapsActionHandler = ActionListener.wrap(response -> { if (response.getJobs().isEmpty()) { // This means no rollup indexes are in the config - indicesPrivilegesBuilder.privileges(SearchAction.NAME); + indicesPrivilegesBuilder.privileges(TransportSearchAction.TYPE.name()); } else { - indicesPrivilegesBuilder.privileges(SearchAction.NAME, RollupSearchAction.NAME); + indicesPrivilegesBuilder.privileges(TransportSearchAction.TYPE.name(), RollupSearchAction.NAME); } if (indices.length == 0) { privResponseListener.onResponse(new HasPrivilegesResponse()); @@ -142,7 +142,7 @@ public void putDatafeed( } }, e -> { if (ExceptionsHelper.unwrapCause(e) instanceof IndexNotFoundException) { - indicesPrivilegesBuilder.privileges(SearchAction.NAME); + indicesPrivilegesBuilder.privileges(TransportSearchAction.TYPE.name()); privRequest.indexPrivileges(indicesPrivilegesBuilder.build()); client.execute(HasPrivilegesAction.INSTANCE, privRequest, privResponseListener); } else { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java index 321277df02dbf..341746a097bb2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java @@ -6,9 +6,9 @@ */ package org.elasticsearch.xpack.ml.datafeed.delayeddatacheck; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.util.Maps; @@ -134,7 +134,7 @@ private Map checkCurrentBucketEventCount(long start, long end) { SearchRequest searchRequest = new SearchRequest(datafeedIndices).source(searchSourceBuilder).indicesOptions(indicesOptions); try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(ML_ORIGIN)) { - SearchResponse response = client.execute(SearchAction.INSTANCE, searchRequest).actionGet(); + SearchResponse response = client.execute(TransportSearchAction.TYPE, searchRequest).actionGet(); List buckets = ((Histogram) response.getAggregations().get(DATE_BUCKETS)).getBuckets(); Map hashMap = Maps.newMapWithExpectedSize(buckets.size()); for (Histogram.Bucket bucket : buckets) { diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java index 1893e8b30ab67..ca6f138967bbe 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java @@ -32,13 +32,18 @@ public interface DataExtractorFactory { DataExtractor newExtractor(long start, long end); /** - * Creates a new extractor with the additional filter - * @param start start time of the extractor - * @param end end time of the extractor - * @param queryBuilder An additional query filter to apply to the supplied datafeed query - * @return new extractor + * Creates a {@code DataExtractorFactory} for the given datafeed-job combination. */ - DataExtractor newExtractor(long start, long end, QueryBuilder queryBuilder); + static void create( + Client client, + DatafeedConfig datafeed, + Job job, + NamedXContentRegistry xContentRegistry, + DatafeedTimingStatsReporter timingStatsReporter, + ActionListener listener + ) { + create(client, datafeed, null, job, xContentRegistry, timingStatsReporter, listener); + } /** * Creates a {@code DataExtractorFactory} for the given datafeed-job combination. @@ -46,6 +51,7 @@ public interface DataExtractorFactory { static void create( Client client, DatafeedConfig datafeed, + QueryBuilder extraFilters, Job job, NamedXContentRegistry xContentRegistry, DatafeedTimingStatsReporter timingStatsReporter, @@ -56,7 +62,7 @@ static void create( ActionListener factoryHandler = ActionListener.wrap( factory -> listener.onResponse( datafeed.getChunkingConfig().isEnabled() - ? new ChunkedDataExtractorFactory(client, datafeed, job, xContentRegistry, factory, timingStatsReporter) + ? 
new ChunkedDataExtractorFactory(client, datafeed, extraFilters, job, xContentRegistry, factory, timingStatsReporter) : factory ), listener::onFailure @@ -69,14 +75,22 @@ static void create( return; } if (hasAggs == false) { - ScrollDataExtractorFactory.create(client, datafeed, job, xContentRegistry, timingStatsReporter, factoryHandler); + ScrollDataExtractorFactory.create( + client, + datafeed, + extraFilters, + job, + xContentRegistry, + timingStatsReporter, + factoryHandler + ); return; } if (hasRollup && datafeed.getRuntimeMappings().isEmpty() == false) { // TODO Rollup V2 will support runtime fields listener.onFailure( new IllegalArgumentException( - "The datafeed has runtime_mappings defined, " + "runtime fields are not supported in rollup searches" + "The datafeed has runtime_mappings defined, runtime fields are not supported in rollup searches" ) ); return; @@ -90,6 +104,7 @@ static void create( final DataExtractorFactory dataExtractorFactory = new CompositeAggregationDataExtractorFactory( client, datafeed, + extraFilters, job, xContentRegistry, timingStatsReporter, @@ -107,6 +122,7 @@ static void create( RollupDataExtractorFactory.create( client, datafeed, + extraFilters, job, response.getJobs(), xContentRegistry, @@ -115,7 +131,7 @@ static void create( ); } else { factoryHandler.onResponse( - new AggregationDataExtractorFactory(client, datafeed, job, xContentRegistry, timingStatsReporter) + new AggregationDataExtractorFactory(client, datafeed, extraFilters, job, xContentRegistry, timingStatsReporter) ); } }, e -> { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java index 0d4ba9fd2086d..421581e2622ab 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java @@ -77,6 +77,11 @@ public void cancel() { hasNext = false; } + @Override + public void destroy() { + cancel(); + } + @Override public long getEndTime() { return context.end; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractor.java index f1b817c7835a3..42766f6ebf12b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractor.java @@ -6,8 +6,8 @@ */ package org.elasticsearch.xpack.ml.datafeed.extractor.aggregation; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.xpack.ml.datafeed.DatafeedTimingStatsReporter; @@ -30,7 +30,7 @@ class AggregationDataExtractor extends AbstractAggregationDataExtractor new SearchRequestBuilder(client, SearchAction.INSTANCE).setSource(searchSourceBuilder) + return (searchSourceBuilder) -> new SearchRequestBuilder(client, 
TransportSearchAction.TYPE).setSource(searchSourceBuilder) .setIndicesOptions(indicesOptions) .setAllowPartialSearchResults(false) .setIndices(indices); @@ -37,19 +38,10 @@ public static AggregatedSearchRequestBuilder requestBuilder(Client client, Strin @Override public DataExtractor newExtractor(long start, long end) { - return buildExtractor(start, end, datafeedConfig.getParsedQuery(xContentRegistry)); - } - - @Override - public DataExtractor newExtractor(long start, long end, QueryBuilder queryBuilder) { - return buildExtractor( - start, - end, - QueryBuilders.boolQuery().filter(datafeedConfig.getParsedQuery(xContentRegistry)).filter(queryBuilder) - ); - } - - private DataExtractor buildExtractor(long start, long end, QueryBuilder queryBuilder) { + QueryBuilder queryBuilder = datafeedConfig.getParsedQuery(xContentRegistry); + if (extraFilters != null) { + queryBuilder = QueryBuilders.boolQuery().filter(queryBuilder).filter(extraFilters); + } long histogramInterval = datafeedConfig.getHistogramIntervalMillis(xContentRegistry); AggregationDataExtractorContext dataExtractorContext = new AggregationDataExtractorContext( job.getId(), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractor.java index 13a7e8cf9a89e..859dd506a7712 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractor.java @@ -90,6 +90,11 @@ public void cancel() { isCancelled = true; } + @Override + public void destroy() { + cancel(); + } + @Override public long getEndTime() { return context.end; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractorFactory.java index 83d84fc0f12c5..373ca8f5ada64 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractorFactory.java @@ -32,6 +32,7 @@ public class CompositeAggregationDataExtractorFactory implements DataExtractorFa private final Client client; private final DatafeedConfig datafeedConfig; + private final QueryBuilder extraFilters; private final Job job; private final DatafeedTimingStatsReporter timingStatsReporter; private final String compositeAggName; @@ -46,6 +47,7 @@ public class CompositeAggregationDataExtractorFactory implements DataExtractorFa public CompositeAggregationDataExtractorFactory( Client client, DatafeedConfig datafeedConfig, + QueryBuilder extraFilters, Job job, NamedXContentRegistry xContentRegistry, DatafeedTimingStatsReporter timingStatsReporter, @@ -53,6 +55,7 @@ public CompositeAggregationDataExtractorFactory( ) { this.client = Objects.requireNonNull(client); this.datafeedConfig = Objects.requireNonNull(datafeedConfig); + this.extraFilters = extraFilters; this.job = Objects.requireNonNull(job); this.timingStatsReporter = Objects.requireNonNull(timingStatsReporter); this.parsedQuery = 
datafeedConfig.getParsedQuery(xContentRegistry); @@ -92,15 +95,10 @@ public CompositeAggregationDataExtractorFactory( @Override public DataExtractor newExtractor(long start, long end) { - return buildNewExtractor(start, end, parsedQuery); - } - - @Override - public DataExtractor newExtractor(long start, long end, QueryBuilder queryBuilder) { - return buildNewExtractor(start, end, QueryBuilders.boolQuery().filter(parsedQuery).filter(queryBuilder)); - } - - private DataExtractor buildNewExtractor(long start, long end, QueryBuilder queryBuilder) { + QueryBuilder queryBuilder = parsedQuery; + if (extraFilters != null) { + queryBuilder = QueryBuilders.boolQuery().filter(queryBuilder).filter(extraFilters); + } CompositeAggregationBuilder compositeAggregationBuilder = new CompositeAggregationBuilder( compositeAggName, compositeValuesSourceBuilders diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java index ab87f36c89aab..40a578430a033 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java @@ -50,6 +50,7 @@ public class RollupDataExtractorFactory implements DataExtractorFactory { private final Client client; private final DatafeedConfig datafeedConfig; + private final QueryBuilder extraFilters; private final Job job; private final NamedXContentRegistry xContentRegistry; private final DatafeedTimingStatsReporter timingStatsReporter; @@ -57,12 +58,14 @@ public class RollupDataExtractorFactory implements DataExtractorFactory { private RollupDataExtractorFactory( Client client, DatafeedConfig datafeedConfig, + QueryBuilder extraFilters, Job job, NamedXContentRegistry xContentRegistry, DatafeedTimingStatsReporter timingStatsReporter ) { this.client = Objects.requireNonNull(client); this.datafeedConfig = Objects.requireNonNull(datafeedConfig); + this.extraFilters = extraFilters; this.job = Objects.requireNonNull(job); this.xContentRegistry = xContentRegistry; this.timingStatsReporter = Objects.requireNonNull(timingStatsReporter); @@ -80,19 +83,10 @@ public static AggregatedSearchRequestBuilder requestBuilder(Client client, Strin @Override public DataExtractor newExtractor(long start, long end) { - return buildExtractor(start, end, datafeedConfig.getParsedQuery(xContentRegistry)); - } - - @Override - public DataExtractor newExtractor(long start, long end, QueryBuilder queryBuilder) { - return buildExtractor( - start, - end, - QueryBuilders.boolQuery().filter(datafeedConfig.getParsedQuery(xContentRegistry)).filter(queryBuilder) - ); - } - - private DataExtractor buildExtractor(long start, long end, QueryBuilder queryBuilder) { + QueryBuilder queryBuilder = datafeedConfig.getParsedQuery(xContentRegistry); + if (extraFilters != null) { + queryBuilder = QueryBuilders.boolQuery().filter(queryBuilder).filter(extraFilters); + } long histogramInterval = datafeedConfig.getHistogramIntervalMillis(xContentRegistry); AggregationDataExtractorContext dataExtractorContext = new AggregationDataExtractorContext( job.getId(), @@ -114,6 +108,7 @@ private DataExtractor buildExtractor(long start, long end, QueryBuilder queryBui public static void create( Client client, DatafeedConfig datafeed, + QueryBuilder extraFilters, 
Job job, Map rollupJobsWithCaps, NamedXContentRegistry xContentRegistry, @@ -171,7 +166,7 @@ public static void create( return; } - listener.onResponse(new RollupDataExtractorFactory(client, datafeed, job, xContentRegistry, timingStatsReporter)); + listener.onResponse(new RollupDataExtractorFactory(client, datafeed, extraFilters, job, xContentRegistry, timingStatsReporter)); } private static boolean validInterval(long datafeedInterval, ParsedRollupCaps rollupJobGroupConfig) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractor.java index 090c85cb8ba36..03dda0a8201b7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractor.java @@ -9,10 +9,10 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.Aggregations; @@ -197,6 +197,14 @@ public void cancel() { isCancelled = true; } + @Override + public void destroy() { + cancel(); + if (currentExtractor != null) { + currentExtractor.destroy(); + } + } + @Override public long getEndTime() { return context.end; @@ -271,7 +279,7 @@ private SearchSourceBuilder rangeSearchBuilder() { } private SearchRequestBuilder rangeSearchRequest() { - return new SearchRequestBuilder(client, SearchAction.INSTANCE).setIndices(context.indices) + return new SearchRequestBuilder(client, TransportSearchAction.TYPE).setIndices(context.indices) .setIndicesOptions(context.indicesOptions) .setSource(rangeSearchBuilder()) .setAllowPartialSearchResults(false) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactory.java index 65d082ffeadea..d0fbeb03150ed 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactory.java @@ -23,6 +23,8 @@ public class ChunkedDataExtractorFactory implements DataExtractorFactory { private final Client client; private final DatafeedConfig datafeedConfig; + + private final QueryBuilder extraFilters; private final Job job; private final DataExtractorFactory dataExtractorFactory; private final NamedXContentRegistry xContentRegistry; @@ -31,6 +33,7 @@ public class ChunkedDataExtractorFactory implements DataExtractorFactory { public ChunkedDataExtractorFactory( Client client, DatafeedConfig datafeedConfig, + QueryBuilder extraFilters, Job job, NamedXContentRegistry xContentRegistry, DataExtractorFactory dataExtractorFactory, @@ -38,6 +41,7 @@ public ChunkedDataExtractorFactory( ) { this.client = 
Objects.requireNonNull(client); this.datafeedConfig = Objects.requireNonNull(datafeedConfig); + this.extraFilters = extraFilters; this.job = Objects.requireNonNull(job); this.dataExtractorFactory = Objects.requireNonNull(dataExtractorFactory); this.xContentRegistry = xContentRegistry; @@ -46,19 +50,10 @@ public ChunkedDataExtractorFactory( @Override public DataExtractor newExtractor(long start, long end) { - return buildExtractor(start, end, datafeedConfig.getParsedQuery(xContentRegistry)); - } - - @Override - public DataExtractor newExtractor(long start, long end, QueryBuilder queryBuilder) { - return buildExtractor( - start, - end, - QueryBuilders.boolQuery().filter(datafeedConfig.getParsedQuery(xContentRegistry)).filter(queryBuilder) - ); - } - - private DataExtractor buildExtractor(long start, long end, QueryBuilder queryBuilder) { + QueryBuilder queryBuilder = datafeedConfig.getParsedQuery(xContentRegistry); + if (extraFilters != null) { + queryBuilder = QueryBuilders.boolQuery().filter(queryBuilder).filter(extraFilters); + } ChunkedDataExtractorContext.TimeAligner timeAligner = newTimeAligner(); ChunkedDataExtractorContext dataExtractorContext = new ChunkedDataExtractorContext( job.getId(), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java index e7aba2211b2df..8757e1afd8123 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java @@ -9,14 +9,14 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.action.search.ClearScrollAction; import org.elasticsearch.action.search.ClearScrollRequest; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchScrollAction; import org.elasticsearch.action.search.SearchScrollRequestBuilder; +import org.elasticsearch.action.search.TransportClearScrollAction; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchScrollAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.core.TimeValue; @@ -84,6 +84,12 @@ public void cancel() { isCancelled = true; } + @Override + public void destroy() { + cancel(); + clearScroll(); + } + @Override public long getEndTime() { return context.end; @@ -148,7 +154,7 @@ private SearchRequestBuilder buildSearchRequest(long start) { .query(ExtractorUtils.wrapInTimeRangeQuery(context.query, context.extractedFields.timeField(), start, context.end)) .runtimeMappings(context.runtimeMappings); - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client, SearchAction.INSTANCE).setScroll(SCROLL_TIMEOUT) + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client, TransportSearchAction.TYPE).setScroll(SCROLL_TIMEOUT) .setIndices(context.indices) .setIndicesOptions(context.indicesOptions) .setAllowPartialSearchResults(false) @@ -244,7 +250,9 @@ protected 
SearchResponse executeSearchScrollRequest(String scrollId) { context.headers, ClientHelper.ML_ORIGIN, client, - () -> new SearchScrollRequestBuilder(client, SearchScrollAction.INSTANCE).setScroll(SCROLL_TIMEOUT).setScrollId(scrollId).get() + () -> new SearchScrollRequestBuilder(client, TransportSearchScrollAction.TYPE).setScroll(SCROLL_TIMEOUT) + .setScrollId(scrollId) + .get() ); try { checkForSkippedClusters(searchResponse); @@ -278,7 +286,7 @@ private void innerClearScroll(String scrollId) { context.headers, ClientHelper.ML_ORIGIN, client, - () -> client.execute(ClearScrollAction.INSTANCE, request).actionGet() + () -> client.execute(TransportClearScrollAction.TYPE, request).actionGet() ); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java index 7a485d08d0643..27838795aedd2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java @@ -38,6 +38,7 @@ public class ScrollDataExtractorFactory implements DataExtractorFactory { private final Client client; private final DatafeedConfig datafeedConfig; + private final QueryBuilder extraFilters; private final Job job; private final TimeBasedExtractedFields extractedFields; private final NamedXContentRegistry xContentRegistry; @@ -46,6 +47,7 @@ public class ScrollDataExtractorFactory implements DataExtractorFactory { private ScrollDataExtractorFactory( Client client, DatafeedConfig datafeedConfig, + QueryBuilder extraFilters, Job job, TimeBasedExtractedFields extractedFields, NamedXContentRegistry xContentRegistry, @@ -53,6 +55,7 @@ private ScrollDataExtractorFactory( ) { this.client = Objects.requireNonNull(client); this.datafeedConfig = Objects.requireNonNull(datafeedConfig); + this.extraFilters = extraFilters; this.job = Objects.requireNonNull(job); this.extractedFields = Objects.requireNonNull(extractedFields); this.xContentRegistry = xContentRegistry; @@ -61,19 +64,10 @@ private ScrollDataExtractorFactory( @Override public DataExtractor newExtractor(long start, long end) { - return buildExtractor(start, end, datafeedConfig.getParsedQuery(xContentRegistry)); - } - - @Override - public DataExtractor newExtractor(long start, long end, QueryBuilder queryBuilder) { - return buildExtractor( - start, - end, - QueryBuilders.boolQuery().filter(datafeedConfig.getParsedQuery(xContentRegistry)).filter(queryBuilder) - ); - } - - private DataExtractor buildExtractor(long start, long end, QueryBuilder queryBuilder) { + QueryBuilder queryBuilder = datafeedConfig.getParsedQuery(xContentRegistry); + if (extraFilters != null) { + queryBuilder = QueryBuilders.boolQuery().filter(queryBuilder).filter(extraFilters); + } ScrollDataExtractorContext dataExtractorContext = new ScrollDataExtractorContext( job.getId(), extractedFields, @@ -93,6 +87,7 @@ private DataExtractor buildExtractor(long start, long end, QueryBuilder queryBui public static void create( Client client, DatafeedConfig datafeed, + QueryBuilder extraFilters, Job job, NamedXContentRegistry xContentRegistry, DatafeedTimingStatsReporter timingStatsReporter, @@ -123,7 +118,9 @@ public static void create( return; } TimeBasedExtractedFields fields = TimeBasedExtractedFields.build(job, datafeed, fieldCapabilitiesResponse); - 
listener.onResponse(new ScrollDataExtractorFactory(client, datafeed, job, fields, xContentRegistry, timingStatsReporter)); + listener.onResponse( + new ScrollDataExtractorFactory(client, datafeed, extraFilters, job, fields, xContentRegistry, timingStatsReporter) + ); }, e -> { Throwable cause = ExceptionsHelper.unwrapCause(e); if (cause instanceof IndexNotFoundException notFound) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTask.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTask.java index c6af1bcfa6f18..7f50be8a663fe 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTask.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTask.java @@ -13,9 +13,9 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.ParentTaskAssigningClient; @@ -45,6 +45,7 @@ import org.elasticsearch.xpack.ml.notifications.DataFrameAnalyticsAuditor; import org.elasticsearch.xpack.ml.utils.persistence.MlParserUtils; +import java.time.Instant; import java.util.List; import java.util.Map; import java.util.Objects; @@ -178,7 +179,8 @@ public void setFailed(Exception error) { DataFrameAnalyticsTaskState newTaskState = new DataFrameAnalyticsTaskState( DataFrameAnalyticsState.FAILED, getAllocationId(), - reason + reason, + Instant.now() ); updatePersistentTaskState(newTaskState, ActionListener.wrap(updatedTask -> { String message = Messages.getMessage( @@ -274,7 +276,7 @@ void persistProgress(Client clientToUse, String jobId, Runnable runnable) { SearchRequest searchRequest = new SearchRequest(AnomalyDetectorsIndex.jobStateIndexPattern()).source( new SearchSourceBuilder().size(1).query(new IdsQueryBuilder().addIds(progressDocId)) ); - executeAsyncWithOrigin(clientToUse, ML_ORIGIN, SearchAction.INSTANCE, searchRequest, searchFormerProgressDocListener); + executeAsyncWithOrigin(clientToUse, ML_ORIGIN, TransportSearchAction.TYPE, searchRequest, searchFormerProgressDocListener); }, e -> { LOGGER.error( () -> format("[%s] cannot persist progress as an error occurred while updating task progress", taskParams.getId()), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java index f93988681f605..7ea6fdf55ca0a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java @@ -9,9 +9,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; 
import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.util.CachedSupplier; import org.elasticsearch.core.Nullable; @@ -127,7 +127,7 @@ public Optional> next() throws IOException { */ public void preview(ActionListener> listener) { - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client, SearchAction.INSTANCE) + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client, TransportSearchAction.TYPE) // This ensures the search throws if there are failures and the scroll context gets cleared automatically .setAllowPartialSearchResults(false) .setIndices(context.indices) @@ -146,7 +146,7 @@ public void preview(ActionListener> listener) { context.headers, ClientHelper.ML_ORIGIN, client, - SearchAction.INSTANCE, + TransportSearchAction.TYPE, searchRequestBuilder.request(), ActionListener.wrap(searchResponse -> { if (searchResponse.getHits().getHits().length == 0) { @@ -203,7 +203,7 @@ private SearchRequestBuilder buildSearchRequest() { LOGGER.trace(() -> format("[%s] Searching docs with [%s] in [%s, %s)", context.jobId, INCREMENTAL_ID, from, to)); - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client, SearchAction.INSTANCE) + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client, TransportSearchAction.TYPE) // This ensures the search throws if there are failures and the scroll context gets cleared automatically .setAllowPartialSearchResults(false) .addSort(DestinationIndex.INCREMENTAL_ID, SortOrder.ASC) @@ -383,7 +383,7 @@ public void collectDataSummaryAsync(ActionListener dataSummaryActio context.headers, ClientHelper.ML_ORIGIN, client, - SearchAction.INSTANCE, + TransportSearchAction.TYPE, searchRequestBuilder.request(), ActionListener.wrap( searchResponse -> dataSummaryActionListener.onResponse( @@ -401,7 +401,7 @@ private SearchRequestBuilder buildDataSummarySearchRequestBuilder() { summaryQuery = QueryBuilders.boolQuery().filter(summaryQuery).filter(allExtractedFieldsExistQuery()); } - return new SearchRequestBuilder(client, SearchAction.INSTANCE).setAllowPartialSearchResults(false) + return new SearchRequestBuilder(client, TransportSearchAction.TYPE).setAllowPartialSearchResults(false) .setIndices(context.indices) .setSize(0) .setQuery(summaryQuery) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorFactory.java index db68f49b78429..9e925ff3f8fee 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorFactory.java @@ -17,9 +17,9 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.settings.Settings; @@ -141,7 +141,14 @@ private void getCardinalitiesForFieldsWithConstraints( ); } SearchRequest searchRequest = new 
SearchRequest(index).source(searchSourceBuilder); - ClientHelper.executeWithHeadersAsync(config.getHeaders(), ML_ORIGIN, client, SearchAction.INSTANCE, searchRequest, searchListener); + ClientHelper.executeWithHeadersAsync( + config.getHeaders(), + ML_ORIGIN, + client, + TransportSearchAction.TYPE, + searchRequest, + searchListener + ); } private static void buildFieldCardinalitiesMap( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsBuilder.java index 0427de3345f89..2f23029009069 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsBuilder.java @@ -12,6 +12,7 @@ import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.ml.process.NativeController; import org.elasticsearch.xpack.ml.process.ProcessPipes; +import org.elasticsearch.xpack.ml.utils.FileUtils; import java.io.IOException; import java.io.OutputStreamWriter; @@ -80,6 +81,7 @@ private List<String> buildAnalyticsCommand() throws IOException { private void addConfigFile(List<String> command) throws IOException { Path tempDir = tempDirPathSupplier.get(); + FileUtils.recreateTempDirectoryIfNeeded(tempDir); Path configFile = Files.createTempFile(tempDir, "analysis", ".conf"); filesToDelete.add(configFile); try ( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/InferenceStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/InferenceStep.java index 3e77f051a4519..65ac2b678d93b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/InferenceStep.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/InferenceStep.java @@ -12,8 +12,8 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilders; @@ -122,7 +122,7 @@ private void searchIfTestDocsExist(ActionListener<Boolean> listener) { executeAsyncWithOrigin( client, ML_ORIGIN, - SearchAction.INSTANCE, + TransportSearchAction.TYPE, searchRequest, ActionListener.wrap( searchResponse -> listener.onResponse(searchResponse.getHits().getTotalHits().value > 0), @@ -142,7 +142,7 @@ private void getModelId(ActionListener<String> listener) { SearchRequest searchRequest = new SearchRequest(InferenceIndexConstants.INDEX_PATTERN); searchRequest.source(searchSourceBuilder); - executeAsyncWithOrigin(client, ML_ORIGIN, SearchAction.INSTANCE, searchRequest, ActionListener.wrap(searchResponse -> { + executeAsyncWithOrigin(client, ML_ORIGIN, TransportSearchAction.TYPE, searchRequest, ActionListener.wrap(searchResponse -> { SearchHit[] hits = searchResponse.getHits().getHits(); if (hits.length == 0) { listener.onFailure(new ResourceNotFoundException("No model could be found to perform inference")); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java index 26adca10b8da8..3efae0ed58bf6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java @@ -34,6 +34,7 @@ import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.action.UpdateTrainedModelAssignmentRoutingInfoAction; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelPrefixStrings; import org.elasticsearch.xpack.core.ml.inference.assignment.AssignmentState; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfoUpdate; @@ -289,10 +290,11 @@ public void infer( NlpInferenceInput input, boolean skipQueue, TimeValue timeout, + TrainedModelPrefixStrings.PrefixType prefixType, CancellableTask parentActionTask, ActionListener<InferenceResults> listener ) { - deploymentManager.infer(task, config, input, skipQueue, timeout, parentActionTask, listener); + deploymentManager.infer(task, config, input, skipQueue, timeout, prefixType, parentActionTask, listener); } public Optional<ModelStats> modelStats(TrainedModelDeploymentTask task) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java index a8c449ec5d8e7..f48e67f377817 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java @@ -12,8 +12,8 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.Strings; @@ -33,6 +33,7 @@ import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelInput; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelPrefixStrings; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.IndexLocation; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.NlpConfig; @@ -131,7 +132,7 @@ ProcessContext addProcessContext(Long id, ProcessContext processContext) { } public void startDeployment(TrainedModelDeploymentTask task, ActionListener<TrainedModelDeploymentTask> finalListener) { - logger.info("[{}] Starting model deployment", task.getDeploymentId()); + logger.info("[{}] Starting model deployment of model [{}]", task.getDeploymentId(), task.getModelId()); if (processContextByAllocation.size() >= maxProcesses) { finalListener.onFailure( @@ -167,34 +168,43 @@ public void startDeployment(TrainedModelDeploymentTask task, ActionListener<TrainedModelConfig> getVerifiedModel = ActionListener.wrap((modelConfig) -> {
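// Sketch of the flow this listener implements (a summary of the restructured code below, not
// additional behaviour): record the model's declared input and its prefix strings on the process
// context, then, for NLP models, fetch the vocabulary document before starting the native process:
//
//     processContext.modelInput.set(modelConfig.getInput());
//     processContext.prefixes.set(modelConfig.getPrefixStrings());   // new in this change
//     // NlpConfig -> vocabulary search -> NlpTask.Processor -> startAndLoad(...)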
processContext.modelInput.set(modelConfig.getInput()); + processContext.prefixes.set(modelConfig.getPrefixStrings()); if (modelConfig.getInferenceConfig() instanceof NlpConfig nlpConfig) { task.init(nlpConfig); SearchRequest searchRequest = vocabSearchRequest(nlpConfig.getVocabularyConfig(), modelConfig.getModelId()); - executeAsyncWithOrigin(client, ML_ORIGIN, SearchAction.INSTANCE, searchRequest, ActionListener.wrap(searchVocabResponse -> { - if (searchVocabResponse.getHits().getHits().length == 0) { - failedDeploymentListener.onFailure( - new ResourceNotFoundException( - Messages.getMessage( - Messages.VOCABULARY_NOT_FOUND, - modelConfig.getModelId(), - VocabularyConfig.docId(modelConfig.getModelId()) + executeAsyncWithOrigin( + client, + ML_ORIGIN, + TransportSearchAction.TYPE, + searchRequest, + ActionListener.wrap(searchVocabResponse -> { + if (searchVocabResponse.getHits().getHits().length == 0) { + failedDeploymentListener.onFailure( + new ResourceNotFoundException( + Messages.getMessage( + Messages.VOCABULARY_NOT_FOUND, + modelConfig.getModelId(), + VocabularyConfig.docId(modelConfig.getModelId()) + ) ) - ) + ); + return; + } + + Vocabulary vocabulary = parseVocabularyDocLeniently(searchVocabResponse.getHits().getAt(0)); + NlpTask nlpTask = new NlpTask(nlpConfig, vocabulary); + NlpTask.Processor processor = nlpTask.createProcessor(); + processContext.nlpTaskProcessor.set(processor); + // here, we are being called back on the searching thread, which MAY be a network thread + // `startAndLoad` creates named pipes, blocking the calling thread, better to execute that in our utility + // executor. + executorServiceForDeployment.execute( + () -> processContext.startAndLoad(modelConfig.getLocation(), modelLoadedListener) ); - return; - } - - Vocabulary vocabulary = parseVocabularyDocLeniently(searchVocabResponse.getHits().getAt(0)); - NlpTask nlpTask = new NlpTask(nlpConfig, vocabulary); - NlpTask.Processor processor = nlpTask.createProcessor(); - processContext.nlpTaskProcessor.set(processor); - // here, we are being called back on the searching thread, which MAY be a network thread - // `startAndLoad` creates named pipes, blocking the calling thread, better to execute that in our utility - // executor. 
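// The comment above is the load-bearing concurrency rule kept by this refactor: the vocabulary
// search can complete on a transport (network) thread, and startAndLoad blocks while creating
// named pipes, so the blocking work is handed off instead of run inline. The general shape:
//
//     ActionListener.wrap(response -> {
//         // only cheap, non-blocking parsing on the calling thread
//         executorServiceForDeployment.execute(() -> startAndLoadBlocking());   // illustrative name
//     }, listener::onFailure)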
- executorServiceForDeployment.execute(() -> processContext.startAndLoad(modelConfig.getLocation(), modelLoadedListener)); - }, failedDeploymentListener::onFailure)); + }, failedDeploymentListener::onFailure) + ); } else { failedDeploymentListener.onFailure( new IllegalArgumentException( @@ -319,6 +329,7 @@ public void infer( NlpInferenceInput input, boolean skipQueue, TimeValue timeout, + TrainedModelPrefixStrings.PrefixType prefixType, CancellableTask parentActionTask, ActionListener<InferenceResults> listener ) { @@ -336,6 +347,7 @@ public void infer( processContext, config, input, + prefixType, threadPool, parentActionTask, listener ) @@ -437,6 +449,7 @@ class ProcessContext { private final SetOnce<NativePyTorchProcess> process = new SetOnce<>(); private final SetOnce<NlpTask.Processor> nlpTaskProcessor = new SetOnce<>(); private final SetOnce<TrainedModelInput> modelInput = new SetOnce<>(); + private final SetOnce<TrainedModelPrefixStrings> prefixes = new SetOnce<>(); private final PyTorchResultProcessor resultProcessor; private final PyTorchStateStreamer stateStreamer; private final PriorityProcessWorkerExecutorService priorityProcessWorker; @@ -681,5 +694,9 @@ SetOnce<NativePyTorchProcess> getProcess() { SetOnce<NlpTask.Processor> getNlpTaskProcessor() { return nlpTaskProcessor; } + + SetOnce<TrainedModelPrefixStrings> getPrefixStrings() { + return prefixes; + } } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/InferencePyTorchAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/InferencePyTorchAction.java index c91efb09d3cae..945203c345a3c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/InferencePyTorchAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/InferencePyTorchAction.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Strings; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.InferenceResults; @@ -18,6 +19,7 @@ import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelPrefixStrings; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.NlpConfig; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -26,7 +28,6 @@ import org.elasticsearch.xpack.ml.inference.pytorch.results.PyTorchResult; import java.io.IOException; -import java.util.Collections; import java.util.List; import static org.elasticsearch.core.Strings.format; @@ -39,6 +40,7 @@ class InferencePyTorchAction extends AbstractPyTorchAction<InferenceResults> { private final NlpInferenceInput input; @Nullable private final CancellableTask parentActionTask; + private final TrainedModelPrefixStrings.PrefixType prefixType; InferencePyTorchAction( String deploymentId, @@ -47,6 +49,7 @@ class InferencePyTorchAction extends AbstractPyTorchAction<InferenceResults> { DeploymentManager.ProcessContext processContext, InferenceConfig config, NlpInferenceInput input, + TrainedModelPrefixStrings.PrefixType prefixType, ThreadPool threadPool, @Nullable CancellableTask parentActionTask, ActionListener<InferenceResults> listener @@ -54,6 +57,7 @@ class InferencePyTorchAction extends AbstractPyTorchAction<InferenceResults> { super(deploymentId, requestId, timeout, processContext, threadPool, listener); this.config = config; this.input = input; +
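// Prefix strings, consumed in doRun() below, let a model declare text to prepend to the raw input
// depending on calling context; e.g. E5-style text-embedding models expect different task prefixes
// at search time versus ingest time. With a hypothetical searchPrefix "query: " and ingestPrefix
// "passage: ":
//
//     // prefixType == SEARCH  ->  "query: " + inputText
//     // prefixType == INGEST  ->  "passage: " + inputText
//     // prefixType == NONE    ->  inputText unchanged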
this.prefixType = prefixType; this.parentActionTask = parentActionTask; } @@ -83,15 +87,39 @@ protected void doRun() throws Exception { final String requestIdStr = String.valueOf(getRequestId()); try { + String inputText = input.extractInput(getProcessContext().getModelInput().get()); + if (prefixType != TrainedModelPrefixStrings.PrefixType.NONE) { + var prefixStrings = getProcessContext().getPrefixStrings().get(); + if (prefixStrings != null) { + switch (prefixType) { + case SEARCH: { + if (Strings.isNullOrEmpty(prefixStrings.searchPrefix()) == false) { + inputText = prefixStrings.searchPrefix() + inputText; + } + } + break; + case INGEST: { + if (Strings.isNullOrEmpty(prefixStrings.ingestPrefix()) == false) { + inputText = prefixStrings.ingestPrefix() + inputText; + } + } + break; + default: + throw new IllegalStateException("[" + getDeploymentId() + "] Unhandled input prefix type [" + prefixType + "]"); + } + } + } + + // The request builder expects a list of inputs which are then batched. + // TODO batching was implemented for expected use-cases such as zero-shot classification but is not used here. - List<String> text = Collections.singletonList(input.extractInput(getProcessContext().getModelInput().get())); + var inputs = List.of(inputText); + NlpTask.Processor processor = getProcessContext().getNlpTaskProcessor().get(); - processor.validateInputs(text); + processor.validateInputs(inputs); assert config instanceof NlpConfig; NlpConfig nlpConfig = (NlpConfig) config; NlpTask.Request request = processor.getRequestBuilder(nlpConfig) - .buildRequest(text, requestIdStr, nlpConfig.getTokenization().getTruncate(), nlpConfig.getTokenization().getSpan()); + .buildRequest(inputs, requestIdStr, nlpConfig.getTokenization().getTruncate(), nlpConfig.getTokenization().getSpan()); logger.debug(() -> format("handling request [%s]", requestIdStr)); // Tokenization is non-trivial, so check for cancellation one last time before sending request to the native process diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/TrainedModelDeploymentTask.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/TrainedModelDeploymentTask.java index ea06a0a0aba90..cd7ed9e3eb55a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/TrainedModelDeploymentTask.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/TrainedModelDeploymentTask.java @@ -23,6 +23,7 @@ import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction.TaskParams; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelPrefixStrings; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfigUpdate; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -148,6 +149,7 @@ public void infer( InferenceConfigUpdate update, boolean skipQueue, TimeValue timeout, + TrainedModelPrefixStrings.PrefixType prefixType, CancellableTask parentActionTask, ActionListener<InferenceResults> listener ) { @@ -175,6 +177,7 @@ public void infer( input, skipQueue, timeout, + prefixType, parentActionTask, listener ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessor.java
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessor.java index 5518903dde125..e600ddd42107f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessor.java @@ -26,6 +26,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.ml.MlConfigVersion; import org.elasticsearch.xpack.core.ml.action.InferModelAction; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelPrefixStrings; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ClassificationConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ClassificationConfigUpdate; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.EmptyConfigUpdate; @@ -245,13 +246,15 @@ InferModelAction.Request buildRequest(IngestDocument ingestDocument) { } } } - return InferModelAction.Request.forTextInput( + var request = InferModelAction.Request.forTextInput( modelId, inferenceConfig, requestInputs, previouslyLicensed, InferModelAction.Request.DEFAULT_TIMEOUT_FOR_INGEST ); + request.setPrefixType(TrainedModelPrefixStrings.PrefixType.INGEST); + return request; } else { Map<String, Object> fields = new HashMap<>(ingestDocument.getSourceAndMetadata()); // Add ingestMetadata as previous processors might have added metadata from which we are predicting (see: foreach processor) @@ -260,13 +263,15 @@ InferModelAction.Request buildRequest(IngestDocument ingestDocument) { } LocalModel.mapFieldsIfNecessary(fields, fieldMap); - return InferModelAction.Request.forIngestDocs( + var request = InferModelAction.Request.forIngestDocs( modelId, List.of(fields), inferenceConfig, previouslyLicensed, InferModelAction.Request.DEFAULT_TIMEOUT_FOR_INGEST ); + request.setPrefixType(TrainedModelPrefixStrings.PrefixType.INGEST); + return request; } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/FeatureExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/FeatureExtractor.java similarity index 91% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/FeatureExtractor.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/FeatureExtractor.java index 36bf36ef99c52..90e462e3dc511 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/FeatureExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/FeatureExtractor.java @@ -5,7 +5,7 @@ * 2.0.
*/ -package org.elasticsearch.xpack.ml.inference.rescorer; +package org.elasticsearch.xpack.ml.inference.ltr; import org.apache.lucene.index.LeafReaderContext; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/FieldValueFeatureExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/FieldValueFeatureExtractor.java similarity index 97% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/FieldValueFeatureExtractor.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/FieldValueFeatureExtractor.java index 9f0ef84fc3575..5a2e3d29df949 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/FieldValueFeatureExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/FieldValueFeatureExtractor.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.ml.inference.rescorer; +package org.elasticsearch.xpack.ml.inference.ltr; import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.index.mapper.MappedFieldType; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerFeature.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/InferenceRescorerFeature.java similarity index 89% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerFeature.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/InferenceRescorerFeature.java index 2b88faa3e4c14..8a26714c7c06b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerFeature.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/InferenceRescorerFeature.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.ml.inference.rescorer; +package org.elasticsearch.xpack.ml.inference.ltr; import org.elasticsearch.common.util.FeatureFlag; @@ -14,7 +14,7 @@ * * Upon removal, ensure transport serialization is all corrected for future BWC. * - * See {@link InferenceRescorerBuilder} + * See {@link LearnToRankRescorerBuilder} */ public class InferenceRescorerFeature { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearnToRankRescorer.java similarity index 91% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorer.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearnToRankRescorer.java index df3e0756ea39a..dd1df7d8090d6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearnToRankRescorer.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.ml.inference.rescorer; +package org.elasticsearch.xpack.ml.inference.ltr; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -32,17 +32,17 @@ import static java.util.stream.Collectors.toUnmodifiableSet; -public class InferenceRescorer implements Rescorer { +public class LearnToRankRescorer implements Rescorer { - public static final InferenceRescorer INSTANCE = new InferenceRescorer(); - private static final Logger logger = LogManager.getLogger(InferenceRescorer.class); + public static final LearnToRankRescorer INSTANCE = new LearnToRankRescorer(); + private static final Logger logger = LogManager.getLogger(LearnToRankRescorer.class); private static final Comparator<ScoreDoc> SCORE_DOC_COMPARATOR = (o1, o2) -> { int cmp = Float.compare(o2.score, o1.score); return cmp == 0 ? Integer.compare(o1.doc, o2.doc) : cmp; }; - private InferenceRescorer() { + private LearnToRankRescorer() { } @@ -51,11 +51,11 @@ public TopDocs rescore(TopDocs topDocs, IndexSearcher searcher, RescoreContext r if (topDocs.scoreDocs.length == 0) { return topDocs; } - InferenceRescorerContext ltrRescoreContext = (InferenceRescorerContext) rescoreContext; - if (ltrRescoreContext.inferenceDefinition == null) { + LearnToRankRescorerContext ltrRescoreContext = (LearnToRankRescorerContext) rescoreContext; + if (ltrRescoreContext.regressionModelDefinition == null) { throw new IllegalStateException("local model reference is null, missing rewriteAndFetch before rescore phase?"); } - LocalModel definition = ltrRescoreContext.inferenceDefinition; + LocalModel definition = ltrRescoreContext.regressionModelDefinition; // First take top slice of incoming docs, to be rescored: TopDocs topNFirstPass = topN(topDocs, rescoreContext.getWindowSize()); @@ -104,7 +104,7 @@ public TopDocs rescore(TopDocs topDocs, IndexSearcher searcher, RescoreContext r for (int i = 0; i < hitsToRescore.length; i++) { Map<String, Object> features = docFeatures.get(i); try { - InferenceResults results = definition.inferLtr(features, ltrRescoreContext.inferenceConfig); + InferenceResults results = definition.inferLtr(features, ltrRescoreContext.learnToRankConfig); if (results instanceof WarningInferenceResults warningInferenceResults) { logger.warn("Failure rescoring doc, warning returned [" + warningInferenceResults.getWarning() + "]"); } else if (results.predictedValue() instanceof Number prediction) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearnToRankRescorerBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearnToRankRescorerBuilder.java new file mode 100644 index 0000000000000..49a082c9da6df --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearnToRankRescorerBuilder.java @@ -0,0 +1,328 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.ml.inference.ltr; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.Rewriteable; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.search.rescore.RescorerBuilder; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfig; +import org.elasticsearch.xpack.ml.inference.loadingservice.LocalModel; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; + +public class LearnToRankRescorerBuilder extends RescorerBuilder<LearnToRankRescorerBuilder> { + + public static final String NAME = "learn_to_rank"; + private static final ParseField MODEL_FIELD = new ParseField("model_id"); + private static final ParseField PARAMS_FIELD = new ParseField("params"); + private static final ObjectParser<Builder, Void> PARSER = new ObjectParser<>(NAME, false, Builder::new); + + static { + PARSER.declareString(Builder::setModelId, MODEL_FIELD); + PARSER.declareObject(Builder::setParams, (p, c) -> p.map(), PARAMS_FIELD); + } + + public static LearnToRankRescorerBuilder fromXContent(XContentParser parser, LearnToRankService learnToRankService) { + return PARSER.apply(parser, null).build(learnToRankService); + } + + private final String modelId; + private final Map<String, Object> params; + private final LearnToRankService learnToRankService; + private final LocalModel localModel; + private final LearnToRankConfig learnToRankConfig; + + private boolean rescoreOccurred = false; + + LearnToRankRescorerBuilder(String modelId, Map<String, Object> params, LearnToRankService learnToRankService) { + this(modelId, null, params, learnToRankService); + } + + LearnToRankRescorerBuilder( + String modelId, + LearnToRankConfig learnToRankConfig, + Map<String, Object> params, + LearnToRankService learnToRankService + ) { + this.modelId = modelId; + this.params = params; + this.learnToRankConfig = learnToRankConfig; + this.learnToRankService = learnToRankService; + + // Local inference model is not loaded yet. Will be done in a later rewrite.
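// The builder therefore passes through up to three immutable states during the rewrite cycle,
// each created by one of these constructors rather than by mutating an existing instance:
//
//     new LearnToRankRescorerBuilder(modelId, params, service)             // parsed; nothing fetched
//     new LearnToRankRescorerBuilder(modelId, config, params, service)     // coordinator; config fetched
//     new LearnToRankRescorerBuilder(localModel, config, params, service)  // data node; model loaded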
+ this.localModel = null; + } + + LearnToRankRescorerBuilder( + LocalModel localModel, + LearnToRankConfig learnToRankConfig, + Map<String, Object> params, + LearnToRankService learnToRankService + ) { + this.modelId = localModel.getModelId(); + this.params = params; + this.learnToRankConfig = learnToRankConfig; + this.localModel = localModel; + this.learnToRankService = learnToRankService; + } + + public LearnToRankRescorerBuilder(StreamInput input, LearnToRankService learnToRankService) throws IOException { + super(input); + this.modelId = input.readString(); + this.params = input.readMap(); + this.learnToRankConfig = (LearnToRankConfig) input.readOptionalNamedWriteable(InferenceConfig.class); + this.learnToRankService = learnToRankService; + + this.localModel = null; + } + + public String modelId() { + return modelId; + } + + public Map<String, Object> params() { + return params; + } + + public LearnToRankConfig learnToRankConfig() { + return learnToRankConfig; + } + + public LearnToRankService learnToRankService() { + return learnToRankService; + } + + public LocalModel localModel() { + return localModel; + } + + @Override + public RescorerBuilder<LearnToRankRescorerBuilder> rewrite(QueryRewriteContext ctx) throws IOException { + if (ctx.convertToDataRewriteContext() != null) { + return doDataNodeRewrite(ctx); + } + if (ctx.convertToSearchExecutionContext() != null) { + return doSearchRewrite(ctx); + } + return doCoordinatorNodeRewrite(ctx); + } + + /** + * Here we fetch the stored model inference context, apply the given update, and rewrite. + * + * This can and should be done on the coordinator, as it not only validates that the stored model is of the appropriate type, but also allows + * any stored logic to rewrite on the coordinator level if possible. + * @param ctx QueryRewriteContext + * @return rewritten LearnToRankRescorerBuilder or self if no changes + * @throws IOException when rewrite fails + */ + private RescorerBuilder<LearnToRankRescorerBuilder> doCoordinatorNodeRewrite(QueryRewriteContext ctx) throws IOException { + // If the stored config has already been fetched, get it and rewrite further if required + if (learnToRankConfig != null) { + LearnToRankConfig rewrittenConfig = Rewriteable.rewrite(learnToRankConfig, ctx); + if (rewrittenConfig == learnToRankConfig) { + return this; + } + LearnToRankRescorerBuilder builder = new LearnToRankRescorerBuilder(modelId, rewrittenConfig, params, learnToRankService); + if (windowSize != null) { + builder.windowSize(windowSize); + } + return builder; + } + + if (learnToRankService == null) { + throw new IllegalStateException("Learn to rank service must be available"); + } + + SetOnce<LearnToRankConfig> configSetOnce = new SetOnce<>(); + GetTrainedModelsAction.Request request = new GetTrainedModelsAction.Request(modelId); + request.setAllowNoResources(false); + ctx.registerAsyncAction( + (c, l) -> learnToRankService.loadLearnToRankConfig(modelId, params, ActionListener.wrap(learnToRankConfig -> { + configSetOnce.set(learnToRankConfig); + l.onResponse(null); + }, l::onFailure)) + ); + + LearnToRankRescorerBuilder builder = new RewritingLearnToRankRescorerBuilder( + (rewritingBuilder) -> configSetOnce.get() == null + ?
rewritingBuilder : new LearnToRankRescorerBuilder(modelId, configSetOnce.get(), params, learnToRankService) + ); + + if (windowSize() != null) { + builder.windowSize(windowSize); + } + return builder; + } + + /** + * This rewrite phase occurs on the data node when we know we will want to use the model for inference + * @param ctx Rewrite context + * @return A rewritten rescorer with a model definition or a model definition supplier populated + */ + private RescorerBuilder<LearnToRankRescorerBuilder> doDataNodeRewrite(QueryRewriteContext ctx) throws IOException { + assert learnToRankConfig != null; + + // The model is already loaded, no need to rewrite further. + if (localModel != null) { + return this; + } + + if (learnToRankService == null) { + throw new IllegalStateException("Learn to rank service must be available"); + } + + LearnToRankConfig rewrittenConfig = Rewriteable.rewrite(learnToRankConfig, ctx); + SetOnce<LocalModel> localModelSetOnce = new SetOnce<>(); + ctx.registerAsyncAction((c, l) -> learnToRankService.loadLocalModel(modelId, ActionListener.wrap(lm -> { + localModelSetOnce.set(lm); + l.onResponse(null); + }, l::onFailure))); + + LearnToRankRescorerBuilder builder = new RewritingLearnToRankRescorerBuilder( + (rewritingBuilder) -> localModelSetOnce.get() != null + ? new LearnToRankRescorerBuilder(localModelSetOnce.get(), rewrittenConfig, params, learnToRankService) + : rewritingBuilder + ); + + if (windowSize() != null) { + builder.windowSize(windowSize()); + } + return builder; + } + + /** + * This rewrite phase occurs on the shard during the search, once the search execution context is available. + * @param ctx Rewrite context + * @return A rescorer with the learn to rank config rewritten against the search execution context, or self if no changes + * @throws IOException If fetching, parsing, or overall rewrite failures occur + */ + private RescorerBuilder<LearnToRankRescorerBuilder> doSearchRewrite(QueryRewriteContext ctx) throws IOException { + if (learnToRankConfig == null) { + return this; + } + LearnToRankConfig rewrittenConfig = Rewriteable.rewrite(learnToRankConfig, ctx); + if (rewrittenConfig == learnToRankConfig) { + return this; + } + LearnToRankRescorerBuilder builder = new LearnToRankRescorerBuilder(localModel, rewrittenConfig, params, learnToRankService); + if (windowSize != null) { + builder.windowSize(windowSize); + } + return builder; + } + + @Override + protected LearnToRankRescorerContext innerBuildContext(int windowSize, SearchExecutionContext context) { + rescoreOccurred = true; + return new LearnToRankRescorerContext(windowSize, LearnToRankRescorer.INSTANCE, learnToRankConfig, localModel, context); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + // TODO: update transport version when released!
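// For reference, an illustrative request-time usage of this rescorer (model id and params are
// hypothetical; the field names follow the parser declared above):
//
//     "rescore": {
//         "window_size": 50,
//         "learn_to_rank": {
//             "model_id": "my-ltr-model",
//             "params": { "query_text": "brown shoes" }
//         }
//     }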
+ return TransportVersion.current(); + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + assert localModel == null || rescoreOccurred : "Unnecessarily populated local model object"; + out.writeString(modelId); + out.writeGenericMap(params); + out.writeOptionalNamedWriteable(learnToRankConfig); + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(NAME); + builder.field(MODEL_FIELD.getPreferredName(), modelId); + if (this.params != null) { + builder.field(PARAMS_FIELD.getPreferredName(), this.params); + } + builder.endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (super.equals(o) == false) return false; + LearnToRankRescorerBuilder that = (LearnToRankRescorerBuilder) o; + + return Objects.equals(modelId, that.modelId) + && Objects.equals(params, that.params) + && Objects.equals(learnToRankConfig, that.learnToRankConfig) + && Objects.equals(localModel, that.localModel) + && Objects.equals(learnToRankService, that.learnToRankService) + && rescoreOccurred == that.rescoreOccurred; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), modelId, params, learnToRankConfig, localModel, learnToRankService, rescoreOccurred); + } + + static class Builder { + private String modelId; + private Map<String, Object> params = null; + + public void setModelId(String modelId) { + this.modelId = modelId; + } + + public void setParams(Map<String, Object> params) { + this.params = params; + } + + LearnToRankRescorerBuilder build(LearnToRankService learnToRankService) { + return new LearnToRankRescorerBuilder(modelId, params, learnToRankService); + } + } + + private static class RewritingLearnToRankRescorerBuilder extends LearnToRankRescorerBuilder { + + private final Function<RewritingLearnToRankRescorerBuilder, LearnToRankRescorerBuilder> rewriteFunction; + + RewritingLearnToRankRescorerBuilder(Function<RewritingLearnToRankRescorerBuilder, LearnToRankRescorerBuilder> rewriteFunction) { + super(null, null, null); + this.rewriteFunction = rewriteFunction; + } + + @Override + public RescorerBuilder<LearnToRankRescorerBuilder> rewrite(QueryRewriteContext ctx) throws IOException { + LearnToRankRescorerBuilder builder = this.rewriteFunction.apply(this); + + if (windowSize() != null) { + builder.windowSize(windowSize()); + } + + return builder; + } + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerContext.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearnToRankRescorerContext.java similarity index 74% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerContext.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearnToRankRescorerContext.java index fed3effdc06f6..844f96208cb35 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerContext.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearnToRankRescorerContext.java @@ -5,7 +5,7 @@ * 2.0.
*/ -package org.elasticsearch.xpack.ml.inference.rescorer; +package org.elasticsearch.xpack.ml.inference.ltr; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -24,43 +24,43 @@ import java.util.ArrayList; import java.util.List; -public class InferenceRescorerContext extends RescoreContext { +public class LearnToRankRescorerContext extends RescoreContext { final SearchExecutionContext executionContext; - final LocalModel inferenceDefinition; - final LearnToRankConfig inferenceConfig; + final LocalModel regressionModelDefinition; + final LearnToRankConfig learnToRankConfig; /** * @param windowSize how many documents to rescore * @param rescorer The rescorer to apply - * @param inferenceConfig The inference config containing updated and rewritten parameters - * @param inferenceDefinition The local model inference definition, may be null during certain search phases. + * @param learnToRankConfig The inference config containing updated and rewritten parameters + * @param regressionModelDefinition The local model inference definition, may be null during certain search phases. + * @param executionContext The local shard search context */ - public InferenceRescorerContext( + public LearnToRankRescorerContext( int windowSize, Rescorer rescorer, - LearnToRankConfig inferenceConfig, - LocalModel inferenceDefinition, + LearnToRankConfig learnToRankConfig, + LocalModel regressionModelDefinition, SearchExecutionContext executionContext ) { super(windowSize, rescorer); this.executionContext = executionContext; - this.inferenceDefinition = inferenceDefinition; - this.inferenceConfig = inferenceConfig; + this.regressionModelDefinition = regressionModelDefinition; + this.learnToRankConfig = learnToRankConfig; } List<FeatureExtractor> buildFeatureExtractors(IndexSearcher searcher) throws IOException { - assert this.inferenceDefinition != null && this.inferenceConfig != null; + assert this.regressionModelDefinition != null && this.learnToRankConfig != null; List<FeatureExtractor> featureExtractors = new ArrayList<>(); - if (this.inferenceDefinition.inputFields().isEmpty() == false) { + if (this.regressionModelDefinition.inputFields().isEmpty() == false) { featureExtractors.add( - new FieldValueFeatureExtractor(new ArrayList<>(this.inferenceDefinition.inputFields()), this.executionContext) + new FieldValueFeatureExtractor(new ArrayList<>(this.regressionModelDefinition.inputFields()), this.executionContext) ); } List<Weight> weights = new ArrayList<>(); List<String> queryFeatureNames = new ArrayList<>(); - for (LearnToRankFeatureExtractorBuilder featureExtractorBuilder : inferenceConfig.getFeatureExtractorBuilders()) { + for (LearnToRankFeatureExtractorBuilder featureExtractorBuilder : learnToRankConfig.getFeatureExtractorBuilders()) { if (featureExtractorBuilder instanceof QueryExtractorBuilder queryExtractorBuilder) { Query query = executionContext.toQuery(queryExtractorBuilder.query().getParsedQuery()).query(); Weight weight = searcher.rewrite(query).createWeight(searcher, ScoreMode.COMPLETE, 1f); @@ -77,11 +77,11 @@ List<FeatureExtractor> buildFeatureExtractors(IndexSearcher searcher) throws IOE @Override public List<ParsedQuery> getParsedQueries() { - if (this.inferenceConfig == null) { + if (this.learnToRankConfig == null) { return List.of(); } List<ParsedQuery> parsedQueries = new ArrayList<>(); - for (LearnToRankFeatureExtractorBuilder featureExtractorBuilder : inferenceConfig.getFeatureExtractorBuilders()) { + for (LearnToRankFeatureExtractorBuilder featureExtractorBuilder : learnToRankConfig.getFeatureExtractorBuilders()) { if (featureExtractorBuilder
instanceof QueryExtractorBuilder queryExtractorBuilder) { parsedQueries.add(executionContext.toQuery(queryExtractorBuilder.query().getParsedQuery())); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearnToRankService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearnToRankService.java new file mode 100644 index 0000000000000..1443ccd687620 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearnToRankService.java @@ -0,0 +1,195 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.inference.ltr; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.script.TemplateScript; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.LearnToRankFeatureExtractorBuilder; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.QueryExtractorBuilder; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.QueryProvider; +import org.elasticsearch.xpack.ml.inference.loadingservice.LocalModel; +import org.elasticsearch.xpack.ml.inference.loadingservice.ModelLoadingService; +import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import static org.elasticsearch.script.Script.DEFAULT_TEMPLATE_LANG; +import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; +import static org.elasticsearch.xpack.core.ml.job.messages.Messages.INFERENCE_CONFIG_QUERY_BAD_FORMAT; + +public class LearnToRankService { + private final ModelLoadingService modelLoadingService; + private final TrainedModelProvider trainedModelProvider; + private final ScriptService scriptService; + private final XContentParserConfiguration parserConfiguration; + + public LearnToRankService( + ModelLoadingService modelLoadingService, + TrainedModelProvider trainedModelProvider, + ScriptService scriptService, + NamedXContentRegistry xContentRegistry + ) { + this(modelLoadingService, trainedModelProvider, scriptService, XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry)); + } + + LearnToRankService( + ModelLoadingService modelLoadingService, + TrainedModelProvider trainedModelProvider, + ScriptService scriptService, + XContentParserConfiguration parserConfiguration + ) { + this.modelLoadingService = modelLoadingService; + this.scriptService = 
scriptService; + this.trainedModelProvider = trainedModelProvider; + this.parserConfiguration = parserConfiguration; + } + + /** + * Asynchronously load a regression model to be used for learn to rank. + * + * @param modelId The model id to be loaded. + * @param listener Response listener. + */ + public void loadLocalModel(String modelId, ActionListener<LocalModel> listener) { + modelLoadingService.getModelForLearnToRank(modelId, listener); + } + + /** + * Asynchronously load the learn to rank config by model id. + * Once the config is loaded, templates are executed using the provided params. + * + * @param modelId Id of the model. + * @param params Template params. + * @param listener Response listener. + */ + public void loadLearnToRankConfig(String modelId, Map<String, Object> params, ActionListener<LearnToRankConfig> listener) { + trainedModelProvider.getTrainedModel( + modelId, + GetTrainedModelsAction.Includes.all(), + null, + ActionListener.wrap(trainedModelConfig -> { + if (trainedModelConfig.getInferenceConfig() instanceof LearnToRankConfig retrievedInferenceConfig) { + for (LearnToRankFeatureExtractorBuilder builder : retrievedInferenceConfig.getFeatureExtractorBuilders()) { + builder.validate(); + } + listener.onResponse(applyParams(retrievedInferenceConfig, params)); + return; + } + listener.onFailure( + ExceptionsHelper.badRequestException( + Messages.getMessage( + Messages.INFERENCE_CONFIG_INCORRECT_TYPE, + Optional.ofNullable(trainedModelConfig.getInferenceConfig()).map(InferenceConfig::getName).orElse("null"), + LearnToRankConfig.NAME.getPreferredName() + ) + ) + ); + }, listener::onFailure) + ); + } + + /** + * Applies template params to a {@link LearnToRankConfig} object. + * + * @param config Original config. + * @param params Template params. + * @return A {@link LearnToRankConfig} object with templates applied. + * + * @throws IOException + */ + private LearnToRankConfig applyParams(LearnToRankConfig config, Map<String, Object> params) throws IOException { + if (scriptService.isLangSupported(DEFAULT_TEMPLATE_LANG) == false) { + return config; + } + + if (params == null || params.isEmpty()) { + // TODO: better handling of missing parameters. + return config; + } + + List<LearnToRankFeatureExtractorBuilder> featureExtractorBuilders = new ArrayList<>(); + + for (LearnToRankFeatureExtractorBuilder featureExtractorBuilder : config.getFeatureExtractorBuilders()) { + featureExtractorBuilders.add(applyParams(featureExtractorBuilder, params)); + } + + return LearnToRankConfig.builder(config).setLearnToRankFeatureExtractorBuilders(featureExtractorBuilders).build(); + } + + /** + * Applies templates to feature extractors. + * + * @param featureExtractorBuilder Source feature extractor builder. + * @param params Template params. + * @return A new feature extractor with templates applied. + * + * @throws IOException + */ + private LearnToRankFeatureExtractorBuilder applyParams( + LearnToRankFeatureExtractorBuilder featureExtractorBuilder, + Map<String, Object> params + ) throws IOException { + if (featureExtractorBuilder instanceof QueryExtractorBuilder queryExtractorBuilder) { + return applyParams(queryExtractorBuilder, params); + } + + return featureExtractorBuilder; + } + + /** + * Applies templates to a {@link QueryExtractorBuilder} object. + * + * @param queryExtractorBuilder Source query extractor builder. + * @param params Template params. + * @return A {@link QueryExtractorBuilder} with templates applied.
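+ *
+ * <p>For illustration (hypothetical values): a stored extractor whose query is
+ * {"match": {"title": "{{query_text}}"}}, rendered with params {"query_text": "brown shoes"},
+ * becomes {"match": {"title": "brown shoes"}} before being parsed back into a QueryProvider.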
+ * + * @throws IOException + */ + private QueryExtractorBuilder applyParams(QueryExtractorBuilder queryExtractorBuilder, Map<String, Object> params) throws IOException { + String templateSource = templateSource(queryExtractorBuilder.query()); + + if (templateSource.contains("{{") == false) { + return queryExtractorBuilder; + } + + Script script = new Script(ScriptType.INLINE, DEFAULT_TEMPLATE_LANG, templateSource, Collections.emptyMap()); + String parsedTemplate = scriptService.compile(script, TemplateScript.CONTEXT).newInstance(params).execute(); + // TODO: handle missing params. + XContentParser parser = XContentType.JSON.xContent().createParser(parserConfiguration, parsedTemplate); + + return new QueryExtractorBuilder( + queryExtractorBuilder.featureName(), + QueryProvider.fromXContent(parser, false, INFERENCE_CONFIG_QUERY_BAD_FORMAT) + ); + } + + private String templateSource(QueryProvider queryProvider) throws IOException { + try (XContentBuilder configSourceBuilder = XContentBuilder.builder(XContentType.JSON.xContent())) { + return BytesReference.bytes(queryProvider.toXContent(configSourceBuilder, EMPTY_PARAMS)).utf8ToString(); + } + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/QueryFeatureExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/QueryFeatureExtractor.java similarity index 98% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/QueryFeatureExtractor.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/QueryFeatureExtractor.java index 6920849e9af6d..bbc377a67ec0b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/QueryFeatureExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/QueryFeatureExtractor.java @@ -5,7 +5,7 @@ * 2.0.
*/ -package org.elasticsearch.xpack.ml.inference.rescorer; +package org.elasticsearch.xpack.ml.inference.ltr; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DisiPriorityQueue; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java index b704dc37bfc22..02a841e44585f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java @@ -25,9 +25,9 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.MultiSearchResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.internal.Client; @@ -400,7 +400,7 @@ public void getTrainedModelMetadata( if (parentTaskId != null) { searchRequest.setParentTask(parentTaskId); } - executeAsyncWithOrigin(client, ML_ORIGIN, SearchAction.INSTANCE, searchRequest, ActionListener.wrap(searchResponse -> { + executeAsyncWithOrigin(client, ML_ORIGIN, TransportSearchAction.TYPE, searchRequest, ActionListener.wrap(searchResponse -> { if (searchResponse.getHits().getHits().length == 0) { listener.onFailure(new ResourceNotFoundException(Messages.getMessage(Messages.MODEL_METADATA_NOT_FOUND, modelIds))); return; @@ -691,7 +691,7 @@ public void getTrainedModel( executeAsyncWithOrigin( client, ML_ORIGIN, - SearchAction.INSTANCE, + TransportSearchAction.TYPE, ChunkedTrainedModelRestorer.buildSearch( client, modelId, @@ -731,7 +731,7 @@ public void getTrainedModel( }, getTrainedModelListener::onFailure) ); }, getTrainedModelListener::onFailure); - executeAsyncWithOrigin(client, ML_ORIGIN, SearchAction.INSTANCE, trainedModelConfigSearch, trainedModelSearchHandler); + executeAsyncWithOrigin(client, ML_ORIGIN, TransportSearchAction.TYPE, trainedModelConfigSearch, trainedModelSearchHandler); } public void getTrainedModels( @@ -872,7 +872,7 @@ public void getTrainedModels( getTrainedModelListener.onResponse(configs); }, getTrainedModelListener::onFailure); - executeAsyncWithOrigin(client, ML_ORIGIN, SearchAction.INSTANCE, searchRequest, configSearchHandler); + executeAsyncWithOrigin(client, ML_ORIGIN, TransportSearchAction.TYPE, searchRequest, configSearchHandler); } public void deleteTrainedModel(String modelId, ActionListener listener) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilder.java deleted file mode 100644 index 8d450a43722ed..0000000000000 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilder.java +++ /dev/null @@ -1,449 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-package org.elasticsearch.xpack.ml.inference.rescorer;
-
-import org.apache.lucene.util.SetOnce;
-import org.elasticsearch.TransportVersion;
-import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.index.query.QueryRewriteContext;
-import org.elasticsearch.index.query.Rewriteable;
-import org.elasticsearch.index.query.SearchExecutionContext;
-import org.elasticsearch.search.rescore.RescorerBuilder;
-import org.elasticsearch.xcontent.ObjectParser;
-import org.elasticsearch.xcontent.ParseField;
-import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xcontent.XContentParser;
-import org.elasticsearch.xpack.core.ClientHelper;
-import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction;
-import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig;
-import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig;
-import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfigUpdate;
-import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfig;
-import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfigUpdate;
-import org.elasticsearch.xpack.core.ml.inference.trainedmodel.StrictlyParsedInferenceConfig;
-import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.LearnToRankFeatureExtractorBuilder;
-import org.elasticsearch.xpack.core.ml.job.messages.Messages;
-import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
-import org.elasticsearch.xpack.core.ml.utils.NamedXContentObjectHelper;
-import org.elasticsearch.xpack.ml.inference.loadingservice.LocalModel;
-import org.elasticsearch.xpack.ml.inference.loadingservice.ModelLoadingService;
-
-import java.io.IOException;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.function.Supplier;
-
-public class InferenceRescorerBuilder extends RescorerBuilder<InferenceRescorerBuilder> {
-
-    public static final String NAME = "inference";
-    private static final ParseField MODEL = new ParseField("model_id");
-    private static final ParseField INFERENCE_CONFIG = new ParseField("inference_config");
-    private static final ParseField INTERNAL_INFERENCE_CONFIG = new ParseField("_internal_inference_config");
-    private static final ObjectParser<Builder, Void> PARSER = new ObjectParser<>(NAME, false, Builder::new);
-    static {
-        PARSER.declareString(Builder::setModelId, MODEL);
-        PARSER.declareNamedObject(
-            Builder::setInferenceConfigUpdate,
-            (p, c, name) -> p.namedObject(InferenceConfigUpdate.class, name, false),
-            INFERENCE_CONFIG
-        );
-        PARSER.declareNamedObject(
-            Builder::setInferenceConfig,
-            (p, c, name) -> p.namedObject(StrictlyParsedInferenceConfig.class, name, false),
-            INTERNAL_INFERENCE_CONFIG
-        );
-    }
-
-    public static InferenceRescorerBuilder fromXContent(XContentParser parser, Supplier<ModelLoadingService> modelLoadingServiceSupplier) {
-        return PARSER.apply(parser, null).build(modelLoadingServiceSupplier);
-    }
-
-    private final String modelId;
-    private final LearnToRankConfigUpdate inferenceConfigUpdate;
-    private final LearnToRankConfig inferenceConfig;
-    private final LocalModel inferenceDefinition;
-    private final Supplier<LocalModel> inferenceDefinitionSupplier;
-    private final Supplier<ModelLoadingService> modelLoadingServiceSupplier;
-    private final Supplier<LearnToRankConfig> inferenceConfigSupplier;
-    private boolean rescoreOccurred;
-
-    public InferenceRescorerBuilder(
-        String modelId,
-        LearnToRankConfigUpdate inferenceConfigUpdate,
-        Supplier<ModelLoadingService> modelLoadingServiceSupplier
-    ) {
-        this.modelId = Objects.requireNonNull(modelId);
-        this.inferenceConfigUpdate = inferenceConfigUpdate;
-        this.modelLoadingServiceSupplier = modelLoadingServiceSupplier;
-        this.inferenceDefinition = null;
-        this.inferenceDefinitionSupplier = null;
-        this.inferenceConfigSupplier = null;
-        this.inferenceConfig = null;
-    }
-
-    InferenceRescorerBuilder(String modelId, LearnToRankConfig inferenceConfig, Supplier<ModelLoadingService> modelLoadingServiceSupplier) {
-        this.modelId = Objects.requireNonNull(modelId);
-        this.inferenceConfigUpdate = null;
-        this.inferenceDefinition = null;
-        this.inferenceDefinitionSupplier = null;
-        this.inferenceConfigSupplier = null;
-        this.modelLoadingServiceSupplier = modelLoadingServiceSupplier;
-        this.inferenceConfig = Objects.requireNonNull(inferenceConfig);
-    }
-
-    private InferenceRescorerBuilder(
-        String modelId,
-        LearnToRankConfigUpdate update,
-        Supplier<ModelLoadingService> modelLoadingServiceSupplier,
-        Supplier<LearnToRankConfig> inferenceConfigSupplier
-    ) {
-        this.modelId = Objects.requireNonNull(modelId);
-        this.inferenceConfigUpdate = update;
-        this.inferenceDefinition = null;
-        this.inferenceDefinitionSupplier = null;
-        this.inferenceConfigSupplier = inferenceConfigSupplier;
-        this.modelLoadingServiceSupplier = modelLoadingServiceSupplier;
-        this.inferenceConfig = null;
-    }
-
-    private InferenceRescorerBuilder(
-        String modelId,
-        LearnToRankConfig inferenceConfig,
-        Supplier<ModelLoadingService> modelLoadingServiceSupplier,
-        Supplier<LocalModel> inferenceDefinitionSupplier
-    ) {
-        this.modelId = modelId;
-        this.inferenceConfigUpdate = null;
-        this.inferenceDefinition = null;
-        this.inferenceDefinitionSupplier = inferenceDefinitionSupplier;
-        this.modelLoadingServiceSupplier = modelLoadingServiceSupplier;
-        this.inferenceConfigSupplier = null;
-        this.inferenceConfig = inferenceConfig;
-    }
-
-    InferenceRescorerBuilder(String modelId, LearnToRankConfig inferenceConfig, LocalModel inferenceDefinition) {
-        this.modelId = modelId;
-        this.inferenceConfigUpdate = null;
-        this.inferenceDefinition = inferenceDefinition;
-        this.inferenceDefinitionSupplier = null;
-        this.modelLoadingServiceSupplier = null;
-        this.inferenceConfigSupplier = null;
-        this.inferenceConfig = inferenceConfig;
-    }
-
-    public InferenceRescorerBuilder(StreamInput input, Supplier<ModelLoadingService> modelLoadingServiceSupplier) throws IOException {
-        super(input);
-        this.modelId = input.readString();
-        this.inferenceConfigUpdate = (LearnToRankConfigUpdate) input.readOptionalNamedWriteable(InferenceConfigUpdate.class);
-        this.inferenceDefinitionSupplier = null;
-        this.inferenceConfigSupplier = null;
-        this.inferenceDefinition = null;
-        this.inferenceConfig = (LearnToRankConfig) input.readOptionalNamedWriteable(InferenceConfig.class);
-        this.modelLoadingServiceSupplier = modelLoadingServiceSupplier;
-    }
-
-    @Override
-    public String getWriteableName() {
-        return NAME;
-    }
-
-    /**
-     * Should be updated once {@link InferenceRescorerFeature} is removed
-     */
-    @Override
-    public TransportVersion getMinimalSupportedVersion() {
-        // TODO: update transport version when released!
-        return TransportVersion.current();
-    }
-
-    /**
-     * Here we fetch the stored model inference context, apply the given update, and rewrite.
-     *
-     * This can and should be done on the coordinator, as it not only validates whether the stored model is of the appropriate
-     * type, it also allows any stored logic to rewrite on the coordinator level if possible.
-     * @param ctx QueryRewriteContext
-     * @return rewritten InferenceRescorerBuilder or self if no changes
-     * @throws IOException when rewrite fails
-     */
-    private RescorerBuilder<InferenceRescorerBuilder> doRewrite(QueryRewriteContext ctx) throws IOException {
-        // Awaiting fetch
-        if (inferenceConfigSupplier != null && inferenceConfigSupplier.get() == null) {
-            return this;
-        }
-        if (inferenceConfig != null) {
-            LearnToRankConfig rewrittenConfig = Rewriteable.rewrite(inferenceConfig, ctx);
-            if (rewrittenConfig == inferenceConfig) {
-                return this;
-            }
-            InferenceRescorerBuilder builder = new InferenceRescorerBuilder(modelId, rewrittenConfig, modelLoadingServiceSupplier);
-            if (windowSize != null) {
-                builder.windowSize(windowSize);
-            }
-            return builder;
-        }
-        // We have requested the stored config and the fetch has completed, so get the config and rewrite further if required
-        if (inferenceConfigSupplier != null) {
-            LearnToRankConfig rewrittenConfig = Rewriteable.rewrite(inferenceConfigSupplier.get(), ctx);
-            InferenceRescorerBuilder builder = new InferenceRescorerBuilder(modelId, rewrittenConfig, modelLoadingServiceSupplier);
-            if (windowSize != null) {
-                builder.windowSize(windowSize);
-            }
-            return builder;
-        }
-        SetOnce<LearnToRankConfig> configSetOnce = new SetOnce<>();
-        GetTrainedModelsAction.Request request = new GetTrainedModelsAction.Request(modelId);
-        request.setAllowNoResources(false);
-        ctx.registerAsyncAction(
-            (c, l) -> ClientHelper.executeAsyncWithOrigin(
-                c,
-                ClientHelper.ML_ORIGIN,
-                GetTrainedModelsAction.INSTANCE,
-                request,
-                ActionListener.wrap(trainedModels -> {
-                    TrainedModelConfig config = trainedModels.getResources().results().get(0);
-                    if (config.getInferenceConfig() instanceof LearnToRankConfig retrievedInferenceConfig) {
-                        retrievedInferenceConfig = inferenceConfigUpdate == null
-                            ? retrievedInferenceConfig
-                            : inferenceConfigUpdate.apply(retrievedInferenceConfig);
-                        for (LearnToRankFeatureExtractorBuilder builder : retrievedInferenceConfig.getFeatureExtractorBuilders()) {
-                            builder.validate();
-                        }
-                        configSetOnce.set(retrievedInferenceConfig);
-                        l.onResponse(null);
-                        return;
-                    }
-                    l.onFailure(
-                        ExceptionsHelper.badRequestException(
-                            Messages.getMessage(
-                                Messages.INFERENCE_CONFIG_INCORRECT_TYPE,
-                                Optional.ofNullable(config.getInferenceConfig()).map(InferenceConfig::getName).orElse("null"),
-                                LearnToRankConfig.NAME.getPreferredName()
-                            )
-                        )
-                    );
-                }, l::onFailure)
-            )
-        );
-        InferenceRescorerBuilder builder = new InferenceRescorerBuilder(
-            modelId,
-            inferenceConfigUpdate,
-            modelLoadingServiceSupplier,
-            configSetOnce::get
-        );
-        if (windowSize() != null) {
-            builder.windowSize(windowSize);
-        }
-        return builder;
-    }
-
-    /**
-     * This rewrite phase occurs on the data node when we know we will want to use the model for inference
-     * @param ctx Rewrite context
-     * @return A rewritten rescorer with a model definition or a model definition supplier populated
-     */
-    private RescorerBuilder<InferenceRescorerBuilder> doDataNodeRewrite(QueryRewriteContext ctx) {
-        assert inferenceConfig != null;
-        // We already have an inference definition, no need to do any rewriting
-        if (inferenceDefinition != null) {
-            return this;
-        }
-        // Awaiting fetch
-        if (inferenceDefinitionSupplier != null && inferenceDefinitionSupplier.get() == null) {
-            return this;
-        }
-        if (inferenceDefinitionSupplier != null) {
-            LocalModel inferenceDefinition = inferenceDefinitionSupplier.get();
-            InferenceRescorerBuilder builder = new InferenceRescorerBuilder(modelId, inferenceConfig, inferenceDefinition);
-            if (windowSize() != null) {
-                builder.windowSize(windowSize());
-            }
-            return builder;
-        }
-        if (modelLoadingServiceSupplier == null || modelLoadingServiceSupplier.get() == null) {
-            throw new IllegalStateException("Model loading service must be available");
-        }
-        SetOnce<LocalModel> inferenceDefinitionSetOnce = new SetOnce<>();
-        ctx.registerAsyncAction((c, l) -> modelLoadingServiceSupplier.get().getModelForLearnToRank(modelId, ActionListener.wrap(lm -> {
-            inferenceDefinitionSetOnce.set(lm);
-            l.onResponse(null);
-        }, l::onFailure)));
-        InferenceRescorerBuilder builder = new InferenceRescorerBuilder(
-            modelId,
-            inferenceConfig,
-            modelLoadingServiceSupplier,
-            inferenceDefinitionSetOnce::get
-        );
-        if (windowSize() != null) {
-            builder.windowSize(windowSize());
-        }
-        return builder;
-    }
-
-    /**
-     * This rewrite phase occurs on the data node at search time and rewrites the inference config against the
-     * search execution context.
-     * @param ctx Rewrite context
-     * @return A rescorer with a rewritten inference config, or this rescorer if no rewrite was required
-     * @throws IOException If fetching, parsing, or overall rewrite failures occur
-     */
-    private RescorerBuilder<InferenceRescorerBuilder> doSearchRewrite(QueryRewriteContext ctx) throws IOException {
-        if (inferenceConfig == null) {
-            return this;
-        }
-        LearnToRankConfig rewrittenConfig = Rewriteable.rewrite(inferenceConfig, ctx);
-        if (rewrittenConfig == inferenceConfig) {
-            return this;
-        }
-        InferenceRescorerBuilder builder = inferenceDefinition == null
-            ?
new InferenceRescorerBuilder(modelId, rewrittenConfig, modelLoadingServiceSupplier) - : new InferenceRescorerBuilder(modelId, rewrittenConfig, inferenceDefinition); - if (windowSize != null) { - builder.windowSize(windowSize); - } - return builder; - } - - @Override - public RescorerBuilder rewrite(QueryRewriteContext ctx) throws IOException { - if (ctx.convertToDataRewriteContext() != null) { - return doDataNodeRewrite(ctx); - } - if (ctx.convertToSearchExecutionContext() != null) { - return doSearchRewrite(ctx); - } - return doRewrite(ctx); - } - - public String getModelId() { - return modelId; - } - - LearnToRankConfig getInferenceConfig() { - return inferenceConfig; - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException { - if (inferenceDefinitionSupplier != null || inferenceConfigSupplier != null) { - throw new IllegalStateException("suppliers must be null, missing a rewriteAndFetch?"); - } - assert inferenceDefinition == null || rescoreOccurred : "Unnecessarily populated local model object"; - out.writeString(modelId); - out.writeOptionalNamedWriteable(inferenceConfigUpdate); - out.writeOptionalNamedWriteable(inferenceConfig); - } - - @Override - protected void doXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(NAME); - builder.field(MODEL.getPreferredName(), modelId); - if (inferenceConfigUpdate != null) { - NamedXContentObjectHelper.writeNamedObject(builder, params, INFERENCE_CONFIG.getPreferredName(), inferenceConfigUpdate); - } - if (inferenceConfig != null) { - NamedXContentObjectHelper.writeNamedObject(builder, params, INTERNAL_INFERENCE_CONFIG.getPreferredName(), inferenceConfig); - } - builder.endObject(); - } - - @Override - protected InferenceRescorerContext innerBuildContext(int windowSize, SearchExecutionContext context) { - rescoreOccurred = true; - return new InferenceRescorerContext(windowSize, InferenceRescorer.INSTANCE, inferenceConfig, inferenceDefinition, context); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - if (super.equals(o) == false) return false; - InferenceRescorerBuilder that = (InferenceRescorerBuilder) o; - return Objects.equals(modelId, that.modelId) - && Objects.equals(inferenceDefinition, that.inferenceDefinition) - && Objects.equals(inferenceConfigUpdate, that.inferenceConfigUpdate) - && Objects.equals(inferenceConfig, that.inferenceConfig) - && Objects.equals(inferenceDefinitionSupplier, that.inferenceDefinitionSupplier) - && Objects.equals(modelLoadingServiceSupplier, that.modelLoadingServiceSupplier); - } - - @Override - public int hashCode() { - return Objects.hash( - super.hashCode(), - modelId, - inferenceConfigUpdate, - inferenceConfig, - inferenceDefinition, - inferenceDefinitionSupplier, - modelLoadingServiceSupplier - ); - } - - LearnToRankConfigUpdate getInferenceConfigUpdate() { - return inferenceConfigUpdate; - } - - // Used in tests - Supplier modelLoadingServiceSupplier() { - return modelLoadingServiceSupplier; - } - - // Used in tests - LocalModel getInferenceDefinition() { - return inferenceDefinition; - } - - static class Builder { - private String modelId; - private LearnToRankConfigUpdate inferenceConfigUpdate; - private LearnToRankConfig inferenceConfig; - - public void setModelId(String modelId) { - this.modelId = modelId; - } - - public void setInferenceConfigUpdate(InferenceConfigUpdate inferenceConfigUpdate) { - if (inferenceConfigUpdate instanceof 
LearnToRankConfigUpdate learnToRankConfigUpdate) { - this.inferenceConfigUpdate = learnToRankConfigUpdate; - return; - } - throw new IllegalArgumentException( - Strings.format( - "[%s] only allows a [%s] object to be configured", - INFERENCE_CONFIG.getPreferredName(), - LearnToRankConfigUpdate.NAME.getPreferredName() - ) - ); - } - - void setInferenceConfig(InferenceConfig inferenceConfig) { - if (inferenceConfig instanceof LearnToRankConfig learnToRankConfig) { - this.inferenceConfig = learnToRankConfig; - return; - } - throw new IllegalArgumentException( - Strings.format( - "[%s] only allows a [%s] object to be configured", - INFERENCE_CONFIG.getPreferredName(), - LearnToRankConfigUpdate.NAME.getPreferredName() - ) - ); - } - - InferenceRescorerBuilder build(Supplier modelLoadingServiceSupplier) { - assert inferenceConfig == null || inferenceConfigUpdate == null; - if (inferenceConfig != null) { - return new InferenceRescorerBuilder(modelId, inferenceConfig, modelLoadingServiceSupplier); - } else { - return new InferenceRescorerBuilder(modelId, inferenceConfigUpdate, modelLoadingServiceSupplier); - } - } - } -} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java index 9e357ce7c1dec..ac16948e32ed6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java @@ -17,11 +17,11 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.bulk.BulkItemResponse; -import org.elasticsearch.action.search.MultiSearchAction; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportMultiSearchAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; @@ -413,7 +413,7 @@ public void deleteJobDocuments( ); multiSearchRequest.add(new SearchRequest(indexName).source(source)); } - executeAsyncWithOrigin(client, ML_ORIGIN, MultiSearchAction.INSTANCE, multiSearchRequest, customIndexSearchHandler); + executeAsyncWithOrigin(client, ML_ORIGIN, TransportMultiSearchAction.TYPE, multiSearchRequest, customIndexSearchHandler); }, failureHandler); // Step 5. 
Get the job as the initial result index name is required diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java index e65e0abaca2e3..761bfa16e66bb 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java @@ -541,7 +541,7 @@ void setRefreshPolicy(WriteRequest.RefreshPolicy refreshPolicy) { } BulkResponse persist(Supplier shouldRetry, boolean requireAlias) { - final PlainActionFuture getResponseFuture = PlainActionFuture.newFuture(); + final PlainActionFuture getResponseFuture = new PlainActionFuture<>(); persist(shouldRetry, requireAlias, getResponseFuture); return getResponseFuture.actionGet(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilder.java index b21ac6f47410e..2d4ea308a6693 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilder.java @@ -23,6 +23,7 @@ import org.elasticsearch.xpack.ml.job.process.autodetect.writer.ScheduledEventToRuleWriter; import org.elasticsearch.xpack.ml.process.NativeController; import org.elasticsearch.xpack.ml.process.ProcessPipes; +import org.elasticsearch.xpack.ml.utils.FileUtils; import java.io.BufferedWriter; import java.io.IOException; @@ -208,6 +209,7 @@ public static Path writeNormalizerInitState(String jobId, String state, Environm // createTempFile has a race condition where it may return the same // temporary file name to different threads if called simultaneously // from multiple threads, hence add the thread ID to avoid this + FileUtils.recreateTempDirectoryIfNeeded(env.tmpFile()); Path stateFile = Files.createTempFile( env.tmpFile(), jobId + "_quantiles_" + Thread.currentThread().getId(), @@ -225,6 +227,7 @@ private void buildScheduledEventsConfig(List command) throws IOException if (scheduledEvents.isEmpty()) { return; } + FileUtils.recreateTempDirectoryIfNeeded(env.tmpFile()); Path eventsConfigFile = Files.createTempFile(env.tmpFile(), "eventsConfig", JSON_EXTENSION); filesToDelete.add(eventsConfigFile); @@ -249,6 +252,7 @@ private void buildScheduledEventsConfig(List command) throws IOException } private void buildJobConfig(List command) throws IOException { + FileUtils.recreateTempDirectoryIfNeeded(env.tmpFile()); Path configFile = Files.createTempFile(env.tmpFile(), "config", JSON_EXTENSION); filesToDelete.add(configFile); try ( @@ -267,6 +271,7 @@ private void buildFiltersConfig(List command) throws IOException { if (referencedFilters.isEmpty()) { return; } + FileUtils.recreateTempDirectoryIfNeeded(env.tmpFile()); Path filtersConfigFile = Files.createTempFile(env.tmpFile(), "filtersConfig", JSON_EXTENSION); filesToDelete.add(filtersConfigFile); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java index 1144b1afffdcb..8deac327c065e 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java @@ -81,6 +81,7 @@ import java.io.IOException; import java.io.InputStream; import java.time.Duration; +import java.time.Instant; import java.time.ZonedDateTime; import java.util.Date; import java.util.Iterator; @@ -1000,7 +1001,7 @@ public Optional jobOpenTime(JobTask jobTask) { } void setJobState(JobTask jobTask, JobState state, String reason) { - JobTaskState jobTaskState = new JobTaskState(state, jobTask.getAllocationId(), reason); + JobTaskState jobTaskState = new JobTaskState(state, jobTask.getAllocationId(), reason, Instant.now()); jobTask.updatePersistentTaskState( jobTaskState, ActionListener.wrap( @@ -1019,7 +1020,7 @@ private static void logSetJobStateFailure(JobState state, String jobId, Exceptio } void setJobState(JobTask jobTask, JobState state, String reason, CheckedConsumer handler) { - JobTaskState jobTaskState = new JobTaskState(state, jobTask.getAllocationId(), reason); + JobTaskState jobTaskState = new JobTaskState(state, jobTask.getAllocationId(), reason, Instant.now()); jobTask.updatePersistentTaskState(jobTaskState, ActionListener.wrap(persistentTask -> { try { handler.accept(null); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java index 10a12273baff5..ed4e6875e260a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java @@ -10,9 +10,9 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.index.query.BoolQueryBuilder; @@ -98,7 +98,7 @@ public void remove(float requestsPerSec, ActionListener listener, Boole searchRequest.source(source); searchRequest.setParentTask(parentTaskId); client.execute( - SearchAction.INSTANCE, + TransportSearchAction.TYPE, searchRequest, new ThreadedActionListener<>(threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME), forecastStatsHandler) ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java index 15b1993dc0586..09cd6225cf0ca 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java @@ -61,6 +61,7 @@ import org.elasticsearch.xpack.ml.process.MlMemoryTracker; import org.elasticsearch.xpack.ml.task.AbstractJobPersistentTasksExecutor; +import java.time.Instant; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -344,7 +345,7 @@ private void runJob(JobTask jobTask, 
JobState jobState, OpenJobAction.JobParams private void failTask(JobTask jobTask, String reason) { String jobId = jobTask.getJobId(); auditor.error(jobId, reason); - JobTaskState failedState = new JobTaskState(JobState.FAILED, jobTask.getAllocationId(), reason); + JobTaskState failedState = new JobTaskState(JobState.FAILED, jobTask.getAllocationId(), reason, Instant.now()); jobTask.updatePersistentTaskState(failedState, ActionListener.wrap(r -> { logger.debug("[{}] updated task state to failed", jobId); stopAssociatedDatafeedForFailedJob(jobId); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/ProcessPipes.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/ProcessPipes.java index fca64a32cd499..6b09e38b02ea6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/ProcessPipes.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/ProcessPipes.java @@ -10,11 +10,13 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.xpack.ml.process.logging.CppLogMessageHandler; +import org.elasticsearch.xpack.ml.utils.FileUtils; import org.elasticsearch.xpack.ml.utils.NamedPipeHelper; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.nio.file.Path; import java.time.Duration; import java.util.List; import java.util.Locale; @@ -41,6 +43,7 @@ public class ProcessPipes { private final NamedPipeHelper namedPipeHelper; private final String jobId; + private final Path tempDir; /** * null indicates a pipe won't be used @@ -91,6 +94,7 @@ public ProcessPipes( ) { this.namedPipeHelper = namedPipeHelper; this.jobId = jobId; + this.tempDir = env.tmpFile(); this.timeout = timeout; // The way the pipe names are formed MUST match what is done in the controller main() @@ -150,6 +154,7 @@ public void addArgs(List command) { * and this JVM. */ public void connectLogStream() throws IOException { + FileUtils.recreateTempDirectoryIfNeeded(tempDir); logStreamHandler = new CppLogMessageHandler(jobId, namedPipeHelper.openNamedPipeInputStream(logPipeName, timeout)); } @@ -162,6 +167,7 @@ public void connectOtherStreams() throws IOException { if (logStreamHandler == null) { throw new NullPointerException("Must connect log stream before other streams"); } + FileUtils.recreateTempDirectoryIfNeeded(tempDir); // The order here is important. It must match the order that the C++ process tries to connect to the pipes, otherwise // a timeout is guaranteed. Also change api::CIoManager in the C++ code if changing the order here. 
try { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilder.java index 40e4f5d9ede78..7cdeeb3d559ec 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilder.java @@ -25,6 +25,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.ml.action.InferModelAction; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelPrefixStrings; import org.elasticsearch.xpack.core.ml.inference.results.TextExpansionResults; import org.elasticsearch.xpack.core.ml.inference.results.WarningInferenceResults; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextExpansionConfigUpdate; @@ -131,6 +132,7 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws InferModelAction.Request.DEFAULT_TIMEOUT_FOR_API ); inferRequest.setHighPriority(true); + inferRequest.setPrefixType(TrainedModelPrefixStrings.PrefixType.SEARCH); SetOnce textExpansionResultsSupplier = new SetOnce<>(); queryRewriteContext.registerAsyncAction((client, listener) -> { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/cat/RestCatTrainedModelsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/cat/RestCatTrainedModelsAction.java index e2e096ae903ea..f53a82f54a67e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/cat/RestCatTrainedModelsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/cat/RestCatTrainedModelsAction.java @@ -7,14 +7,15 @@ package org.elasticsearch.xpack.ml.rest.cat; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.support.GroupedActionListener; +import org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.Table; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.Scope; @@ -32,7 +33,6 @@ import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.security.user.InternalUsers; -import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.List; @@ -85,15 +85,23 @@ protected RestChannelConsumer doCatRequest(RestRequest restRequest, NodeClient c statsRequest.setAllowNoResources(true); modelsAction.setAllowNoResources(restRequest.paramAsBoolean(ALLOW_NO_MATCH.getPreferredName(), statsRequest.isAllowNoResources())); - return channel -> { - final ActionListener listener = ActionListener.notifyOnce(new RestResponseListener<>(channel) { - @Override - public RestResponse buildResponse(final Table table) throws Exception { - return RestTable.buildResponse(table, channel); - } - }); + return new RestChannelConsumer() { + @Override + public void accept(RestChannel channel) { + 
SubscribableListener.newForked(this::getTrainedModels).andThen(this::getDerivedData).addListener(newRestListener(channel)); + } + + private List trainedModelsStats; + private List dataFrameAnalytics; + + private void getTrainedModels(ActionListener listener) { + client.execute(GetTrainedModelsAction.INSTANCE, modelsAction, listener); + } - client.execute(GetTrainedModelsAction.INSTANCE, modelsAction, ActionListener.wrap(trainedModels -> { + private void getDerivedData( + ActionListener listener, + GetTrainedModelsAction.Response trainedModels + ) { final List trainedModelConfigs = trainedModels.getResources().results(); Set potentialAnalyticsIds = new HashSet<>(); @@ -105,28 +113,35 @@ public RestResponse buildResponse(final Table table) throws Exception { // Find the related DataFrameAnalyticsConfigs String requestIdPattern = Strings.collectionToDelimitedString(potentialAnalyticsIds, "*,") + "*"; - final GroupedActionListener groupedListener = createGroupedListener( - restRequest, - 2, - trainedModels.getResources().results(), - listener - ); + try (var listeners = new RefCountingListener(listener.map(ignored -> trainedModels))) { + client.execute( + GetTrainedModelsStatsAction.INSTANCE, + statsRequest, + listeners.acquire(response -> trainedModelsStats = response.getResources().results()) + ); - client.execute( - GetTrainedModelsStatsAction.INSTANCE, - statsRequest, - ActionListener.wrap(groupedListener::onResponse, groupedListener::onFailure) - ); + final var dataFrameAnalyticsRequest = new GetDataFrameAnalyticsAction.Request(requestIdPattern); + dataFrameAnalyticsRequest.setAllowNoResources(true); + dataFrameAnalyticsRequest.setPageParams(new PageParams(0, potentialAnalyticsIds.size())); + client.execute( + GetDataFrameAnalyticsAction.INSTANCE, + dataFrameAnalyticsRequest, + listeners.acquire(response -> dataFrameAnalytics = response.getResources().results()) + ); + } + } - GetDataFrameAnalyticsAction.Request dataFrameAnalyticsRequest = new GetDataFrameAnalyticsAction.Request(requestIdPattern); - dataFrameAnalyticsRequest.setAllowNoResources(true); - dataFrameAnalyticsRequest.setPageParams(new PageParams(0, potentialAnalyticsIds.size())); - client.execute( - GetDataFrameAnalyticsAction.INSTANCE, - dataFrameAnalyticsRequest, - ActionListener.wrap(groupedListener::onResponse, groupedListener::onFailure) - ); - }, listener::onFailure)); + private ActionListener newRestListener(RestChannel channel) { + return new RestResponseListener<>(channel) { + @Override + public RestResponse buildResponse(final GetTrainedModelsAction.Response trainedModels) throws Exception { + return RestTable.buildResponse( + buildTable(restRequest, trainedModelsStats, trainedModels.getResources().results(), dataFrameAnalytics), + channel + ); + } + }; + } }; } @@ -230,19 +245,6 @@ protected Table getTableWithHeader(RestRequest request) { return table; } - private GroupedActionListener createGroupedListener( - final RestRequest request, - final int size, - final List configs, - final ActionListener
listener
-    ) {
-        return new GroupedActionListener<>(size, listener.safeMap(responses -> {
-            GetTrainedModelsStatsAction.Response statsResponse = extractResponse(responses, GetTrainedModelsStatsAction.Response.class);
-            GetDataFrameAnalyticsAction.Response analytics = extractResponse(responses, GetDataFrameAnalyticsAction.Response.class);
-            return buildTable(request, statsResponse.getResources().results(), configs, analytics.getResources().results());
-        }));
-    }
-
     private Table buildTable(
         RestRequest request,
         List stats,
@@ -302,9 +304,4 @@ private Table buildTable(
         });
         return table;
     }
-
-    @SuppressWarnings("unchecked")
-    private static <A extends ActionResponse> A extractResponse(final Collection<ActionResponse> responses, Class<A> c) {
-        return (A) responses.stream().filter(c::isInstance).findFirst().get();
-    }
 }
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/FileUtils.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/FileUtils.java
new file mode 100644
index 0000000000000..95f4565d1a97b
--- /dev/null
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/FileUtils.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.ml.utils;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.attribute.FileAttribute;
+import java.nio.file.attribute.PosixFilePermission;
+import java.nio.file.attribute.PosixFilePermissions;
+import java.util.EnumSet;
+
+/**
+ * Some utility functions for managing files.
+ */
+public final class FileUtils {
+
+    private FileUtils() {}
+
+    private static final FileAttribute<?>[] POSIX_TMP_DIR_PERMISSIONS = new FileAttribute<?>[] {
+        PosixFilePermissions.asFileAttribute(
+            EnumSet.of(PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE, PosixFilePermission.OWNER_EXECUTE)
+        ) };
+
+    /**
+     * Recreates the Elasticsearch temporary directory if it doesn't exist.
+     * The operating system may have cleaned it up due to inactivity, which
+     * causes some (machine learning) processes to fail.
+     * @param tmpDir the path to the temporary directory
+     */
+    public static void recreateTempDirectoryIfNeeded(Path tmpDir) throws IOException {
+        if (tmpDir.getFileSystem().supportedFileAttributeViews().contains("posix")) {
+            Files.createDirectories(tmpDir, POSIX_TMP_DIR_PERMISSIONS);
+        } else {
+            Files.createDirectories(tmpDir);
+        }
+    }
+}
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/MlProcessors.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/MlProcessors.java
index 1769a7946ce80..4d317ee7925fc 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/MlProcessors.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/MlProcessors.java
@@ -66,9 +66,9 @@ public static Processors getTotalMlNodeProcessors(DiscoveryNodes nodes, Integer
             if (node.getRoles().contains(DiscoveryNodeRole.ML_ROLE)) {
                 Processors nodeProcessors = get(node, allocatedProcessorScale);
-                // Round down before summing, because ML only uses whole processors
-                total += nodeProcessors.roundDown();
+                // Round up before summing, so that partial processors are not lost from the total
+                total += nodeProcessors.roundUp();
             }
         }
-        return Processors.of((double) total);
+        return total == 0 ?
Processors.ZERO : Processors.of((double) total); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterService.java index ddcfcb7de08d4..5fa434e530bc5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterService.java @@ -238,7 +238,7 @@ private BulkResponse bulkIndexWithRetry( isShutdown ? "node is shutting down." : "machine learning feature is being reset." ); } - final PlainActionFuture getResponseFuture = PlainActionFuture.newFuture(); + final PlainActionFuture getResponseFuture = new PlainActionFuture<>(); bulkIndexWithRetry(bulkRequest, jobId, shouldRetry, retryMsgHandler, actionExecutor, getResponseFuture); return getResponseFuture.actionGet(); } @@ -281,7 +281,7 @@ public SearchResponse searchWithRetry( Supplier shouldRetry, Consumer retryMsgHandler ) { - final PlainActionFuture getResponse = PlainActionFuture.newFuture(); + final PlainActionFuture getResponse = new PlainActionFuture<>(); final Object key = new Object(); final ActionListener removeListener = ActionListener.runBefore( getResponse, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/vectors/TextEmbeddingQueryVectorBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/vectors/TextEmbeddingQueryVectorBuilder.java index 2e780c9849bd5..72663b3f8a7bd 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/vectors/TextEmbeddingQueryVectorBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/vectors/TextEmbeddingQueryVectorBuilder.java @@ -20,6 +20,7 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.ml.action.InferModelAction; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelPrefixStrings; import org.elasticsearch.xpack.core.ml.inference.results.TextEmbeddingResults; import org.elasticsearch.xpack.core.ml.inference.results.WarningInferenceResults; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextEmbeddingConfigUpdate; @@ -100,6 +101,7 @@ public void buildVector(Client client, ActionListener listener) { InferModelAction.Request.DEFAULT_TIMEOUT_FOR_API ); inferRequest.setHighPriority(true); + inferRequest.setPrefixType(TrainedModelPrefixStrings.PrefixType.SEARCH); executeAsyncWithOrigin(client, ML_ORIGIN, InferModelAction.INSTANCE, inferRequest, ActionListener.wrap(response -> { if (response.getInferenceResults().isEmpty()) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java index f6c5924db37f8..a7a9122c96606 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java @@ -147,7 +147,7 @@ public void testIsNodeSafeToShutdownGivenFailedTasks() { new OpenJobAction.JobParams("job-1"), new PersistentTasksCustomMetadata.Assignment("node-1", "test assignment") ); - tasksBuilder.updateTaskState(MlTasks.jobTaskId("job-1"), new JobTaskState(JobState.FAILED, 1, "testing")); + tasksBuilder.updateTaskState(MlTasks.jobTaskId("job-1"), new 
JobTaskState(JobState.FAILED, 1, "testing", Instant.now())); tasksBuilder.addTask( MlTasks.dataFrameAnalyticsTaskId("job-2"), MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME, @@ -156,7 +156,7 @@ public void testIsNodeSafeToShutdownGivenFailedTasks() { ); tasksBuilder.updateTaskState( MlTasks.dataFrameAnalyticsTaskId("job-2"), - new DataFrameAnalyticsTaskState(DataFrameAnalyticsState.FAILED, 2, "testing") + new DataFrameAnalyticsTaskState(DataFrameAnalyticsState.FAILED, 2, "testing", Instant.now()) ); tasksBuilder.addTask( MlTasks.snapshotUpgradeTaskId("job-3", "snapshot-3"), diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java index 4ee8409bbd996..a15bec8c110d6 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java @@ -96,7 +96,7 @@ public void testPreviewDatafeed_GivenEmptyStream() throws IOException { assertThat(capturedResponse, equalTo("[]")); assertThat(capturedFailure, is(nullValue())); - verify(dataExtractor).cancel(); + verify(dataExtractor).destroy(); } public void testPreviewDatafeed_GivenNonEmptyStream() throws IOException { @@ -108,7 +108,7 @@ public void testPreviewDatafeed_GivenNonEmptyStream() throws IOException { assertThat(capturedResponse, equalTo("[{\"a\":1, \"b\":2},{\"c\":3, \"d\":4},{\"e\":5, \"f\":6}]")); assertThat(capturedFailure, is(nullValue())); - verify(dataExtractor).cancel(); + verify(dataExtractor).destroy(); } public void testPreviewDatafeed_GivenFailure() throws IOException { @@ -118,6 +118,6 @@ public void testPreviewDatafeed_GivenFailure() throws IOException { assertThat(capturedResponse, is(nullValue())); assertThat(capturedFailure.getMessage(), equalTo("failed")); - verify(dataExtractor).cancel(); + verify(dataExtractor).destroy(); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelActionTests.java index c9a2482aac343..84c49ba95b522 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelActionTests.java @@ -141,6 +141,7 @@ public void testSetTrainedModelConfigFieldsFromPackagedModel() throws IOExceptio assertEquals(packageConfig.getDescription(), trainedModelConfig.getDescription()); assertEquals(packageConfig.getMetadata(), trainedModelConfig.getMetadata()); assertEquals(packageConfig.getTags(), trainedModelConfig.getTags()); + assertEquals(packageConfig.getPrefixStrings(), trainedModelConfig.getPrefixStrings()); // fully tested in {@link #testParseInferenceConfigFromModelPackage} assertNotNull(trainedModelConfig.getInferenceConfig()); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsActionTests.java index 8e8b45ae10fd4..d1d3338ce14ba 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsActionTests.java +++ 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsActionTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsTaskState; import org.elasticsearch.xpack.ml.action.TransportStopDataFrameAnalyticsAction.AnalyticsByTaskState; +import java.time.Instant; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; @@ -84,7 +85,7 @@ private static void addAnalyticsTask( if (state != null) { builder.updateTaskState( MlTasks.dataFrameAnalyticsTaskId(analyticsId), - new DataFrameAnalyticsTaskState(state, builder.getLastAllocationId(), null) + new DataFrameAnalyticsTaskState(state, builder.getLastAllocationId(), null, Instant.now()) ); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregatorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregatorTests.java index 4cf57988500cb..410fc474a503f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregatorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregatorTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.aggs.MlAggsHelper; import java.io.IOException; import java.util.Arrays; @@ -231,8 +230,4 @@ private static void writeTestDocs(RandomIndexWriter w, double[] bucketValues) th } } - private static MlAggsHelper.DoubleBucketValues values(double[] values) { - return new MlAggsHelper.DoubleBucketValues(new long[0], values, new int[0]); - } - } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/KDETests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/KDETests.java new file mode 100644 index 0000000000000..e794b5afb258c --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/KDETests.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.aggs.changepoint; + +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; +import java.util.stream.DoubleStream; + +import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.equalTo; + +public class KDETests extends ESTestCase { + + public void testEmpty() { + KDE kde = new KDE(new double[0], 1.06); + assertThat(kde.size(), equalTo(0)); + assertThat(kde.data(), equalTo(new double[0])); + } + + public void testCdfAndSf() { + + double[] data = DoubleStream.generate(() -> randomDoubleBetween(0.0, 100.0, true)).limit(101).toArray(); + Arrays.sort(data); + KDE kde = new KDE(data, 1.06); + + // Very large and small limits are handled correctly. 
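+        // For any x the CDF and survival function are complementary, cdf(x) + sf(x) == 1, so the
+        // tail assertions here and the randomised sum-to-one loop further down exercise the same
+        // invariant at the extremes and at interior points respectively.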
+        assertThat(kde.cdf(-1000.0).value(), closeTo(0.0, 1e-10));
+        assertThat(kde.cdf(1000.0).value(), closeTo(1.0, 1e-10));
+        assertThat(kde.sf(1000.0).value(), closeTo(0.0, 1e-10));
+        assertThat(kde.sf(-1000.0).value(), closeTo(1.0, 1e-10));
+
+        // Check the cdf and survival function are approximately equal to 0.5 for the median.
+        {
+            double median = kde.data()[kde.size() / 2];
+            KDE.ValueAndMagnitude cdf = kde.cdf(median);
+            KDE.ValueAndMagnitude sf = kde.sf(median);
+            assertThat(cdf.value(), closeTo(0.5, 0.05));
+            assertThat(sf.value(), closeTo(0.5, 0.05));
+        }
+
+        // Should approximately sum to 1.0 for some random data.
+        for (int i = 0; i < 100; i++) {
+            double x = randomDoubleBetween(-10.0, 110.0, true);
+            KDE.ValueAndMagnitude cdf = kde.cdf(x);
+            KDE.ValueAndMagnitude sf = kde.sf(x);
+            assertThat(cdf.value() + sf.value(), closeTo(1.0, 1e-4));
+        }
+    }
+
+    public void testSignificanceForUnderflow() {
+
+        KDE kde = new KDE(new double[] { 1.0, 2.0, 3.0, 4.0, 5.0 }, 1.06);
+
+        KDE.ValueAndMagnitude cdf = kde.cdf(-1000.0);
+        KDE.ValueAndMagnitude sf = kde.sf(1000.0);
+
+        assertThat(cdf.value(), equalTo(0.0));
+        assertThat(sf.value(), equalTo(0.0));
+
+        // Difference from data is larger for cdf than the survival function.
+        assertThat(cdf.isMoreSignificant(sf), equalTo(true));
+    }
+}
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/SpikeAndDipDetectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/SpikeAndDipDetectorTests.java
new file mode 100644
index 0000000000000..5653af2a000f5
--- /dev/null
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/SpikeAndDipDetectorTests.java
@@ -0,0 +1,165 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.ml.aggs.changepoint;
+
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.Arrays;
+
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+public class SpikeAndDipDetectorTests extends ESTestCase {
+
+    public void testTooLittleData() {
+        for (int i = 0; i < 4; i++) {
+            double[] values = new double[i];
+            Arrays.fill(values, 1.0);
+            SpikeAndDipDetector detect = new SpikeAndDipDetector(values);
+            assertThat(detect.at(0.01), instanceOf(ChangeType.Indeterminable.class));
+        }
+    }
+
+    public void testSpikeAndDipValues() {
+        double[] values = new double[] { 2.0, 1.0, 3.0, 5.0, 4.0 };
+        SpikeAndDipDetector detector = new SpikeAndDipDetector(values);
+        assertThat(detector.spikeValue(), equalTo(5.0));
+        assertThat(detector.dipValue(), equalTo(1.0));
+    }
+
+    public void testExcludedValues() {
+
+        // We expect to exclude the values at indices 7, 8 and 27 from the spike and at 8, 27, 28
+        // from the dip KDE data.
+ + double[] values = new double[] { + 1.0, + -1.0, + 3.0, + 2.0, + 1.5, + 2.0, + 3.0, + 3.0, + 10.0, + 2.0, + 2.1, + 0.5, + 1.0, + 1.4, + 2.0, + -0.0, + 1.0, + 3.1, + 2.2, + 2.1, + 2.0, + 1.0, + 2.0, + 3.0, + 4.0, + 7.0, + 4.0, + -2.0, + 0.0, + 1.0 }; + double[] expectedDipKDEValues = new double[] { + 1.0, + -1.0, + 3.0, + 2.0, + 1.5, + 2.0, + 3.0, + 3.0, + 2.0, + 2.1, + 0.5, + 1.0, + 1.4, + 2.0, + -0.0, + 1.0, + 3.1, + 2.2, + 2.1, + 2.0, + 1.0, + 2.0, + 3.0, + 4.0, + 7.0, + 4.0, + 1.0 }; + double[] expectedSpikeKDEValues = new double[] { + 1.0, + -1.0, + 3.0, + 2.0, + 1.5, + 2.0, + 3.0, + 2.0, + 2.1, + 0.5, + 1.0, + 1.4, + 2.0, + -0.0, + 1.0, + 3.1, + 2.2, + 2.1, + 2.0, + 1.0, + 2.0, + 3.0, + 4.0, + 7.0, + 4.0, + 0.0, + 1.0 }; + + Arrays.sort(expectedSpikeKDEValues); + Arrays.sort(expectedDipKDEValues); + + SpikeAndDipDetector detector = new SpikeAndDipDetector(values); + + assertThat(detector.spikeValue(), equalTo(10.0)); + assertThat(detector.dipValue(), equalTo(-2.0)); + assertThat(detector.spikeTestKDE().data(), equalTo(expectedSpikeKDEValues)); + assertThat(detector.dipTestKDE().data(), equalTo(expectedDipKDEValues)); + } + + public void testDetection() { + + // Check vs some expected values. + + { + double[] values = new double[] { 0.1, 3.1, 1.2, 1.7, 0.9, 2.3, -0.8, 3.2, 1.2, 1.3, 1.1, 1.0, 8.5, 0.5, 2.6, 0.7 }; + + SpikeAndDipDetector detect = new SpikeAndDipDetector(values); + + ChangeType change = detect.at(0.05); + + assertThat(change, instanceOf(ChangeType.Spike.class)); + assertThat(change.pValue(), closeTo(3.0465e-12, 1e-15)); + } + { + double[] values = new double[] { 0.1, 3.1, 1.2, 1.7, 0.9, 2.3, -4.2, 3.2, 1.2, 1.3, 1.1, 1.0, 3.5, 0.5, 2.6, 0.7 }; + + SpikeAndDipDetector detect = new SpikeAndDipDetector(values); + + ChangeType change = detect.at(0.05); + + assertThat(change, instanceOf(ChangeType.Dip.class)); + assertThat(change.pValue(), closeTo(1.2589e-08, 1e-11)); + } + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/DelegatingCircuitBreakerServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/DelegatingCircuitBreakerServiceTests.java index 208fe677a9a3f..aa1a82116387e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/DelegatingCircuitBreakerServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/DelegatingCircuitBreakerServiceTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.breaker.PreallocatedCircuitBreakerService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.indices.breaker.CircuitBreakerMetrics; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.test.ESTestCase; @@ -29,6 +30,7 @@ public void testThreadedExecution() { try ( HierarchyCircuitBreakerService topBreaker = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, Settings.builder() .put(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10mb") // Disable the real memory checking because it causes other tests to interfere with this one. 
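The MlAutoscalingResourceTracker tests that follow exercise the new MlDummyAutoscalingEntity parameter of getMemoryAndProcessors, which reserves capacity for a workload that has no real assignment. A minimal sketch of the accounting the assertions imply; the record shape, field names, and helper methods here are assumptions for illustration only, since the diff shows just the MlDummyAutoscalingEntity.of(memory, processors) factory and the resulting stats:

    import java.util.List;

    class DummyEntityAccountingSketch {

        // Assumed shape: the tests construct it via MlDummyAutoscalingEntity.of(bytes, processors).
        record MlDummyAutoscalingEntity(long memory, int processors) {
            static MlDummyAutoscalingEntity of(long memory, int processors) {
                return new MlDummyAutoscalingEntity(memory, processors);
            }
        }

        // The dummy entity's requirements are added on top of the totals derived from real
        // assignments, so a scale-down is only proposed when the remaining capacity could also
        // host the dummy workload; a zero-valued entity leaves every total unchanged.
        static long modelMemoryInBytesSum(List<Long> assignedModelMemory, MlDummyAutoscalingEntity dummy) {
            return assignedModelMemory.stream().mapToLong(Long::longValue).sum() + dummy.memory();
        }

        static int processorsSum(List<Integer> assignedProcessors, MlDummyAutoscalingEntity dummy) {
            return assignedProcessors.stream().mapToInt(Integer::intValue).sum() + dummy.processors();
        }

        public static void main(String[] args) {
            // Mirrors the dummy-entity scale-down test below: two deployments using 2 processors
            // each plus a dummy entity with 1 processor gives processorsSum() == 5.
            MlDummyAutoscalingEntity dummy = MlDummyAutoscalingEntity.of(600_000_000L, 1);
            System.out.println(processorsSum(List.of(2, 2), dummy)); // prints 5
        }
    }

This matches the expectations asserted below: modelMemoryInBytesSum() grows by the dummy entity's memory, processorsSum() grows by its processor count, and MlDummyAutoscalingEntity.of(0L, 0) is a no-op.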
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingResourceTrackerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingResourceTrackerTests.java index 0028c66dd9659..0d91ce45c46ba 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingResourceTrackerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingResourceTrackerTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.xpack.ml.utils.NativeMemoryCalculator; import java.net.InetAddress; +import java.time.Instant; import java.util.Collections; import java.util.List; import java.util.Map; @@ -41,6 +42,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; +import static org.elasticsearch.xpack.ml.autoscaling.MlAutoscalingResourceTracker.MlDummyAutoscalingEntity; import static org.elasticsearch.xpack.ml.autoscaling.MlAutoscalingResourceTracker.MlJobRequirements; import static org.elasticsearch.xpack.ml.job.JobNodeSelector.AWAITING_LAZY_ASSIGNMENT; import static org.mockito.Mockito.mock; @@ -61,6 +63,7 @@ public void testGetMemoryAndProcessors() throws InterruptedException { memory / 2, 10, MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(0L, 0), listener ), stats -> { @@ -81,6 +84,7 @@ public void testGetMemoryAndProcessors() throws InterruptedException { memory / 2, 10, MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(0L, 0), listener ), stats -> { @@ -91,6 +95,30 @@ public void testGetMemoryAndProcessors() throws InterruptedException { assertEquals(MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes(), stats.perNodeMemoryOverheadInBytes()); } ); + + // Simulate 1 node & 1 "dummy" task requiring 1 processor and the same memory as the other node + // We don't expect any extra memory or processor usage in this situation. 
+ this.assertAsync( + listener -> MlAutoscalingResourceTracker.getMemoryAndProcessors( + mlAutoscalingContext, + mockTracker, + Map.of("ml-1", randomLongBetween(0, memory), "ml-2", randomLongBetween(0, memory)), + memory / 2, + 10, + MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(memory / 2, 1), + listener + ), + stats -> { + assertEquals(0, stats.perNodeMemoryInBytes()); + assertEquals(2, stats.nodes()); + assertEquals(0, stats.minNodes()); + assertEquals(0, stats.extraSingleNodeProcessors()); + assertEquals(0, stats.extraModelMemoryInBytes()); + assertEquals(0, stats.extraSingleNodeModelMemoryInBytes()); + assertEquals(MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes(), stats.perNodeMemoryOverheadInBytes()); + } + ); } public void testGetMemoryAndProcessorsScaleUpGivenAwaitingLazyAssignment() throws InterruptedException { @@ -143,12 +171,68 @@ public void testGetMemoryAndProcessorsScaleUpGivenAwaitingLazyAssignment() throw memory / 2, 10, MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(0L, 0), listener ), stats -> { assertEquals(memory, stats.perNodeMemoryInBytes()); assertEquals(2, stats.nodes()); assertEquals(1, stats.minNodes()); + assertEquals(0, stats.extraProcessors()); + assertEquals(0, stats.modelMemoryInBytesSum()); + assertEquals(0, stats.processorsSum()); + assertEquals(0, stats.extraSingleNodeProcessors()); + assertEquals(memory / 4, stats.extraSingleNodeModelMemoryInBytes()); + assertEquals(memory / 4, stats.extraModelMemoryInBytes()); + assertEquals(MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes(), stats.perNodeMemoryOverheadInBytes()); + } + ); + + // As above but allocate an equal amount of memory to a dummy task + this.assertAsync( + listener -> MlAutoscalingResourceTracker.getMemoryAndProcessors( + mlAutoscalingContext, + mockTracker, + Map.of("ml-1", memory, "ml-2", memory), + memory / 2, + 10, + MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(memory / 4, 0), + listener + ), + stats -> { + assertEquals(memory, stats.perNodeMemoryInBytes()); + assertEquals(2, stats.nodes()); + assertEquals(1, stats.minNodes()); + assertEquals(0, stats.extraProcessors()); + assertEquals(memory / 4, stats.modelMemoryInBytesSum()); + assertEquals(0, stats.processorsSum()); + assertEquals(0, stats.extraSingleNodeProcessors()); + assertEquals(memory / 4, stats.extraSingleNodeModelMemoryInBytes()); + assertEquals(memory / 4, stats.extraModelMemoryInBytes()); + assertEquals(MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes(), stats.perNodeMemoryOverheadInBytes()); + } + ); + + // As above but also allocate a processor to the dummy task + this.assertAsync( + listener -> MlAutoscalingResourceTracker.getMemoryAndProcessors( + mlAutoscalingContext, + mockTracker, + Map.of("ml-1", memory, "ml-2", memory), + memory / 2, + 10, + MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(memory / 4, 1), + listener + ), + stats -> { + assertEquals(memory, stats.perNodeMemoryInBytes()); + assertEquals(2, stats.nodes()); + assertEquals(1, stats.minNodes()); + assertEquals(0, stats.extraProcessors()); + assertEquals(memory / 4, stats.modelMemoryInBytesSum()); + assertEquals(1, stats.processorsSum()); assertEquals(0, stats.extraSingleNodeProcessors()); assertEquals(memory / 4, stats.extraSingleNodeModelMemoryInBytes()); assertEquals(memory / 4, stats.extraModelMemoryInBytes()); @@ -178,7 +262,7 @@ public void 
testGetMemoryAndProcessorsScaleUpGivenAwaitingLazyAssignmentButFaile 1, AWAITING_LAZY_ASSIGNMENT ), - new JobTaskState(JobState.FAILED, 1, "a nasty bug") + new JobTaskState(JobState.FAILED, 1, "a nasty bug", Instant.now()) ) ), List.of(), @@ -210,10 +294,12 @@ public void testGetMemoryAndProcessorsScaleUpGivenAwaitingLazyAssignmentButFaile memory / 2, 10, MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(0L, 0), listener ), stats -> { assertEquals(memory, stats.perNodeMemoryInBytes()); + assertEquals(memory, stats.removeNodeMemoryInBytes()); assertEquals(2, stats.nodes()); assertEquals(0, stats.minNodes()); assertEquals(0, stats.extraSingleNodeProcessors()); @@ -680,7 +766,8 @@ public void testCheckIfOneNodeCouldBeRemovedMemoryOnly() { ), 600L, 10, - MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE + MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(0L, 0) ) ); @@ -703,7 +790,8 @@ public void testCheckIfOneNodeCouldBeRemovedMemoryOnly() { ), 600L, 10, - MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE + MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(0L, 0) ) ); @@ -722,7 +810,8 @@ public void testCheckIfOneNodeCouldBeRemovedMemoryOnly() { ), 600L, 10, - MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE + MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(0L, 0) ) ); @@ -732,7 +821,8 @@ public void testCheckIfOneNodeCouldBeRemovedMemoryOnly() { Collections.emptyMap(), 999L, 10, - MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE + MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(0L, 0) ) ); @@ -757,7 +847,8 @@ public void testCheckIfOneNodeCouldBeRemovedMemoryOnly() { ), 1000L, 10, - MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE + MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(0L, 0) ) ); @@ -786,7 +877,8 @@ public void testCheckIfOneNodeCouldBeRemovedMemoryOnly() { ), 1000L, 10, - MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE + MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(0L, 0) ) ); @@ -814,7 +906,8 @@ public void testCheckIfOneNodeCouldBeRemovedMemoryOnly() { ), 1000L, 10, - MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE + MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(0L, 0) ) ); @@ -842,7 +935,8 @@ public void testCheckIfOneNodeCouldBeRemovedMemoryOnly() { ), 1000L, 10, - MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE + MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(0L, 0) ) ); } @@ -868,7 +962,8 @@ public void testCheckIfOneNodeCouldBeRemovedProcessorAndMemory() { ), 600L, 10, - MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE + MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(0L, 0) ) ); @@ -892,7 +987,8 @@ public void testCheckIfOneNodeCouldBeRemovedProcessorAndMemory() { ), 600L, 2, - MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE + MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(0L, 0) ) ); @@ -916,7 +1012,8 @@ public void testCheckIfOneNodeCouldBeRemovedProcessorAndMemory() { ), 600L, 10, - 5 + 5, + MlDummyAutoscalingEntity.of(0L, 0) ) ); @@ -936,7 +1033,8 @@ public void testCheckIfOneNodeCouldBeRemovedProcessorAndMemory() { ), 600L, 10, - MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE + MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(0L, 0) ) ); } @@ -957,6 +1055,29 @@ public void testGetMemoryAndProcessorsScaleDownToZero() throws 
InterruptedExcept perNodeAvailableModelMemoryInBytes, 10, MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(0L, 0), + listener + ), + stats -> { + assertEquals(memory, stats.perNodeMemoryInBytes()); + assertEquals(1, stats.nodes()); + assertEquals(0, stats.minNodes()); + assertEquals(0, stats.extraSingleNodeProcessors()); + assertEquals(memory, stats.removeNodeMemoryInBytes()); + assertEquals(MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes(), stats.perNodeMemoryOverheadInBytes()); + } + ); + + // Dummy task should not affect results + this.assertAsync( + listener -> MlAutoscalingResourceTracker.getMemoryAndProcessors( + mlAutoscalingContext, + mockTracker, + Map.of("ml-1", memory), + perNodeAvailableModelMemoryInBytes, + 10, + MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(0L, 1), listener ), stats -> { @@ -964,6 +1085,7 @@ public void testGetMemoryAndProcessorsScaleDownToZero() throws InterruptedExcept assertEquals(1, stats.nodes()); assertEquals(0, stats.minNodes()); assertEquals(0, stats.extraSingleNodeProcessors()); + assertEquals(0, stats.extraProcessors()); assertEquals(memory, stats.removeNodeMemoryInBytes()); assertEquals(MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes(), stats.perNodeMemoryOverheadInBytes()); } @@ -978,6 +1100,7 @@ public void testGetMemoryAndProcessorsScaleDownToZero() throws InterruptedExcept perNodeAvailableModelMemoryInBytes, 10, MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(0L, 0), listener ), stats -> { @@ -1067,6 +1190,7 @@ public void testGetMemoryAndProcessorsScaleDown() throws InterruptedException { perNodeAvailableModelMemoryInBytes, 10, MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(0L, 0), listener ), stats -> { @@ -1166,6 +1290,7 @@ public void testGetMemoryAndProcessorsScaleDownPreventedByMinNodes() throws Inte NativeMemoryCalculator.allowedBytesForMl(firstNode, settings).getAsLong(), 4, MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(0L, 0), listener ), stats -> { @@ -1179,6 +1304,292 @@ public void testGetMemoryAndProcessorsScaleDownPreventedByMinNodes() throws Inte ); } + // scenario: 3 ml nodes, but only 2 have assigned models. This situation would normally result in a scale down but that is prevented + // by a "dummy" entity having sufficient memory to do so. 
+ public void testGetMemoryAndProcessorsScaleDownPreventedByDummyEntityMemory() throws InterruptedException { + Map nodeAttr = Map.of( + MachineLearning.MACHINE_MEMORY_NODE_ATTR, + "1000000000", + MachineLearning.MAX_JVM_SIZE_NODE_ATTR, + "400000000", + MachineLearning.ML_CONFIG_VERSION_NODE_ATTR, + "7.2.0" + ); + + MlAutoscalingContext mlAutoscalingContext = new MlAutoscalingContext( + List.of(), + List.of(), + List.of(), + Map.of( + "model-1", + TrainedModelAssignment.Builder.empty( + new StartTrainedModelDeploymentAction.TaskParams( + "model-1", + "model-1-deployment", + 400, + 1, + 2, + 100, + null, + Priority.NORMAL, + 0L, + 0L + ) + ).addRoutingEntry("ml-node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")).build(), + "model-2", + TrainedModelAssignment.Builder.empty( + new StartTrainedModelDeploymentAction.TaskParams( + "model-2", + "model-2-deployment", + 400, + 1, + 2, + 100, + null, + Priority.NORMAL, + 0L, + 0L + ) + ).addRoutingEntry("ml-node-3", new RoutingInfo(1, 1, RoutingState.STARTED, "")).build() + ), + List.of( + DiscoveryNodeUtils.builder("ml-node-1") + .name("ml-node-name-1") + .address(new TransportAddress(InetAddress.getLoopbackAddress(), 9300)) + .attributes(nodeAttr) + .roles(Set.of(DiscoveryNodeRole.ML_ROLE)) + .build(), + DiscoveryNodeUtils.builder("ml-node-3") + .name("ml-node-name-3") + .address(new TransportAddress(InetAddress.getLoopbackAddress(), 9300)) + .attributes(nodeAttr) + .roles(Set.of(DiscoveryNodeRole.ML_ROLE)) + .build() + ), + PersistentTasksCustomMetadata.builder().build() + ); + MlMemoryTracker mockTracker = mock(MlMemoryTracker.class); + + long memory = 1000000000; + long perNodeAvailableModelMemoryInBytes = 600000000; + + this.assertAsync( + listener -> MlAutoscalingResourceTracker.getMemoryAndProcessors( + mlAutoscalingContext, + mockTracker, + Map.of("ml-node-1", memory, "ml-node-2", memory, "ml-node-3", memory), + perNodeAvailableModelMemoryInBytes, + 10, + MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(perNodeAvailableModelMemoryInBytes, 1), + listener + ), + stats -> { + assertEquals(memory, stats.perNodeMemoryInBytes()); + assertEquals(perNodeAvailableModelMemoryInBytes + 503318080, stats.modelMemoryInBytesSum()); // total model memory is that + // configured in the dummy + // entity plus that used by the + // trained models. + assertEquals(5, stats.processorsSum()); // account for the extra processor from the dummy entity + assertEquals(3, stats.nodes()); + assertEquals(1, stats.minNodes()); + assertEquals(0, stats.extraSingleNodeProcessors()); + assertEquals(0, stats.extraProcessors()); + assertEquals(0, stats.extraModelMemoryInBytes()); + assertEquals(0, stats.extraSingleNodeModelMemoryInBytes()); + assertEquals(MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes(), stats.perNodeMemoryOverheadInBytes()); + } + ); + } + + // scenario: 3 ml nodes, but only 2 have assigned models. This situation does result in a scale down since dummy + // processors alone are not sufficient to prevent it. 
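The constant 503318080 asserted in the tests above and below is the combined estimated memory of the two trained-model deployments. A back-of-envelope check, assuming (this is inferred from the numbers alone, not confirmed by this excerpt) that the per-deployment estimate is a fixed 240 MiB of native overhead plus twice the model size (400 bytes in these TaskParams):

    // Hypothetical sanity check of the asserted constant; the formula is an
    // assumption and the variable names are illustrative.
    long perDeployment = 240L * 1024 * 1024 + 2L * 400; // 251_659_040 bytes
    long bothDeployments = 2 * perDeployment;           // 503_318_080 bytes

With the dummy entity contributing perNodeAvailableModelMemoryInBytes on top, the expected modelMemoryInBytesSum in the memory-prevention test above follows directly. Likewise, processorsSum combines the 2 models x 1 allocation x 2 threads = 4 model processors with the dummy entity's processor count (hence 5 above and 13 in the test below).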
+ public void testGetMemoryAndProcessorsScaleDownNotPreventedByDummyEntityProcessors() throws InterruptedException { + Map nodeAttr = Map.of( + MachineLearning.MACHINE_MEMORY_NODE_ATTR, + "1000000000", + MachineLearning.MAX_JVM_SIZE_NODE_ATTR, + "400000000", + MachineLearning.ML_CONFIG_VERSION_NODE_ATTR, + "7.2.0" + ); + + MlAutoscalingContext mlAutoscalingContext = new MlAutoscalingContext( + List.of(), + List.of(), + List.of(), + Map.of( + "model-1", + TrainedModelAssignment.Builder.empty( + new StartTrainedModelDeploymentAction.TaskParams( + "model-1", + "model-1-deployment", + 400, + 1, + 2, + 100, + null, + Priority.NORMAL, + 0L, + 0L + ) + ).addRoutingEntry("ml-node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")).build(), + "model-2", + TrainedModelAssignment.Builder.empty( + new StartTrainedModelDeploymentAction.TaskParams( + "model-2", + "model-2-deployment", + 400, + 1, + 2, + 100, + null, + Priority.NORMAL, + 0L, + 0L + ) + ).addRoutingEntry("ml-node-3", new RoutingInfo(1, 1, RoutingState.STARTED, "")).build() + ), + List.of( + DiscoveryNodeUtils.builder("ml-node-1") + .name("ml-node-name-1") + .address(new TransportAddress(InetAddress.getLoopbackAddress(), 9300)) + .attributes(nodeAttr) + .roles(Set.of(DiscoveryNodeRole.ML_ROLE)) + .build(), + DiscoveryNodeUtils.builder("ml-node-3") + .name("ml-node-name-3") + .address(new TransportAddress(InetAddress.getLoopbackAddress(), 9300)) + .attributes(nodeAttr) + .roles(Set.of(DiscoveryNodeRole.ML_ROLE)) + .build() + ), + PersistentTasksCustomMetadata.builder().build() + ); + MlMemoryTracker mockTracker = mock(MlMemoryTracker.class); + + long memory = 1000000000; + long perNodeAvailableModelMemoryInBytes = 600000000; + + this.assertAsync( + listener -> MlAutoscalingResourceTracker.getMemoryAndProcessors( + mlAutoscalingContext, + mockTracker, + Map.of("ml-node-1", memory, "ml-node-2", memory, "ml-node-3", memory), + perNodeAvailableModelMemoryInBytes, + 10, + MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(0L, 9), + listener + ), + stats -> { + assertEquals(memory, stats.perNodeMemoryInBytes()); + assertEquals(503318080, stats.modelMemoryInBytesSum()); + assertEquals(13, stats.processorsSum()); // account for the extra processors from the dummy entity + assertEquals(3, stats.nodes()); + assertEquals(1, stats.minNodes()); + assertEquals(0, stats.extraSingleNodeProcessors()); + assertEquals(0, stats.extraProcessors()); + assertEquals(0, stats.extraModelMemoryInBytes()); + assertEquals(0, stats.extraSingleNodeModelMemoryInBytes()); + assertEquals(MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes(), stats.perNodeMemoryOverheadInBytes()); + } + ); + } + + public void testGetMemoryAndProcessorsScaleDownNotPreventedByDummyEntityAsMemoryTooLow() throws InterruptedException { + Map nodeAttr = Map.of( + MachineLearning.MACHINE_MEMORY_NODE_ATTR, + "1000000000", + MachineLearning.MAX_JVM_SIZE_NODE_ATTR, + "400000000", + MachineLearning.ML_CONFIG_VERSION_NODE_ATTR, + "7.2.0" + ); + + MlAutoscalingContext mlAutoscalingContext = new MlAutoscalingContext( + List.of(), + List.of(), + List.of(), + Map.of( + "model-1", + TrainedModelAssignment.Builder.empty( + new StartTrainedModelDeploymentAction.TaskParams( + "model-1", + "model-1-deployment", + 400, + 1, + 2, + 100, + null, + Priority.NORMAL, + 0L, + 0L + ) + ).addRoutingEntry("ml-node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")).build(), + "model-2", + TrainedModelAssignment.Builder.empty( + new StartTrainedModelDeploymentAction.TaskParams( + 
"model-2", + "model-2-deployment", + 400, + 1, + 2, + 100, + null, + Priority.NORMAL, + 0L, + 0L + ) + ).addRoutingEntry("ml-node-3", new RoutingInfo(1, 1, RoutingState.STARTED, "")).build() + ), + List.of( + DiscoveryNodeUtils.builder("ml-node-1") + .name("ml-node-name-1") + .address(new TransportAddress(InetAddress.getLoopbackAddress(), 9300)) + .attributes(nodeAttr) + .roles(Set.of(DiscoveryNodeRole.ML_ROLE)) + .build(), + DiscoveryNodeUtils.builder("ml-node-3") + .name("ml-node-name-3") + .address(new TransportAddress(InetAddress.getLoopbackAddress(), 9300)) + .attributes(nodeAttr) + .roles(Set.of(DiscoveryNodeRole.ML_ROLE)) + .build() + ), + PersistentTasksCustomMetadata.builder().build() + ); + MlMemoryTracker mockTracker = mock(MlMemoryTracker.class); + + long memory = 1000000000; + long perNodeAvailableModelMemoryInBytes = 600000000; + + this.assertAsync( + listener -> MlAutoscalingResourceTracker.getMemoryAndProcessors( + mlAutoscalingContext, + mockTracker, + Map.of("ml-node-1", memory, "ml-node-2", memory, "ml-node-3", memory), + perNodeAvailableModelMemoryInBytes, + 10, + MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, + MlDummyAutoscalingEntity.of(1024, 0), + listener + ), + stats -> { + assertEquals(memory, stats.perNodeMemoryInBytes()); + assertEquals(503318080, stats.modelMemoryInBytesSum()); + assertEquals(4, stats.processorsSum()); + assertEquals(3, stats.nodes()); + assertEquals(1, stats.minNodes()); + assertEquals(0, stats.extraSingleNodeProcessors()); + assertEquals(0, stats.extraProcessors()); + assertEquals(0, stats.extraModelMemoryInBytes()); + assertEquals(0, stats.extraSingleNodeModelMemoryInBytes()); + assertEquals(MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes(), stats.perNodeMemoryOverheadInBytes()); + } + ); + } + private void assertAsync(Consumer> function, Consumer furtherTests) throws InterruptedException { CountDownLatch latch = new CountDownLatch(1); AtomicBoolean listenerCalled = new AtomicBoolean(false); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDeciderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDeciderTests.java index 2d9e19cbb3830..0b3851012d0e8 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDeciderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDeciderTests.java @@ -48,6 +48,7 @@ import org.elasticsearch.xpack.ml.utils.NativeMemoryCalculator; import org.junit.Before; +import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -1406,7 +1407,7 @@ public static void addAnalyticsTask( if (jobState != null) { builder.updateTaskState( MlTasks.dataFrameAnalyticsTaskId(jobId), - new DataFrameAnalyticsTaskState(jobState, builder.getLastAllocationId(), null) + new DataFrameAnalyticsTaskState(jobState, builder.getLastAllocationId(), null, Instant.now()) ); } } @@ -1419,7 +1420,10 @@ public static void addJobTask(String jobId, String nodeId, JobState jobState, Pe nodeId == null ? 
AWAITING_LAZY_ASSIGNMENT : new PersistentTasksCustomMetadata.Assignment(nodeId, "test assignment") ); if (jobState != null) { - builder.updateTaskState(MlTasks.jobTaskId(jobId), new JobTaskState(jobState, builder.getLastAllocationId(), null)); + builder.updateTaskState( + MlTasks.jobTaskId(jobId), + new JobTaskState(jobState, builder.getLastAllocationId(), null, Instant.now()) + ); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java index 9da54416ce066..69d8663478b36 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java @@ -43,6 +43,7 @@ import org.junit.Before; import java.net.InetAddress; +import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -441,7 +442,7 @@ public void testSelectNode_jobTaskStale() { PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); addJobTask(job.getId(), nodeId, JobState.OPENED, tasksBuilder); // Set to lower allocationId, so job task is stale: - tasksBuilder.updateTaskState(MlTasks.jobTaskId(job.getId()), new JobTaskState(JobState.OPENED, 0, null)); + tasksBuilder.updateTaskState(MlTasks.jobTaskId(job.getId()), new JobTaskState(JobState.OPENED, 0, null, Instant.now())); tasks = tasksBuilder.build(); givenClusterState("foo", 1, 0); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorFactoryTests.java index afb1d63a2114d..9a76eb5f2b936 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorFactoryTests.java @@ -86,6 +86,7 @@ private AggregationDataExtractorFactory createFactory(long histogramInterval) { return new AggregationDataExtractorFactory( client, datafeedConfigBuilder.build(), + null, jobBuilder.build(new Date()), xContentRegistry(), timingStatsReporter diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractorTests.java index 9ea246adf95cc..93d20c0c7be49 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractorTests.java @@ -7,12 +7,12 @@ package org.elasticsearch.xpack.ml.datafeed.extractor.aggregation; import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; +import 
org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; @@ -122,9 +122,10 @@ public void setUpTests() { .subAggregation(AggregationBuilders.avg("responsetime").field("responsetime")); runtimeMappings = Collections.emptyMap(); timingStatsReporter = new DatafeedTimingStatsReporter(new DatafeedTimingStats(jobId), mock(DatafeedTimingStatsPersister.class)); - aggregatedSearchRequestBuilder = (searchSourceBuilder) -> new SearchRequestBuilder(testClient, SearchAction.INSTANCE).setSource( - searchSourceBuilder - ).setAllowPartialSearchResults(false).setIndices(indices.toArray(String[]::new)); + aggregatedSearchRequestBuilder = (searchSourceBuilder) -> new SearchRequestBuilder(testClient, TransportSearchAction.TYPE) + .setSource(searchSourceBuilder) + .setAllowPartialSearchResults(false) + .setIndices(indices.toArray(String[]::new)); } public void testExtraction() throws IOException { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactoryTests.java index 37123e843aeb8..a7260d34a0136 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactoryTests.java @@ -113,6 +113,7 @@ private ChunkedDataExtractorFactory createFactory(long histogramInterval) { return new ChunkedDataExtractorFactory( client, datafeedConfigBuilder.build(), + null, jobBuilder.build(new Date()), xContentRegistry(), dataExtractorFactory, diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java index 81d28b461db76..b0b391f92b527 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java @@ -100,7 +100,6 @@ public void setUpTests() { jobId = "test-job"; timeField = "time"; indices = Arrays.asList("index-1", "index-2"); - query = QueryBuilders.matchAllQuery(); scrollSize = 1000; chunkSpan = null; dataExtractorFactory = mock(DataExtractorFactory.class); @@ -605,7 +604,7 @@ private ChunkedDataExtractorContext createContext(long start, long end, boolean jobId, timeField, indices, - query, + QueryBuilders.matchAllQuery(), scrollSize, start, end, @@ -653,6 +652,11 @@ public void cancel() { // do nothing } + @Override + public void destroy() { + // do nothing + } + @Override public long getEndTime() { return 0; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java index 13a7552e409c2..7ffb3231331a0 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java @@ -9,7 +9,6 @@ import 
org.apache.lucene.search.TotalHits; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionFuture; -import org.elasticsearch.action.search.ClearScrollAction; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.ClearScrollResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; @@ -17,6 +16,7 @@ import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.search.TransportClearScrollAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.settings.Settings; @@ -173,7 +173,7 @@ public void setUpTests() { clearScrollFuture = mock(ActionFuture.class); capturedClearScrollRequests = ArgumentCaptor.forClass(ClearScrollRequest.class); - when(client.execute(same(ClearScrollAction.INSTANCE), capturedClearScrollRequests.capture())).thenReturn(clearScrollFuture); + when(client.execute(same(TransportClearScrollAction.TYPE), capturedClearScrollRequests.capture())).thenReturn(clearScrollFuture); timingStatsReporter = new DatafeedTimingStatsReporter(new DatafeedTimingStats(jobId), mock(DatafeedTimingStatsPersister.class)); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTaskTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTaskTests.java index c2318c879328e..70bacebd3609f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTaskTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTaskTests.java @@ -10,8 +10,8 @@ import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; @@ -188,7 +188,7 @@ private void testPersistProgress(SearchHits searchHits, String expectedIndexOrAl SearchResponse searchResponse = mock(SearchResponse.class); when(searchResponse.getHits()).thenReturn(searchHits); - doAnswer(withResponse(searchResponse)).when(client).execute(eq(SearchAction.INSTANCE), any(), any()); + doAnswer(withResponse(searchResponse)).when(client).execute(eq(TransportSearchAction.TYPE), any(), any()); IndexResponse indexResponse = mock(IndexResponse.class); doAnswer(withResponse(indexResponse)).when(client).execute(eq(IndexAction.INSTANCE), any(), any()); @@ -217,7 +217,7 @@ private void testPersistProgress(SearchHits searchHits, String expectedIndexOrAl ArgumentCaptor indexRequestCaptor = ArgumentCaptor.forClass(IndexRequest.class); InOrder inOrder = inOrder(client, runnable); - inOrder.verify(client).execute(eq(SearchAction.INSTANCE), any(), any()); + inOrder.verify(client).execute(eq(TransportSearchAction.TYPE), any(), any()); inOrder.verify(client).execute(eq(IndexAction.INSTANCE), indexRequestCaptor.capture(), any()); inOrder.verify(runnable).run(); inOrder.verifyNoMoreInteractions(); @@ -284,7 +284,7 @@ private void testSetFailed(boolean nodeShuttingDown) throws 
IOException { SearchResponse searchResponse = mock(SearchResponse.class); when(searchResponse.getHits()).thenReturn(SearchHits.EMPTY_WITH_TOTAL_HITS); - doAnswer(withResponse(searchResponse)).when(client).execute(eq(SearchAction.INSTANCE), any(), any()); + doAnswer(withResponse(searchResponse)).when(client).execute(eq(TransportSearchAction.TYPE), any(), any()); IndexResponse indexResponse = mock(IndexResponse.class); doAnswer(withResponse(indexResponse)).when(client).execute(eq(IndexAction.INSTANCE), any(), any()); @@ -315,7 +315,7 @@ private void testSetFailed(boolean nodeShuttingDown) throws IOException { if (nodeShuttingDown == false) { // Verify progress was persisted ArgumentCaptor indexRequestCaptor = ArgumentCaptor.forClass(IndexRequest.class); - verify(client).execute(eq(SearchAction.INSTANCE), any(), any()); + verify(client).execute(eq(TransportSearchAction.TYPE), any(), any()); verify(client).execute(eq(IndexAction.INSTANCE), indexRequestCaptor.capture(), any()); IndexRequest indexRequest = indexRequestCaptor.getValue(); @@ -333,18 +333,20 @@ private void testSetFailed(boolean nodeShuttingDown) throws IOException { assertThat(parsedProgress.get().get(0), equalTo(new PhaseProgress("reindexing", 100))); } - verify(client).execute( - same(UpdatePersistentTaskStatusAction.INSTANCE), - eq( - new UpdatePersistentTaskStatusAction.Request( - "task-id", - 42, - new DataFrameAnalyticsTaskState(DataFrameAnalyticsState.FAILED, 42, "some exception") - ) - ), - any() + ArgumentCaptor captor = ArgumentCaptor.forClass( + UpdatePersistentTaskStatusAction.Request.class ); + + verify(client).execute(same(UpdatePersistentTaskStatusAction.INSTANCE), captor.capture(), any()); + + UpdatePersistentTaskStatusAction.Request request = captor.getValue(); + assertThat(request.getTaskId(), equalTo("task-id")); + DataFrameAnalyticsTaskState state = (DataFrameAnalyticsTaskState) request.getState(); + assertThat(state.getState(), equalTo(DataFrameAnalyticsState.FAILED)); + assertThat(state.getAllocationId(), equalTo(42L)); + assertThat(state.getReason(), equalTo("some exception")); } + verifyNoMoreInteractions(client, analyticsManager, auditor, taskManager); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndexTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndexTests.java index 5c928ff6e8a3a..9ec0540569689 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndexTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndexTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.ml.dataframe; import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.create.CreateIndexAction; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; @@ -783,7 +782,7 @@ public void testReadMetadata_GivenIncompatibleVersion() { assertThat(metadata.hasMetadata(), is(true)); assertThat(metadata.isCompatible(), is(false)); - assertThat(metadata.getVersion(), equalTo(Version.V_7_9_3.toString())); + assertThat(metadata.getVersion(), equalTo(MlConfigVersion.V_7_9_3.toString())); } private static Answer callListenerOnResponse(Response response) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManagerTests.java 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManagerTests.java index 028c4b48ad355..6d963cae8159c 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManagerTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelPrefixStrings; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; import org.elasticsearch.xpack.ml.inference.pytorch.PriorityProcessWorkerExecutorService; import org.elasticsearch.xpack.ml.inference.pytorch.process.PyTorchProcessFactory; @@ -102,6 +103,7 @@ public void testRejectedExecution() { NlpInferenceInput.fromText("foo"), false, TimeValue.timeValueMinutes(1), + TrainedModelPrefixStrings.PrefixType.NONE, null, ActionListener.wrap(result -> fail("unexpected success"), e -> assertThat(e, instanceOf(EsRejectedExecutionException.class))) ); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/InferencePyTorchActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/InferencePyTorchActionTests.java index c937e9be24b01..4fa0876991e3b 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/InferencePyTorchActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/InferencePyTorchActionTests.java @@ -7,7 +7,9 @@ package org.elasticsearch.xpack.ml.inference.deployment; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.InferenceResults; @@ -20,19 +22,30 @@ import org.elasticsearch.threadpool.ScalingExecutorBuilder; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelInput; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelPrefixStrings; import org.elasticsearch.xpack.core.ml.inference.results.WarningInferenceResults; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.PassThroughConfig; +import org.elasticsearch.xpack.ml.inference.nlp.NlpTask; +import org.elasticsearch.xpack.ml.inference.nlp.tokenizers.TokenizationResult; +import org.elasticsearch.xpack.ml.inference.pytorch.process.PyTorchProcess; import org.elasticsearch.xpack.ml.inference.pytorch.process.PyTorchResultProcessor; import org.junit.After; import org.junit.Before; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.xpack.ml.MachineLearning.UTILITY_THREAD_POOL_NAME; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; 
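The hunks that follow thread a new TrainedModelPrefixStrings.PrefixType argument through InferencePyTorchAction. The production logic is not part of this excerpt; judging purely from the expectations in the testPrefixStrings test added below (SEARCH gets the search prefix, INGEST the ingest prefix, NONE or a missing prefix leaves the text untouched), the behaviour can be sketched roughly as follows. The accessor names ingestPrefix() and searchPrefix() are assumptions based on the constructor argument order used in the test:

    // Rough sketch of the prefixing behaviour implied by testPrefixStrings;
    // not the actual InferencePyTorchAction implementation.
    static String applyPrefix(TrainedModelPrefixStrings prefixes, TrainedModelPrefixStrings.PrefixType type, String input) {
        if (prefixes == null) {
            return input;
        }
        String prefix = switch (type) {
            case INGEST -> prefixes.ingestPrefix();   // assumed accessor
            case SEARCH -> prefixes.searchPrefix();   // assumed accessor
            case NONE -> null;
        };
        return prefix == null ? input : prefix + input;
    }

So "foo" becomes "search_prefix: foo" for search traffic and "ingest_prefix: foo" for ingest, matching the arguments captured from validateInputs in the assertions below.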
@@ -79,6 +92,7 @@ public void testInferListenerOnlyCalledOnce() { processContext, new PassThroughConfig(null, null, null), NlpInferenceInput.fromText("foo"), + TrainedModelPrefixStrings.PrefixType.NONE, tp, null, listener @@ -100,6 +114,7 @@ public void testInferListenerOnlyCalledOnce() { processContext, new PassThroughConfig(null, null, null), NlpInferenceInput.fromText("foo"), + TrainedModelPrefixStrings.PrefixType.NONE, tp, null, listener @@ -122,6 +137,7 @@ public void testInferListenerOnlyCalledOnce() { processContext, new PassThroughConfig(null, null, null), NlpInferenceInput.fromText("foo"), + TrainedModelPrefixStrings.PrefixType.NONE, tp, null, listener @@ -153,6 +169,7 @@ public void testRunNotCalledAfterNotified() { processContext, new PassThroughConfig(null, null, null), NlpInferenceInput.fromText("foo"), + TrainedModelPrefixStrings.PrefixType.NONE, tp, null, listener @@ -171,6 +188,7 @@ public void testRunNotCalledAfterNotified() { processContext, new PassThroughConfig(null, null, null), NlpInferenceInput.fromText("foo"), + TrainedModelPrefixStrings.PrefixType.NONE, tp, null, listener @@ -214,6 +232,7 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, processContext, new PassThroughConfig(null, null, null), NlpInferenceInput.fromText("foo"), + TrainedModelPrefixStrings.PrefixType.NONE, tp, cancellableTask, listener @@ -227,6 +246,170 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, verify(resultProcessor, never()).registerRequest(anyString(), any()); } + @SuppressWarnings("unchecked") + public void testPrefixStrings() throws Exception { + DeploymentManager.ProcessContext processContext = mock(DeploymentManager.ProcessContext.class); + + TrainedModelPrefixStrings prefixStrings = new TrainedModelPrefixStrings("ingest_prefix: ", "search_prefix: "); + when(processContext.getPrefixStrings()).thenReturn(new SetOnce<>(prefixStrings)); + + TrainedModelInput modelInput = new TrainedModelInput(List.of("text_field")); + when(processContext.getModelInput()).thenReturn(new SetOnce<>(modelInput)); + + NlpTask.Processor nlpProcessor = mock(NlpTask.Processor.class); + NlpTask.RequestBuilder requestBuilder = mock(NlpTask.RequestBuilder.class); + when(nlpProcessor.getRequestBuilder(any())).thenReturn(requestBuilder); + + NlpTask.Request builtRequest = new NlpTask.Request(mock(TokenizationResult.class), mock(BytesReference.class)); + when(requestBuilder.buildRequest(anyList(), anyString(), any(), anyInt())).thenReturn(builtRequest); + + when(processContext.getNlpTaskProcessor()).thenReturn(new SetOnce<>(nlpProcessor)); + PyTorchResultProcessor resultProcessor = new PyTorchResultProcessor("1", threadSettings -> {}); + + PyTorchProcess pyTorchProcess = mock(PyTorchProcess.class); + when(processContext.getProcess()).thenReturn(new SetOnce<>(pyTorchProcess)); + + when(processContext.getResultProcessor()).thenReturn(resultProcessor); + AtomicInteger timeoutCount = new AtomicInteger(); + when(processContext.getTimeoutCount()).thenReturn(timeoutCount); + + TestListenerCounter listener = new TestListenerCounter(); + { + // test for search prefix + InferencePyTorchAction action = new InferencePyTorchAction( + "test-model", + 1, + TimeValue.MAX_VALUE, + processContext, + new PassThroughConfig(null, null, null), + NlpInferenceInput.fromText("foo"), + TrainedModelPrefixStrings.PrefixType.SEARCH, + tp, + null, + listener + ); + action.init(); + action.doRun(); + + ArgumentCaptor> inputsCapture = ArgumentCaptor.forClass(List.class); + 
verify(nlpProcessor).validateInputs(inputsCapture.capture()); + + assertThat(inputsCapture.getValue(), contains("search_prefix: foo")); + } + { + // Clear the previously verified invocations on this mock. + // Using this function is slightly controversial, as it is + // not recommended by Mockito; however, it does save a lot + // of code rebuilding the mocks for each test. + Mockito.clearInvocations(nlpProcessor); + // test for ingest prefix + InferencePyTorchAction action = new InferencePyTorchAction( + "test-model", + 1, + TimeValue.MAX_VALUE, + processContext, + new PassThroughConfig(null, null, null), + NlpInferenceInput.fromText("foo"), + TrainedModelPrefixStrings.PrefixType.INGEST, + tp, + null, + listener + ); + action.init(); + action.doRun(); + + ArgumentCaptor<List<String>> inputsCapture = ArgumentCaptor.forClass(List.class); + verify(nlpProcessor).validateInputs(inputsCapture.capture()); + + assertThat(inputsCapture.getValue(), contains("ingest_prefix: foo")); + } + { + Mockito.clearInvocations(nlpProcessor); + // test no prefix + InferencePyTorchAction action = new InferencePyTorchAction( + "test-model", + 1, + TimeValue.MAX_VALUE, + processContext, + new PassThroughConfig(null, null, null), + NlpInferenceInput.fromText("foo"), + TrainedModelPrefixStrings.PrefixType.NONE, + tp, + null, + listener + ); + action.init(); + action.doRun(); + + ArgumentCaptor<List<String>> inputsCapture = ArgumentCaptor.forClass(List.class); + verify(nlpProcessor).validateInputs(inputsCapture.capture()); + + assertThat(inputsCapture.getValue(), contains("foo")); + } + { + // test search only prefix + TrainedModelPrefixStrings searchOnlyPrefix = new TrainedModelPrefixStrings(null, "search_prefix: "); + when(processContext.getPrefixStrings()).thenReturn(new SetOnce<>(searchOnlyPrefix)); + boolean isForSearch = randomBoolean(); + + Mockito.clearInvocations(nlpProcessor); + InferencePyTorchAction action = new InferencePyTorchAction( + "test-model", + 1, + TimeValue.MAX_VALUE, + processContext, + new PassThroughConfig(null, null, null), + NlpInferenceInput.fromText("foo"), + isForSearch ? TrainedModelPrefixStrings.PrefixType.SEARCH : TrainedModelPrefixStrings.PrefixType.INGEST, + tp, + null, + listener + ); + action.init(); + action.doRun(); + + ArgumentCaptor<List<String>> inputsCapture = ArgumentCaptor.forClass(List.class); + verify(nlpProcessor).validateInputs(inputsCapture.capture()); + + if (isForSearch) { + assertThat(inputsCapture.getValue(), contains("search_prefix: foo")); + } else { + assertThat(inputsCapture.getValue(), contains("foo")); + } + } + { + // test ingest only prefix + TrainedModelPrefixStrings ingestOnlyPrefix = new TrainedModelPrefixStrings("ingest_prefix: ", null); + when(processContext.getPrefixStrings()).thenReturn(new SetOnce<>(ingestOnlyPrefix)); + boolean isForSearch = randomBoolean(); + + Mockito.clearInvocations(nlpProcessor); + InferencePyTorchAction action = new InferencePyTorchAction( + "test-model", + 1, + TimeValue.MAX_VALUE, + processContext, + new PassThroughConfig(null, null, null), + NlpInferenceInput.fromText("foo"), + isForSearch ?
TrainedModelPrefixStrings.PrefixType.SEARCH : TrainedModelPrefixStrings.PrefixType.INGEST, + tp, + null, + listener + ); + action.init(); + action.doRun(); + + ArgumentCaptor> inputsCapture = ArgumentCaptor.forClass(List.class); + verify(nlpProcessor).validateInputs(inputsCapture.capture()); + + if (isForSearch) { + assertThat(inputsCapture.getValue(), contains("foo")); + } else { + assertThat(inputsCapture.getValue(), contains("ingest_prefix: foo")); + } + } + } + static class TestListenerCounter implements ActionListener { private int responseCounts; private int failureCounts; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessorTests.java index 88dcc2ba5d697..4821efa29631f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessorTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.ingest.TestIngestDocument; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.action.InferModelAction; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelPrefixStrings; import org.elasticsearch.xpack.core.ml.inference.results.ClassificationFeatureImportance; import org.elasticsearch.xpack.core.ml.inference.results.ClassificationInferenceResults; import org.elasticsearch.xpack.core.ml.inference.results.RegressionFeatureImportance; @@ -306,6 +307,7 @@ public void testGenerateRequestWithEmptyMapping() { var request = processor.buildRequest(document); assertThat(request.getObjectsToInfer().get(0), equalTo(source)); assertEquals(InferModelAction.Request.DEFAULT_TIMEOUT_FOR_INGEST, request.getInferenceTimeout()); + assertEquals(TrainedModelPrefixStrings.PrefixType.INGEST, request.getPrefixType()); Map ingestMetadata = Collections.singletonMap("_value", 3); document = TestIngestDocument.ofIngestWithNullableVersion(source, ingestMetadata); @@ -316,6 +318,7 @@ public void testGenerateRequestWithEmptyMapping() { request = processor.buildRequest(document); assertThat(request.getObjectsToInfer().get(0), equalTo(expected)); assertEquals(InferModelAction.Request.DEFAULT_TIMEOUT_FOR_INGEST, request.getInferenceTimeout()); + assertEquals(TrainedModelPrefixStrings.PrefixType.INGEST, request.getPrefixType()); } public void testGenerateWithMapping() { @@ -354,6 +357,7 @@ public void testGenerateWithMapping() { var request = processor.buildRequest(document); assertThat(request.getObjectsToInfer().get(0), equalTo(expectedMap)); assertEquals(InferModelAction.Request.DEFAULT_TIMEOUT_FOR_INGEST, request.getInferenceTimeout()); + assertEquals(TrainedModelPrefixStrings.PrefixType.INGEST, request.getPrefixType()); Map ingestMetadata = Collections.singletonMap("_value", "baz"); document = TestIngestDocument.ofIngestWithNullableVersion(source, ingestMetadata); @@ -363,6 +367,7 @@ public void testGenerateWithMapping() { request = processor.buildRequest(document); assertThat(request.getObjectsToInfer().get(0), equalTo(expectedMap)); assertEquals(InferModelAction.Request.DEFAULT_TIMEOUT_FOR_INGEST, request.getInferenceTimeout()); + assertEquals(TrainedModelPrefixStrings.PrefixType.INGEST, request.getPrefixType()); } public void testGenerateWithMappingNestedFields() { @@ -607,6 +612,7 @@ public void testBuildRequestWithInputFields() { var requestInputs = request.getTextInput(); 
assertThat(requestInputs, contains("body_text", "title_text")); assertEquals(InferModelAction.Request.DEFAULT_TIMEOUT_FOR_INGEST, request.getInferenceTimeout()); + assertEquals(TrainedModelPrefixStrings.PrefixType.INGEST, request.getPrefixType()); } public void testBuildRequestWithInputFields_WrongType() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearnToRankRescorerBuilderRewriteTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearnToRankRescorerBuilderRewriteTests.java new file mode 100644 index 0000000000000..5939d012831aa --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearnToRankRescorerBuilderRewriteTests.java @@ -0,0 +1,226 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.inference.ltr; + +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHits; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.query.CoordinatorRewriteContext; +import org.elasticsearch.index.query.DataRewriteContext; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.Rewriteable; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.rescore.RescorerBuilder; +import org.elasticsearch.test.AbstractBuilderTestCase; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.LearnToRankFeatureExtractorBuilder; +import org.elasticsearch.xpack.ml.inference.loadingservice.LocalModel; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfigTests.randomLearnToRankConfig; +import static org.elasticsearch.xpack.ml.inference.ltr.LearnToRankServiceTests.BAD_MODEL; +import static org.elasticsearch.xpack.ml.inference.ltr.LearnToRankServiceTests.GOOD_MODEL; +import static org.elasticsearch.xpack.ml.inference.ltr.LearnToRankServiceTests.GOOD_MODEL_CONFIG; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.in; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class LearnToRankRescorerBuilderRewriteTests extends AbstractBuilderTestCase { + + public void testMustRewrite() { + LearnToRankService learnToRankService = 
learnToRankServiceMock(); + LearnToRankRescorerBuilder rescorerBuilder = new LearnToRankRescorerBuilder( + GOOD_MODEL, + randomLearnToRankConfig(), + null, + learnToRankService + ); + + SearchExecutionContext context = createSearchExecutionContext(); + LearnToRankRescorerContext rescorerContext = rescorerBuilder.innerBuildContext(randomIntBetween(1, 30), context); + IllegalStateException e = expectThrows( + IllegalStateException.class, + () -> rescorerContext.rescorer() + .rescore( + new TopDocs(new TotalHits(10, TotalHits.Relation.EQUAL_TO), new ScoreDoc[10]), + mock(IndexSearcher.class), + rescorerContext + ) + ); + assertEquals("local model reference is null, missing rewriteAndFetch before rescore phase?", e.getMessage()); + } + + public void testRewriteOnCoordinator() throws IOException { + LearnToRankService learnToRankService = learnToRankServiceMock(); + LearnToRankRescorerBuilder rescorerBuilder = new LearnToRankRescorerBuilder(GOOD_MODEL, null, learnToRankService); + rescorerBuilder.windowSize(4); + CoordinatorRewriteContext context = createCoordinatorRewriteContext( + new DateFieldMapper.DateFieldType("@timestamp"), + randomIntBetween(0, 1_100_000), + randomIntBetween(1_500_000, Integer.MAX_VALUE) + ); + LearnToRankRescorerBuilder rewritten = rewriteAndFetch(rescorerBuilder, context); + assertThat(rewritten.learnToRankConfig(), not(nullValue())); + assertThat(rewritten.learnToRankConfig().getNumTopFeatureImportanceValues(), equalTo(2)); + assertThat( + "feature_1", + is( + in( + rewritten.learnToRankConfig() + .getFeatureExtractorBuilders() + .stream() + .map(LearnToRankFeatureExtractorBuilder::featureName) + .toList() + ) + ) + ); + assertThat(rewritten.windowSize(), equalTo(4)); + } + + public void testRewriteOnCoordinatorWithBadModel() throws IOException { + LearnToRankService learnToRankService = learnToRankServiceMock(); + LearnToRankRescorerBuilder rescorerBuilder = new LearnToRankRescorerBuilder(BAD_MODEL, null, learnToRankService); + CoordinatorRewriteContext context = createCoordinatorRewriteContext( + new DateFieldMapper.DateFieldType("@timestamp"), + randomIntBetween(0, 1_100_000), + randomIntBetween(1_500_000, Integer.MAX_VALUE) + ); + ElasticsearchStatusException ex = expectThrows(ElasticsearchStatusException.class, () -> rewriteAndFetch(rescorerBuilder, context)); + assertThat(ex.status(), equalTo(RestStatus.BAD_REQUEST)); + } + + public void testRewriteOnCoordinatorWithMissingModel() { + LearnToRankService learnToRankService = learnToRankServiceMock(); + LearnToRankRescorerBuilder rescorerBuilder = new LearnToRankRescorerBuilder("missing_model", null, learnToRankService); + CoordinatorRewriteContext context = createCoordinatorRewriteContext( + new DateFieldMapper.DateFieldType("@timestamp"), + randomIntBetween(0, 1_100_000), + randomIntBetween(1_500_000, Integer.MAX_VALUE) + ); + expectThrows(ResourceNotFoundException.class, () -> rewriteAndFetch(rescorerBuilder, context)); + } + + public void testRewriteOnShard() throws IOException { + LocalModel localModel = mock(LocalModel.class); + when(localModel.getModelId()).thenReturn(GOOD_MODEL); + + LearnToRankService learnToRankService = learnToRankServiceMock(); + LearnToRankRescorerBuilder rescorerBuilder = new LearnToRankRescorerBuilder( + localModel, + (LearnToRankConfig) GOOD_MODEL_CONFIG.getInferenceConfig(), + null, + learnToRankService + ); + SearchExecutionContext searchExecutionContext = createSearchExecutionContext(); + LearnToRankRescorerBuilder rewritten = (LearnToRankRescorerBuilder) 
rescorerBuilder.rewrite(createSearchExecutionContext()); + assertFalse(searchExecutionContext.hasAsyncActions()); + assertSame(localModel, rewritten.localModel()); + assertEquals(localModel.getModelId(), rewritten.modelId()); + } + + public void testRewriteAndFetchOnDataNode() throws IOException { + LearnToRankService learnToRankService = learnToRankServiceMock(); + LearnToRankRescorerBuilder rescorerBuilder = new LearnToRankRescorerBuilder( + GOOD_MODEL, + randomLearnToRankConfig(), + null, + learnToRankService + ); + + boolean setWindowSize = randomBoolean(); + if (setWindowSize) { + rescorerBuilder.windowSize(42); + } + DataRewriteContext rewriteContext = dataRewriteContext(); + LearnToRankRescorerBuilder rewritten = (LearnToRankRescorerBuilder) rescorerBuilder.rewrite(rewriteContext); + assertNotSame(rescorerBuilder, rewritten); + assertTrue(rewriteContext.hasAsyncActions()); + if (setWindowSize) { + assertThat(rewritten.windowSize(), equalTo(42)); + } + } + + @SuppressWarnings("unchecked") + private static LearnToRankService learnToRankServiceMock() { + LearnToRankService learnToRankService = mock(LearnToRankService.class); + + doAnswer(invocation -> { + String modelId = invocation.getArgument(0); + ActionListener l = invocation.getArgument(2, ActionListener.class); + if (modelId.equals(GOOD_MODEL)) { + l.onResponse(GOOD_MODEL_CONFIG.getInferenceConfig()); + } else if (modelId.equals(BAD_MODEL)) { + l.onFailure(new ElasticsearchStatusException("bad model", RestStatus.BAD_REQUEST)); + } else { + l.onFailure(new ResourceNotFoundException("missing model")); + } + return null; + }).when(learnToRankService).loadLearnToRankConfig(anyString(), any(), any()); + + doAnswer(invocation -> { + ActionListener l = invocation.getArgument(1, ActionListener.class); + l.onResponse(mock(LocalModel.class)); + return null; + }).when(learnToRankService).loadLocalModel(anyString(), any()); + + return learnToRankService; + } + + public void testBuildContext() throws Exception { + LocalModel localModel = mock(LocalModel.class); + List inputFields = List.of(DOUBLE_FIELD_NAME, INT_FIELD_NAME); + when(localModel.inputFields()).thenReturn(inputFields); + + IndexSearcher searcher = mock(IndexSearcher.class); + doAnswer(invocation -> invocation.getArgument(0)).when(searcher).rewrite(any(Query.class)); + SearchExecutionContext context = createSearchExecutionContext(searcher); + + LearnToRankRescorerBuilder rescorerBuilder = new LearnToRankRescorerBuilder( + localModel, + (LearnToRankConfig) GOOD_MODEL_CONFIG.getInferenceConfig(), + null, + mock(LearnToRankService.class) + ); + + LearnToRankRescorerContext rescoreContext = rescorerBuilder.innerBuildContext(20, context); + assertNotNull(rescoreContext); + assertThat(rescoreContext.getWindowSize(), equalTo(20)); + List featureExtractors = rescoreContext.buildFeatureExtractors(context.searcher()); + assertThat(featureExtractors, hasSize(2)); + assertThat( + featureExtractors.stream().flatMap(featureExtractor -> featureExtractor.featureNames().stream()).toList(), + containsInAnyOrder("feature_1", "feature_2", DOUBLE_FIELD_NAME, INT_FIELD_NAME) + ); + } + + private LearnToRankRescorerBuilder rewriteAndFetch(RescorerBuilder builder, QueryRewriteContext context) { + PlainActionFuture> future = new PlainActionFuture<>(); + Rewriteable.rewriteAndFetch(builder, context, future); + return (LearnToRankRescorerBuilder) future.actionGet(); + } +} diff --git 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilderSerializationTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearnToRankRescorerBuilderSerializationTests.java similarity index 54% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilderSerializationTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearnToRankRescorerBuilderSerializationTests.java index f85d24770f70e..4f6e5a0512f73 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilderSerializationTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearnToRankRescorerBuilderSerializationTests.java @@ -5,39 +5,43 @@ * 2.0. */ -package org.elasticsearch.xpack.ml.inference.rescorer; +package org.elasticsearch.xpack.ml.inference.ltr; import org.elasticsearch.TransportVersion; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Tuple; import org.elasticsearch.search.SearchModule; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.AbstractBWCSerializationTestCase; import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; -import org.elasticsearch.xpack.core.ml.inference.MlLTRNamedXContentProvider; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ClassificationConfigTests; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ClassificationConfigUpdateTests; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfigTests; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfigUpdateTests; -import org.elasticsearch.xpack.ml.inference.loadingservice.ModelLoadingService; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfig; +import org.elasticsearch.xpack.core.ml.ltr.MlLTRNamedXContentProvider; +import org.elasticsearch.xpack.ml.inference.loadingservice.LocalModel; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.function.Supplier; +import java.util.Map; import static org.elasticsearch.search.rank.RankBuilder.WINDOW_SIZE_FIELD; +import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfigTests.randomLearnToRankConfig; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; -public class InferenceRescorerBuilderSerializationTests extends AbstractBWCSerializationTestCase { +public class LearnToRankRescorerBuilderSerializationTests extends AbstractBWCSerializationTestCase { + + private static LearnToRankService learnToRankService = mock(LearnToRankService.class); @Override - protected InferenceRescorerBuilder doParseInstance(XContentParser parser) throws IOException { + protected LearnToRankRescorerBuilder doParseInstance(XContentParser parser) throws IOException { String fieldName = null; - InferenceRescorerBuilder rescorer = null; + LearnToRankRescorerBuilder rescorer = null; Integer windowSize = null; XContentParser.Token token = parser.nextToken(); assert token == 
XContentParser.Token.START_OBJECT; @@ -51,7 +55,7 @@ protected InferenceRescorerBuilder doParseInstance(XContentParser parser) throws throw new ParsingException(parser.getTokenLocation(), "rescore doesn't support [" + fieldName + "]"); } } else if (token == XContentParser.Token.START_OBJECT) { - rescorer = InferenceRescorerBuilder.fromXContent(parser, null); + rescorer = LearnToRankRescorerBuilder.fromXContent(parser, learnToRankService); } else { throw new ParsingException(parser.getTokenLocation(), "unexpected token [" + token + "] after [" + fieldName + "]"); } @@ -66,63 +70,82 @@ protected InferenceRescorerBuilder doParseInstance(XContentParser parser) throws } @Override - protected Writeable.Reader instanceReader() { - return in -> new InferenceRescorerBuilder(in, null); + protected Writeable.Reader instanceReader() { + return in -> new LearnToRankRescorerBuilder(in, learnToRankService); } @Override - protected InferenceRescorerBuilder createTestInstance() { - InferenceRescorerBuilder builder = randomBoolean() - ? new InferenceRescorerBuilder( - randomAlphaOfLength(10), - randomBoolean() ? null : LearnToRankConfigUpdateTests.randomLearnToRankConfigUpdate(), - null - ) - : new InferenceRescorerBuilder( + protected LearnToRankRescorerBuilder createTestInstance() { + LearnToRankRescorerBuilder builder = randomBoolean() + ? createXContextTestInstance(null) + : new LearnToRankRescorerBuilder( randomAlphaOfLength(10), - LearnToRankConfigTests.randomLearnToRankConfig(), - (Supplier) null + randomLearnToRankConfig(), + randomBoolean() ? randomParams() : null, + learnToRankService ); + if (randomBoolean()) { builder.windowSize(randomIntBetween(1, 10000)); } + return builder; } @Override - protected InferenceRescorerBuilder mutateInstance(InferenceRescorerBuilder instance) throws IOException { - int i = randomInt(3); + protected LearnToRankRescorerBuilder createXContextTestInstance(XContentType xContentType) { + return new LearnToRankRescorerBuilder(randomAlphaOfLength(10), randomBoolean() ? randomParams() : null, learnToRankService); + } + + @Override + protected LearnToRankRescorerBuilder mutateInstance(LearnToRankRescorerBuilder instance) throws IOException { + + int i = randomInt(4); return switch (i) { case 0 -> { - InferenceRescorerBuilder builder = new InferenceRescorerBuilder( - randomValueOtherThan(instance.getModelId(), () -> randomAlphaOfLength(10)), - instance.getInferenceConfigUpdate(), - null + LearnToRankRescorerBuilder builder = new LearnToRankRescorerBuilder( + randomValueOtherThan(instance.modelId(), () -> randomAlphaOfLength(10)), + instance.params(), + learnToRankService ); if (instance.windowSize() != null) { builder.windowSize(instance.windowSize()); } yield builder; } - case 1 -> new InferenceRescorerBuilder(instance.getModelId(), instance.getInferenceConfigUpdate(), null).windowSize( + case 1 -> new LearnToRankRescorerBuilder(instance.modelId(), instance.params(), learnToRankService).windowSize( randomValueOtherThan(instance.windowSize(), () -> randomIntBetween(1, 10000)) ); case 2 -> { - InferenceRescorerBuilder builder = new InferenceRescorerBuilder( - instance.getModelId(), - randomValueOtherThan(instance.getInferenceConfigUpdate(), LearnToRankConfigUpdateTests::randomLearnToRankConfigUpdate), - null + LearnToRankRescorerBuilder builder = new LearnToRankRescorerBuilder( + instance.modelId(), + randomValueOtherThan(instance.params(), () -> (randomBoolean() ? 
randomParams() : null)), + learnToRankService ); if (instance.windowSize() != null) { - builder.windowSize(instance.windowSize()); + builder.windowSize(instance.windowSize() + 1); } yield builder; } case 3 -> { - InferenceRescorerBuilder builder = new InferenceRescorerBuilder( - instance.getModelId(), - randomValueOtherThan(instance.getInferenceConfig(), LearnToRankConfigTests::randomLearnToRankConfig), - (Supplier) null + LearnToRankConfig learnToRankConfig = randomValueOtherThan(instance.learnToRankConfig(), () -> randomLearnToRankConfig()); + LearnToRankRescorerBuilder builder = new LearnToRankRescorerBuilder( + instance.modelId(), + learnToRankConfig, + null, + learnToRankService + ); + if (instance.windowSize() != null) { + builder.windowSize(instance.windowSize()); + } + yield builder; + } + case 4 -> { + LearnToRankRescorerBuilder builder = new LearnToRankRescorerBuilder( + mock(LocalModel.class), + instance.learnToRankConfig(), + instance.params(), + learnToRankService ); if (instance.windowSize() != null) { builder.windowSize(instance.windowSize()); @@ -134,30 +157,10 @@ protected InferenceRescorerBuilder mutateInstance(InferenceRescorerBuilder insta } @Override - protected InferenceRescorerBuilder mutateInstanceForVersion(InferenceRescorerBuilder instance, TransportVersion version) { + protected LearnToRankRescorerBuilder mutateInstanceForVersion(LearnToRankRescorerBuilder instance, TransportVersion version) { return instance; } - public void testIncorrectInferenceConfigUpdateType() { - InferenceRescorerBuilder.Builder builder = new InferenceRescorerBuilder.Builder(); - expectThrows( - IllegalArgumentException.class, - () -> builder.setInferenceConfigUpdate(ClassificationConfigUpdateTests.randomClassificationConfigUpdate()) - ); - // Should not throw - builder.setInferenceConfigUpdate(LearnToRankConfigUpdateTests.randomLearnToRankConfigUpdate()); - } - - public void testIncorrectInferenceConfigType() { - InferenceRescorerBuilder.Builder builder = new InferenceRescorerBuilder.Builder(); - expectThrows( - IllegalArgumentException.class, - () -> builder.setInferenceConfig(ClassificationConfigTests.randomClassificationConfig()) - ); - // Should not throw - builder.setInferenceConfig(LearnToRankConfigTests.randomLearnToRankConfig()); - } - @Override protected NamedXContentRegistry xContentRegistry() { List namedXContent = new ArrayList<>(); @@ -179,4 +182,15 @@ protected NamedWriteableRegistry writableRegistry() { protected NamedWriteableRegistry getNamedWriteableRegistry() { return writableRegistry(); } + + private static Map randomParams() { + return randomMap(1, randomIntBetween(1, 10), () -> new Tuple<>(randomIdentifier(), randomIdentifier())); + } + + private static LocalModel localModelMock() { + LocalModel model = mock(LocalModel.class); + String modelId = randomIdentifier(); + when(model.getModelId()).thenReturn(modelId); + return model; + } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearnToRankServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearnToRankServiceTests.java new file mode 100644 index 0000000000000..e4d0225637fa1 --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearnToRankServiceTests.java @@ -0,0 +1,199 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.inference.ltr; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.mustache.MustacheScriptEngine; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelInput; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.RegressionConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.QueryExtractorBuilder; +import org.elasticsearch.xpack.core.ml.ltr.MlLTRNamedXContentProvider; +import org.elasticsearch.xpack.core.ml.utils.QueryProviderTests; +import org.elasticsearch.xpack.ml.inference.loadingservice.ModelLoadingService; +import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.script.Script.DEFAULT_TEMPLATE_LANG; +import static org.hamcrest.Matchers.hasSize; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.argThat; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.isA; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +public class LearnToRankServiceTests extends ESTestCase { + public static final String GOOD_MODEL = "modelId"; + public static final String BAD_MODEL = "badModel"; + public static final String TEMPLATED_GOOD_MODEL = "templatedModelId"; + public static final TrainedModelConfig GOOD_MODEL_CONFIG = TrainedModelConfig.builder() + .setModelId(GOOD_MODEL) + .setInput(new TrainedModelInput(List.of("field1", "field2"))) + .setEstimatedOperations(1) + .setModelSize(2) + .setModelType(TrainedModelType.TREE_ENSEMBLE) + .setInferenceConfig( + new LearnToRankConfig( + 2, + List.of( + new QueryExtractorBuilder("feature_1", QueryProviderTests.createRandomValidQueryProvider("field_1", "foo")), + new QueryExtractorBuilder("feature_2", QueryProviderTests.createRandomValidQueryProvider("field_2", "bar")) + ) + ) + ) + .build(); + public static final TrainedModelConfig BAD_MODEL_CONFIG = TrainedModelConfig.builder() + .setModelId(BAD_MODEL) + .setInput(new TrainedModelInput(List.of("field1", "field2"))) + .setEstimatedOperations(1) + .setModelSize(2) + .setModelType(TrainedModelType.TREE_ENSEMBLE) + .setInferenceConfig(new RegressionConfig(null, null)) + .build(); + + public static final TrainedModelConfig TEMPLATED_GOOD_MODEL_CONFIG = new TrainedModelConfig.Builder(GOOD_MODEL_CONFIG).setModelId( + TEMPLATED_GOOD_MODEL + ) + .setInferenceConfig( + new LearnToRankConfig( + 2, + List.of( + new 
QueryExtractorBuilder("feature_1", QueryProviderTests.createRandomValidQueryProvider("field_1", "{{foo_param}}")), + new QueryExtractorBuilder("feature_2", QueryProviderTests.createRandomValidQueryProvider("field_2", "{{bar_param}}")) + ) + ) + ) + .build(); + + @SuppressWarnings("unchecked") + public void testLoadLearnToRankConfig() throws Exception { + LearnToRankService learnToRankService = new LearnToRankService( + mockModelLoadingService(), + mockTrainedModelProvider(), + mockScriptService(), + xContentRegistry() + ); + ActionListener listener = mock(ActionListener.class); + learnToRankService.loadLearnToRankConfig(GOOD_MODEL, Collections.emptyMap(), listener); + assertBusy(() -> verify(listener).onResponse(eq((LearnToRankConfig) GOOD_MODEL_CONFIG.getInferenceConfig()))); + } + + @SuppressWarnings("unchecked") + public void testLoadMissingLearnToRankConfig() throws Exception { + LearnToRankService learnToRankService = new LearnToRankService( + mockModelLoadingService(), + mockTrainedModelProvider(), + mockScriptService(), + xContentRegistry() + ); + ActionListener listener = mock(ActionListener.class); + learnToRankService.loadLearnToRankConfig("non-existing-model", Collections.emptyMap(), listener); + assertBusy(() -> verify(listener).onFailure(isA(ResourceNotFoundException.class))); + } + + @SuppressWarnings("unchecked") + public void testLoadBadLearnToRankConfig() throws Exception { + LearnToRankService learnToRankService = new LearnToRankService( + mockModelLoadingService(), + mockTrainedModelProvider(), + mockScriptService(), + xContentRegistry() + ); + ActionListener listener = mock(ActionListener.class); + learnToRankService.loadLearnToRankConfig(BAD_MODEL, Collections.emptyMap(), listener); + assertBusy(() -> verify(listener).onFailure(isA(ElasticsearchStatusException.class))); + } + + @SuppressWarnings("unchecked") + public void testLoadLearnToRankConfigWithTemplate() throws Exception { + LearnToRankService learnToRankService = new LearnToRankService( + mockModelLoadingService(), + mockTrainedModelProvider(), + mockScriptService(), + xContentRegistry() + ); + + // When no parameters are provided we expect the templated queries not being part of the retrieved config. + ActionListener noParamsListener = mock(ActionListener.class); + learnToRankService.loadLearnToRankConfig(TEMPLATED_GOOD_MODEL, Collections.emptyMap(), noParamsListener); + assertBusy(() -> verify(noParamsListener).onResponse(argThat(retrievedConfig -> { + assertThat(retrievedConfig.getFeatureExtractorBuilders(), hasSize(2)); + assertEquals(retrievedConfig, TEMPLATED_GOOD_MODEL_CONFIG.getInferenceConfig()); + return true; + }))); + + // Now testing when providing all the params of the template. 
+ + @SuppressWarnings("unchecked") + public void testLoadLearnToRankConfigWithTemplate() throws Exception { + LearnToRankService learnToRankService = new LearnToRankService( + mockModelLoadingService(), + mockTrainedModelProvider(), + mockScriptService(), + xContentRegistry() + ); + + // When no parameters are provided we expect the templated queries not to be part of the retrieved config. + ActionListener<LearnToRankConfig> noParamsListener = mock(ActionListener.class); + learnToRankService.loadLearnToRankConfig(TEMPLATED_GOOD_MODEL, Collections.emptyMap(), noParamsListener); + assertBusy(() -> verify(noParamsListener).onResponse(argThat(retrievedConfig -> { + assertThat(retrievedConfig.getFeatureExtractorBuilders(), hasSize(2)); + assertEquals(retrievedConfig, TEMPLATED_GOOD_MODEL_CONFIG.getInferenceConfig()); + return true; + }))); + + // Now test providing all the params of the template. + ActionListener<LearnToRankConfig> allParamsListener = mock(ActionListener.class); + learnToRankService.loadLearnToRankConfig( + TEMPLATED_GOOD_MODEL, + Map.ofEntries(Map.entry("foo_param", "foo"), Map.entry("bar_param", "bar")), + allParamsListener + ); + assertBusy(() -> verify(allParamsListener).onResponse(argThat(retrievedConfig -> { + assertThat(retrievedConfig.getFeatureExtractorBuilders(), hasSize(2)); + assertEquals(retrievedConfig, GOOD_MODEL_CONFIG.getInferenceConfig()); + return true; + }))); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List<NamedXContentRegistry.Entry> namedXContent = new ArrayList<>(); + namedXContent.addAll(new MlInferenceNamedXContentProvider().getNamedXContentParsers()); + namedXContent.addAll(new MlLTRNamedXContentProvider().getNamedXContentParsers()); + namedXContent.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents()); + return new NamedXContentRegistry(namedXContent); + } + + private ModelLoadingService mockModelLoadingService() { + return mock(ModelLoadingService.class); + } + + @SuppressWarnings("unchecked") + private TrainedModelProvider mockTrainedModelProvider() { + TrainedModelProvider trainedModelProvider = mock(TrainedModelProvider.class); + + doAnswer(invocation -> { + String modelId = invocation.getArgument(0); + ActionListener<TrainedModelConfig> l = invocation.getArgument(3, ActionListener.class); + switch (modelId) { + case GOOD_MODEL -> l.onResponse(GOOD_MODEL_CONFIG); + case TEMPLATED_GOOD_MODEL -> l.onResponse(TEMPLATED_GOOD_MODEL_CONFIG); + case BAD_MODEL -> l.onResponse(BAD_MODEL_CONFIG); + default -> l.onFailure(new ResourceNotFoundException("missing model")); + } + return null; + + }).when(trainedModelProvider).getTrainedModel(any(), any(), any(), any()); + + return trainedModelProvider; + } + + private ScriptService mockScriptService() { + ScriptEngine scriptEngine = new MustacheScriptEngine(); + return new ScriptService(Settings.EMPTY, Map.of(DEFAULT_TEMPLATE_LANG, scriptEngine), ScriptModule.CORE_CONTEXTS, () -> 1L); + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/rescorer/QueryFeatureExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/QueryFeatureExtractorTests.java similarity index 99% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/rescorer/QueryFeatureExtractorTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/QueryFeatureExtractorTests.java index bae0a6cf1c028..3878ce5dab087 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/rescorer/QueryFeatureExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/QueryFeatureExtractorTests.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.ml.inference.rescorer; +package org.elasticsearch.xpack.ml.inference.ltr; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilderRewriteTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilderRewriteTests.java deleted file mode 100644 index b31c425ea1eb4..0000000000000 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilderRewriteTests.java +++ /dev/null @@ -1,303 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V.
under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.ml.inference.rescorer; - -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.TotalHits; -import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.internal.Client; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.breaker.CircuitBreaker; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.mapper.DateFieldMapper; -import org.elasticsearch.index.query.CoordinatorRewriteContext; -import org.elasticsearch.index.query.DataRewriteContext; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.index.query.QueryRewriteContext; -import org.elasticsearch.index.query.Rewriteable; -import org.elasticsearch.index.query.SearchExecutionContext; -import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.search.rescore.RescorerBuilder; -import org.elasticsearch.test.AbstractBuilderTestCase; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; -import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; -import org.elasticsearch.xpack.core.ml.inference.TrainedModelInput; -import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfig; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfigTests; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfigUpdate; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfigUpdateTests; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.RegressionConfig; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.LearnToRankFeatureExtractorBuilder; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.QueryExtractorBuilder; -import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.core.ml.utils.QueryProvider; -import org.elasticsearch.xpack.ml.inference.TrainedModelStatsService; -import org.elasticsearch.xpack.ml.inference.loadingservice.LocalModel; -import org.elasticsearch.xpack.ml.inference.loadingservice.ModelLoadingService; -import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; -import org.elasticsearch.xpack.ml.notifications.InferenceAuditor; - -import java.io.IOException; -import java.lang.reflect.Method; -import java.util.List; - -import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.in; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; 
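The deleted class below covered the coordinator-side rewrite cycle that the new `LearnToRankService` tests now exercise: a rescorer that still needs its trained-model config registers an async fetch during rewrite, and rescoring before that fetch completes must fail. A rough, dependency-free sketch of that rewrite-and-fetch loop; the interface and record names here are invented for illustration (the production code uses `Rewriteable` and blocks in tests via `PlainActionFuture.actionGet()`):

```java
import java.util.concurrent.CompletableFuture;

public class RewriteAndFetchSketch {

    // Minimal model of the contract: a builder may need remote state before it
    // can be used, and each rewrite round may trigger one async fetch.
    interface Rewriteable<T extends Rewriteable<T>> {
        boolean needsFetch();
        CompletableFuture<T> rewrite();
    }

    static <T extends Rewriteable<T>> T rewriteAndFetch(T builder) {
        T current = builder;
        while (current.needsFetch()) {
            current = current.rewrite().join(); // tests block the same way on a future
        }
        return current;
    }

    // Toy builder that "fetches" its model on the first rewrite round.
    record ToyRescorer(String model) implements Rewriteable<ToyRescorer> {
        public boolean needsFetch() {
            return model == null;
        }

        public CompletableFuture<ToyRescorer> rewrite() {
            return CompletableFuture.supplyAsync(() -> new ToyRescorer("fetched-model"));
        }
    }

    public static void main(String[] args) {
        ToyRescorer rewritten = rewriteAndFetch(new ToyRescorer(null));
        System.out.println(rewritten.model()); // fetched-model
    }
}
```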
- -public class InferenceRescorerBuilderRewriteTests extends AbstractBuilderTestCase { - - private static final String GOOD_MODEL = "modelId"; - private static final String BAD_MODEL = "badModel"; - private static final TrainedModelConfig GOOD_MODEL_CONFIG = TrainedModelConfig.builder() - .setModelId(GOOD_MODEL) - .setInput(new TrainedModelInput(List.of("field1", "field2"))) - .setEstimatedOperations(1) - .setModelSize(2) - .setModelType(TrainedModelType.TREE_ENSEMBLE) - .setInferenceConfig(new LearnToRankConfig(null, null)) - .build(); - private static final TrainedModelConfig BAD_MODEL_CONFIG = TrainedModelConfig.builder() - .setModelId(BAD_MODEL) - .setInput(new TrainedModelInput(List.of("field1", "field2"))) - .setEstimatedOperations(1) - .setModelSize(2) - .setModelType(TrainedModelType.TREE_ENSEMBLE) - .setInferenceConfig(new RegressionConfig(null, null)) - .build(); - - public void testMustRewrite() { - TestModelLoader testModelLoader = new TestModelLoader(); - InferenceRescorerBuilder inferenceRescorerBuilder = new InferenceRescorerBuilder( - GOOD_MODEL, - LearnToRankConfigTests.randomLearnToRankConfig(), - () -> testModelLoader - ); - SearchExecutionContext context = createSearchExecutionContext(); - InferenceRescorerContext inferenceRescorerContext = inferenceRescorerBuilder.innerBuildContext(randomIntBetween(1, 30), context); - IllegalStateException e = expectThrows( - IllegalStateException.class, - () -> inferenceRescorerContext.rescorer() - .rescore( - new TopDocs(new TotalHits(10, TotalHits.Relation.EQUAL_TO), new ScoreDoc[10]), - mock(IndexSearcher.class), - inferenceRescorerContext - ) - ); - assertEquals("local model reference is null, missing rewriteAndFetch before rescore phase?", e.getMessage()); - } - - public void testRewriteOnCoordinator() throws IOException { - TestModelLoader testModelLoader = new TestModelLoader(); - LearnToRankConfigUpdate ltru = new LearnToRankConfigUpdate( - 2, - List.of(new QueryExtractorBuilder("all", QueryProvider.fromParsedQuery(QueryBuilders.matchAllQuery()))) - ); - InferenceRescorerBuilder inferenceRescorerBuilder = new InferenceRescorerBuilder(GOOD_MODEL, ltru, () -> testModelLoader); - inferenceRescorerBuilder.windowSize(4); - CoordinatorRewriteContext context = createCoordinatorRewriteContext( - new DateFieldMapper.DateFieldType("@timestamp"), - randomIntBetween(0, 1_100_000), - randomIntBetween(1_500_000, Integer.MAX_VALUE) - ); - InferenceRescorerBuilder rewritten = rewriteAndFetch(inferenceRescorerBuilder, context); - assertThat(rewritten.getInferenceConfig(), not(nullValue())); - assertThat(rewritten.getInferenceConfig().getNumTopFeatureImportanceValues(), equalTo(2)); - assertThat( - "all", - is( - in( - rewritten.getInferenceConfig() - .getFeatureExtractorBuilders() - .stream() - .map(LearnToRankFeatureExtractorBuilder::featureName) - .toList() - ) - ) - ); - assertThat(rewritten.getInferenceConfigUpdate(), is(nullValue())); - assertThat(rewritten.windowSize(), equalTo(4)); - } - - public void testRewriteOnCoordinatorWithBadModel() throws IOException { - TestModelLoader testModelLoader = new TestModelLoader(); - InferenceRescorerBuilder inferenceRescorerBuilder = new InferenceRescorerBuilder( - BAD_MODEL, - randomBoolean() ? 
null : LearnToRankConfigUpdateTests.randomLearnToRankConfigUpdate(), - () -> testModelLoader - ); - CoordinatorRewriteContext context = createCoordinatorRewriteContext( - new DateFieldMapper.DateFieldType("@timestamp"), - randomIntBetween(0, 1_100_000), - randomIntBetween(1_500_000, Integer.MAX_VALUE) - ); - ElasticsearchStatusException ex = expectThrows( - ElasticsearchStatusException.class, - () -> rewriteAndFetch(inferenceRescorerBuilder, context) - ); - assertThat(ex.status(), equalTo(RestStatus.BAD_REQUEST)); - } - - public void testRewriteOnCoordinatorWithMissingModel() { - TestModelLoader testModelLoader = new TestModelLoader(); - InferenceRescorerBuilder inferenceRescorerBuilder = new InferenceRescorerBuilder( - "missing_model", - randomBoolean() ? null : LearnToRankConfigUpdateTests.randomLearnToRankConfigUpdate(), - () -> testModelLoader - ); - CoordinatorRewriteContext context = createCoordinatorRewriteContext( - new DateFieldMapper.DateFieldType("@timestamp"), - randomIntBetween(0, 1_100_000), - randomIntBetween(1_500_000, Integer.MAX_VALUE) - ); - expectThrows(ResourceNotFoundException.class, () -> rewriteAndFetch(inferenceRescorerBuilder, context)); - } - - public void testSearchRewrite() throws IOException { - TestModelLoader testModelLoader = new TestModelLoader(); - InferenceRescorerBuilder inferenceRescorerBuilder = new InferenceRescorerBuilder( - GOOD_MODEL, - LearnToRankConfigTests.randomLearnToRankConfig(), - () -> testModelLoader - ); - QueryRewriteContext context = createSearchExecutionContext(); - InferenceRescorerBuilder rewritten = (InferenceRescorerBuilder) Rewriteable.rewrite(inferenceRescorerBuilder, context, true); - assertThat(rewritten.modelLoadingServiceSupplier(), is(notNullValue())); - - inferenceRescorerBuilder = new InferenceRescorerBuilder(GOOD_MODEL, LearnToRankConfigTests.randomLearnToRankConfig(), localModel()); - - rewritten = (InferenceRescorerBuilder) Rewriteable.rewrite(inferenceRescorerBuilder, context, true); - assertThat(rewritten.modelLoadingServiceSupplier(), is(nullValue())); - assertThat(rewritten.getInferenceDefinition(), is(notNullValue())); - } - - protected InferenceRescorerBuilder rewriteAndFetch(RescorerBuilder<InferenceRescorerBuilder> builder, QueryRewriteContext context) { - PlainActionFuture<RescorerBuilder<InferenceRescorerBuilder>> future = new PlainActionFuture<>(); - Rewriteable.rewriteAndFetch(builder, context, future); - return (InferenceRescorerBuilder) future.actionGet(); - } - - @Override - protected boolean canSimulateMethod(Method method, Object[] args) throws NoSuchMethodException { - return method.equals(Client.class.getMethod("execute", ActionType.class, ActionRequest.class, ActionListener.class)) - && (args[0] instanceof GetTrainedModelsAction); - } - - @Override - protected Object simulateMethod(Method method, Object[] args) { - GetTrainedModelsAction.Request request = (GetTrainedModelsAction.Request) args[1]; - @SuppressWarnings("unchecked") // We matched the method above.
- ActionListener listener = (ActionListener) args[2]; - if (request.getResourceId().equals(GOOD_MODEL)) { - listener.onResponse(GetTrainedModelsAction.Response.builder().setModels(List.of(GOOD_MODEL_CONFIG)).build()); - return null; - } - if (request.getResourceId().equals(BAD_MODEL)) { - listener.onResponse(GetTrainedModelsAction.Response.builder().setModels(List.of(BAD_MODEL_CONFIG)).build()); - return null; - } - listener.onFailure(ExceptionsHelper.missingTrainedModel(request.getResourceId())); - return null; - } - - public void testRewriteOnShard() throws IOException { - TestModelLoader testModelLoader = new TestModelLoader(); - InferenceRescorerBuilder inferenceRescorerBuilder = new InferenceRescorerBuilder( - GOOD_MODEL, - (LearnToRankConfig) GOOD_MODEL_CONFIG.getInferenceConfig(), - () -> testModelLoader - ); - SearchExecutionContext searchExecutionContext = createSearchExecutionContext(); - InferenceRescorerBuilder rewritten = (InferenceRescorerBuilder) inferenceRescorerBuilder.rewrite(createSearchExecutionContext()); - assertSame(inferenceRescorerBuilder, rewritten); - assertFalse(searchExecutionContext.hasAsyncActions()); - } - - public void testRewriteAndFetchOnDataNode() throws IOException { - TestModelLoader testModelLoader = new TestModelLoader(); - InferenceRescorerBuilder inferenceRescorerBuilder = new InferenceRescorerBuilder( - GOOD_MODEL, - (LearnToRankConfig) GOOD_MODEL_CONFIG.getInferenceConfig(), - () -> testModelLoader - ); - boolean setWindowSize = randomBoolean(); - if (setWindowSize) { - inferenceRescorerBuilder.windowSize(42); - } - DataRewriteContext rewriteContext = dataRewriteContext(); - InferenceRescorerBuilder rewritten = (InferenceRescorerBuilder) inferenceRescorerBuilder.rewrite(rewriteContext); - assertNotSame(inferenceRescorerBuilder, rewritten); - assertTrue(rewriteContext.hasAsyncActions()); - if (setWindowSize) { - assertThat(rewritten.windowSize(), equalTo(42)); - } - } - - public void testBuildContext() throws Exception { - LocalModel localModel = localModel(); - List inputFields = List.of(DOUBLE_FIELD_NAME, INT_FIELD_NAME); - when(localModel.inputFields()).thenReturn(inputFields); - SearchExecutionContext context = createSearchExecutionContext(); - InferenceRescorerBuilder inferenceRescorerBuilder = new InferenceRescorerBuilder( - GOOD_MODEL, - (LearnToRankConfig) GOOD_MODEL_CONFIG.getInferenceConfig(), - localModel - ); - InferenceRescorerContext rescoreContext = inferenceRescorerBuilder.innerBuildContext(20, context); - assertNotNull(rescoreContext); - assertThat(rescoreContext.getWindowSize(), equalTo(20)); - List featureExtractors = rescoreContext.buildFeatureExtractors(context.searcher()); - assertThat(featureExtractors, hasSize(1)); - assertThat( - featureExtractors.stream().flatMap(featureExtractor -> featureExtractor.featureNames().stream()).toList(), - containsInAnyOrder(DOUBLE_FIELD_NAME, INT_FIELD_NAME) - ); - } - - private static LocalModel localModel() { - return mock(LocalModel.class); - } - - private static class TestModelLoader extends ModelLoadingService { - TestModelLoader() { - super( - mock(TrainedModelProvider.class), - mock(InferenceAuditor.class), - mock(ThreadPool.class), - mock(ClusterService.class), - mock(TrainedModelStatsService.class), - Settings.EMPTY, - "test", - mock(CircuitBreaker.class), - new XPackLicenseState(System::currentTimeMillis) - ); - } - - @Override - public void getModelForLearnToRank(String modelId, ActionListener modelActionListener) { - modelActionListener.onResponse(localModel()); - } - } -} diff 
--git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java index 5550b1ff1df35..cb10eead972ec 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java @@ -443,17 +443,6 @@ private Job.Builder createCategorizationJob( return builder; } - private Job.Builder createJob() { - Detector.Builder d = new Detector.Builder("info_content", "domain").setOverFieldName("client"); - AnalysisConfig.Builder ac = new AnalysisConfig.Builder(Collections.singletonList(d.build())); - - Job.Builder builder = new Job.Builder(); - builder.setId("foo"); - builder.setAnalysisConfig(ac); - builder.setDataDescription(new DataDescription.Builder()); - return builder; - } - private JobManager createJobManager(Client client) { return new JobManager( jobResultsProvider, @@ -469,12 +458,6 @@ private JobManager createJobManager(Client client) { ); } - private ClusterState createClusterState() { - ClusterState.Builder builder = ClusterState.builder(new ClusterName("_name")); - builder.metadata(Metadata.builder()); - return builder.build(); - } - private BytesReference toBytesReference(ToXContent content) throws IOException { try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()) { content.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobNodeSelectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobNodeSelectorTests.java index 92dc9a9a749cf..112a8c80b0483 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobNodeSelectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobNodeSelectorTests.java @@ -38,6 +38,7 @@ import org.junit.Before; import java.net.InetAddress; +import java.time.Instant; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -1429,7 +1430,7 @@ static void addDataFrameAnalyticsJobTask( if (state != null) { builder.updateTaskState( MlTasks.dataFrameAnalyticsTaskId(id), - new DataFrameAnalyticsTaskState(state, builder.getLastAllocationId() - (isStale ? 1 : 0), null) + new DataFrameAnalyticsTaskState(state, builder.getLastAllocationId() - (isStale ? 
1 : 0), null, Instant.now()) ); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobTaskStateTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobTaskStateTests.java index 8ba8a15cf66d0..db52e00b8b9ea 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobTaskStateTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobTaskStateTests.java @@ -16,7 +16,7 @@ public class JobTaskStateTests extends AbstractXContentSerializingTestCase false); InOrder inOrder = inOrder(client); - inOrder.verify(client).execute(eq(SearchAction.INSTANCE), any(), any()); + inOrder.verify(client).execute(eq(TransportSearchAction.TYPE), any(), any()); inOrder.verify(client).execute(eq(BulkAction.INSTANCE), bulkRequestCaptor.capture(), any()); inOrder.verifyNoMoreInteractions(); @@ -370,7 +370,7 @@ private void testPersistQuantilesAsync(SearchHits searchHits, String expectedInd SearchResponse searchResponse = mock(SearchResponse.class); when(searchResponse.getHits()).thenReturn(searchHits); - doAnswer(withResponse(searchResponse)).when(client).execute(eq(SearchAction.INSTANCE), any(), any()); + doAnswer(withResponse(searchResponse)).when(client).execute(eq(TransportSearchAction.TYPE), any(), any()); IndexResponse indexResponse = mock(IndexResponse.class); doAnswer(withResponse(indexResponse)).when(client).execute(eq(IndexAction.INSTANCE), any(), any()); @@ -380,7 +380,7 @@ private void testPersistQuantilesAsync(SearchHits searchHits, String expectedInd persister.persistQuantiles(quantiles, WriteRequest.RefreshPolicy.IMMEDIATE, indexResponseListener); InOrder inOrder = inOrder(client, indexResponseListener); - inOrder.verify(client).execute(eq(SearchAction.INSTANCE), any(), any()); + inOrder.verify(client).execute(eq(TransportSearchAction.TYPE), any(), any()); inOrder.verify(client).execute(eq(IndexAction.INSTANCE), indexRequestCaptor.capture(), any()); inOrder.verify(indexResponseListener).onResponse(any()); inOrder.verifyNoMoreInteractions(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java index e160b310df016..e9cc63e4dd96e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java @@ -10,14 +10,14 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.MultiSearchAction; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.MultiSearchRequestBuilder; import org.elasticsearch.action.search.MultiSearchResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportMultiSearchAction; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -748,67 +748,72 @@ public void 
testDatafeedTimingStats_MultipleDocumentsAtOnce() throws IOException ); SearchResponse responseFoo = createSearchResponse(sourceFoo); SearchResponse responseBar = createSearchResponse(sourceBar); - MultiSearchResponse multiSearchResponse = new MultiSearchResponse( + final MultiSearchResponse multiSearchResponse = new MultiSearchResponse( new MultiSearchResponse.Item[] { new MultiSearchResponse.Item(responseFoo, null), new MultiSearchResponse.Item(responseBar, null) }, randomNonNegativeLong() ); - Client client = getBasicMockedClient(); - when(client.prepareMultiSearch()).thenReturn(new MultiSearchRequestBuilder(client, MultiSearchAction.INSTANCE)); - doAnswer(invocationOnMock -> { - MultiSearchRequest multiSearchRequest = (MultiSearchRequest) invocationOnMock.getArguments()[0]; - assertThat(multiSearchRequest.requests(), hasSize(2)); - assertThat(multiSearchRequest.requests().get(0).source().query().getName(), equalTo("ids")); - assertThat(multiSearchRequest.requests().get(1).source().query().getName(), equalTo("ids")); - @SuppressWarnings("unchecked") - ActionListener<MultiSearchResponse> actionListener = (ActionListener<MultiSearchResponse>) invocationOnMock.getArguments()[1]; - actionListener.onResponse(multiSearchResponse); - return null; - }).when(client).multiSearch(any(), any()); - when(client.prepareSearch(AnomalyDetectorsIndex.jobResultsAliasedName("foo"))).thenReturn( - new SearchRequestBuilder(client, SearchAction.INSTANCE).setIndices(AnomalyDetectorsIndex.jobResultsAliasedName("foo")) - ); - when(client.prepareSearch(AnomalyDetectorsIndex.jobResultsAliasedName("bar"))).thenReturn( - new SearchRequestBuilder(client, SearchAction.INSTANCE).setIndices(AnomalyDetectorsIndex.jobResultsAliasedName("bar")) - ); + try { + Client client = getBasicMockedClient(); + when(client.prepareMultiSearch()).thenReturn(new MultiSearchRequestBuilder(client, TransportMultiSearchAction.TYPE)); + doAnswer(invocationOnMock -> { + MultiSearchRequest multiSearchRequest = (MultiSearchRequest) invocationOnMock.getArguments()[0]; + assertThat(multiSearchRequest.requests(), hasSize(2)); + assertThat(multiSearchRequest.requests().get(0).source().query().getName(), equalTo("ids")); + assertThat(multiSearchRequest.requests().get(1).source().query().getName(), equalTo("ids")); + @SuppressWarnings("unchecked") + ActionListener<MultiSearchResponse> actionListener = (ActionListener<MultiSearchResponse>) invocationOnMock + .getArguments()[1]; + actionListener.onResponse(multiSearchResponse); + return null; + }).when(client).multiSearch(any(), any()); + when(client.prepareSearch(AnomalyDetectorsIndex.jobResultsAliasedName("foo"))).thenReturn( + new SearchRequestBuilder(client, TransportSearchAction.TYPE).setIndices(AnomalyDetectorsIndex.jobResultsAliasedName("foo")) + ); + when(client.prepareSearch(AnomalyDetectorsIndex.jobResultsAliasedName("bar"))).thenReturn( + new SearchRequestBuilder(client, TransportSearchAction.TYPE).setIndices(AnomalyDetectorsIndex.jobResultsAliasedName("bar")) + ); - JobResultsProvider provider = createProvider(client); - ExponentialAverageCalculationContext contextFoo = new ExponentialAverageCalculationContext( - 600.0, - Instant.ofEpochMilli(100000600), - 60.0 - ); - ExponentialAverageCalculationContext contextBar = new ExponentialAverageCalculationContext( - 700.0, - Instant.ofEpochMilli(100000700), - 70.0 - ); - provider.datafeedTimingStats( - List.of("foo", "bar"), - null, - ActionTestUtils.assertNoFailureListener( - statsByJobId -> assertThat( - statsByJobId, - equalTo( - Map.of( - "foo", - new DatafeedTimingStats("foo", 6, 66, 666.0, contextFoo), - "bar", - new DatafeedTimingStats("bar", 7, 77, 777.0, contextBar) + JobResultsProvider provider = createProvider(client); + ExponentialAverageCalculationContext contextFoo = new ExponentialAverageCalculationContext( + 600.0, + Instant.ofEpochMilli(100000600), + 60.0 + ); + ExponentialAverageCalculationContext contextBar = new ExponentialAverageCalculationContext( + 700.0, + Instant.ofEpochMilli(100000700), + 70.0 + ); + provider.datafeedTimingStats( + List.of("foo", "bar"), + null, + ActionTestUtils.assertNoFailureListener( + statsByJobId -> assertThat( + statsByJobId, + equalTo( + Map.of( + "foo", + new DatafeedTimingStats("foo", 6, 66, 666.0, contextFoo), + "bar", + new DatafeedTimingStats("bar", 7, 77, 777.0, contextBar) + ) ) ) ) - ) - ); + ); - verify(client).threadPool(); - verify(client).prepareMultiSearch(); - verify(client).multiSearch(any(MultiSearchRequest.class), any()); - verify(client).prepareSearch(AnomalyDetectorsIndex.jobResultsAliasedName("foo")); - verify(client).prepareSearch(AnomalyDetectorsIndex.jobResultsAliasedName("bar")); - verifyNoMoreInteractions(client); + verify(client).threadPool(); + verify(client).prepareMultiSearch(); + verify(client).multiSearch(any(MultiSearchRequest.class), any()); + verify(client).prepareSearch(AnomalyDetectorsIndex.jobResultsAliasedName("foo")); + verify(client).prepareSearch(AnomalyDetectorsIndex.jobResultsAliasedName("bar")); + verifyNoMoreInteractions(client); + } finally { + multiSearchResponse.decRef(); + } }
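The `doAnswer` stubs above complete listener-style callbacks inline, so the test never touches a real cluster. A compact, self-contained example of the same pattern, assuming only Mockito on the classpath (`AsyncSearcher` is a made-up interface, not an Elasticsearch type):

```java
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

import java.util.function.Consumer;

public class DoAnswerSketch {

    // A hypothetical async API in the listener-passing style the mocked Client uses.
    interface AsyncSearcher {
        void search(String query, Consumer<String> listener);
    }

    public static void main(String[] args) {
        AsyncSearcher searcher = mock(AsyncSearcher.class);

        // doAnswer intercepts the call, pulls the listener out of the invocation
        // arguments and completes it immediately with a canned response.
        doAnswer(invocation -> {
            Consumer<String> listener = invocation.getArgument(1);
            listener.accept("canned-response");
            return null;
        }).when(searcher).search(any(), any());

        searcher.search("q", response -> System.out.println("got " + response));
    }
}
```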
DatafeedTimingStats("bar", 7, 77, 777.0, contextBar) + JobResultsProvider provider = createProvider(client); + ExponentialAverageCalculationContext contextFoo = new ExponentialAverageCalculationContext( + 600.0, + Instant.ofEpochMilli(100000600), + 60.0 + ); + ExponentialAverageCalculationContext contextBar = new ExponentialAverageCalculationContext( + 700.0, + Instant.ofEpochMilli(100000700), + 70.0 + ); + provider.datafeedTimingStats( + List.of("foo", "bar"), + null, + ActionTestUtils.assertNoFailureListener( + statsByJobId -> assertThat( + statsByJobId, + equalTo( + Map.of( + "foo", + new DatafeedTimingStats("foo", 6, 66, 666.0, contextFoo), + "bar", + new DatafeedTimingStats("bar", 7, 77, 777.0, contextBar) + ) ) ) ) - ) - ); + ); - verify(client).threadPool(); - verify(client).prepareMultiSearch(); - verify(client).multiSearch(any(MultiSearchRequest.class), any()); - verify(client).prepareSearch(AnomalyDetectorsIndex.jobResultsAliasedName("foo")); - verify(client).prepareSearch(AnomalyDetectorsIndex.jobResultsAliasedName("bar")); - verifyNoMoreInteractions(client); + verify(client).threadPool(); + verify(client).prepareMultiSearch(); + verify(client).multiSearch(any(MultiSearchRequest.class), any()); + verify(client).prepareSearch(AnomalyDetectorsIndex.jobResultsAliasedName("foo")); + verify(client).prepareSearch(AnomalyDetectorsIndex.jobResultsAliasedName("bar")); + verifyNoMoreInteractions(client); + } finally { + multiSearchResponse.decRef(); + } } public void testDatafeedTimingStats_Ok() throws IOException { @@ -837,7 +842,9 @@ public void testDatafeedTimingStats_Ok() throws IOException { SearchResponse response = createSearchResponse(source); Client client = getMockedClient(queryBuilder -> assertThat(queryBuilder.getName(), equalTo("ids")), response); - when(client.prepareSearch(indexName)).thenReturn(new SearchRequestBuilder(client, SearchAction.INSTANCE).setIndices(indexName)); + when(client.prepareSearch(indexName)).thenReturn( + new SearchRequestBuilder(client, TransportSearchAction.TYPE).setIndices(indexName) + ); JobResultsProvider provider = createProvider(client); ExponentialAverageCalculationContext contextFoo = new ExponentialAverageCalculationContext( 600.0, @@ -864,7 +871,9 @@ public void testDatafeedTimingStats_NotFound() throws IOException { SearchResponse response = createSearchResponse(source); Client client = getMockedClient(queryBuilder -> assertThat(queryBuilder.getName(), equalTo("ids")), response); - when(client.prepareSearch(indexName)).thenReturn(new SearchRequestBuilder(client, SearchAction.INSTANCE).setIndices(indexName)); + when(client.prepareSearch(indexName)).thenReturn( + new SearchRequestBuilder(client, TransportSearchAction.TYPE).setIndices(indexName) + ); JobResultsProvider provider = createProvider(client); provider.datafeedTimingStats("foo", stats -> assertThat(stats, equalTo(new DatafeedTimingStats("foo"))), e -> { throw new AssertionError("Failure getting datafeed timing stats", e); @@ -949,7 +958,11 @@ private Client getMockedClient(Consumer queryBuilderConsumer, Sear new MultiSearchResponse.Item[] { new MultiSearchResponse.Item(response, null) }, randomNonNegativeLong() ); - actionListener.onResponse(mresponse); + try { + actionListener.onResponse(mresponse); + } finally { + mresponse.decRef(); + } return null; }).when(client).multiSearch(any(), any()); doAnswer(invocationOnMock -> { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java 
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java index 4a034908282bc..36828423ce8e9 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java @@ -268,7 +268,12 @@ public void testOpenJob() { manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> {}); assertEquals(1, manager.numberOfOpenJobs()); assertTrue(manager.jobHasActiveAutodetectProcess(jobTask)); - verify(jobTask).updatePersistentTaskState(eq(new JobTaskState(JobState.OPENED, 1L, null)), any()); + ArgumentCaptor<JobTaskState> captor = ArgumentCaptor.forClass(JobTaskState.class); + verify(jobTask).updatePersistentTaskState(captor.capture(), any()); + JobTaskState state = captor.getValue(); + assertThat(state.getState(), equalTo(JobState.OPENED)); + assertThat(state.getAllocationId(), equalTo(1L)); + assertNull(state.getReason()); } public void testOpenJob_withoutVersion() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredAnnotationsRemoverTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredAnnotationsRemoverTests.java index f0eb026249996..ad0719011c92e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredAnnotationsRemoverTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredAnnotationsRemoverTests.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.ml.job.retention; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.index.reindex.BulkByScrollResponse; @@ -180,7 +180,7 @@ private void givenBucket(Bucket bucket) { ActionListener<SearchResponse> listener = (ActionListener<SearchResponse>) invocationOnMock.getArguments()[2]; listener.onResponse(AbstractExpiredJobDataRemoverTests.createSearchResponse(Collections.singletonList(bucket))); return null; - }).when(client).execute(eq(SearchAction.INSTANCE), any(), any()); + }).when(client).execute(eq(TransportSearchAction.TYPE), any(), any()); } private ExpiredAnnotationsRemover createExpiredAnnotationsRemover(Iterator<Job> jobIterator) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemoverTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemoverTests.java index 5be897df538ef..5b3168a425029 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemoverTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemoverTests.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.ml.job.retention; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; @@
-99,7 +99,7 @@ public void testRemove_GivenJobWithoutActiveSnapshot() throws IOException { listener.waitToCompletion(); assertThat(listener.success, is(true)); - verify(client, times(1)).execute(eq(SearchAction.INSTANCE), any(), any()); + verify(client, times(1)).execute(eq(TransportSearchAction.TYPE), any(), any()); } public void testRemove_GivenJobsWithMixedRetentionPolicies() { @@ -351,7 +351,7 @@ public Void answer(InvocationOnMock invocationOnMock) { } return null; } - }).when(client).execute(same(SearchAction.INSTANCE), any(), any()); + }).when(client).execute(same(TransportSearchAction.TYPE), any(), any()); doAnswer(invocationOnMock -> { capturedDeleteModelSnapshotRequests.add((DeleteByQueryRequest) invocationOnMock.getArguments()[1]); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java index 258ec71505caf..5aa5b847b26be 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.ml.job.retention; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.index.reindex.BulkByScrollResponse; @@ -180,7 +180,7 @@ private void givenBucket(Bucket bucket) { ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; listener.onResponse(AbstractExpiredJobDataRemoverTests.createSearchResponse(Collections.singletonList(bucket))); return null; - }).when(client).execute(eq(SearchAction.INSTANCE), any(), any()); + }).when(client).execute(eq(TransportSearchAction.TYPE), any(), any()); } private ExpiredResultsRemover createExpiredResultsRemover(Iterator jobIterator) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutorTests.java index 0b563a8a08107..c3db184759d3f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutorTests.java @@ -64,6 +64,7 @@ import org.elasticsearch.xpack.ml.process.MlMemoryTracker; import org.junit.Before; +import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -238,7 +239,7 @@ public static void addJobTask( if (jobState != null) { builder.updateTaskState( MlTasks.jobTaskId(jobId), - new JobTaskState(jobState, builder.getLastAllocationId() - (isStale ? 1 : 0), null) + new JobTaskState(jobState, builder.getLastAllocationId() - (isStale ? 
1 : 0), null, Instant.now()) ); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilderTests.java index a329a55d8afe9..d8edea137330f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilderTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.AbstractQueryTestCase; import org.elasticsearch.xpack.core.ml.action.InferModelAction; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelPrefixStrings; import org.elasticsearch.xpack.core.ml.inference.results.TextExpansionResults; import org.elasticsearch.xpack.ml.MachineLearning; @@ -78,6 +79,7 @@ protected boolean canSimulateMethod(Method method, Object[] args) throws NoSuchM protected Object simulateMethod(Method method, Object[] args) { InferModelAction.Request request = (InferModelAction.Request) args[1]; assertEquals(InferModelAction.Request.DEFAULT_TIMEOUT_FOR_API, request.getInferenceTimeout()); + assertEquals(TrainedModelPrefixStrings.PrefixType.SEARCH, request.getPrefixType()); // Randomisation cannot be used here as {@code #doAssertLuceneQuery} // asserts that 2 rewritten queries are the same diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java index e9a89a81f62e2..ed41042913421 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java @@ -12,6 +12,9 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -36,6 +39,7 @@ import org.elasticsearch.ingest.common.IngestCommonPlugin; import org.elasticsearch.license.LicenseSettings; import org.elasticsearch.persistent.PersistentTasksClusterService; +import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.reindex.ReindexPlugin; import org.elasticsearch.script.IngestScript; @@ -76,6 +80,7 @@ import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; +import org.elasticsearch.xpack.core.ml.utils.MlTaskState; import org.elasticsearch.xpack.ilm.IndexLifecycle; import org.elasticsearch.xpack.ml.LocalStateMachineLearning; import org.elasticsearch.xpack.ml.MachineLearning; @@ -86,6 +91,8 @@ import org.junit.After; import org.junit.Before; +import java.time.Duration; +import java.time.Instant; import java.util.Arrays; import 
java.util.Collection; import java.util.Collections; @@ -104,6 +111,7 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -507,6 +515,31 @@ protected String awaitJobOpenedAndAssigned(String jobId, String queryNode) throw return jobNode.get(); } + protected void assertRecentLastTaskStateChangeTime(String taskId, Duration howRecent, String queryNode) { + ClusterStateRequest csRequest = new ClusterStateRequest().clear().metadata(true); + ClusterStateResponse csResponse = client(queryNode).execute(ClusterStateAction.INSTANCE, csRequest).actionGet(); + PersistentTasksCustomMetadata tasks = csResponse.getState().getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + assertNotNull(tasks); + PersistentTasksCustomMetadata.PersistentTask<?> task = tasks.getTask(taskId); + assertNotNull(task); + assertThat(task.getState(), instanceOf(MlTaskState.class)); + MlTaskState state = (MlTaskState) task.getState(); + assertNotNull(state.getLastStateChangeTime()); + Instant now = Instant.now(); + assertTrue( + "[" + + taskId + + "] has last state change time [" + + state.getLastStateChangeTime() + + "] that is more than [" + + howRecent + + "] behind current time [" + + now + + "]", + state.getLastStateChangeTime().isAfter(now.minus(howRecent)) + ); + } + /** * Sets delayed allocation to 0 to make sure the tests are not delayed */ diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/FileUtilsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/FileUtilsTests.java new file mode 100644 index 0000000000000..34d12a1730100 --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/FileUtilsTests.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.ml.utils; + +import com.google.common.jimfs.Configuration; +import com.google.common.jimfs.Jimfs; + +import org.elasticsearch.core.PathUtilsForTesting; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.nio.file.FileSystem; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.attribute.BasicFileAttributes; +import java.nio.file.attribute.PosixFileAttributes; +import java.nio.file.attribute.PosixFilePermission; +import java.util.Set; + +public class FileUtilsTests extends ESTestCase { + + public void test_recreateTempDirectoryIfNeeded_forWindows() throws IOException { + FileSystem fileSystem = Jimfs.newFileSystem(Configuration.windows()); + PathUtilsForTesting.installMock(fileSystem); + + Path tmpDir = fileSystem.getPath("c:\\tmp\\elasticsearch"); + + assertFalse(Files.exists(tmpDir)); + FileUtils.recreateTempDirectoryIfNeeded(tmpDir); + assertTrue(Files.exists(tmpDir)); + + BasicFileAttributes attributes = Files.readAttributes(tmpDir, BasicFileAttributes.class); + assertTrue(attributes.isDirectory()); + } + + public void test_recreateTempDirectoryIfNeeded_forPosix() throws IOException { + FileSystem fileSystem = Jimfs.newFileSystem(Configuration.unix().toBuilder().setAttributeViews("posix").build()); + PathUtilsForTesting.installMock(fileSystem); + + Path tmpDir = fileSystem.getPath("/tmp/elasticsearch-1234567890"); + + assertFalse(Files.exists(tmpDir)); + FileUtils.recreateTempDirectoryIfNeeded(tmpDir); + assertTrue(Files.exists(tmpDir)); + + PosixFileAttributes attributes = Files.readAttributes(tmpDir, PosixFileAttributes.class); + assertTrue(attributes.isDirectory()); + assertEquals( + attributes.permissions(), + Set.of(PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE, PosixFilePermission.OWNER_EXECUTE) + ); + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/MlProcessorsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/MlProcessorsTests.java index b1b213e2c3f15..b92521dd55810 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/MlProcessorsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/MlProcessorsTests.java @@ -162,6 +162,12 @@ public void testGetTotalMlNodeProcessors() { assertThat(processor.count(), equalTo(15.0)); } + public void testGetTotalMlNodeProcessorsWithZeroProcessors() { + var nodes = DiscoveryNodes.EMPTY_NODES; + var processor = MlProcessors.getTotalMlNodeProcessors(nodes, 1); + assertThat(processor.count(), equalTo(0.0)); + } + public void testGetTotalMlNodeProcessorsWithScale() { var nodes = DiscoveryNodes.builder() .add( @@ -190,7 +196,7 @@ public void testGetTotalMlNodeProcessorsWithScale() { ) .build(); var processor = MlProcessors.getTotalMlNodeProcessors(nodes, 2); - assertThat(processor.count(), equalTo(7.0)); + assertThat(processor.count(), equalTo(8.0)); } public void testGetTotalMlNodeProcessorsWithNull() { @@ -221,6 +227,6 @@ public void testGetTotalMlNodeProcessorsWithNull() { ) .build(); var processor = MlProcessors.getTotalMlNodeProcessors(nodes, null); - assertThat(processor.count(), equalTo(13.0)); + assertThat(processor.count(), equalTo(14.0)); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIteratorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIteratorTests.java index d0efe69e8ac49..4f1308e9295c2 100644 
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIteratorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIteratorTests.java @@ -8,13 +8,13 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.ClearScrollAction; import org.elasticsearch.action.search.ClearScrollResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchScrollAction; import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.action.search.TransportClearScrollAction; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchScrollAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.core.TimeValue; @@ -142,7 +142,7 @@ private void givenClearScrollRequest() { wasScrollCleared = true; listener.onResponse(mock(ClearScrollResponse.class)); return null; - }).when(client).execute(eq(ClearScrollAction.INSTANCE), any(), any()); + }).when(client).execute(eq(TransportClearScrollAction.TYPE), any(), any()); } abstract static class ResponsesMocker { @@ -227,7 +227,7 @@ ResponsesMocker finishMock() { ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; listener.onResponse(responses.get(responseIndex.getAndIncrement())); return null; - }).when(client).execute(eq(SearchScrollAction.INSTANCE), searchScrollRequestCaptor.capture(), any()); + }).when(client).execute(eq(TransportSearchScrollAction.TYPE), searchScrollRequestCaptor.capture(), any()); return this; } @@ -240,7 +240,7 @@ private void givenInitialResponse(String... 
hits) { ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; listener.onResponse(searchResponse); return null; - }).when(client).execute(eq(SearchAction.INSTANCE), searchRequestCaptor.capture(), any()); + }).when(client).execute(eq(TransportSearchAction.TYPE), searchRequestCaptor.capture(), any()); } } @@ -258,7 +258,7 @@ ResponsesMocker finishMock() { ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; listener.onResponse(createSearchResponseWithHits()); return null; - }).when(client).execute(eq(SearchAction.INSTANCE), searchRequestCaptor.capture(), any()); + }).when(client).execute(eq(TransportSearchAction.TYPE), searchRequestCaptor.capture(), any()); return this; } @@ -271,7 +271,7 @@ ResponsesMocker finishMock() { ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; listener.onResponse(responses.get(responseIndex.getAndIncrement())); return null; - }).when(client).execute(eq(SearchAction.INSTANCE), searchRequestCaptor.capture(), any()); + }).when(client).execute(eq(TransportSearchAction.TYPE), searchRequestCaptor.capture(), any()); return this; } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterServiceTests.java index ec07d0424c841..9e2f14aaabd84 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterServiceTests.java @@ -16,10 +16,10 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.cluster.routing.OperationRouting; @@ -131,45 +131,45 @@ public void setUpTests() { } public void testSearchWithRetries_ImmediateSuccess() { - doAnswer(withResponse(SEARCH_RESPONSE_SUCCESS)).when(client).execute(eq(SearchAction.INSTANCE), eq(SEARCH_REQUEST), any()); + doAnswer(withResponse(SEARCH_RESPONSE_SUCCESS)).when(client).execute(eq(TransportSearchAction.TYPE), eq(SEARCH_REQUEST), any()); List messages = new ArrayList<>(); SearchResponse searchResponse = resultsPersisterService.searchWithRetry(SEARCH_REQUEST, JOB_ID, () -> true, messages::add); assertThat(searchResponse, is(SEARCH_RESPONSE_SUCCESS)); assertThat(messages, is(empty())); - verify(client).execute(eq(SearchAction.INSTANCE), eq(SEARCH_REQUEST), any()); + verify(client).execute(eq(TransportSearchAction.TYPE), eq(SEARCH_REQUEST), any()); } public void testSearchWithRetries_SuccessAfterRetry() { doAnswerWithResponses(SEARCH_RESPONSE_FAILURE, SEARCH_RESPONSE_SUCCESS).when(client) - .execute(eq(SearchAction.INSTANCE), eq(SEARCH_REQUEST), any()); + .execute(eq(TransportSearchAction.TYPE), eq(SEARCH_REQUEST), any()); List messages = new ArrayList<>(); SearchResponse searchResponse = resultsPersisterService.searchWithRetry(SEARCH_REQUEST, JOB_ID, () -> true, messages::add); assertThat(searchResponse, 
is(SEARCH_RESPONSE_SUCCESS)); assertThat(messages, hasSize(1)); - verify(client, times(2)).execute(eq(SearchAction.INSTANCE), eq(SEARCH_REQUEST), any()); + verify(client, times(2)).execute(eq(TransportSearchAction.TYPE), eq(SEARCH_REQUEST), any()); } public void testSearchWithRetries_SuccessAfterRetryDueToException() { doAnswer(withFailure(new IndexPrimaryShardNotAllocatedException(new Index("my-index", "UUID")))).doAnswer( withResponse(SEARCH_RESPONSE_SUCCESS) - ).when(client).execute(eq(SearchAction.INSTANCE), eq(SEARCH_REQUEST), any()); + ).when(client).execute(eq(TransportSearchAction.TYPE), eq(SEARCH_REQUEST), any()); List messages = new ArrayList<>(); SearchResponse searchResponse = resultsPersisterService.searchWithRetry(SEARCH_REQUEST, JOB_ID, () -> true, messages::add); assertThat(searchResponse, is(SEARCH_RESPONSE_SUCCESS)); assertThat(messages, hasSize(1)); - verify(client, times(2)).execute(eq(SearchAction.INSTANCE), eq(SEARCH_REQUEST), any()); + verify(client, times(2)).execute(eq(TransportSearchAction.TYPE), eq(SEARCH_REQUEST), any()); } private void testSearchWithRetries_FailureAfterTooManyRetries(int maxFailureRetries) { resultsPersisterService.setMaxFailureRetries(maxFailureRetries); - doAnswer(withResponse(SEARCH_RESPONSE_FAILURE)).when(client).execute(eq(SearchAction.INSTANCE), eq(SEARCH_REQUEST), any()); + doAnswer(withResponse(SEARCH_RESPONSE_FAILURE)).when(client).execute(eq(TransportSearchAction.TYPE), eq(SEARCH_REQUEST), any()); List messages = new ArrayList<>(); ElasticsearchException e = expectThrows( @@ -179,7 +179,7 @@ private void testSearchWithRetries_FailureAfterTooManyRetries(int maxFailureRetr assertThat(e.getMessage(), containsString("search failed with status")); assertThat(messages, hasSize(maxFailureRetries)); - verify(client, times(maxFailureRetries + 1)).execute(eq(SearchAction.INSTANCE), eq(SEARCH_REQUEST), any()); + verify(client, times(maxFailureRetries + 1)).execute(eq(TransportSearchAction.TYPE), eq(SEARCH_REQUEST), any()); } public void testSearchWithRetries_FailureAfterTooManyRetries_0() { @@ -195,7 +195,7 @@ public void testSearchWithRetries_FailureAfterTooManyRetries_10() { } public void testSearchWithRetries_Failure_ShouldNotRetryFromTheBeginning() { - doAnswer(withResponse(SEARCH_RESPONSE_FAILURE)).when(client).execute(eq(SearchAction.INSTANCE), eq(SEARCH_REQUEST), any()); + doAnswer(withResponse(SEARCH_RESPONSE_FAILURE)).when(client).execute(eq(TransportSearchAction.TYPE), eq(SEARCH_REQUEST), any()); List messages = new ArrayList<>(); ElasticsearchException e = expectThrows( @@ -205,14 +205,14 @@ public void testSearchWithRetries_Failure_ShouldNotRetryFromTheBeginning() { assertThat(e.getMessage(), containsString("search failed with status SERVICE_UNAVAILABLE")); assertThat(messages, empty()); - verify(client, times(1)).execute(eq(SearchAction.INSTANCE), eq(SEARCH_REQUEST), any()); + verify(client, times(1)).execute(eq(TransportSearchAction.TYPE), eq(SEARCH_REQUEST), any()); } public void testSearchWithRetries_Failure_ShouldNotRetryAfterRandomNumberOfRetries() { int maxFailureRetries = 10; resultsPersisterService.setMaxFailureRetries(maxFailureRetries); - doAnswer(withResponse(SEARCH_RESPONSE_FAILURE)).when(client).execute(eq(SearchAction.INSTANCE), eq(SEARCH_REQUEST), any()); + doAnswer(withResponse(SEARCH_RESPONSE_FAILURE)).when(client).execute(eq(TransportSearchAction.TYPE), eq(SEARCH_REQUEST), any()); int maxRetries = randomIntBetween(1, maxFailureRetries); List messages = new ArrayList<>(); @@ -223,14 +223,14 @@ public void 
testSearchWithRetries_Failure_ShouldNotRetryAfterRandomNumberOfRetri assertThat(e.getMessage(), containsString("search failed with status SERVICE_UNAVAILABLE")); assertThat(messages, hasSize(maxRetries)); - verify(client, times(maxRetries + 1)).execute(eq(SearchAction.INSTANCE), eq(SEARCH_REQUEST), any()); + verify(client, times(maxRetries + 1)).execute(eq(TransportSearchAction.TYPE), eq(SEARCH_REQUEST), any()); } public void testSearchWithRetries_FailureOnIrrecoverableError() { resultsPersisterService.setMaxFailureRetries(5); doAnswer(withFailure(new ElasticsearchStatusException("bad search request", RestStatus.BAD_REQUEST))).when(client) - .execute(eq(SearchAction.INSTANCE), eq(SEARCH_REQUEST), any()); + .execute(eq(TransportSearchAction.TYPE), eq(SEARCH_REQUEST), any()); ElasticsearchException e = expectThrows( ElasticsearchException.class, @@ -238,7 +238,7 @@ public void testSearchWithRetries_FailureOnIrrecoverableError() { ); assertThat(e.getMessage(), containsString("bad search request")); - verify(client, times(1)).execute(eq(SearchAction.INSTANCE), eq(SEARCH_REQUEST), any()); + verify(client, times(1)).execute(eq(TransportSearchAction.TYPE), eq(SEARCH_REQUEST), any()); } private static Supplier shouldRetryUntil(int maxRetries) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/vectors/TextEmbeddingQueryVectorBuilderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/vectors/TextEmbeddingQueryVectorBuilderTests.java index 2c83777487685..8506be491f7e1 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/vectors/TextEmbeddingQueryVectorBuilderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/vectors/TextEmbeddingQueryVectorBuilderTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.test.AbstractQueryVectorBuilderTestCase; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.ml.action.InferModelAction; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelPrefixStrings; import org.elasticsearch.xpack.core.ml.inference.results.TextEmbeddingResults; import org.elasticsearch.xpack.ml.MachineLearning; @@ -39,6 +40,7 @@ protected void doAssertClientRequest(ActionRequest request, TextEmbeddingQueryVe assertEquals(builder.getModelText(), inferRequest.getTextInput().get(0)); assertEquals(builder.getModelId(), inferRequest.getId()); assertEquals(InferModelAction.Request.DEFAULT_TIMEOUT_FOR_API, inferRequest.getInferenceTimeout()); + assertEquals(TrainedModelPrefixStrings.PrefixType.SEARCH, inferRequest.getPrefixType()); } public ActionResponse createResponse(float[] array, TextEmbeddingQueryVectorBuilder builder) { diff --git a/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java b/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java index 36246902e5597..5250a1f764e5c 100644 --- a/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java +++ b/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -37,12 +36,7 @@ import org.elasticsearch.test.http.MockRequest; import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.monitoring.LocalStateMonitoring; @@ -519,41 +513,6 @@ private void assertMonitorTemplates( } } - private void assertMonitorVersionResource( - final MockWebServer webServer, - final boolean alreadyExists, - final String resourcePrefix, - final List<Tuple<String, String>> resources, - @Nullable final Map<String, String> customHeaders, - @Nullable final String basePath - ) throws Exception { - final String pathPrefix = basePathToAssertablePrefix(basePath); - - for (Tuple<String, String> resource : resources) { - final MockRequest getRequest = webServer.takeRequest(); - - assertThat(getRequest.getMethod(), equalTo("GET")); - assertThat(getRequest.getUri().getPath(), equalTo(pathPrefix + resourcePrefix + resource.v1())); - assertMonitorVersionQueryString(getRequest.getUri().getQuery(), Collections.emptyMap()); - assertHeaders(getRequest, customHeaders); - - if (alreadyExists == false) { - final MockRequest putRequest = webServer.takeRequest(); - - assertThat(putRequest.getMethod(), equalTo("PUT")); - assertThat(putRequest.getUri().getPath(), equalTo(pathPrefix + resourcePrefix + resource.v1())); - Map<String, String> parameters = Collections.emptyMap(); - assertMonitorVersionQueryString(putRequest.getUri().getQuery(), parameters); - if (resourcePrefix.startsWith("/_template")) { - assertThat(putRequest.getBody(), equalTo(getExternalTemplateRepresentation(resource.v2()))); - } else { - assertThat(putRequest.getBody(), equalTo(resource.v2())); - } - assertHeaders(putRequest, customHeaders); - } - } - } - private void assertMonitorVersionQueryString(String query, final Map<String, String> parameters) { Map<String, String> expectedQueryStringMap = new HashMap<>(); RestUtils.decodeQueryString(query, 0, expectedQueryStringMap); @@ -941,14 +900,4 @@ private MockWebServer createMockWebServer() throws IOException { return server; } - private String getExternalTemplateRepresentation(String internalRepresentation) throws IOException { - try ( - XContentParser parser = XContentFactory.xContent(XContentType.JSON) .createParser(XContentParserConfiguration.EMPTY, internalRepresentation) ) { - XContentBuilder builder = JsonXContent.contentBuilder(); - IndexTemplateMetadata.Builder.removeType(IndexTemplateMetadata.Builder.fromXContent(parser, ""), builder); - return BytesReference.bytes(builder).utf8ToString(); - } - } } diff --git a/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java b/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java index dc55d3ccd6a10..506cd016cf8de 100644 --- a/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java +++ b/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java @@ -10,7 +10,6 @@ import
org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.analysis.common.CommonAnalysisPlugin; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; @@ -59,11 +58,12 @@ import java.util.Map; import java.util.Optional; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import static org.elasticsearch.common.xcontent.support.XContentMapValues.extractValue; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertCheckedResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.threadpool.ThreadPool.Names.WRITE; import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; import static org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils.TEMPLATE_VERSION; @@ -145,40 +145,43 @@ public void testMonitoringBulk() throws Exception { ensureGreen(monitoringIndex); assertThat(client().admin().indices().prepareRefresh(monitoringIndex).get().getStatus(), is(RestStatus.OK)); - final SearchResponse response = client().prepareSearch(".monitoring-" + system.getSystem() + "-" + TEMPLATE_VERSION + "-*") - .get(); + assertResponse(client().prepareSearch(".monitoring-" + system.getSystem() + "-" + TEMPLATE_VERSION + "-*"), response -> { + // exactly 3 results are expected + assertThat("No monitoring documents yet", response.getHits().getTotalHits().value, equalTo(3L)); - // exactly 3 results are expected - assertThat("No monitoring documents yet", response.getHits().getTotalHits().value, equalTo(3L)); + final List> sources = Arrays.stream(response.getHits().getHits()) + .map(SearchHit::getSourceAsMap) + .collect(Collectors.toList()); - final List> sources = Arrays.stream(response.getHits().getHits()) - .map(SearchHit::getSourceAsMap) - .collect(Collectors.toList()); - - // find distinct _source.timestamp fields - assertThat(sources.stream().map(source -> source.get("timestamp")).distinct().count(), is(1L)); - // find distinct _source.source_node fields (which is a map) - assertThat(sources.stream().map(source -> source.get("source_node")).distinct().count(), is(1L)); + // find distinct _source.timestamp fields + assertThat(sources.stream().map(source -> source.get("timestamp")).distinct().count(), is(1L)); + // find distinct _source.source_node fields (which is a map) + assertThat(sources.stream().map(source -> source.get("source_node")).distinct().count(), is(1L)); + }); }); - final SearchResponse response = client().prepareSearch(monitoringIndex).get(); - final SearchHits hits = response.getHits(); + assertCheckedResponse(client().prepareSearch(monitoringIndex), response -> { + final SearchHits hits = response.getHits(); - assertThat(response.getHits().getTotalHits().value, equalTo(3L)); - assertThat( - "Monitoring documents must have the same timestamp", - Arrays.stream(hits.getHits()).map(hit -> extractValue("timestamp", hit.getSourceAsMap())).distinct().count(), - equalTo(1L) - ); - assertThat( - "Monitoring documents must have the same source_node timestamp", - Arrays.stream(hits.getHits()).map(hit -> extractValue("source_node.timestamp", 
hit.getSourceAsMap())).distinct().count(), - equalTo(1L) - ); + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat( + "Monitoring documents must have the same timestamp", + Arrays.stream(hits.getHits()).map(hit -> extractValue("timestamp", hit.getSourceAsMap())).distinct().count(), + equalTo(1L) + ); + assertThat( + "Monitoring documents must have the same source_node timestamp", + Arrays.stream(hits.getHits()) + .map(hit -> extractValue("source_node.timestamp", hit.getSourceAsMap())) + .distinct() + .count(), + equalTo(1L) + ); - for (final SearchHit hit : hits.getHits()) { - assertMonitoringDoc(toMap(hit), system, interval); - } + for (final SearchHit hit : hits.getHits()) { + assertMonitoringDoc(toMap(hit), system, interval); + } + }); }); } @@ -193,8 +196,7 @@ public void testMonitoringService() throws Exception { final String indexName = createAPMIndex ? "apm-2017.11.06" : "books"; assertThat( - client().prepareIndex(indexName) - .setId("0") + prepareIndex(indexName).setId("0") .setRefreshPolicy("true") .setSource("{\"field\":\"value\"}", XContentType.JSON) .get() @@ -206,30 +208,27 @@ public void testMonitoringService() throws Exception { assertAcked(clusterAdmin().prepareUpdateSettings().setTransientSettings(settings)); whenExportersAreReady(() -> { - final AtomicReference searchResponse = new AtomicReference<>(); - assertBusy(() -> { - final SearchResponse response = client().prepareSearch(".monitoring-es-*") - .setCollapse(new CollapseBuilder("type")) - .addSort("timestamp", SortOrder.DESC) - .get(); - - assertThat(response.status(), is(RestStatus.OK)); - assertThat( - "Expecting a minimum number of 6 docs, one per collector", - response.getHits().getHits().length, - greaterThanOrEqualTo(6) + assertCheckedResponse( + client().prepareSearch(".monitoring-es-*") + .setCollapse(new CollapseBuilder("type")) + .addSort("timestamp", SortOrder.DESC), + response -> { + assertThat(response.status(), is(RestStatus.OK)); + assertThat( + "Expecting a minimum number of 6 docs, one per collector", + response.getHits().getHits().length, + greaterThanOrEqualTo(6) + ); + + for (final SearchHit hit : response.getHits()) { + final Map searchHit = toMap(hit); + assertMonitoringDoc(searchHit, MonitoredSystem.ES, MonitoringService.MIN_INTERVAL); + } + } ); - - searchResponse.set(response); }); - - for (final SearchHit hit : searchResponse.get().getHits()) { - final Map searchHit = toMap(hit); - assertMonitoringDoc(searchHit, MonitoredSystem.ES, MonitoringService.MIN_INTERVAL); - } }); - } /** diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java index b054095499a30..442cd2479f87c 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java @@ -77,7 +77,7 @@ public class MonitoringTemplateRegistry extends IndexTemplateRegistry { * writes monitoring data in ECS format as of 8.0. These templates define the ECS schema as well as alias fields for the old monitoring * mappings that point to the corresponding ECS fields. 
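The hunk just below bumps STACK_MONITORING_REGISTRY_VERSION from `8_00_00_99 + 9` to `8_00_00_99 + 11`. As I read the template-registry pattern, this integer gets stamped into the installed index templates, and a registry reinstalls a template whenever its compiled-in version is newer than the one recorded in the cluster, so the bump is what forces the updated monitoring templates to roll out; the underscores are only Java numeric-literal digit separators. A minimal sketch under that assumption (hypothetical class, not the actual IndexTemplateRegistry logic):

```java
// Hypothetical sketch of why bumping the registry constant rolls out new templates.
public class RegistryVersionDemo {
    public static void main(String[] args) {
        int installed = 8_00_00_99 + 9;   // version recorded in already-installed templates: 8000108
        int compiledIn = 8_00_00_99 + 11; // version shipped with this release: 8000110
        // A registry would reinstall a template when the stored version is older.
        if (installed < compiledIn) {
            System.out.println("reinstalling monitoring templates at version " + compiledIn);
        }
    }
}
```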
*/ - public static final int STACK_MONITORING_REGISTRY_VERSION = 8_00_00_99 + 9; + public static final int STACK_MONITORING_REGISTRY_VERSION = 8_00_00_99 + 11; private static final String STACK_MONITORING_REGISTRY_VERSION_VARIABLE = "xpack.stack.monitoring.template.release.version"; private static final String STACK_TEMPLATE_VERSION = "8"; private static final String STACK_TEMPLATE_VERSION_VARIABLE = "xpack.stack.monitoring.template.version"; diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsCollector.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsCollector.java index ba6b6d6fc20b7..e62f7113acbf4 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsCollector.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsCollector.java @@ -8,6 +8,7 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.bootstrap.BootstrapInfo; @@ -67,13 +68,14 @@ protected boolean shouldCollect(final boolean isElectedMaster) { @Override protected Collection doCollect(final MonitoringDoc.Node node, final long interval, final ClusterState clusterState) { NodesStatsRequest request = new NodesStatsRequest("_local"); + request.setIncludeShardsStats(false); request.indices(FLAGS); request.addMetrics( - NodesStatsRequest.Metric.OS.metricName(), - NodesStatsRequest.Metric.JVM.metricName(), - NodesStatsRequest.Metric.PROCESS.metricName(), - NodesStatsRequest.Metric.THREAD_POOL.metricName(), - NodesStatsRequest.Metric.FS.metricName() + NodesStatsRequestParameters.Metric.OS.metricName(), + NodesStatsRequestParameters.Metric.JVM.metricName(), + NodesStatsRequestParameters.Metric.PROCESS.metricName(), + NodesStatsRequestParameters.Metric.THREAD_POOL.metricName(), + NodesStatsRequestParameters.Metric.FS.metricName() ); request.timeout(getCollectionTimeout()); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java index d37f4669484a0..e2d4d173af013 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java @@ -27,9 +27,11 @@ import java.util.Collections; import java.util.Map; import java.util.Set; -import java.util.stream.Collectors; +import static java.util.stream.Collectors.joining; +import static org.elasticsearch.client.RestClient.IGNORE_RESPONSE_CODES_PARAM; import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.rest.RestStatus.NOT_FOUND; /** * {@code PublishableHttpResource} represents an {@link HttpResource} that is a single file or object that can be checked and @@ -254,7 +256,7 @@ protected void checkForResource( // avoid exists and DNE parameters from being an exception by default final Set expectedResponseCodes = Sets.union(exists, 
doesNotExist); - request.addParameter("ignore", expectedResponseCodes.stream().map(i -> i.toString()).collect(Collectors.joining(","))); + request.addParameter(IGNORE_RESPONSE_CODES_PARAM, expectedResponseCodes.stream().map(Object::toString).collect(joining(","))); client.performRequestAsync(request, new ResponseListener() { @@ -436,9 +438,9 @@ protected void deleteResource( final Request request = new Request("DELETE", resourceBasePath + "/" + resourceName); addDefaultParameters(request); - if (false == defaultParameters.containsKey("ignore")) { + if (false == defaultParameters.containsKey(IGNORE_RESPONSE_CODES_PARAM)) { // avoid 404 being an exception by default - request.addParameter("ignore", Integer.toString(RestStatus.NOT_FOUND.getStatus())); + request.addParameter(IGNORE_RESPONSE_CODES_PARAM, Integer.toString(NOT_FOUND.getStatus())); } client.performRequestAsync(request, new ResponseListener() { diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java index 02c9a8e2f210c..44859b73ffb2e 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.monitoring; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.Aggregation; @@ -20,6 +19,7 @@ import static org.elasticsearch.test.NodeRoles.noRoles; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; @@ -78,24 +78,27 @@ public void testMultipleNodes() throws Exception { flush(ALL_MONITORING_INDICES); refresh(); - SearchResponse response = prepareSearch(ALL_MONITORING_INDICES).setQuery( - QueryBuilders.termQuery("type", NodeStatsMonitoringDoc.TYPE) - ).setSize(0).addAggregation(AggregationBuilders.terms("nodes_ids").field("node_stats.node_id")).get(); - - for (Aggregation aggregation : response.getAggregations()) { - assertThat(aggregation, instanceOf(StringTerms.class)); - assertThat(((StringTerms) aggregation).getBuckets().size(), equalTo(nbNodes)); - - for (String nodeName : internalCluster().getNodeNames()) { - StringTerms.Bucket bucket = ((StringTerms) aggregation).getBucketByKey( - internalCluster().clusterService(nodeName).localNode().getId() - ); - // At least 1 doc must exist per node, but it can be more than 1 - // because the first node may have already collected many node stats documents - // whereas the last node just started to collect node stats. 
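This MultiNodesStatsTests hunk, like several others in this patch, replaces a bare `SearchResponse response = ....get()` with the `ElasticsearchAssertions.assertResponse(builder, response -> ...)` callback style. The likely motivation is that `SearchResponse` is ref-counted, so a helper of this shape can guarantee the response is released even when an assertion throws. A minimal stand-in for the pattern under that assumption (hypothetical helper, not the actual ElasticsearchAssertions code):

```java
import java.util.function.Consumer;
import java.util.function.Supplier;

import org.elasticsearch.core.RefCounted;

final class AssertResponseSketch {
    // Execute the request, hand the response to the assertion block, and always
    // release it afterwards, so a failing assertion cannot leak the response.
    static <T extends RefCounted> void assertResponse(Supplier<T> execute, Consumer<T> assertions) {
        T response = execute.get();
        try {
            assertions.accept(response);
        } finally {
            response.decRef();
        }
    }
}
```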
- assertThat(bucket.getDocCount(), greaterThanOrEqualTo(1L)); + assertResponse( + prepareSearch(ALL_MONITORING_INDICES).setQuery(QueryBuilders.termQuery("type", NodeStatsMonitoringDoc.TYPE)) + .setSize(0) + .addAggregation(AggregationBuilders.terms("nodes_ids").field("node_stats.node_id")), + response -> { + for (Aggregation aggregation : response.getAggregations()) { + assertThat(aggregation, instanceOf(StringTerms.class)); + assertThat(((StringTerms) aggregation).getBuckets().size(), equalTo(nbNodes)); + + for (String nodeName : internalCluster().getNodeNames()) { + StringTerms.Bucket bucket = ((StringTerms) aggregation).getBucketByKey( + internalCluster().clusterService(nodeName).localNode().getId() + ); + // At least 1 doc must exist per node, but it can be more than 1 + // because the first node may have already collected many node stats documents + // whereas the last node just started to collect node stats. + assertThat(bucket.getDocCount(), greaterThanOrEqualTo(1L)); + } + } } - } + ); }); } } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/AbstractPublishableHttpResourceTestCase.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/AbstractPublishableHttpResourceTestCase.java index 4878289cae8d6..b72891708e780 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/AbstractPublishableHttpResourceTestCase.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/AbstractPublishableHttpResourceTestCase.java @@ -30,8 +30,9 @@ import java.util.Map; import java.util.Set; import java.util.function.Predicate; -import java.util.stream.Collectors; +import static java.util.stream.Collectors.joining; +import static org.elasticsearch.client.RestClient.IGNORE_RESPONSE_CODES_PARAM; import static org.elasticsearch.xpack.monitoring.exporter.http.AsyncHttpResourceHelper.mockBooleanActionListener; import static org.elasticsearch.xpack.monitoring.exporter.http.AsyncHttpResourceHelper.mockPublishResultActionListener; import static org.elasticsearch.xpack.monitoring.exporter.http.AsyncHttpResourceHelper.whenPerformRequestAsyncWith; @@ -443,7 +444,7 @@ protected Map getParameters( final Set statusCodes = Sets.union(exists, doesNotExist); final Map parametersWithIgnore = new HashMap<>(parameters); - parametersWithIgnore.putIfAbsent("ignore", statusCodes.stream().map(i -> i.toString()).collect(Collectors.joining(","))); + parametersWithIgnore.putIfAbsent(IGNORE_RESPONSE_CODES_PARAM, statusCodes.stream().map(Object::toString).collect(joining(","))); return parametersWithIgnore; } @@ -451,7 +452,7 @@ protected Map getParameters( protected Map deleteParameters(final Map parameters) { final Map parametersWithIgnore = new HashMap<>(parameters); - parametersWithIgnore.putIfAbsent("ignore", "404"); + parametersWithIgnore.putIfAbsent(IGNORE_RESPONSE_CODES_PARAM, Integer.toString(RestStatus.NOT_FOUND.getStatus())); return parametersWithIgnore; } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterResourceTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterResourceTests.java index 556f5142cf1a5..e8d47bffa0ea2 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterResourceTests.java +++ 
b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterResourceTests.java @@ -695,13 +695,6 @@ private void verifyGetTemplates(final int called) { ); } - private void verifyPutTemplates(final int called) { - verify(client, times(called)).performRequestAsync( - argThat(new RequestMatcher(is("PUT"), startsWith("/_template/"))::matches), - any(ResponseListener.class) - ); - } - private void verifyWatcherCheck() { verify(client).performRequestAsync(argThat(new RequestMatcher(is("GET"), is("/_xpack"))::matches), any(ResponseListener.class)); } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java index 91dff9abcc5e2..a732e80e18f37 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -43,6 +42,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.max; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xpack.core.monitoring.MonitoredSystem.BEATS; import static org.elasticsearch.xpack.core.monitoring.MonitoredSystem.KIBANA; import static org.elasticsearch.xpack.core.monitoring.MonitoredSystem.LOGSTASH; @@ -73,8 +73,7 @@ public void testExport() throws Exception { // indexing some random documents IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5]; for (int i = 0; i < indexRequestBuilders.length; i++) { - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource("title", "This is a random document"); } indexRandom(true, indexRequestBuilders); @@ -111,8 +110,10 @@ public void testExport() throws Exception { assertThat(indexExists(".monitoring-*"), is(true)); ensureYellowAndNoInitializingShards(".monitoring-*"); - SearchResponse response = prepareSearch(".monitoring-*").get(); - assertThat((long) nbDocs, lessThanOrEqualTo(response.getHits().getTotalHits().value)); + assertResponse( + prepareSearch(".monitoring-*"), + response -> assertThat((long) nbDocs, lessThanOrEqualTo(response.getHits().getTotalHits().value)) + ); }); checkMonitoringTemplates(); @@ -169,25 +170,25 @@ public void testExport() throws Exception { greaterThan(0L) ); - SearchResponse response = prepareSearch(".monitoring-es-*").setSize(0) - .setQuery(QueryBuilders.termQuery("type", "node_stats")) - .addAggregation(terms("agg_nodes_ids").field("node_stats.node_id")) - .get(); - - Terms aggregation = response.getAggregations().get("agg_nodes_ids"); - assertEquals( - "Aggregation on node_id must return a bucket per node involved in test", - numNodes, - 
aggregation.getBuckets().size() + assertResponse( + prepareSearch(".monitoring-es-*").setSize(0) + .setQuery(QueryBuilders.termQuery("type", "node_stats")) + .addAggregation(terms("agg_nodes_ids").field("node_stats.node_id")), + response -> { + Terms aggregation = response.getAggregations().get("agg_nodes_ids"); + assertEquals( + "Aggregation on node_id must return a bucket per node involved in test", + numNodes, + aggregation.getBuckets().size() + ); + for (String nodeName : internalCluster().getNodeNames()) { + String nodeId = internalCluster().clusterService(nodeName).localNode().getId(); + Terms.Bucket bucket = aggregation.getBucketByKey(nodeId); + assertTrue("No bucket found for node id [" + nodeId + "]", bucket != null); + assertTrue(bucket.getDocCount() >= 1L); + } + } ); - - for (String nodeName : internalCluster().getNodeNames()) { - String nodeId = internalCluster().clusterService(nodeName).localNode().getId(); - Terms.Bucket bucket = aggregation.getBucketByKey(nodeId); - assertTrue("No bucket found for node id [" + nodeId + "]", bucket != null); - assertTrue(bucket.getDocCount() >= 1L); - } - }, 30L, TimeUnit.SECONDS); checkMonitoringTemplates(); @@ -206,24 +207,27 @@ public void testExport() throws Exception { ensureYellowAndNoInitializingShards(".monitoring-*"); refresh(".monitoring-es-*"); - SearchResponse response = prepareSearch(".monitoring-es-*").setSize(0) - .setQuery(QueryBuilders.termQuery("type", "node_stats")) - .addAggregation( - terms("agg_nodes_ids").field("node_stats.node_id").subAggregation(max("agg_last_time_collected").field("timestamp")) - ) - .get(); - - Terms aggregation = response.getAggregations().get("agg_nodes_ids"); - for (String nodeName : internalCluster().getNodeNames()) { - String nodeId = internalCluster().clusterService(nodeName).localNode().getId(); - Terms.Bucket bucket = aggregation.getBucketByKey(nodeId); - assertTrue("No bucket found for node id [" + nodeId + "]", bucket != null); - assertTrue(bucket.getDocCount() >= 1L); - - Max subAggregation = bucket.getAggregations().get("agg_last_time_collected"); - ZonedDateTime lastCollection = Instant.ofEpochMilli(Math.round(subAggregation.value())).atZone(ZoneOffset.UTC); - assertTrue(lastCollection.plusSeconds(elapsedInSeconds).isBefore(ZonedDateTime.now(ZoneOffset.UTC))); - } + assertResponse( + prepareSearch(".monitoring-es-*").setSize(0) + .setQuery(QueryBuilders.termQuery("type", "node_stats")) + .addAggregation( + terms("agg_nodes_ids").field("node_stats.node_id") + .subAggregation(max("agg_last_time_collected").field("timestamp")) + ), + response -> { + Terms aggregation = response.getAggregations().get("agg_nodes_ids"); + for (String nodeName : internalCluster().getNodeNames()) { + String nodeId = internalCluster().clusterService(nodeName).localNode().getId(); + Terms.Bucket bucket = aggregation.getBucketByKey(nodeId); + assertTrue("No bucket found for node id [" + nodeId + "]", bucket != null); + assertTrue(bucket.getDocCount() >= 1L); + + Max subAggregation = bucket.getAggregations().get("agg_last_time_collected"); + ZonedDateTime lastCollection = Instant.ofEpochMilli(Math.round(subAggregation.value())).atZone(ZoneOffset.UTC); + assertTrue(lastCollection.plusSeconds(elapsedInSeconds).isBefore(ZonedDateTime.now(ZoneOffset.UTC))); + } + } + ); } else { assertTrue(ZonedDateTime.now(ZoneOffset.UTC).isAfter(startTime.plusSeconds(elapsedInSeconds))); } @@ -264,43 +268,43 @@ private void checkMonitoringDocs() { DateFormatter dateParser = DateFormatter.forPattern("strict_date_time"); DateFormatter 
dateFormatter = DateFormatter.forPattern(customTimeFormat).withZone(ZoneOffset.UTC); - SearchResponse searchResponse = prepareSearch(".monitoring-*").setSize(100).get(); - assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)); - - for (SearchHit hit : searchResponse.getHits().getHits()) { - final Map source = hit.getSourceAsMap(); + assertResponse(prepareSearch(".monitoring-*").setSize(100), rsp -> { + assertThat(rsp.getHits().getTotalHits().value, greaterThan(0L)); + for (SearchHit hit : rsp.getHits().getHits()) { + final Map source = hit.getSourceAsMap(); - assertTrue(source != null && source.isEmpty() == false); + assertTrue(source != null && source.isEmpty() == false); - final String timestamp = (String) source.get("timestamp"); - final String type = (String) source.get("type"); + final String timestamp = (String) source.get("timestamp"); + final String type = (String) source.get("type"); - assertTrue("document is missing cluster_uuid field", Strings.hasText((String) source.get("cluster_uuid"))); - assertTrue("document is missing timestamp field", Strings.hasText(timestamp)); - assertTrue("document is missing type field", Strings.hasText(type)); + assertTrue("document is missing cluster_uuid field", Strings.hasText((String) source.get("cluster_uuid"))); + assertTrue("document is missing timestamp field", Strings.hasText(timestamp)); + assertTrue("document is missing type field", Strings.hasText(type)); - @SuppressWarnings("unchecked") - Map docSource = (Map) source.get("doc"); + @SuppressWarnings("unchecked") + Map docSource = (Map) source.get("doc"); - MonitoredSystem expectedSystem; - if (docSource == null) { - // This is a document indexed by the Monitoring service - expectedSystem = MonitoredSystem.ES; - } else { - // This is a document indexed through the Monitoring Bulk API - expectedSystem = MonitoredSystem.fromSystem((String) docSource.get("expected_system")); - } + MonitoredSystem expectedSystem; + if (docSource == null) { + // This is a document indexed by the Monitoring service + expectedSystem = MonitoredSystem.ES; + } else { + // This is a document indexed through the Monitoring Bulk API + expectedSystem = MonitoredSystem.fromSystem((String) docSource.get("expected_system")); + } - String dateTime = dateFormatter.format(dateParser.parse(timestamp)); - final String expectedIndex = ".monitoring-" + expectedSystem.getSystem() + "-" + TEMPLATE_VERSION + "-" + dateTime; - assertEquals("Expected " + expectedIndex + " but got " + hit.getIndex(), expectedIndex, hit.getIndex()); + String dateTime = dateFormatter.format(dateParser.parse(timestamp)); + final String expectedIndex = ".monitoring-" + expectedSystem.getSystem() + "-" + TEMPLATE_VERSION + "-" + dateTime; + assertEquals("Expected " + expectedIndex + " but got " + hit.getIndex(), expectedIndex, hit.getIndex()); - @SuppressWarnings("unchecked") - Map sourceNode = (Map) source.get("source_node"); - if ("shards".equals(type) == false) { - assertNotNull("document is missing source_node field", sourceNode); + @SuppressWarnings("unchecked") + Map sourceNode = (Map) source.get("source_node"); + if ("shards".equals(type) == false) { + assertNotNull("document is missing source_node field", sourceNode); + } } - } + }); } public static MonitoringBulkDoc createMonitoringBulkDoc() throws IOException { diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java 
b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java index 71590943e4bf6..901150d0bca7c 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.monitoring.exporter.local; import org.elasticsearch.Version; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.common.bytes.BytesReference; @@ -36,6 +35,7 @@ import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; @@ -289,16 +289,16 @@ private void assertNoWatchesExist() { String clusterUUID = clusterService().state().getMetadata().clusterUUID(); SearchSourceBuilder searchSource = SearchSourceBuilder.searchSource() .query(QueryBuilders.matchQuery("metadata.xpack.cluster_uuid", clusterUUID)); - SearchResponse searchResponse = prepareSearch(".watches").setSource(searchSource).get(); - if (searchResponse.getHits().getTotalHits().value > 0) { - List invalidWatches = new ArrayList<>(); - for (SearchHit hit : searchResponse.getHits().getHits()) { - invalidWatches.add(ObjectPath.eval("metadata.xpack.watch", hit.getSourceAsMap())); + + assertResponse(prepareSearch(".watches").setSource(searchSource), response -> { + if (response.getHits().getTotalHits().value > 0) { + List invalidWatches = new ArrayList<>(); + for (SearchHit hit : response.getHits().getHits()) { + invalidWatches.add(ObjectPath.eval("metadata.xpack.watch", hit.getSourceAsMap())); + } + fail("Found [" + response.getHits().getTotalHits().value + "] invalid watches when none were expected: " + invalidWatches); } - fail( - "Found [" + searchResponse.getHits().getTotalHits().value + "] invalid watches when none were expected: " + invalidWatches - ); - } + }); } private void assertResourcesExist() throws Exception { diff --git a/x-pack/plugin/profiling/build.gradle b/x-pack/plugin/profiling/build.gradle index 30bcb5a8756dc..8275bfe633c91 100644 --- a/x-pack/plugin/profiling/build.gradle +++ b/x-pack/plugin/profiling/build.gradle @@ -17,6 +17,7 @@ esplugin { dependencies { compileOnly project(path: xpackModule('core')) + compileOnly project(path: xpackModule('mapper-counted-keyword')) testImplementation(testArtifact(project(xpackModule('core')))) testImplementation project(path: xpackModule('mapper-unsigned-long')) testImplementation project(path: xpackModule('mapper-version')) diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/CancellationIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/CancellationIT.java index b4b8242a6e456..ef5198499ff09 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/CancellationIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/CancellationIT.java @@ -61,6 +61,7 @@ public 
void testAutomaticCancellation() throws Exception { restRequest.setEntity(new StringEntity(""" { "sample_size": 10000, + "requested_duration": 33, "query": { "bool": { "filter": [ @@ -85,7 +86,7 @@ void verifyCancellation(String action, Request restRequest) throws Exception { Map nodeIdToName = readNodesInfo(); List plugins = initBlockFactory(); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); Cancellable cancellable = getRestClient().performRequestAsync(restRequest, wrapAsRestResponseListener(future)); awaitForBlock(plugins); diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java index 7d7bae20ef983..586071ad7c1f3 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java @@ -9,13 +9,13 @@ public class GetFlameGraphActionIT extends ProfilingTestCase { public void testGetStackTracesUnfiltered() throws Exception { - GetStackTracesRequest request = new GetStackTracesRequest(10, null); + GetStackTracesRequest request = new GetStackTracesRequest(10, 1.0d, 1.0d, null, null, null, null, null, null, null, null); GetFlamegraphResponse response = client().execute(GetFlamegraphAction.INSTANCE, request).get(); // only spot-check top level properties - detailed tests are done in unit tests - assertEquals(231, response.getSize()); + assertEquals(297, response.getSize()); assertEquals(1.0d, response.getSamplingRate(), 0.001d); assertEquals(60, response.getSelfCPU()); - assertEquals(1204, response.getTotalCPU()); + assertEquals(1956, response.getTotalCPU()); assertEquals(40, response.getTotalSamples()); } } diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java index 050ff5dd47800..a5efa24da5397 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java @@ -7,32 +7,96 @@ package org.elasticsearch.xpack.profiling; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.TermQueryBuilder; + import java.util.List; public class GetStackTracesActionIT extends ProfilingTestCase { public void testGetStackTracesUnfiltered() throws Exception { - GetStackTracesRequest request = new GetStackTracesRequest(10, null); + GetStackTracesRequest request = new GetStackTracesRequest(10, 1.0d, 1.0d, null, null, null, null, null, null, null, null); request.setAdjustSampleCount(true); GetStackTracesResponse response = client().execute(GetStackTracesAction.INSTANCE, request).get(); assertEquals(40, response.getTotalSamples()); - assertEquals(285, response.getTotalFrames()); + assertEquals(473, response.getTotalFrames()); + + assertNotNull(response.getStackTraceEvents()); + assertEquals(4L, response.getStackTraceEvents().get("L7kj7UvlKbT-vN73el4faQ").count); + + assertNotNull(response.getStackTraces()); + // just do a high-level spot check. 
Decoding is tested in unit-tests + StackTrace stackTrace = response.getStackTraces().get("L7kj7UvlKbT-vN73el4faQ"); + assertEquals(18, stackTrace.addressOrLines.size()); + assertEquals(18, stackTrace.fileIds.size()); + assertEquals(18, stackTrace.frameIds.size()); + assertEquals(18, stackTrace.typeIds.size()); + assertEquals(0.007903d, stackTrace.annualCO2Tons, 0.000001d); + assertEquals(74.46d, stackTrace.annualCostsUSD, 0.01d); + + assertNotNull(response.getStackFrames()); + StackFrame stackFrame = response.getStackFrames().get("8NlMClggx8jaziUTJXlmWAAAAAAAAIYI"); + assertEquals(List.of("start_thread"), stackFrame.functionName); + + assertNotNull(response.getExecutables()); + assertEquals("vmlinux", response.getExecutables().get("lHp5_WAgpLy2alrUVab6HA")); + } + + public void testGetStackTracesFromAPMWithMatch() throws Exception { + TermQueryBuilder query = QueryBuilders.termQuery("transaction.name", "encodeSha1"); + + GetStackTracesRequest request = new GetStackTracesRequest( + null, + 1.0d, + 1.0d, + query, + "apm-test-*", + "transaction.profiler_stack_trace_ids", + null, + null, + null, + null, + null + ); + GetStackTracesResponse response = client().execute(GetStackTracesAction.INSTANCE, request).get(); + assertEquals(43, response.getTotalFrames()); assertNotNull(response.getStackTraceEvents()); - assertEquals(4, (int) response.getStackTraceEvents().get("14cFLjgoe-BTQd17mhedeA")); + assertEquals(3L, response.getStackTraceEvents().get("Ce77w10WeIDow3kd1jowlA").count); + assertEquals(2L, response.getStackTraceEvents().get("JvISdnJ47BQ01489cwF9DA").count); assertNotNull(response.getStackTraces()); // just do a high-level spot check. Decoding is tested in unit-tests - StackTrace stackTrace = response.getStackTraces().get("JvISdnJ47BQ01489cwF9DA"); - assertEquals(4, stackTrace.addressOrLines.size()); - assertEquals(4, stackTrace.fileIds.size()); - assertEquals(4, stackTrace.frameIds.size()); - assertEquals(4, stackTrace.typeIds.size()); + StackTrace stackTrace = response.getStackTraces().get("Ce77w10WeIDow3kd1jowlA"); + assertEquals(39, stackTrace.addressOrLines.size()); + assertEquals(39, stackTrace.fileIds.size()); + assertEquals(39, stackTrace.frameIds.size()); + assertEquals(39, stackTrace.typeIds.size()); assertNotNull(response.getStackFrames()); - StackFrame stackFrame = response.getStackFrames().get("lHp5_WAgpLy2alrUVab6HAAAAAAATgeq"); - assertEquals(List.of("blkdev_issue_flush"), stackFrame.functionName); + StackFrame stackFrame = response.getStackFrames().get("fhsEKXDuxJ-jIJrZpdRuSAAAAAAAAFtj"); + assertEquals(List.of("deflate", "deflate"), stackFrame.functionName); assertNotNull(response.getExecutables()); - assertNotNull("vmlinux", response.getExecutables().get("lHp5_WAgpLy2alrUVab6HA")); + assertEquals("libzip.so", response.getExecutables().get("GXH6S9Nv2Lf0omTz4cH4RA")); + } + + public void testGetStackTracesFromAPMNoMatch() throws Exception { + TermQueryBuilder query = QueryBuilders.termQuery("transaction.name", "nonExistingTransaction"); + + GetStackTracesRequest request = new GetStackTracesRequest( + null, + 1.0d, + 1.0d, + query, + "apm-test-*", + "transaction.profiler_stack_trace_ids", + null, + null, + null, + null, + null + ); + GetStackTracesResponse response = client().execute(GetStackTracesAction.INSTANCE, request).get(); + assertEquals(0, response.getTotalFrames()); } } diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java 
b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java index 0f602d9d99c25..6a95b7c8d8573 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java @@ -7,9 +7,9 @@ package org.elasticsearch.xpack.profiling; -import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.network.NetworkModule; @@ -17,12 +17,12 @@ import org.elasticsearch.datastreams.DataStreamsPlugin; import org.elasticsearch.license.LicenseSettings; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.transport.netty4.Netty4Plugin; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ilm.LifecycleSettings; +import org.elasticsearch.xpack.countedkeyword.CountedKeywordMapperPlugin; import org.elasticsearch.xpack.ilm.IndexLifecycle; import org.elasticsearch.xpack.unsignedlong.UnsignedLongMapperPlugin; import org.elasticsearch.xpack.versionfield.VersionFieldPlugin; @@ -30,9 +30,9 @@ import org.junit.Before; import java.io.IOException; +import java.util.ArrayList; import java.util.Collection; import java.util.List; -import java.util.Map; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 1) public abstract class ProfilingTestCase extends ESIntegTestCase { @@ -43,6 +43,7 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { LocalStateProfilingXPackPlugin.class, IndexLifecycle.class, UnsignedLongMapperPlugin.class, + CountedKeywordMapperPlugin.class, VersionFieldPlugin.class, getTestTransportPlugin() ); @@ -68,30 +69,31 @@ protected boolean addMockHttpTransport() { return false; // enable http } - private void indexDoc(String index, String id, Map<String, Object> source) { - DocWriteResponse indexResponse = client().prepareIndex(index).setId(id).setSource(source).setCreate(true).get(); - assertEquals(RestStatus.CREATED, indexResponse.status()); + protected final byte[] read(String resource) throws IOException { + return ProfilingTestCase.class.getClassLoader().getResourceAsStream(resource).readAllBytes(); + } + + protected final void createIndex(String name, String bodyFileName) throws Exception { + CreateIndexResponse response = client().admin() .indices() .prepareCreate(name) .setSource(read(bodyFileName), XContentType.JSON) .execute() .get(); + assertTrue("Creation of [" + name + "] is not acknowledged.", response.isAcknowledged()); } /** - * @return true iff this test relies that data (and the corresponding indices / data streams) are present for this test. + * @return true iff this test relies on data (and the corresponding indices / data streams) being present for this test.
*/ protected boolean requiresDataSetup() { return true; } - protected void waitForIndices() throws Exception { + protected void waitForIndices(Collection indices) throws Exception { assertBusy(() -> { ClusterState state = clusterAdmin().prepareState().get().getState(); - assertTrue( - "Timed out waiting for the indices to be created", - state.metadata() - .indices() - .keySet() - .containsAll( - ProfilingIndexManager.PROFILING_INDICES.stream().map(ProfilingIndexManager.ProfilingIndex::toString).toList() - ) - ); + assertTrue("Timed out waiting for indices to be created", state.metadata().indices().keySet().containsAll(indices)); }); } @@ -102,13 +104,9 @@ protected void updateProfilingTemplatesEnabled(boolean newValue) { assertTrue("Update of profiling templates enabled setting is not acknowledged", response.isAcknowledged()); } - protected final byte[] read(String resource) throws IOException { - return ProfilingTestCase.class.getClassLoader().getResourceAsStream(resource).readAllBytes(); - } - protected final void bulkIndex(String file) throws Exception { byte[] bulkData = read(file); - BulkResponse response = client().prepareBulk().add(bulkData, 0, bulkData.length, XContentType.JSON).execute().actionGet(); + BulkResponse response = client().prepareBulk().add(bulkData, 0, bulkData.length, XContentType.JSON).get(); assertFalse(response.hasFailures()); } @@ -117,15 +115,23 @@ public void setupData() throws Exception { if (requiresDataSetup() == false) { return; } + final String apmTestIndex = "apm-test-001"; // only enable index management while setting up indices to avoid interfering with the rest of the test infrastructure updateProfilingTemplatesEnabled(true); - waitForIndices(); - ensureGreen(); + createIndex(apmTestIndex, "indices/apm-test.json"); + List allIndices = new ArrayList<>( + ProfilingIndexManager.PROFILING_INDICES.stream().map(ProfilingIndexManager.ProfilingIndex::toString).toList() + ); + allIndices.add(apmTestIndex); + waitForIndices(allIndices); + ensureGreen(allIndices.toArray(new String[0])); bulkIndex("data/profiling-events-all.ndjson"); bulkIndex("data/profiling-stacktraces.ndjson"); bulkIndex("data/profiling-stackframes.ndjson"); bulkIndex("data/profiling-executables.ndjson"); + bulkIndex("data/profiling-hosts.ndjson"); + bulkIndex("data/apm-test.ndjson"); refresh(); } diff --git a/x-pack/plugin/profiling/src/internalClusterTest/resources/data/apm-test.ndjson b/x-pack/plugin/profiling/src/internalClusterTest/resources/data/apm-test.ndjson new file mode 100644 index 0000000000000..d147256d6b90f --- /dev/null +++ b/x-pack/plugin/profiling/src/internalClusterTest/resources/data/apm-test.ndjson @@ -0,0 +1,2 @@ +{"create": {"_index": "apm-test-001"}} +{"@timestamp": "1698624000", "transaction.name": "encodeSha1", "transaction.profiler_stack_trace_ids": ["Ce77w10WeIDow3kd1jowlA", "JvISdnJ47BQ01489cwF9DA", "JvISdnJ47BQ01489cwF9DA", "Ce77w10WeIDow3kd1jowlA", "Ce77w10WeIDow3kd1jowlA"]} diff --git a/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-events-all.ndjson b/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-events-all.ndjson index 071b0a2edbe1a..6964368e534c7 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-events-all.ndjson +++ b/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-events-all.ndjson @@ -1,5 +1,5 @@ {"create": {"_index": "profiling-events-all"}} -{"Stacktrace.count": [1], "profiling.project.id": ["100"], "os.kernel": ["9.9.9-0"], "tags": 
["environment:qa", "region:eu-west-1"], "host.ip": ["192.168.1.2"], "@timestamp": ["1698624000"], "container.name": ["instance-0000000010"], "ecs.version": ["1.12.0"], "Stacktrace.id": ["S07KmaoGhvNte78xwwRbZQ"], "agent.version": ["head-be593ef3-1688111067"], "host.name": ["ip-192-168-1-2"], "host.id": ["8457605156473051743"], "process.thread.name": ["497295213074376"]} +{"Stacktrace.count": [1], "profiling.project.id": ["100"], "os.kernel": ["9.9.9-0"], "tags": ["environment:qa", "region:eu-west-1"], "host.ip": ["192.168.1.2"], "@timestamp": ["1700504427"], "container.name": ["instance-0000000010"], "ecs.version": ["1.12.0"], "Stacktrace.id": ["S07KmaoGhvNte78xwwRbZQ"], "agent.version": ["head-be593ef3-1688111067"], "host.name": ["ip-192-168-1-2"], "host.id": ["8457605156473051743"], "process.thread.name": ["497295213074376"]} {"create": {"_index": "profiling-events-all"}} {"Stacktrace.count": [1], "profiling.project.id": ["100"], "os.kernel": ["9.9.9-0"], "tags": ["environment:qa", "region:eu-west-1"], "host.ip": ["192.168.1.2"], "@timestamp": ["1698624000"], "container.name": ["instance-0000000010"], "ecs.version": ["1.12.0"], "Stacktrace.id": ["4tB_mGJrj1xVuMFbXVYwGA"], "agent.version": ["head-be593ef3-1688111067"], "host.name": ["ip-192-168-1-2"], "host.id": ["8457605156473051743"], "process.thread.name": ["497295213074376"]} {"create": {"_index": "profiling-events-all"}} diff --git a/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-hosts.ndjson b/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-hosts.ndjson new file mode 100644 index 0000000000000..cd3ddc1271d2d --- /dev/null +++ b/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-hosts.ndjson @@ -0,0 +1,2 @@ +{"create": {"_index": "profiling-hosts", "_id": "eLH27YsBj2lLi3tJYlvr"}} +{"profiling.project.id": 100, "host.id": "8457605156473051743", "@timestamp": 1700504426, "ecs.version": "1.12.0", "profiling.agent.build_timestamp": 1688111067, "profiling.instance.private_ipv4s": ["192.168.1.2"], "ec2.instance_life_cycle": "on-demand", "profiling.agent.config.map_scale_factor": 0, "ec2.instance_type": "i3.2xlarge", "profiling.host.ip": "192.168.1.2", "profiling.agent.config.bpf_log_level": 0, "profiling.host.sysctl.net.core.bpf_jit_enable": 1, "profiling.agent.config.file": "/etc/prodfiler/prodfiler.conf", "ec2.local_ipv4": "192.168.1.2", "profiling.agent.config.no_kernel_version_check": false, "profiling.host.machine": "x86_64", ",profiling.host.tags": ["cloud_provider:aws", "cloud_environment:qa", "cloud_region:eu-west-1"], "profiling.agent.config.probabilistic_threshold": 100, "profiling.agent.config.disable_tls": false, "profiling.agent.config.tracers": "all", "profiling.agent.start_time": 1700090045589, "profiling.agent.config.max_elements_per_interval": 800, "ec2.placement.region": "eu-west-1", "profiling.agent.config.present_cpu_cores": 8, "profiling.host.kernel_version": "9.9.9-0-aws", "profiling.agent.config.bpf_log_size": 65536, "profiling.agent.config.known_traces_entries": 65536, "profiling.host.sysctl.kernel.unprivileged_bpf_disabled": 1, "profiling.agent.config.verbose": false, "profiling.agent.config.probabilistic_interval": "1m0s", "ec2.placement.availability_zone_id": "euw1-az1", "ec2.security_groups": "", "ec2.local_hostname": "ip-192-168-1-2.eu-west-1.compute.internal", "ec2.placement.availability_zone": "eu-west-1c", "profiling.agent.config.upload_symbols": false, "profiling.host.sysctl.kernel.bpf_stats_enabled": 0, "profiling.host.name": 
"ip-192-168-1-2", "ec2.mac": "00:11:22:33:44:55", "profiling.host.kernel_proc_version": "Linux version 9.9.9-0-aws", "profiling.agent.config.cache_directory": "/var/cache/optimyze/", "profiling.agent.version": "v8.12.0", "ec2.hostname": "ip-192-168-1-2.eu-west-1.compute.internal", "profiling.agent.config.elastic_mode": false, "ec2.ami_id": "ami-aaaaaaaaaaa", "ec2.instance_id": "i-0b999999999999999" } diff --git a/x-pack/plugin/profiling/src/internalClusterTest/resources/indices/apm-test.json b/x-pack/plugin/profiling/src/internalClusterTest/resources/indices/apm-test.json new file mode 100644 index 0000000000000..e0aeb707ffc76 --- /dev/null +++ b/x-pack/plugin/profiling/src/internalClusterTest/resources/indices/apm-test.json @@ -0,0 +1,24 @@ +{ + "settings": { + "index" : { + "number_of_replicas" : 0 + } + }, + "mappings": { + "_doc": { + "dynamic": "false", + "date_detection": false, + "properties": { + "@timestamp": { + "type": "date" + }, + "transaction.profiler_stack_trace_ids": { + "type": "counted_keyword" + }, + "transaction.name": { + "type": "keyword" + } + } + } + } +} diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CO2Calculator.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CO2Calculator.java new file mode 100644 index 0000000000000..136821d491c59 --- /dev/null +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CO2Calculator.java @@ -0,0 +1,224 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +import java.util.Collections; +import java.util.Map; + +import static java.util.Map.entry; + +final class CO2Calculator { + private static final double DEFAULT_SAMPLING_FREQUENCY = 20.0d; + private static final double DEFAULT_CO2_TONS_PER_KWH = 0.000379069d; // unit: metric tons / kWh + private static final double DEFAULT_KILOWATTS_PER_CORE_X86 = 7.0d / 1000.0d; // unit: watt / core + private static final double DEFAULT_KILOWATTS_PER_CORE_ARM64 = 2.8d / 1000.0d; // unit: watt / core + private static final double DEFAULT_KILOWATTS_PER_CORE = DEFAULT_KILOWATTS_PER_CORE_X86; // unit: watt / core + private static final double DEFAULT_DATACENTER_PUE = 1.7d; + private static final Provider DEFAULT_PROVIDER = new Provider(DEFAULT_DATACENTER_PUE, Collections.emptyMap()); + private final InstanceTypeService instanceTypeService; + private final Map hostMetadata; + private final double samplingDurationInSeconds; + private final double customCO2PerKWH; + private final double customDatacenterPUE; + private final double customKilowattsPerCoreX86; + private final double customKilowattsPerCoreARM64; + + CO2Calculator( + InstanceTypeService instanceTypeService, + Map hostMetadata, + double samplingDurationInSeconds, + Double customCO2PerKWH, + Double customDatacenterPUE, + Double customPerCoreWattX86, + Double customPerCoreWattARM64 + ) { + this.instanceTypeService = instanceTypeService; + this.hostMetadata = hostMetadata; + this.samplingDurationInSeconds = samplingDurationInSeconds > 0 ? samplingDurationInSeconds : 1.0d; // avoid division by zero + this.customCO2PerKWH = customCO2PerKWH == null ? DEFAULT_CO2_TONS_PER_KWH : customCO2PerKWH; + this.customDatacenterPUE = customDatacenterPUE == null ? 
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CO2Calculator.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CO2Calculator.java new file mode 100644 index 0000000000000..136821d491c59 --- /dev/null +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CO2Calculator.java @@ -0,0 +1,224 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +import java.util.Collections; +import java.util.Map; + +import static java.util.Map.entry; + +final class CO2Calculator { + private static final double DEFAULT_SAMPLING_FREQUENCY = 20.0d; + private static final double DEFAULT_CO2_TONS_PER_KWH = 0.000379069d; // unit: metric tons / kWh + private static final double DEFAULT_KILOWATTS_PER_CORE_X86 = 7.0d / 1000.0d; // unit: kW / core + private static final double DEFAULT_KILOWATTS_PER_CORE_ARM64 = 2.8d / 1000.0d; // unit: kW / core + private static final double DEFAULT_KILOWATTS_PER_CORE = DEFAULT_KILOWATTS_PER_CORE_X86; // unit: kW / core + private static final double DEFAULT_DATACENTER_PUE = 1.7d; + private static final Provider DEFAULT_PROVIDER = new Provider(DEFAULT_DATACENTER_PUE, Collections.emptyMap()); + private final InstanceTypeService instanceTypeService; + private final Map<String, HostMetadata> hostMetadata; + private final double samplingDurationInSeconds; + private final double customCO2PerKWH; + private final double customDatacenterPUE; + private final double customKilowattsPerCoreX86; + private final double customKilowattsPerCoreARM64; + + CO2Calculator( + InstanceTypeService instanceTypeService, + Map<String, HostMetadata> hostMetadata, + double samplingDurationInSeconds, + Double customCO2PerKWH, + Double customDatacenterPUE, + Double customPerCoreWattX86, + Double customPerCoreWattARM64 + ) { + this.instanceTypeService = instanceTypeService; + this.hostMetadata = hostMetadata; + this.samplingDurationInSeconds = samplingDurationInSeconds > 0 ? samplingDurationInSeconds : 1.0d; // avoid division by zero + this.customCO2PerKWH = customCO2PerKWH == null ? DEFAULT_CO2_TONS_PER_KWH : customCO2PerKWH; + this.customDatacenterPUE = customDatacenterPUE == null ? DEFAULT_DATACENTER_PUE : customDatacenterPUE; + this.customKilowattsPerCoreX86 = customPerCoreWattX86 == null ? DEFAULT_KILOWATTS_PER_CORE_X86 : customPerCoreWattX86 / 1000.0d; + this.customKilowattsPerCoreARM64 = customPerCoreWattARM64 == null + ? DEFAULT_KILOWATTS_PER_CORE_ARM64 + : customPerCoreWattARM64 / 1000.0d; + } + + public double getAnnualCO2Tons(String hostID, long samples) { + double annualCoreHours = CostCalculator.annualCoreHours(samplingDurationInSeconds, samples, DEFAULT_SAMPLING_FREQUENCY); + + HostMetadata host = hostMetadata.get(hostID); + if (host == null) { + return DEFAULT_KILOWATTS_PER_CORE * customCO2PerKWH * annualCoreHours * customDatacenterPUE; + } + + CostEntry costs = instanceTypeService.getCosts(host.instanceType); + if (costs == null) { + return getKiloWattsPerCore(host) * getCO2TonsPerKWH(host) * annualCoreHours * getDatacenterPUE(host); + } + + return annualCoreHours * costs.co2Factor; // unit: metric tons + } + + private double getKiloWattsPerCore(HostMetadata host) { + if ("aarch64".equals(host.profilingHostMachine)) { + // Assume that AARCH64 (aka ARM64) machines are more energy efficient than x86_64 machines. + return customKilowattsPerCoreARM64; + } + if ("x86_64".equals(host.profilingHostMachine)) { + return customKilowattsPerCoreX86; + } + return DEFAULT_KILOWATTS_PER_CORE; + } + + private double getCO2TonsPerKWH(HostMetadata host) { + Provider provider = PROVIDERS.getOrDefault(host.instanceType.provider, DEFAULT_PROVIDER); + return provider.co2TonsPerKWH.getOrDefault(host.instanceType.region, customCO2PerKWH); + } + + private static double getDatacenterPUE(HostMetadata host) { + return PROVIDERS.getOrDefault(host.instanceType.provider, DEFAULT_PROVIDER).pue; + } + + private record Provider(double pue, Map<String, Double> co2TonsPerKWH) {}
+ + // values are taken from https://www.cloudcarbonfootprint.org/docs/methodology/ + private static final Map<String, Provider> PROVIDERS; + static { + // noinspection (explicit type arguments speed up compilation and analysis time) + PROVIDERS = Map.of( + "aws", + new Provider( + 1.135d, + Map.ofEntries( + entry("us-east-1", 0.000379069d), + entry("us-east-2", 0.000410608d), + entry("us-west-1", 0.000322167d), + entry("us-west-2", 0.000322167d), + entry("us-gov-east-1", 0.000379069d), + entry("us-gov-west-1", 0.000322167d), + entry("af-south-1", 0.0009006d), + entry("ap-east-1", 0.00071d), + entry("ap-south-1", 0.0007082d), + entry("ap-northeast-3", 0.0004658d), + entry("ap-northeast-2", 0.0004156d), + entry("ap-southeast-1", 0.000408d), + entry("ap-southeast-2", 0.00076d), + entry("ap-northeast-1", 0.0004658d), + entry("ca-central-1", 0.00012d), + entry("cn-north-1", 0.0005374d), + entry("cn-northwest-1", 0.0005374d), + entry("eu-central-1", 0.000311d), + entry("eu-west-1", 0.0002786d), + entry("eu-west-2", 0.000225d), + entry("eu-south-1", 0.0002134d), + entry("eu-west-3", 0.0000511d), + entry("eu-north-1", 0.0000088d), + entry("me-south-1", 0.0005059d), + entry("sa-east-1", 0.0000617d) + ) + ), + // noinspection (explicit type arguments speed up compilation and analysis time) + "gcp", + new Provider( + 1.1d, + Map.ofEntries( + entry("us-central1", 0.00003178d), + entry("us-east1", 0.0003504d), + entry("us-east4", 0.00015162d), + entry("us-west1", 0.0000078d), + entry("us-west2", 0.00011638d), + entry("us-west3", 0.00038376d), + entry("us-west4", 0.00036855d), + entry("asia-east1", 0.0004428d), + entry("asia-east2", 0.000453d), + entry("asia-northeast1", 0.00048752d), + entry("asia-northeast2", 0.00048752d), + entry("asia-northeast3", 0.00031533d), + entry("asia-south1", 0.00063448d), + entry("asia-south2", 0.000657d), + entry("asia-southeast1", 0.00047328d), + entry("asia-southeast2", 0.000647d), + entry("australia-southeast1", 0.00064703d), + entry("australia-southeast2", 0.000691d), + entry("europe-central2", 0.000622d), + entry("europe-north1", 0.00000798d), + entry("europe-west1", 0.00004452d), + entry("europe-west2", 0.00009471d), + entry("europe-west3", 0.000108d), + entry("europe-west4", 0.000164d), + entry("europe-west6", 0.000087d), + entry("northamerica-northeast1", 0.000027d), + entry("southamerica-east1", 0.00001236d) + ) + ), + "azure", + new Provider( + 1.185d, + Map.ofEntries( + entry("centralus", 0.000426254d), + entry("eastus", 0.000379069d), + entry("eastus2", 0.000379069d), + entry("eastus3", 0.000379069d), + entry("northcentralus", 0.000410608d), + entry("southcentralus", 0.000373231d), + entry("westcentralus", 0.000322167d), + entry("westus", 0.000322167d), + entry("westus2", 0.000322167d), + entry("westus3", 0.000322167d), + entry("eastasia", 0.00071d), + entry("southeastasia", 0.000408d), + entry("southafricanorth", 0.0009006d), + entry("southafricawest", 0.0009006d), + entry("southafrica", 0.0009006d), + entry("australia", 0.00079d), + entry("australiacentral", 0.00079d), + entry("australiacentral2", 0.00079d), + entry("australiaeast", 0.00079d), + entry("australiasoutheast", 0.00096d), + entry("japan", 0.0004658d), + entry("japanwest", 0.0004658d), + entry("japaneast", 0.0004658d), + entry("korea", 0.0004156d), + entry("koreaeast", 0.0004156d), + entry("koreasouth", 0.0004156d), + entry("india", 0.0007082d), + entry("indiawest", 0.0007082d), + entry("indiacentral", 0.0007082d), + entry("indiasouth", 0.0007082d), + entry("northeurope", 0.0002786d), + entry("westeurope", 0.0003284d), + entry("france", 0.00005128d), + entry("francecentral", 0.00005128d), + entry("francesouth", 0.00005128d), + entry("swedencentral", 0.00000567d), + entry("switzerland", 0.00000567d), + entry("switzerlandnorth", 0.00000567d), + entry("switzerlandwest", 0.00000567d), + entry("uk", 0.000225d), + entry("uksouth", 0.000225d), + entry("ukwest", 0.000228d), + entry("germany", 0.00033866d), + entry("germanynorth", 0.00033866d), + entry("germanywestcentral", 0.00033866d), + entry("norway", 0.00000762d), + entry("norwayeast", 0.00000762d), + entry("norwaywest", 0.00000762d), + entry("unitedarabemirates", 0.0004041d), + entry("unitedarabemiratesnorth", 0.0004041d), + entry("unitedarabemiratescentral", 0.0004041d), + entry("canada", 0.00012d), + entry("canadacentral", 0.00012d), + entry("canadaeast", 0.00012d), + entry("brazil", 0.0000617d), + entry("brazilsouth", 0.0000617d), + entry("brazilsoutheast", 0.0000617d) + ) + ) + ); + } +}
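To make the unknown-host fallback of getAnnualCO2Tons above concrete: the estimate is kilowattsPerCore * co2TonsPerKWH * annualCoreHours * PUE. A back-of-the-envelope check with the default constants (standalone Java, not part of the plugin):

```java
// Back-of-the-envelope check of the unknown-host path in getAnnualCO2Tons,
// using the default constants quoted above.
public class CO2DefaultsSketch {
    public static void main(String[] args) {
        double kilowattsPerCore = 7.0d / 1000.0d;  // DEFAULT_KILOWATTS_PER_CORE (x86)
        double co2TonsPerKWH = 0.000379069d;       // DEFAULT_CO2_TONS_PER_KWH
        double datacenterPUE = 1.7d;               // DEFAULT_DATACENTER_PUE
        double annualCoreHours = 24 * 365;         // one core busy for a full year
        double tons = kilowattsPerCore * co2TonsPerKWH * annualCoreHours * datacenterPUE;
        System.out.printf("annual CO2: %.4f metric tons%n", tons); // ~0.0395
    }
}
```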
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CostCalculator.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CostCalculator.java new file mode 100644 index 0000000000000..05319ba7d1cc4 --- /dev/null +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CostCalculator.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +import java.util.Map; + +final class CostCalculator { + private static final double DEFAULT_SAMPLING_FREQUENCY = 20.0d; + private static final double SECONDS_PER_HOUR = 60 * 60; + private static final double SECONDS_PER_YEAR = SECONDS_PER_HOUR * 24 * 365.0d; // unit: seconds + private static final double DEFAULT_COST_USD_PER_CORE_HOUR = 0.0425d; // unit: USD / (core * hour) + private static final double DEFAULT_AWS_COST_FACTOR = 1.0d; + private final InstanceTypeService instanceTypeService; + private final Map<String, HostMetadata> hostMetadata; + private final double samplingDurationInSeconds; + private final double awsCostFactor; + private final double customCostPerCoreHour; + + CostCalculator( + InstanceTypeService instanceTypeService, + Map<String, HostMetadata> hostMetadata, + double samplingDurationInSeconds, + Double awsCostFactor, + Double customCostPerCoreHour + ) { + this.instanceTypeService = instanceTypeService; + this.hostMetadata = hostMetadata; + this.samplingDurationInSeconds = samplingDurationInSeconds > 0 ? samplingDurationInSeconds : 1.0d; // avoid division by zero + this.awsCostFactor = awsCostFactor == null ? DEFAULT_AWS_COST_FACTOR : awsCostFactor; + this.customCostPerCoreHour = customCostPerCoreHour == null ? DEFAULT_COST_USD_PER_CORE_HOUR : customCostPerCoreHour; + } + + public double annualCostsUSD(String hostID, double samples) { + double annualCoreHours = annualCoreHours(samplingDurationInSeconds, samples, DEFAULT_SAMPLING_FREQUENCY); + + HostMetadata host = hostMetadata.get(hostID); + if (host == null) { + return annualCoreHours * customCostPerCoreHour; + } + + double providerCostFactor = host.instanceType.provider.equals("aws") ? awsCostFactor : 1.0d; + + CostEntry costs = instanceTypeService.getCosts(host.instanceType); + if (costs == null) { + return annualCoreHours * customCostPerCoreHour * providerCostFactor; + } + + return annualCoreHours * costs.costFactor * providerCostFactor; + } + + public static double annualCoreHours(double duration, double samples, double samplingFrequency) { + // samplingFrequency will be a variable value when we start supporting probabilistic profiling (soon). + return (SECONDS_PER_YEAR / duration * samples / samplingFrequency) / SECONDS_PER_HOUR; // unit: core * hour + } +}
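annualCoreHours is the pivot for both calculators: samples / samplingFrequency gives the core-seconds observed in the sampled window, which is then scaled to a full year and converted to hours. A worked example that reproduces the formula (standalone Java):

```java
// Standalone check of CostCalculator.annualCoreHours: samples / frequency is the
// observed core-seconds in the sampled window; scale to a year, convert to hours.
public class CoreHoursSketch {
    static double annualCoreHours(double duration, double samples, double samplingFrequency) {
        double secondsPerHour = 60 * 60;
        double secondsPerYear = secondsPerHour * 24 * 365.0d;
        return (secondsPerYear / duration * samples / samplingFrequency) / secondsPerHour;
    }

    public static void main(String[] args) {
        // 12,000 samples at 20 Hz over a 600 s window = one fully busy core,
        // which annualizes to 24 * 365 = 8,760 core-hours.
        System.out.println(annualCoreHours(600.0, 12_000.0, 20.0)); // 8760.0
    }
}
```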
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CostEntry.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CostEntry.java new file mode 100644 index 0000000000000..6033e650072bc --- /dev/null +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CostEntry.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; + +final class CostEntry implements ToXContentObject { + final double co2Factor; + final double costFactor; + + CostEntry(double co2Factor, double costFactor) { + this.co2Factor = co2Factor; + this.costFactor = costFactor; + } + + public static CostEntry fromSource(Map<String, Object> source) { + return new CostEntry((Double) source.get("co2_factor"), (Double) source.get("cost_factor")); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("co2_factor", this.co2Factor); + builder.field("cost_factor", this.costFactor); + builder.endObject(); + return builder; + } +} diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java index d357971e68b1f..6666c326d710d 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java @@ -23,8 +23,12 @@ public class GetFlamegraphResponse extends ActionResponse implements ChunkedToXContentObject { private final int size; private final double samplingRate; - private final int selfCPU; - private final int totalCPU; + private final long selfCPU; + private final long totalCPU; + private final double selfAnnualCO2Tons; + private final double totalAnnualCO2Tons; + private final double selfAnnualCostsUSD; + private final double totalAnnualCostsUSD; private final long totalSamples; private final List<Map<String, Integer>> edges; private final List<String> fileIds; @@ -36,8 +40,12 @@ public class GetFlamegraphResponse extends ActionResponse implements ChunkedToXC private final List<Integer> functionOffsets; private final List<String> sourceFileNames; private final List<Integer> sourceLines; - private final List<Integer> countInclusive; - private final List<Integer> countExclusive; + private final List<Long> countInclusive; + private final List<Long> countExclusive; + private final List<Double> annualCO2TonsInclusive; + private final List<Double> annualCO2TonsExclusive; + private final List<Double> annualCostsUSDInclusive; + private final List<Double> annualCostsUSDExclusive; public GetFlamegraphResponse(StreamInput in) throws IOException { this.size = in.readInt(); @@ -52,10 +60,18 @@ public GetFlamegraphResponse(StreamInput in) throws IOException { this.functionOffsets = in.readCollectionAsList(StreamInput::readInt); this.sourceFileNames = in.readCollectionAsList(StreamInput::readString); this.sourceLines = in.readCollectionAsList(StreamInput::readInt); - this.countInclusive = in.readCollectionAsList(StreamInput::readInt); - this.countExclusive = in.readCollectionAsList(StreamInput::readInt); - this.selfCPU = in.readInt(); - this.totalCPU = in.readInt(); + this.countInclusive = in.readCollectionAsList(StreamInput::readLong); + this.countExclusive = in.readCollectionAsList(StreamInput::readLong); + this.annualCO2TonsInclusive = in.readCollectionAsList(StreamInput::readDouble); + this.annualCO2TonsExclusive = in.readCollectionAsList(StreamInput::readDouble); + this.annualCostsUSDInclusive = in.readCollectionAsList(StreamInput::readDouble); + this.annualCostsUSDExclusive = in.readCollectionAsList(StreamInput::readDouble); + this.selfCPU = in.readLong(); + this.totalCPU = in.readLong(); + this.selfAnnualCO2Tons = in.readDouble(); + this.totalAnnualCO2Tons = in.readDouble(); + this.selfAnnualCostsUSD = in.readDouble(); + this.totalAnnualCostsUSD = in.readDouble(); this.totalSamples = in.readLong(); }
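The widening from int to long above is not cosmetic: inclusive counts are summed across all stack traces, and at high sampling rates over long windows a busy fleet can push totals past Integer.MAX_VALUE. A minimal demonstration of the failure mode being avoided (standalone Java):

```java
// Why the counters moved from int to long: summing sample counts into an int
// silently wraps around once the total passes Integer.MAX_VALUE (~2.1 billion).
public class OverflowSketch {
    public static void main(String[] args) {
        int intTotal = Integer.MAX_VALUE;
        intTotal += 1;                   // wraps around
        System.out.println(intTotal);    // -2147483648
        long longTotal = Integer.MAX_VALUE;
        longTotal += 1;                  // still exact
        System.out.println(longTotal);   // 2147483648
    }
}
```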
@@ -72,10 +88,18 @@ public GetFlamegraphResponse( List<Integer> functionOffsets, List<String> sourceFileNames, List<Integer> sourceLines, - List<Integer> countInclusive, - List<Integer> countExclusive, - int selfCPU, - int totalCPU, + List<Long> countInclusive, + List<Long> countExclusive, + List<Double> annualCO2TonsInclusive, + List<Double> annualCO2TonsExclusive, + List<Double> annualCostsUSDInclusive, + List<Double> annualCostsUSDExclusive, + long selfCPU, + long totalCPU, + double selfAnnualCO2Tons, + double totalAnnualCO2Tons, + double selfAnnualCostsUSD, + double totalAnnualCostsUSD, long totalSamples ) { this.size = size; @@ -92,8 +116,16 @@ public GetFlamegraphResponse( this.sourceLines = sourceLines; this.countInclusive = countInclusive; this.countExclusive = countExclusive; + this.annualCO2TonsInclusive = annualCO2TonsInclusive; + this.annualCO2TonsExclusive = annualCO2TonsExclusive; + this.annualCostsUSDInclusive = annualCostsUSDInclusive; + this.annualCostsUSDExclusive = annualCostsUSDExclusive; this.selfCPU = selfCPU; this.totalCPU = totalCPU; + this.selfAnnualCO2Tons = selfAnnualCO2Tons; + this.totalAnnualCO2Tons = totalAnnualCO2Tons; + this.selfAnnualCostsUSD = selfAnnualCostsUSD; + this.totalAnnualCostsUSD = totalAnnualCostsUSD; this.totalSamples = totalSamples; } @@ -111,10 +143,18 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(this.functionOffsets, StreamOutput::writeInt); out.writeCollection(this.sourceFileNames, StreamOutput::writeString); out.writeCollection(this.sourceLines, StreamOutput::writeInt); - out.writeCollection(this.countInclusive, StreamOutput::writeInt); - out.writeCollection(this.countExclusive, StreamOutput::writeInt); - out.writeInt(this.selfCPU); - out.writeInt(this.totalCPU); + out.writeCollection(this.countInclusive, StreamOutput::writeLong); + out.writeCollection(this.countExclusive, StreamOutput::writeLong); + out.writeCollection(this.annualCO2TonsInclusive, StreamOutput::writeDouble); + out.writeCollection(this.annualCO2TonsExclusive, StreamOutput::writeDouble); + out.writeCollection(this.annualCostsUSDInclusive, StreamOutput::writeDouble); + out.writeCollection(this.annualCostsUSDExclusive, StreamOutput::writeDouble); + out.writeLong(this.selfCPU); + out.writeLong(this.totalCPU); + out.writeDouble(this.selfAnnualCO2Tons); + out.writeDouble(this.totalAnnualCO2Tons); + out.writeDouble(this.selfAnnualCostsUSD); + out.writeDouble(this.totalAnnualCostsUSD); out.writeLong(this.totalSamples); } @@ -126,11 +166,11 @@ public double getSamplingRate() { return samplingRate; } - public List<Integer> getCountInclusive() { + public List<Long> getCountInclusive() { return countInclusive; } - public List<Integer> getCountExclusive() { + public List<Long> getCountExclusive() { return countExclusive; } @@ -174,11 +214,11 @@ public List<Integer> getSourceLines() { return sourceLines; } - public int getSelfCPU() { + public long getSelfCPU() { return selfCPU; } - public int getTotalCPU() { + public long getTotalCPU() { return totalCPU; } @@ -212,10 +252,30 @@ public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params ChunkedToXContentHelper.array("SourceLine", Iterators.map(sourceLines.iterator(), e -> (b, p) -> b.value(e))), ChunkedToXContentHelper.array("CountInclusive", Iterators.map(countInclusive.iterator(), e -> (b, p) -> b.value(e))), ChunkedToXContentHelper.array("CountExclusive", Iterators.map(countExclusive.iterator(), e -> (b, p) -> b.value(e))), + ChunkedToXContentHelper.array( + "AnnualCO2TonsInclusive", + Iterators.map(annualCO2TonsInclusive.iterator(), e -> (b, p) -> b.value(e)) + ), + ChunkedToXContentHelper.array( + "AnnualCO2TonsExclusive", + Iterators.map(annualCO2TonsExclusive.iterator(), e -> (b, p) -> b.value(e)) + ), + ChunkedToXContentHelper.array( + "AnnualCostsUSDInclusive", + Iterators.map(annualCostsUSDInclusive.iterator(), e -> (b, p) -> b.value(e)) + ), + ChunkedToXContentHelper.array( + "AnnualCostsUSDExclusive", + Iterators.map(annualCostsUSDExclusive.iterator(), e -> (b, p) -> b.value(e)) + ), Iterators.single((b, p) -> b.field("Size", size)), Iterators.single((b, p) -> b.field("SamplingRate", samplingRate)), Iterators.single((b, p) -> b.field("SelfCPU", selfCPU)), Iterators.single((b, p) -> b.field("TotalCPU", totalCPU)), + Iterators.single((b, p) -> b.field("SelfAnnualCO2Tons", selfAnnualCO2Tons)), + Iterators.single((b, p) -> b.field("TotalAnnualCO2Tons", totalAnnualCO2Tons)), + Iterators.single((b, p) -> b.field("SelfAnnualCostsUSD", selfAnnualCostsUSD)), + Iterators.single((b, p) -> b.field("TotalAnnualCostsUSD", totalAnnualCostsUSD)), Iterators.single((b, p) -> b.field("TotalSamples", totalSamples)), ChunkedToXContentHelper.endObject() );
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java index 3932e386225c5..a6680f08f4684 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.query.QueryBuilder; @@ -36,46 +35,139 @@ public class GetStackTracesRequest extends ActionRequest implements IndicesRequest { public static final ParseField QUERY_FIELD = new ParseField("query"); public static final ParseField SAMPLE_SIZE_FIELD = new ParseField("sample_size"); + public static final ParseField INDICES_FIELD = new ParseField("indices"); + public static final ParseField STACKTRACE_IDS_FIELD = new ParseField("stacktrace_ids"); + public static final ParseField REQUESTED_DURATION_FIELD = new ParseField("requested_duration"); + public static final ParseField AWS_COST_FACTOR_FIELD = new ParseField("aws_cost_factor"); + public static final ParseField CUSTOM_CO2_PER_KWH = new ParseField("co2_per_kwh"); + public static final ParseField CUSTOM_DATACENTER_PUE = new ParseField("datacenter_pue"); + public static final ParseField CUSTOM_PER_CORE_WATT_X86 = new ParseField("per_core_watt_x86"); + public static final ParseField CUSTOM_PER_CORE_WATT_ARM64 = new ParseField("per_core_watt_arm64"); + public static final ParseField CUSTOM_COST_PER_CORE_HOUR = new ParseField("cost_per_core_hour"); + private static final int DEFAULT_SAMPLE_SIZE = 20_000; private QueryBuilder query; private Integer sampleSize; + private String indices; + private String stackTraceIds; + private Double requestedDuration; + private Double awsCostFactor; + private Double customCO2PerKWH; + private Double customDatacenterPUE; + private Double customPerCoreWattX86; + private Double customPerCoreWattARM64; + private Double customCostPerCoreHour; - // We intentionally don't expose this field via the REST API but we can control behavior within Elasticsearch. + // We intentionally don't expose this field via the REST API, but we can control behavior within Elasticsearch. // Once we have migrated all client-side code to dedicated APIs (such as the flamegraph API), we can adjust // sample counts by default and remove this flag. private Boolean adjustSampleCount;
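Taken together, the parse fields above define the request surface for the cost and CO2 tunables. An illustrative body (values made up; the wrapper class is hypothetical and only serves to keep the sketch self-contained) that parseXContent below would accept:

```java
// Illustrative request body built from the ParseField names declared above;
// all values are invented. Note that validate() rejects sample_size when a
// custom "indices" value is supplied.
public class StackTracesRequestBodyExample {
    static final String BODY = """
        {
          "sample_size": 20000,
          "requested_duration": 86400,
          "aws_cost_factor": 1.0,
          "co2_per_kwh": 0.000379069,
          "datacenter_pue": 1.7,
          "per_core_watt_x86": 7.0,
          "per_core_watt_arm64": 2.8,
          "cost_per_core_hour": 0.0425
        }
        """;

    public static void main(String[] args) {
        System.out.println(BODY);
    }
}
```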
public GetStackTracesRequest() { - this(null, null); + this(null, null, null, null, null, null, null, null, null, null, null); } - public GetStackTracesRequest(Integer sampleSize, QueryBuilder query) { + public GetStackTracesRequest( + Integer sampleSize, + Double requestedDuration, + Double awsCostFactor, + QueryBuilder query, + String indices, + String stackTraceIds, + Double customCO2PerKWH, + Double customDatacenterPUE, + Double customPerCoreWattX86, + Double customPerCoreWattARM64, + Double customCostPerCoreHour + ) { this.sampleSize = sampleSize; + this.requestedDuration = requestedDuration; + this.awsCostFactor = awsCostFactor; this.query = query; + this.indices = indices; + this.stackTraceIds = stackTraceIds; + this.customCO2PerKWH = customCO2PerKWH; + this.customDatacenterPUE = customDatacenterPUE; + this.customPerCoreWattX86 = customPerCoreWattX86; + this.customPerCoreWattARM64 = customPerCoreWattARM64; + this.customCostPerCoreHour = customCostPerCoreHour; } public GetStackTracesRequest(StreamInput in) throws IOException { this.query = in.readOptionalNamedWriteable(QueryBuilder.class); this.sampleSize = in.readOptionalInt(); + this.requestedDuration = in.readOptionalDouble(); + this.awsCostFactor = in.readOptionalDouble(); this.adjustSampleCount = in.readOptionalBoolean(); + this.indices = in.readOptionalString(); + this.stackTraceIds = in.readOptionalString(); + this.customCO2PerKWH = in.readOptionalDouble(); + this.customDatacenterPUE = in.readOptionalDouble(); + this.customPerCoreWattX86 = in.readOptionalDouble(); + this.customPerCoreWattARM64 = in.readOptionalDouble(); + this.customCostPerCoreHour = in.readOptionalDouble(); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalNamedWriteable(query); out.writeOptionalInt(sampleSize); + out.writeOptionalDouble(requestedDuration); + out.writeOptionalDouble(awsCostFactor); out.writeOptionalBoolean(adjustSampleCount); + out.writeOptionalString(indices); + out.writeOptionalString(stackTraceIds); + out.writeOptionalDouble(customCO2PerKWH); + out.writeOptionalDouble(customDatacenterPUE); + out.writeOptionalDouble(customPerCoreWattX86); + out.writeOptionalDouble(customPerCoreWattARM64); + out.writeOptionalDouble(customCostPerCoreHour); } public Integer getSampleSize() { - return sampleSize; + return sampleSize != null ?
sampleSize : DEFAULT_SAMPLE_SIZE; + } + + public Double getRequestedDuration() { + return requestedDuration; + } + + public Double getAwsCostFactor() { + return awsCostFactor; + } + + public Double getCustomCO2PerKWH() { + return customCO2PerKWH; + } + + public Double getCustomDatacenterPUE() { + return customDatacenterPUE; + } + + public Double getCustomPerCoreWattX86() { + return customPerCoreWattX86; + } + + public Double getCustomPerCoreWattARM64() { + return customPerCoreWattARM64; + } + + public Double getCustomCostPerCoreHour() { + return customCostPerCoreHour; } public QueryBuilder getQuery() { return query; } + public String getIndices() { + return indices; + } + + public String getStackTraceIds() { + return stackTraceIds; + } + public boolean isAdjustSampleCount() { return Boolean.TRUE.equals(adjustSampleCount); } @@ -101,6 +193,24 @@ public void parseXContent(XContentParser parser) throws IOException { } else if (token.isValue()) { if (SAMPLE_SIZE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { this.sampleSize = parser.intValue(); + } else if (INDICES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + this.indices = parser.text(); + } else if (STACKTRACE_IDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + this.stackTraceIds = parser.text(); + } else if (REQUESTED_DURATION_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + this.requestedDuration = parser.doubleValue(); + } else if (AWS_COST_FACTOR_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + this.awsCostFactor = parser.doubleValue(); + } else if (CUSTOM_CO2_PER_KWH.match(currentFieldName, parser.getDeprecationHandler())) { + this.customCO2PerKWH = parser.doubleValue(); + } else if (CUSTOM_DATACENTER_PUE.match(currentFieldName, parser.getDeprecationHandler())) { + this.customDatacenterPUE = parser.doubleValue(); + } else if (CUSTOM_PER_CORE_WATT_X86.match(currentFieldName, parser.getDeprecationHandler())) { + this.customPerCoreWattX86 = parser.doubleValue(); + } else if (CUSTOM_PER_CORE_WATT_ARM64.match(currentFieldName, parser.getDeprecationHandler())) { + this.customPerCoreWattARM64 = parser.doubleValue(); + } else if (CUSTOM_COST_PER_CORE_HOUR.match(currentFieldName, parser.getDeprecationHandler())) { + this.customCostPerCoreHour = parser.doubleValue(); } else { throw new ParsingException( parser.getTokenLocation(), @@ -130,17 +240,48 @@ public void parseXContent(XContentParser parser) throws IOException { @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; - if (sampleSize == null) { - validationException = addValidationError("[" + SAMPLE_SIZE_FIELD.getPreferredName() + "] is mandatory", validationException); - } else if (sampleSize <= 0) { - validationException = addValidationError( - "[" + SAMPLE_SIZE_FIELD.getPreferredName() + "] must be greater or equals than 1, got: " + sampleSize, - validationException - ); + if (indices != null) { + if (stackTraceIds == null || stackTraceIds.isEmpty()) { + validationException = addValidationError( + "[" + STACKTRACE_IDS_FIELD.getPreferredName() + "] is mandatory", + validationException + ); + } + // we don't do downsampling when a custom index is provided + if (sampleSize != null) { + validationException = addValidationError( + "[" + SAMPLE_SIZE_FIELD.getPreferredName() + "] must not be set", + validationException + ); + } + } else { + if (stackTraceIds != null) { + validationException = addValidationError( + "[" + 
STACKTRACE_IDS_FIELD.getPreferredName() + "] must not be set", + validationException + ); + } + validationException = requirePositive(SAMPLE_SIZE_FIELD, sampleSize, validationException); } + validationException = requirePositive(REQUESTED_DURATION_FIELD, requestedDuration, validationException); + validationException = requirePositive(AWS_COST_FACTOR_FIELD, awsCostFactor, validationException); + validationException = requirePositive(CUSTOM_CO2_PER_KWH, customCO2PerKWH, validationException); + validationException = requirePositive(CUSTOM_DATACENTER_PUE, customDatacenterPUE, validationException); + validationException = requirePositive(CUSTOM_PER_CORE_WATT_X86, customPerCoreWattX86, validationException); + validationException = requirePositive(CUSTOM_PER_CORE_WATT_ARM64, customPerCoreWattARM64, validationException); + validationException = requirePositive(CUSTOM_COST_PER_CORE_HOUR, customCostPerCoreHour, validationException); return validationException; } + private static ActionRequestValidationException requirePositive(ParseField field, Number value, ActionRequestValidationException e) { + if (value != null) { + if (value.doubleValue() <= 0.0d) { + return addValidationError("[" + field.getPreferredName() + "] must be greater than 0, got: " + value, e); + } + } + return e; + } + @Override public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { return new CancellableTask(id, type, action, null, parentTaskId, headers) { @@ -148,17 +289,33 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, public String getDescription() { // generating description lazily since the query could be large StringBuilder sb = new StringBuilder(); - sb.append("sample_size[").append(sampleSize).append("]"); - if (query == null) { - sb.append(", query[]"); - } else { - sb.append(", query[").append(Strings.toString(query)).append("]"); - } + appendField(sb, "indices", indices); + appendField(sb, "stacktrace_ids", stackTraceIds); + appendField(sb, "sample_size", sampleSize); + appendField(sb, "requested_duration", requestedDuration); + appendField(sb, "aws_cost_factor", awsCostFactor); + appendField(sb, "co2_per_kwh", customCO2PerKWH); + appendField(sb, "datacenter_pue", customDatacenterPUE); + appendField(sb, "per_core_watt_x86", customPerCoreWattX86); + appendField(sb, "per_core_watt_arm64", customPerCoreWattARM64); + appendField(sb, "cost_per_core_hour", customCostPerCoreHour); + appendField(sb, "query", query); return sb.toString(); } }; } + private static void appendField(StringBuilder sb, String name, Object value) { + if (sb.isEmpty() == false) { + sb.append(", "); + } + if (value == null) { + sb.append(name).append("[]"); + } else { + sb.append(name).append("[").append(value).append("]"); + } + } + @Override public boolean equals(Object o) { if (this == o) { @@ -168,7 +325,10 @@ public boolean equals(Object o) { return false; } GetStackTracesRequest that = (GetStackTracesRequest) o; - return Objects.equals(query, that.query) && Objects.equals(sampleSize, that.sampleSize); + return Objects.equals(query, that.query) + && Objects.equals(sampleSize, that.sampleSize) + && Objects.equals(indices, that.indices) + && Objects.equals(stackTraceIds, that.stackTraceIds); } @Override @@ -179,7 +339,7 @@ public int hashCode() { // Resampler to produce a consistent downsampling results, relying on the default hashCode implementation of `query` will // produce consistent results per node but not across the cluster. 
To avoid this, we produce the hashCode based on the // string representation instead, which will produce consistent results for the entire cluster and across node restarts. - return Objects.hash(Objects.toString(query, "null"), sampleSize); + return Objects.hash(Objects.toString(query, "null"), sampleSize, indices, stackTraceIds); } @Override @@ -188,7 +348,11 @@ public String[] indices() { indices.add("profiling-stacktraces"); indices.add("profiling-stackframes"); indices.add("profiling-executables"); - indices.addAll(EventsIndex.indexNames()); + if (this.indices == null) { + indices.addAll(EventsIndex.indexNames()); + } else { + indices.add(this.indices); + } return indices.toArray(new String[0]); }
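The response diff below swaps the plain per-trace Integer counts for TraceEvent values while keeping the wire format symmetric: each map entry is written as a string plus a long and read back in the same order. The same pattern, shown with plain JDK streams (standalone sketch; TraceEvent is re-declared locally as a record for illustration):

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

// Illustration of the symmetric write/read pattern used for stack_trace_events:
// each value is serialized as (stacktraceID, count) and reconstructed verbatim.
public class WirePatternSketch {
    record TraceEvent(String stacktraceID, long count) {}

    public static void main(String[] args) throws IOException {
        Map<String, TraceEvent> events = Map.of("S07K", new TraceEvent("S07K", 3L));

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeInt(events.size());
            for (Map.Entry<String, TraceEvent> e : events.entrySet()) {
                out.writeUTF(e.getKey());
                out.writeUTF(e.getValue().stacktraceID());
                out.writeLong(e.getValue().count());
            }
        }
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            int size = in.readInt();
            Map<String, TraceEvent> copy = new HashMap<>();
            for (int i = 0; i < size; i++) {
                copy.put(in.readUTF(), new TraceEvent(in.readUTF(), in.readLong()));
            }
            System.out.println(copy.equals(events)); // true
        }
    }
}
```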
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java index 39dd7cd611e64..2f1e15252c277 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.profiling; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -30,7 +31,7 @@ public class GetStackTracesResponse extends ActionResponse implements ChunkedToX @Nullable private final Map<String, String> executables; @Nullable - private final Map<String, Integer> stackTraceEvents; + private final Map<String, TraceEvent> stackTraceEvents; private final int totalFrames; private final double samplingRate; private final long totalSamples; @@ -42,7 +43,10 @@ public GetStackTracesResponse(StreamInput in) throws IOException { i.readCollectionAsList(StreamInput::readInt), i.readCollectionAsList(StreamInput::readString), i.readCollectionAsList(StreamInput::readString), - i.readCollectionAsList(StreamInput::readInt) + i.readCollectionAsList(StreamInput::readInt), + i.readDouble(), + i.readDouble(), + i.readLong() ) ) : null; @@ -57,7 +61,7 @@ ) : null; this.executables = in.readBoolean() ? in.readMap(StreamInput::readString) : null; - this.stackTraceEvents = in.readBoolean() ? in.readMap(StreamInput::readInt) : null; + this.stackTraceEvents = in.readBoolean() ? in.readMap(i -> new TraceEvent(i.readString(), i.readLong())) : null; this.totalFrames = in.readInt(); this.samplingRate = in.readDouble(); this.totalSamples = in.readLong(); @@ -67,7 +71,7 @@ public GetStackTracesResponse( Map<String, StackTrace> stackTraces, Map<String, StackFrame> stackFrames, Map<String, String> executables, - Map<String, Integer> stackTraceEvents, + Map<String, TraceEvent> stackTraceEvents, int totalFrames, double samplingRate, long totalSamples @@ -90,6 +94,9 @@ public void writeTo(StreamOutput out) throws IOException { o.writeStringCollection(v.fileIds); o.writeStringCollection(v.frameIds); o.writeCollection(v.typeIds, StreamOutput::writeInt); + o.writeDouble(v.annualCO2Tons); + o.writeDouble(v.annualCostsUSD); + o.writeLong(v.count); }); } else { out.writeBoolean(false); } @@ -113,7 +120,10 @@ public void writeTo(StreamOutput out) throws IOException { } if (stackTraceEvents != null) { out.writeBoolean(true); - out.writeMap(stackTraceEvents, StreamOutput::writeInt); + out.writeMap(stackTraceEvents, (o, v) -> { + o.writeString(v.stacktraceID); + o.writeLong(v.count); + }); } else { out.writeBoolean(false); } @@ -134,7 +144,7 @@ public Map<String, String> getExecutables() { return executables; } - public Map<String, Integer> getStackTraceEvents() { + public Map<String, TraceEvent> getStackTraceEvents() { return stackTraceEvents; } @@ -157,7 +167,12 @@ public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params optional("stack_traces", stackTraces, ChunkedToXContentHelper::xContentValuesMap), optional("stack_frames", stackFrames, ChunkedToXContentHelper::xContentValuesMap), optional("executables", executables, ChunkedToXContentHelper::map), - optional("stack_trace_events", stackTraceEvents, ChunkedToXContentHelper::map), + // render only count for backwards-compatibility + optional( + "stack_trace_events", + stackTraceEvents, + (n, v) -> ChunkedToXContentHelper.map(n, v, entry -> (b, p) -> b.field(entry.getKey(), entry.getValue().count)) + ), Iterators.single((b, p) -> b.field("total_frames", totalFrames)), Iterators.single((b, p) -> b.field("sampling_rate", samplingRate)), // the following fields are intentionally not written to the XContent representation (only needed on the transport layer): @@ -198,4 +213,9 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(stackTraces, stackFrames, executables, stackTraceEvents, totalFrames, samplingRate); } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStatusAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStatusAction.java index ec8c85d39015e..1ddf2d7178584 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStatusAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStatusAction.java @@ -35,6 +35,7 @@ public static class Response extends ActionResponse implements ToXContentObject private boolean resourceManagementEnabled; private boolean resourcesCreated; private boolean pre891Data; + private boolean hasData; private boolean timedOut; public Response(StreamInput in) throws IOException { @@ -44,13 +45,21 @@ public Response(StreamInput in) throws IOException { resourcesCreated = in.readBoolean(); pre891Data = in.readBoolean(); timedOut = in.readBoolean(); + hasData = in.readBoolean(); } - public Response(boolean profilingEnabled, boolean resourceManagementEnabled, boolean resourcesCreated, boolean pre891Data) { + public Response( + boolean profilingEnabled, + boolean resourceManagementEnabled, +
boolean resourcesCreated, + boolean pre891Data, + boolean hasData + ) { this.profilingEnabled = profilingEnabled; this.resourceManagementEnabled = resourceManagementEnabled; this.resourcesCreated = resourcesCreated; this.pre891Data = pre891Data; + this.hasData = hasData; } public void setTimedOut(boolean timedOut) { @@ -66,7 +75,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.startObject("profiling").field("enabled", profilingEnabled).endObject(); builder.startObject("resource_management").field("enabled", resourceManagementEnabled).endObject(); - builder.startObject("resources").field("created", resourcesCreated).field("pre_8_9_1_data", pre891Data).endObject(); + builder.startObject("resources") + .field("created", resourcesCreated) + .field("pre_8_9_1_data", pre891Data) + .field("has_data", hasData) + .endObject(); builder.endObject(); return builder; } @@ -78,6 +91,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(resourcesCreated); out.writeBoolean(pre891Data); out.writeBoolean(timedOut); + out.writeBoolean(hasData); } @Override @@ -89,12 +103,13 @@ public boolean equals(Object o) { && resourceManagementEnabled == response.resourceManagementEnabled && resourcesCreated == response.resourcesCreated && pre891Data == response.pre891Data + && hasData == response.hasData && timedOut == response.timedOut; } @Override public int hashCode() { - return Objects.hash(profilingEnabled, resourceManagementEnabled, resourcesCreated, pre891Data, timedOut); + return Objects.hash(profilingEnabled, resourceManagementEnabled, resourcesCreated, pre891Data, hasData, timedOut); } @Override diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/HostMetadata.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/HostMetadata.java new file mode 100644 index 0000000000000..e0b634b5fb9dd --- /dev/null +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/HostMetadata.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.profiling; + +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +final class HostMetadata implements ToXContentObject { + final String hostID; + final InstanceType instanceType; + final String profilingHostMachine; // aarch64 or x86_64 + + HostMetadata(String hostID, InstanceType instanceType, String profilingHostMachine) { + this.hostID = hostID; + this.instanceType = instanceType; + this.profilingHostMachine = profilingHostMachine; + } + + public static HostMetadata fromSource(Map source) { + if (source != null) { + String hostID = (String) source.get("host.id"); + String profilingHostMachine = (String) source.get("profiling.host.machine"); + return new HostMetadata(hostID, InstanceType.fromHostSource(source), profilingHostMachine); + } + return new HostMetadata("", new InstanceType("", "", ""), ""); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + instanceType.toXContent(builder, params); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + HostMetadata that = (HostMetadata) o; + return Objects.equals(hostID, that.hostID); + } + + @Override + public int hashCode() { + return Objects.hash(hostID); + } +} diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceType.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceType.java new file mode 100644 index 0000000000000..98e75ff264375 --- /dev/null +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceType.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +final class InstanceType implements ToXContentObject { + final String provider; + final String region; + final String name; + + InstanceType(String provider, String region, String name) { + this.provider = provider; + this.region = region; + this.name = name; + } + + /** + * Creates a {@link InstanceType} from a {@link Map} of source data provided from JSON or profiling-costs. + * + * @param source the source data + * @return the {@link InstanceType} + */ + public static InstanceType fromCostSource(Map source) { + return new InstanceType((String) source.get("provider"), (String) source.get("region"), (String) source.get("instance_type")); + } + + /** + * Creates a {@link InstanceType} from a {@link Map} of source data provided from profiling-hosts. 
+ * + * @param source the source data + * @return the {@link InstanceType} + */ + public static InstanceType fromHostSource(Map<String, Object> source) { + // Example of tags: + // "profiling.host.tags": [ + // "cloud_provider:aws", + // "cloud_environment:qa", + // "cloud_region:eu-west-1", + // ], + String provider = ""; + String region = ""; + String instanceType = ""; + + List<String> tags = listOf(source.get("profiling.host.tags")); + for (String tag : tags) { + String[] kv = tag.toLowerCase(Locale.ROOT).split(":", 2); + if (kv.length != 2) { + continue; + } + if ("cloud_provider".equals(kv[0])) { + provider = kv[1]; + } + if ("cloud_region".equals(kv[0])) { + region = kv[1]; + } + } + + // We only support AWS for 8.12, but plan for GCP and Azure later. + // "gcp": check 'gce.instance.name' to extract the instanceType + // "azure": extract the instanceType + if ("aws".equals(provider)) { + instanceType = (String) source.get("ec2.instance_type"); + } + + return new InstanceType(provider, region, instanceType); + } + + @SuppressWarnings("unchecked") + private static <T> List<T> listOf(Object o) { + if (o instanceof List) { + return (List<T>) o; + } else if (o != null) { + return List.of((T) o); + } else { + return Collections.emptyList(); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("provider", this.provider); + builder.field("region", this.region); + builder.field("instance_type", this.name); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + InstanceType that = (InstanceType) o; + return Objects.equals(provider, that.provider) && Objects.equals(region, that.region) && Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + return Objects.hash(provider, region, name); + } + + @Override + public String toString() { + return name + " in region " + region; + } +}
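The tag parsing above is the provider-agnostic part of fromHostSource. Replayed on the tags from the test fixture (standalone Java; class name hypothetical):

```java
import java.util.List;
import java.util.Locale;

// Standalone replay of the tag parsing in InstanceType.fromHostSource:
// provider and region are pulled out of "key:value" entries in profiling.host.tags.
public class TagParsingSketch {
    public static void main(String[] args) {
        List<String> tags = List.of("cloud_provider:aws", "cloud_environment:qa", "cloud_region:eu-west-1");
        String provider = "";
        String region = "";
        for (String tag : tags) {
            String[] kv = tag.toLowerCase(Locale.ROOT).split(":", 2);
            if (kv.length != 2) {
                continue; // malformed entries are skipped, as in the plugin code
            }
            if ("cloud_provider".equals(kv[0])) {
                provider = kv[1];
            }
            if ("cloud_region".equals(kv[0])) {
                region = kv[1];
            }
        }
        System.out.println(provider + " / " + region); // aws / eu-west-1
    }
}
```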
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceTypeService.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceTypeService.java new file mode 100644 index 0000000000000..570a2c499fe35 --- /dev/null +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceTypeService.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.zip.GZIPInputStream; + +public class InstanceTypeService { + private final Map<InstanceType, CostEntry> costsPerDatacenter = new HashMap<>(); + + public void load() { + try ( + GZIPInputStream in = new GZIPInputStream( + InstanceTypeService.class.getClassLoader().getResourceAsStream("profiling-costs.json.gz") + ) + ) { + XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, in); + if (parser.currentToken() == null) { + parser.nextToken(); + } + List<Map<String, Object>> rawData = XContentParserUtils.parseList(parser, XContentParser::map); + for (Map<String, Object> entry : rawData) { + costsPerDatacenter.put(InstanceType.fromCostSource(entry), CostEntry.fromSource(entry)); + } + + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + public CostEntry getCosts(InstanceType instance) { + return costsPerDatacenter.get(instance); + } +} diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/KvIndexResolver.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/KvIndexResolver.java index 963f2229c4a59..53962c1f93cee 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/KvIndexResolver.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/KvIndexResolver.java @@ -102,9 +102,9 @@ public List<Index> resolve(ClusterState clusterState, String indexPattern, Insta "Resolved index pattern [" + indexPattern + "] in time range [" - + eventStart.toEpochMilli() + + eventStart + ", " - + eventEnd.toEpochMilli() + + eventEnd + "] to indices [" + matchingIndices.stream().map(Index::getName).collect(Collectors.joining(", ")) + "]."
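The InstanceTypeService above streams profiling-costs.json.gz straight off the classpath through a GZIPInputStream before handing it to the JSON parser. The resource-loading half of that pattern with only JDK classes (standalone sketch; it assumes the gzipped resource is present on the classpath):

```java
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.UncheckedIOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPInputStream;

// Standalone sketch of the classpath + gzip loading pattern used above.
// Caveat: getResourceAsStream returns null if the resource is missing, which
// would surface here as a NullPointerException.
public class GzipResourceSketch {
    public static void main(String[] args) {
        try (
            InputStream raw = GzipResourceSketch.class.getClassLoader().getResourceAsStream("profiling-costs.json.gz");
            BufferedReader reader = new BufferedReader(new InputStreamReader(new GZIPInputStream(raw), StandardCharsets.UTF_8))
        ) {
            System.out.println(reader.readLine()); // first line of the decompressed JSON
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }
}
```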
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexManager.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexManager.java index 00a57faa85401..746159c23dda0 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexManager.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexManager.java @@ -42,7 +42,6 @@ class ProfilingIndexManager extends AbstractProfilingPersistenceManager { // For testing public static final List PROFILING_INDICES = List.of( - ProfilingIndex.regular("profiling-costs", ProfilingIndexTemplateRegistry.PROFILING_COSTS_VERSION, OnVersionBump.KEEP_OLD), ProfilingIndex.regular( "profiling-returnpads-private", ProfilingIndexTemplateRegistry.PROFILING_RETURNPADS_PRIVATE_VERSION, diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java index 571fe6bd803fc..0068d03767387 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java @@ -33,6 +33,8 @@ import java.util.Locale; import java.util.Map; +import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.isDataStreamsLifecycleOnlyMode; + /** * Creates all index-templates and ILM policies that are required for using Elastic Universal Profiling. */ @@ -55,8 +57,6 @@ public class ProfilingIndexTemplateRegistry extends IndexTemplateRegistry { public static final int PROFILING_RETURNPADS_PRIVATE_VERSION = 1; public static final int PROFILING_SQ_EXECUTABLES_VERSION = 1; public static final int PROFILING_SQ_LEAFFRAMES_VERSION = 1; - public static final int PROFILING_COSTS_VERSION = 1; - public static final String PROFILING_TEMPLATE_VERSION_VARIABLE = "xpack.profiling.template.version"; private volatile boolean templatesEnabled; @@ -233,13 +233,6 @@ protected Map getComponentTemplateConfigs() { PROFILING_TEMPLATE_VERSION_VARIABLE ), // templates for regular indices - new IndexTemplateConfig( - "profiling-costs", - "/profiling/index-template/profiling-costs.json", - INDEX_TEMPLATE_VERSION, - PROFILING_TEMPLATE_VERSION_VARIABLE, - indexVersion("costs", PROFILING_COSTS_VERSION) - ), new IndexTemplateConfig( "profiling-returnpads-private", "/profiling/index-template/profiling-returnpads-private.json", diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java index f98c22b3bde30..a2459f839523b 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java @@ -86,18 +86,24 @@ public Collection createComponents(PluginServices services) { // set initial value updateTemplatesEnabled(PROFILING_TEMPLATES_ENABLED.get(settings)); clusterService.getClusterSettings().addSettingsUpdateConsumer(PROFILING_TEMPLATES_ENABLED, this::updateTemplatesEnabled); + InstanceTypeService instanceTypeService = createInstanceTypeService(); if (enabled) { registry.get().initialize(); indexManager.get().initialize(); dataStreamManager.get().initialize(); + 
instanceTypeService.load(); } - return Collections.singletonList(createLicenseChecker()); + return List.of(createLicenseChecker(), instanceTypeService); } protected ProfilingLicenseChecker createLicenseChecker() { return new ProfilingLicenseChecker(XPackPlugin::getSharedLicenseState); } + protected InstanceTypeService createInstanceTypeService() { + return new InstanceTypeService(); + } + public void updateCheckOutdatedIndices(boolean newValue) { if (newValue == false) { logger.info("profiling will ignore outdated indices"); diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StackTrace.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StackTrace.java index d2f72dce2db55..8264471222b57 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StackTrace.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StackTrace.java @@ -24,11 +24,26 @@ final class StackTrace implements ToXContentObject { List frameIds; List typeIds; - StackTrace(List addressOrLines, List fileIds, List frameIds, List typeIds) { + double annualCO2Tons; + double annualCostsUSD; + long count; + + StackTrace( + List addressOrLines, + List fileIds, + List frameIds, + List typeIds, + double annualCO2Tons, + double annualCostsUSD, + long count + ) { this.addressOrLines = addressOrLines; this.fileIds = fileIds; this.frameIds = frameIds; this.typeIds = typeIds; + this.annualCO2Tons = annualCO2Tons; + this.annualCostsUSD = annualCostsUSD; + this.count = count; } private static final int BASE64_FRAME_ID_LENGTH = 32; @@ -197,7 +212,7 @@ public static StackTrace fromSource(Map source) { // Step 2: Convert the run-length byte encoding into a list of uint8s. List typeIDs = runLengthDecodeBase64Url(inputFrameTypes, inputFrameTypes.length(), countsFrameIDs); - return new StackTrace(addressOrLines, fileIDs, frameIDs, typeIDs); + return new StackTrace(addressOrLines, fileIDs, frameIDs, typeIDs, 0, 0, 0); } @Override @@ -207,6 +222,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("file_ids", this.fileIds); builder.field("frame_ids", this.frameIds); builder.field("type_ids", this.typeIds); + builder.field("annual_co2_tons", this.annualCO2Tons); + builder.field("annual_costs_usd", this.annualCostsUSD); + builder.field("count", this.count); builder.endObject(); return builder; } @@ -222,8 +240,10 @@ public boolean equals(Object o) { && fileIds.equals(that.fileIds) && frameIds.equals(that.frameIds) && typeIds.equals(that.typeIds); + // Don't compare metadata like annualized co2, annualized costs and count. } + // Don't hash metadata like annualized co2, annualized costs and count. @Override public int hashCode() { return Objects.hash(addressOrLines, fileIds, frameIds, typeIds); diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TraceEvent.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TraceEvent.java new file mode 100644 index 0000000000000..d092868e23cd9 --- /dev/null +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TraceEvent.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.profiling;
+
+import java.util.Objects;
+
+final class TraceEvent {
+    final String stacktraceID;
+    double annualCO2Tons;
+    double annualCostsUSD;
+    long count;
+
+    TraceEvent(String stacktraceID) {
+        this.stacktraceID = stacktraceID;
+    }
+
+    TraceEvent(String stacktraceID, long count) {
+        this.stacktraceID = stacktraceID;
+        this.count = count;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        TraceEvent event = (TraceEvent) o;
+        return count == event.count && Objects.equals(stacktraceID, event.stacktraceID);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(stacktraceID, count);
+    }
+
+    @Override
+    public String toString() {
+        return "TraceEvent{"
+            + "stacktraceID='"
+            + stacktraceID
+            + '\''
+            + ", annualCO2Tons="
+            + annualCO2Tons
+            + ", annualCostsUSD="
+            + annualCostsUSD
+            + ", count="
+            + count
+            + "}";
+    }
+}
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java
index b791684bec233..a8e8ffcd09769 100644
--- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java
+++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java
@@ -77,12 +77,20 @@ static GetFlamegraphResponse buildFlamegraph(GetStackTracesResponse response) {
         SortedMap sortedStacktraces = new TreeMap<>(response.getStackTraces());
         for (Map.Entry st : sortedStacktraces.entrySet()) {
-            String stackTraceId = st.getKey();
             StackTrace stackTrace = st.getValue();
-            int samples = response.getStackTraceEvents().getOrDefault(stackTraceId, 0);
             builder.setCurrentNode(0);
+
+            long samples = stackTrace.count;
             builder.addSamplesInclusive(0, samples);
-            builder.addSamplesExclusive(0, 0);
+            builder.addSamplesExclusive(0, 0L);
+
+            double annualCO2Tons = stackTrace.annualCO2Tons;
+            builder.addAnnualCO2TonsInclusive(0, annualCO2Tons);
+            builder.addAnnualCO2TonsExclusive(0, 0.0d);
+
+            double annualCostsUSD = stackTrace.annualCostsUSD;
+            builder.addAnnualCostsUSDInclusive(0, annualCostsUSD);
+            builder.addAnnualCostsUSDExclusive(0, 0.0d);
             int frameCount = stackTrace.frameIds.size();
             for (int i = 0; i < frameCount; i++) {
@@ -100,6 +108,8 @@ static GetFlamegraphResponse buildFlamegraph(GetStackTracesResponse response) {
                 if (builder.isExists(frameGroupId)) {
                     nodeId = builder.getNodeId(frameGroupId);
                     builder.addSamplesInclusive(nodeId, samples);
+                    builder.addAnnualCO2TonsInclusive(nodeId, annualCO2Tons);
+                    builder.addAnnualCostsUSDInclusive(nodeId, annualCostsUSD);
                 } else {
                     nodeId = builder.addNode(
                         fileId,
@@ -112,12 +122,16 @@ static GetFlamegraphResponse buildFlamegraph(GetStackTracesResponse response) {
                         frame.fileName(),
                         frame.lineNumber(),
                         samples,
+                        annualCO2Tons,
+                        annualCostsUSD,
                         frameGroupId
                     );
                 }
                 if (i == frameCount - 1) {
                     // Leaf frame: sum up counts for exclusive CPU.
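Aside: the loop in this hunk charges each stack trace's totals to every frame on the stack (inclusive) and to the leaf frame only (exclusive); the hunk resumes right after this note. A self-contained toy model of that split, in plain Java with hypothetical frame names (it is not the FlamegraphBuilder itself):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class InclusiveExclusiveSketch {
        record Sample(List<String> frames, long count) {}

        public static void main(String[] args) {
            // Two sampled stacks, root-first: every frame gets the count inclusively,
            // only the leaf frame gets it exclusively (self time).
            List<Sample> samples = List.of(
                new Sample(List.of("main", "parse", "tokenize"), 5),
                new Sample(List.of("main", "parse"), 2)
            );
            Map<String, Long> inclusive = new HashMap<>();
            Map<String, Long> exclusive = new HashMap<>();
            for (Sample s : samples) {
                for (int i = 0; i < s.frames().size(); i++) {
                    inclusive.merge(s.frames().get(i), s.count(), Long::sum);
                    if (i == s.frames().size() - 1) { // leaf frame
                        exclusive.merge(s.frames().get(i), s.count(), Long::sum);
                    }
                }
            }
            System.out.println(inclusive); // {main=7, parse=7, tokenize=5} (order may vary)
            System.out.println(exclusive); // {parse=2, tokenize=5} (order may vary)
        }
    }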
builder.addSamplesExclusive(nodeId, samples); + builder.addAnnualCO2TonsExclusive(nodeId, annualCO2Tons); + builder.addAnnualCostsUSDExclusive(nodeId, annualCostsUSD); } builder.setCurrentNode(nodeId); } @@ -129,8 +143,12 @@ static GetFlamegraphResponse buildFlamegraph(GetStackTracesResponse response) { private static class FlamegraphBuilder { private int currentNode = 0; private int size = 0; - private int selfCPU; - private int totalCPU; + private long selfCPU; + private long totalCPU; + private double selfAnnualCO2Tons; + private double totalAnnualCO2Tons; + private double selfAnnualCostsUSD; + private double totalAnnualCostsUSD; private final long totalSamples; // Map: FrameGroupId -> NodeId private final List> edges; @@ -143,8 +161,12 @@ private static class FlamegraphBuilder { private final List functionOffsets; private final List sourceFileNames; private final List sourceLines; - private final List countInclusive; - private final List countExclusive; + private final List countInclusive; + private final List countExclusive; + private final List annualCO2TonsExclusive; + private final List annualCO2TonsInclusive; + private final List annualCostsUSDExclusive; + private final List annualCostsUSDInclusive; private final double samplingRate; FlamegraphBuilder(long totalSamples, int frames, double samplingRate) { @@ -162,9 +184,13 @@ private static class FlamegraphBuilder { this.sourceLines = new ArrayList<>(capacity); this.countInclusive = new ArrayList<>(capacity); this.countExclusive = new ArrayList<>(capacity); + this.annualCO2TonsInclusive = new ArrayList<>(capacity); + this.annualCO2TonsExclusive = new ArrayList<>(capacity); + this.annualCostsUSDInclusive = new ArrayList<>(capacity); + this.annualCostsUSDExclusive = new ArrayList<>(capacity); this.totalSamples = totalSamples; // always insert root node - int nodeId = this.addNode("", 0, false, "", 0, "", 0, "", 0, 0, null); + int nodeId = this.addNode("", 0, false, "", 0, "", 0, "", 0, 0, 0.0, 0.0, null); this.setCurrentNode(nodeId); this.samplingRate = samplingRate; } @@ -180,7 +206,9 @@ public int addNode( int functionOffset, String sourceFileName, int sourceLine, - int samples, + long samples, + double annualCO2Tons, + double annualCostsUSD, String frameGroupId ) { int node = this.size; @@ -196,7 +224,13 @@ public int addNode( this.sourceLines.add(sourceLine); this.countInclusive.add(samples); this.totalCPU += samples; - this.countExclusive.add(0); + this.countExclusive.add(0L); + this.annualCO2TonsInclusive.add(annualCO2Tons); + this.totalAnnualCO2Tons += annualCO2Tons; + this.annualCO2TonsExclusive.add(0.0); + this.annualCostsUSDInclusive.add(annualCostsUSD); + this.totalAnnualCostsUSD += annualCostsUSD; + this.annualCostsUSDExclusive.add(0.0); if (frameGroupId != null) { this.edges.get(currentNode).put(frameGroupId, node); } @@ -216,18 +250,42 @@ public int getNodeId(String frameGroupId) { return this.edges.get(currentNode).get(frameGroupId); } - public void addSamplesInclusive(int nodeId, int sampleCount) { - Integer priorSampleCount = this.countInclusive.get(nodeId); + public void addSamplesInclusive(int nodeId, long sampleCount) { + Long priorSampleCount = this.countInclusive.get(nodeId); this.countInclusive.set(nodeId, priorSampleCount + sampleCount); this.totalCPU += sampleCount; } - public void addSamplesExclusive(int nodeId, int sampleCount) { - Integer priorSampleCount = this.countExclusive.get(nodeId); + public void addSamplesExclusive(int nodeId, long sampleCount) { + Long priorSampleCount = 
this.countExclusive.get(nodeId); this.countExclusive.set(nodeId, priorSampleCount + sampleCount); this.selfCPU += sampleCount; } + public void addAnnualCO2TonsInclusive(int nodeId, double annualCO2Tons) { + Double priorAnnualCO2Tons = this.annualCO2TonsInclusive.get(nodeId); + this.annualCO2TonsInclusive.set(nodeId, priorAnnualCO2Tons + annualCO2Tons); + this.totalAnnualCO2Tons += annualCO2Tons; + } + + public void addAnnualCO2TonsExclusive(int nodeId, double annualCO2Tons) { + Double priorAnnualCO2Tons = this.annualCO2TonsExclusive.get(nodeId); + this.annualCO2TonsExclusive.set(nodeId, priorAnnualCO2Tons + annualCO2Tons); + this.selfAnnualCO2Tons += annualCO2Tons; + } + + public void addAnnualCostsUSDInclusive(int nodeId, double annualCostsUSD) { + Double priorAnnualCostsUSD = this.annualCostsUSDInclusive.get(nodeId); + this.annualCostsUSDInclusive.set(nodeId, priorAnnualCostsUSD + annualCostsUSD); + this.totalAnnualCostsUSD += annualCostsUSD; + } + + public void addAnnualCostsUSDExclusive(int nodeId, double annualCostsUSD) { + Double priorAnnualCostsUSD = this.annualCostsUSDExclusive.get(nodeId); + this.annualCostsUSDExclusive.set(nodeId, priorAnnualCostsUSD + annualCostsUSD); + this.selfAnnualCostsUSD += annualCostsUSD; + } + public GetFlamegraphResponse build() { return new GetFlamegraphResponse( size, @@ -244,8 +302,16 @@ public GetFlamegraphResponse build() { sourceLines, countInclusive, countExclusive, + annualCO2TonsInclusive, + annualCO2TonsExclusive, + annualCostsUSDInclusive, + annualCostsUSDExclusive, selfCPU, totalCPU, + selfAnnualCO2Tons, + totalAnnualCO2Tons, + selfAnnualCostsUSD, + totalAnnualCostsUSD, totalSamples ); } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java index 8b9fce4d04040..801ed012de0ee 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.get.MultiGetItemResponse; import org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.ThreadedActionListener; @@ -20,6 +21,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -27,22 +29,32 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.Max; +import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; import 
org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.Min; import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; +import org.elasticsearch.search.collapse.CollapseBuilder; +import org.elasticsearch.search.sort.FieldSortBuilder; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.ObjectPath; +import org.elasticsearch.xpack.countedkeyword.CountedTermsAggregationBuilder; +import java.time.Duration; import java.time.Instant; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -51,6 +63,7 @@ import java.util.concurrent.ConcurrentSkipListSet; import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; public class TransportGetStackTracesAction extends HandledTransportAction { private static final Logger log = LogManager.getLogger(TransportGetStackTracesAction.class); @@ -89,8 +102,16 @@ public class TransportGetStackTracesAction extends HandledTransportAction submitListener) { licenseChecker.requireSupportedLicense(); - StopWatch watch = new StopWatch("getResampledIndex"); + GetStackTracesResponseBuilder responseBuilder = new GetStackTracesResponseBuilder(); + responseBuilder.setRequestedDuration(request.getRequestedDuration()); + responseBuilder.setAwsCostFactor(request.getAwsCostFactor()); + responseBuilder.setCustomCO2PerKWH(request.getCustomCO2PerKWH()); + responseBuilder.setCustomDatacenterPUE(request.getCustomDatacenterPUE()); + responseBuilder.setCustomPerCoreWattX86(request.getCustomPerCoreWattX86()); + responseBuilder.setCustomPerCoreWattARM64(request.getCustomPerCoreWattARM64()); + responseBuilder.setCustomCostPerCoreHour(request.getCustomCostPerCoreHour()); Client client = new ParentTaskAssigningClient(this.nodeClient, transportService.getLocalNode(), submitTask); + if (request.getIndices() == null) { + searchProfilingEvents(submitTask, client, request, submitListener, responseBuilder); + } else { + searchGenericEvents(submitTask, client, request, submitListener, responseBuilder); + } + } + + /** + * Checks whether a task has been cancelled and notifies the provided listener if required. + * @param task The task to check. May be a cancelable task. + * @param listener Listener to notify. + * @return true iff the task has been cancelled. Callers must terminate as early as possible. 
+ */ + private boolean mayNotifyOfCancellation(Task task, ActionListener listener) { + if (task instanceof CancellableTask && ((CancellableTask) task).isCancelled()) { + log.info("{} got cancelled.", task); + listener.onFailure(new TaskCancelledException("get stacktraces task cancelled")); + return true; + } else { + return false; + } + } + + private void searchProfilingEvents( + Task submitTask, + Client client, + GetStackTracesRequest request, + ActionListener submitListener, + GetStackTracesResponseBuilder responseBuilder + ) { + StopWatch watch = new StopWatch("getResampledIndex"); EventsIndex mediumDownsampled = EventsIndex.MEDIUM_DOWNSAMPLED; client.prepareSearch(mediumDownsampled.getName()) .setSize(0) @@ -143,8 +204,8 @@ protected void doExecute(Task submitTask, GetStackTracesRequest request, ActionL mediumDownsampled, resampledIndex ); - log.debug(() -> watch.report()); - searchEventGroupByStackTrace(client, request, resampledIndex, submitListener); + log.debug(watch::report); + searchEventGroupedByStackTrace(submitTask, client, request, submitListener, responseBuilder, resampledIndex); }, e -> { // All profiling-events data streams are created lazily. In a relatively empty cluster it can happen that there are so few // data that we need to resort to the "full" events stream. As this is an edge case we'd rather fail instead of prematurely @@ -153,22 +214,69 @@ protected void doExecute(Task submitTask, GetStackTracesRequest request, ActionL String missingIndex = ((IndexNotFoundException) e).getIndex().getName(); EventsIndex fullIndex = EventsIndex.FULL_INDEX; log.debug("Index [{}] does not exist. Using [{}] instead.", missingIndex, fullIndex.getName()); - searchEventGroupByStackTrace(client, request, fullIndex, submitListener); + searchEventGroupedByStackTrace(submitTask, client, request, submitListener, responseBuilder, fullIndex); } else { submitListener.onFailure(e); } })); } - private void searchEventGroupByStackTrace( + private void searchGenericEvents( + Task submitTask, Client client, GetStackTracesRequest request, - EventsIndex eventsIndex, - ActionListener submitListener + ActionListener submitListener, + GetStackTracesResponseBuilder responseBuilder ) { - StopWatch watch = new StopWatch("searchEventGroupByStackTrace"); - GetStackTracesResponseBuilder responseBuilder = new GetStackTracesResponseBuilder(); - responseBuilder.setSampleRate(eventsIndex.getSampleRate()); + responseBuilder.setSamplingRate(1.0d); + client.prepareSearch(request.indices()) + .setTrackTotalHits(false) + .setSize(0) + .setQuery(request.getQuery()) + .addAggregation(new MinAggregationBuilder("min_time").field("@timestamp")) + .addAggregation(new MaxAggregationBuilder("max_time").field("@timestamp")) + .addAggregation( + new CountedTermsAggregationBuilder("group_by").size(MAX_TRACE_EVENTS_RESULT_SIZE).field(request.getStackTraceIds()) + ) + .execute(handleEventsGroupedByStackTrace(submitTask, client, responseBuilder, submitListener, searchResponse -> { + long totalSamples = 0; + StringTerms stacktraces = searchResponse.getAggregations().get("group_by"); + + // When we switch to aggregation by (hostID, stacktraceID) we need to change the empty List to this. + // List hostEventCounts = new ArrayList<>(MAX_TRACE_EVENTS_RESULT_SIZE); + // Related: https://github.com/elastic/prodfiler/issues/4300 + // See also the aggregation in searchEventGroupedByStackTrace() for the other parts of the change. 
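The generic-events handler below folds the terms buckets into a sorted map of per-stacktrace counts. A self-contained sketch of that fold; Bucket is a stand-in for StringTerms.Bucket, and the real code stores TraceEvent objects keyed by stacktrace ID rather than bare longs:

    import java.util.List;
    import java.util.Map;
    import java.util.TreeMap;

    public class EventFoldSketch {
        record Bucket(String key, long docCount) {} // stand-in for StringTerms.Bucket

        public static void main(String[] args) {
            List<Bucket> buckets = List.of(
                new Bucket("traceA", 10), new Bucket("traceB", 4), new Bucket("traceA", 6)
            );
            long totalSamples = 0;
            // A TreeMap keeps stacktrace IDs lexicographically sorted, the same trick
            // the patch uses to make the later mget against the term dictionary cheaper.
            Map<String, Long> countPerTrace = new TreeMap<>();
            for (Bucket b : buckets) {
                totalSamples += b.docCount();
                countPerTrace.merge(b.key(), b.docCount(), Long::sum);
            }
            System.out.println(totalSamples);  // 20
            System.out.println(countPerTrace); // {traceA=16, traceB=4}
        }
    }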
+ List hostEventCounts = Collections.emptyList(); + + // aggregation + Map stackTraceEvents = new TreeMap<>(); + for (StringTerms.Bucket stacktraceBucket : stacktraces.getBuckets()) { + long count = stacktraceBucket.getDocCount(); + totalSamples += count; + + String stackTraceID = stacktraceBucket.getKeyAsString(); + TraceEvent event = stackTraceEvents.get(stackTraceID); + if (event == null) { + event = new TraceEvent(stackTraceID); + stackTraceEvents.put(stackTraceID, event); + } + event.count += count; + } + responseBuilder.setTotalSamples(totalSamples); + responseBuilder.setHostEventCounts(hostEventCounts); + return stackTraceEvents; + })); + } + + private void searchEventGroupedByStackTrace( + Task submitTask, + Client client, + GetStackTracesRequest request, + ActionListener submitListener, + GetStackTracesResponseBuilder responseBuilder, + EventsIndex eventsIndex + ) { + responseBuilder.setSamplingRate(eventsIndex.getSampleRate()); client.prepareSearch(eventsIndex.getName()) .setTrackTotalHits(false) .setSize(0) @@ -176,87 +284,182 @@ private void searchEventGroupByStackTrace( .addAggregation(new MinAggregationBuilder("min_time").field("@timestamp")) .addAggregation(new MaxAggregationBuilder("max_time").field("@timestamp")) .addAggregation( + // We have nested aggregations, which in theory might blow up to MAX_TRACE_EVENTS_RESULT_SIZE^2 items + // reported. But we know that the total number of items is limited by our down-sampling to + // a maximum of ~100k (MAX_TRACE_EVENTS_RESULT_SIZE is higher to be on the safe side). new TermsAggregationBuilder("group_by") - // 'size' should be max 100k, but might be slightly more. Better be on the safe side. - .size(150_000) - .field("Stacktrace.id") + // 'size' specifies the max number of host ID we support per request. + .size(MAX_TRACE_EVENTS_RESULT_SIZE) + .field("host.id") // 'execution_hint: map' skips the slow building of ordinals that we don't need. // Especially with high cardinality fields, this makes aggregations really slow. .executionHint("map") - .subAggregation(new SumAggregationBuilder("count").field("Stacktrace.count")) + .subAggregation( + new TermsAggregationBuilder("group_by") + // 'size' should be max 100k, but might be slightly more. Better be on the safe side. + .size(MAX_TRACE_EVENTS_RESULT_SIZE) + .field("Stacktrace.id") + // 'execution_hint: map' skips the slow building of ordinals that we don't need. + // Especially with high cardinality fields, this makes aggregations really slow. + .executionHint("map") + .subAggregation(new SumAggregationBuilder("count").field("Stacktrace.count")) + ) ) .addAggregation(new SumAggregationBuilder("total_count").field("Stacktrace.count")) - .execute(ActionListener.wrap(searchResponse -> { - Min minTimeAgg = searchResponse.getAggregations().get("min_time"); - Max maxTimeAgg = searchResponse.getAggregations().get("max_time"); - long minTime = Math.round(minTimeAgg.value()); - long maxTime = Math.round(maxTimeAgg.value()); - Sum totalCountAgg = searchResponse.getAggregations().get("total_count"); - long totalCount = Math.round(totalCountAgg.value()); - Resampler resampler = new Resampler(request, eventsIndex.getSampleRate(), totalCount); - StringTerms stacktraces = searchResponse.getAggregations().get("group_by"); - // sort items lexicographically to access Lucene's term dictionary more efficiently when issuing an mget request. 
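For readability, here is the nested aggregation assembled in the hunk above, extracted on its own. It uses the same builder calls as the patch; only the wrapping class and the maxSize parameter (standing in for MAX_TRACE_EVENTS_RESULT_SIZE) are added for illustration:

    import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
    import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder;

    class GroupByHostSketch {
        static TermsAggregationBuilder groupByHostThenStacktrace(int maxSize) {
            // Outer terms over host.id, inner terms over Stacktrace.id, with a sum of
            // Stacktrace.count per (host, stacktrace) bucket; the 'map' execution hint
            // skips global-ordinal building on these high-cardinality fields.
            return new TermsAggregationBuilder("group_by")
                .size(maxSize)
                .field("host.id")
                .executionHint("map")
                .subAggregation(
                    new TermsAggregationBuilder("group_by")
                        .size(maxSize)
                        .field("Stacktrace.id")
                        .executionHint("map")
                        .subAggregation(new SumAggregationBuilder("count").field("Stacktrace.count"))
                );
        }
    }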
+ .execute(handleEventsGroupedByStackTrace(submitTask, client, responseBuilder, submitListener, searchResponse -> { + long totalCount = getAggValueAsLong(searchResponse, "total_count"); + + Resampler resampler = new Resampler(request, responseBuilder.getSamplingRate(), totalCount); + StringTerms hosts = searchResponse.getAggregations().get("group_by"); + + // Sort items lexicographically to access Lucene's term dictionary more efficiently when issuing an mget request. // The term dictionary is lexicographically sorted and using the same order reduces the number of page faults // needed to load it. long totalFinalCount = 0; - Map stackTraceEvents = new TreeMap<>(); - for (StringTerms.Bucket bucket : stacktraces.getBuckets()) { - Sum count = bucket.getAggregations().get("count"); - int finalCount = resampler.adjustSampleCount((int) count.value()); - totalFinalCount += finalCount; - if (finalCount > 0) { - stackTraceEvents.put(bucket.getKeyAsString(), finalCount); + List hostEventCounts = new ArrayList<>(MAX_TRACE_EVENTS_RESULT_SIZE); + Map stackTraceEvents = new TreeMap<>(); + for (StringTerms.Bucket hostBucket : hosts.getBuckets()) { + String hostid = hostBucket.getKeyAsString(); + + StringTerms stacktraces = hostBucket.getAggregations().get("group_by"); + for (StringTerms.Bucket stacktraceBucket : stacktraces.getBuckets()) { + Sum count = stacktraceBucket.getAggregations().get("count"); + int finalCount = resampler.adjustSampleCount((int) count.value()); + if (finalCount <= 0) { + continue; + } + totalFinalCount += finalCount; + + /* + The same stacktraces may come from different hosts (eventually from different datacenters). + We make a list of the triples here. As soon as we have the host metadata, we can calculate + the CO2 emission and the costs for each TraceEvent. + */ + String stackTraceID = stacktraceBucket.getKeyAsString(); + hostEventCounts.add(new HostEventCount(hostid, stackTraceID, finalCount)); + + TraceEvent event = stackTraceEvents.get(stackTraceID); + if (event == null) { + event = new TraceEvent(stackTraceID); + stackTraceEvents.put(stackTraceID, event); + } + event.count += finalCount; } } responseBuilder.setTotalSamples(totalFinalCount); + responseBuilder.setHostEventCounts(hostEventCounts); log.debug( "Found [{}] stacktrace events, resampled with sample rate [{}] to [{}] events ([{}] unique stack traces).", totalCount, - eventsIndex.getSampleRate(), + responseBuilder.getSamplingRate(), totalFinalCount, stackTraceEvents.size() ); - log.debug(() -> watch.report()); - if (stackTraceEvents.isEmpty() == false) { - responseBuilder.setStart(Instant.ofEpochMilli(minTime)); - responseBuilder.setEnd(Instant.ofEpochMilli(maxTime)); - responseBuilder.setStackTraceEvents(stackTraceEvents); - retrieveStackTraces(client, responseBuilder, submitListener); - } else { - submitListener.onResponse(responseBuilder.build()); - } - }, e -> { - // Data streams are created lazily; if even the "full" index does not exist no data have been indexed yet. - if (e instanceof IndexNotFoundException) { - log.debug("Index [{}] does not exist. 
Returning empty response.", ((IndexNotFoundException) e).getIndex()); - submitListener.onResponse(responseBuilder.build()); - } else { - submitListener.onFailure(e); - } + return stackTraceEvents; })); } + private ActionListener handleEventsGroupedByStackTrace( + Task submitTask, + Client client, + GetStackTracesResponseBuilder responseBuilder, + ActionListener submitListener, + Function> stacktraceCollector + ) { + StopWatch watch = new StopWatch("eventsGroupedByStackTrace"); + return ActionListener.wrap(searchResponse -> { + long minTime = getAggValueAsLong(searchResponse, "min_time"); + long maxTime = getAggValueAsLong(searchResponse, "max_time"); + + Map stackTraceEvents = stacktraceCollector.apply(searchResponse); + + log.debug(watch::report); + if (stackTraceEvents.isEmpty() == false) { + responseBuilder.setStart(Instant.ofEpochMilli(minTime)); + responseBuilder.setEnd(Instant.ofEpochMilli(maxTime)); + responseBuilder.setStackTraceEvents(stackTraceEvents); + retrieveStackTraces(submitTask, client, responseBuilder, submitListener); + } else { + submitListener.onResponse(responseBuilder.build()); + } + }, e -> { + // Data streams are created lazily; if even the "full" index does not exist no data have been indexed yet. + if (e instanceof IndexNotFoundException) { + log.debug("Index [{}] does not exist. Returning empty response.", ((IndexNotFoundException) e).getIndex()); + submitListener.onResponse(responseBuilder.build()); + } else { + submitListener.onFailure(e); + } + }); + } + + private static long getAggValueAsLong(SearchResponse searchResponse, String field) { + InternalNumericMetricsAggregation.SingleValue x = searchResponse.getAggregations().get(field); + return Math.round(x.value()); + } + private void retrieveStackTraces( + Task submitTask, Client client, GetStackTracesResponseBuilder responseBuilder, ActionListener submitListener ) { + if (mayNotifyOfCancellation(submitTask, submitListener)) { + return; + } List eventIds = new ArrayList<>(responseBuilder.getStackTraceEvents().keySet()); List> slicedEventIds = sliced(eventIds, desiredSlices); ClusterState clusterState = clusterService.state(); List indices = resolver.resolve(clusterState, "profiling-stacktraces", responseBuilder.getStart(), responseBuilder.getEnd()); + + // Build a set of unique host IDs. + Set uniqueHostIDs = new HashSet<>(responseBuilder.hostEventCounts.size()); + for (HostEventCount hec : responseBuilder.hostEventCounts) { + uniqueHostIDs.add(hec.hostID); + } + StackTraceHandler handler = new StackTraceHandler( + submitTask, clusterState, client, responseBuilder, submitListener, eventIds.size(), - // we need to expect a set of slices for each resolved index - slicedEventIds.size() * indices.size() + // We need to expect a set of slices for each resolved index, plus one for the host metadata. + slicedEventIds.size() * indices.size() + (uniqueHostIDs.isEmpty() ? 0 : 1), + uniqueHostIDs.size() ); for (List slice : slicedEventIds) { - mget(client, indices, slice, ActionListener.wrap(handler::onResponse, submitListener::onFailure)); + mget(client, indices, slice, ActionListener.wrap(handler::onStackTraceResponse, submitListener::onFailure)); } + + if (uniqueHostIDs.isEmpty()) { + return; + } + + // Retrieve the host metadata in parallel. Assume low-cardinality and do not split the query. 
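retrieveStackTraces() above splits the event IDs with the package-private sliced() helper before issuing parallel mgets (its signature appears in a later hunk). One way such a helper could look; this is a sketch, not necessarily the actual implementation:

    import java.util.ArrayList;
    import java.util.List;

    public class SliceSketch {
        // Split c into at most `slices` contiguous chunks of roughly equal size.
        static <T> List<List<T>> sliced(List<T> c, int slices) {
            List<List<T>> result = new ArrayList<>(slices);
            int sliceSize = Math.max(1, (int) Math.ceil((double) c.size() / slices));
            for (int from = 0; from < c.size(); from += sliceSize) {
                result.add(c.subList(from, Math.min(from + sliceSize, c.size())));
            }
            return result;
        }

        public static void main(String[] args) {
            System.out.println(sliced(List.of(1, 2, 3, 4, 5), 2)); // [[1, 2, 3], [4, 5]]
        }
    }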
+ client.prepareSearch("profiling-hosts") + .setTrackTotalHits(false) + .setQuery( + QueryBuilders.boolQuery() + .filter( + // Only return hosts that have been active during the requested time period + QueryBuilders.rangeQuery("@timestamp") + // HAs write host metadata every 6h, so use start minus 6h. + .gte(responseBuilder.getStart().minus(Duration.ofHours(6L)).toEpochMilli()) + .lt(responseBuilder.getEnd().toEpochMilli()) + .format("epoch_millis") + ) + .filter(QueryBuilders.termsQuery("host.id", uniqueHostIDs)) + ) + .setCollapse( + // Collapse on host.id to get a single host metadata for each host. + new CollapseBuilder("host.id") + ) + // Sort descending by timestamp to get the latest host metadata for each host. + .addSort(new FieldSortBuilder("@timestamp").order(SortOrder.DESC)) + .setFrom(0) + .execute(ActionListener.wrap(handler::onHostsResponse, submitListener::onFailure)); } // package private for testing @@ -275,7 +478,8 @@ static List> sliced(List c, int slices) { } private class StackTraceHandler { - private final AtomicInteger remainingSlices; + private final AtomicInteger expectedResponses; + private final Task submitTask; private final ClusterState clusterState; private final Client client; private final GetStackTracesResponseBuilder responseBuilder; @@ -288,24 +492,30 @@ private class StackTraceHandler { private final Set executableIds = new ConcurrentSkipListSet<>(); private final AtomicInteger totalFrames = new AtomicInteger(); private final StopWatch watch = new StopWatch("retrieveStackTraces"); + private final StopWatch hostsWatch = new StopWatch("retrieveHostMetadata"); + private final Map hostMetadata; private StackTraceHandler( + Task submitTask, ClusterState clusterState, Client client, GetStackTracesResponseBuilder responseBuilder, ActionListener submitListener, int stackTraceCount, - int slices + int expectedResponses, + int expectedHosts ) { + this.submitTask = submitTask; this.clusterState = clusterState; this.stackTracePerId = new ConcurrentHashMap<>(stackTraceCount); - this.remainingSlices = new AtomicInteger(slices); + this.expectedResponses = new AtomicInteger(expectedResponses); this.client = client; this.responseBuilder = responseBuilder; this.submitListener = submitListener; + this.hostMetadata = new HashMap<>(expectedHosts); } - public void onResponse(MultiGetResponse multiGetItemResponses) { + public void onStackTraceResponse(MultiGetResponse multiGetItemResponses) { for (MultiGetItemResponse trace : multiGetItemResponses) { if (trace.isFailed()) { submitListener.onFailure(trace.getFailure().getFailure()); @@ -325,7 +535,65 @@ public void onResponse(MultiGetResponse multiGetItemResponses) { } } } - if (this.remainingSlices.decrementAndGet() == 0) { + mayFinish(); + } + + public void onHostsResponse(SearchResponse searchResponse) { + SearchHit[] hits = searchResponse.getHits().getHits(); + for (SearchHit hit : hits) { + HostMetadata host = HostMetadata.fromSource(hit.getSourceAsMap()); + hostMetadata.put(host.hostID, host); + } + log.debug(hostsWatch::report); + log.debug("Got [{}] host metadata items", hostMetadata.size()); + + mayFinish(); + } + + public void calculateCO2AndCosts() { + // Do the CO2 and cost calculation in parallel to waiting for frame metadata. 
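The profiling-hosts search above combines a collapse on host.id with a descending @timestamp sort to select the newest metadata document per host. The same selection expressed over an in-memory list, with a hypothetical HostDoc type in place of search hits (the method body continues below this note):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class LatestPerHostSketch {
        record HostDoc(String hostId, long timestamp, String payload) {} // hypothetical

        public static void main(String[] args) {
            List<HostDoc> docs = List.of(
                new HostDoc("h1", 100, "old"),
                new HostDoc("h1", 200, "new"),
                new HostDoc("h2", 150, "only")
            );
            // Keep the most recent document per host, mirroring collapse(host.id)
            // combined with a descending @timestamp sort.
            Map<String, HostDoc> latest = new HashMap<>();
            for (HostDoc d : docs) {
                latest.merge(d.hostId(), d, (a, b) -> a.timestamp() >= b.timestamp() ? a : b);
            }
            System.out.println(latest.get("h1").payload()); // new
            System.out.println(latest.get("h2").payload()); // only
        }
    }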
+ StopWatch watch = new StopWatch("calculateCO2AndCosts"); + CO2Calculator co2Calculator = new CO2Calculator( + instanceTypeService, + hostMetadata, + responseBuilder.getRequestedDuration(), + responseBuilder.customCO2PerKWH, + responseBuilder.customDatacenterPUE, + responseBuilder.customPerCoreWattX86, + responseBuilder.customPerCoreWattARM64 + ); + CostCalculator costCalculator = new CostCalculator( + instanceTypeService, + hostMetadata, + responseBuilder.getRequestedDuration(), + responseBuilder.awsCostFactor, + responseBuilder.customCostPerCoreHour + ); + Map events = responseBuilder.stackTraceEvents; + List missingStackTraces = new ArrayList<>(); + for (HostEventCount hec : responseBuilder.hostEventCounts) { + TraceEvent event = events.get(hec.stacktraceID); + if (event == null) { + // If this happens, hostEventsCounts and events are out of sync, which indicates a bug. + missingStackTraces.add(hec.stacktraceID); + continue; + } + event.annualCO2Tons += co2Calculator.getAnnualCO2Tons(hec.hostID, hec.count); + event.annualCostsUSD += costCalculator.annualCostsUSD(hec.hostID, hec.count); + } + log.debug(watch::report); + + if (missingStackTraces.isEmpty() == false) { + StringBuilder stringBuilder = new StringBuilder(); + Strings.collectionToDelimitedStringWithLimit(missingStackTraces, ",", "", "", 80, stringBuilder); + log.warn("CO2/cost calculator: missing trace events for StackTraceID [" + stringBuilder + "]."); + } + } + + public void mayFinish() { + if (expectedResponses.decrementAndGet() == 0) { + calculateCO2AndCosts(); + responseBuilder.setStackTraces(stackTracePerId); responseBuilder.setTotalFrames(totalFrames.get()); log.debug( @@ -334,8 +602,9 @@ public void onResponse(MultiGetResponse multiGetItemResponses) { stackFrameIds.size(), executableIds.size() ); - log.debug(() -> watch.report()); + log.debug(watch::report); retrieveStackTraceDetails( + submitTask, clusterState, client, responseBuilder, @@ -348,6 +617,7 @@ public void onResponse(MultiGetResponse multiGetItemResponses) { } private void retrieveStackTraceDetails( + Task submitTask, ClusterState clusterState, Client client, GetStackTracesResponseBuilder responseBuilder, @@ -355,6 +625,10 @@ private void retrieveStackTraceDetails( List executableIds, ActionListener submitListener ) { + if (mayNotifyOfCancellation(submitTask, submitListener)) { + return; + } + List> slicedStackFrameIds = sliced(stackFrameIds, desiredDetailSlices); List> slicedExecutableIds = sliced(executableIds, desiredDetailSlices); List stackFrameIndices = resolver.resolve( @@ -423,7 +697,7 @@ private DetailsHandler( this.submitListener = submitListener; this.executables = new ConcurrentHashMap<>(executableCount); this.stackFrames = new ConcurrentHashMap<>(stackFrameCount); - // for deciding when we're finished it is irrelevant where a slice originated so we can + // for deciding when we're finished it is irrelevant where a slice originated, so we can // simplify state handling by treating them equally. 
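calculateCO2AndCosts() above folds per-host factors into per-stacktrace totals via the (host, stacktrace, count) triples collected earlier. A toy model of that attribution step; the factors here are made up, whereas the patch derives real values from host metadata via CO2Calculator and CostCalculator:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class AttributionSketch {
        record HostEventCount(String hostId, String stacktraceId, int count) {}

        public static void main(String[] args) {
            // Hypothetical per-host CO2-per-sample factors.
            Map<String, Double> co2PerSample = Map.of("h1", 0.5, "h2", 0.25);
            List<HostEventCount> hostEventCounts = List.of(
                new HostEventCount("h1", "traceA", 10),
                new HostEventCount("h2", "traceA", 5),
                new HostEventCount("h2", "traceB", 3)
            );
            // The same stacktrace may be seen on several hosts, so totals accumulate.
            Map<String, Double> annualCO2PerTrace = new HashMap<>();
            for (HostEventCount hec : hostEventCounts) {
                double co2 = co2PerSample.get(hec.hostId()) * hec.count();
                annualCO2PerTrace.merge(hec.stacktraceId(), co2, Double::sum);
            }
            System.out.println(annualCO2PerTrace.get("traceA")); // 6.25 (5.0 + 1.25)
            System.out.println(annualCO2PerTrace.get("traceB")); // 0.75
        }
    }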
this.expectedSlices = new AtomicInteger(expectedExecutableSlices + expectedStackFrameSlices); } @@ -479,7 +753,7 @@ public void mayFinish() { builder.setExecutables(executables); builder.setStackFrames(stackFrames); log.debug("retrieveStackTraceDetails found [{}] stack frames, [{}] executables.", stackFrames.size(), executables.size()); - log.debug(() -> watch.report()); + log.debug(watch::report); submitListener.onResponse(builder.build()); } } @@ -501,9 +775,17 @@ private static class GetStackTracesResponseBuilder { private int totalFrames; private Map stackFrames; private Map executables; - private Map stackTraceEvents; + private Map stackTraceEvents; + private List hostEventCounts; private double samplingRate; private long totalSamples; + private Double requestedDuration; + private Double awsCostFactor; + private Double customCO2PerKWH; + private Double customDatacenterPUE; + private Double customPerCoreWattX86; + private Double customPerCoreWattARM64; + private Double customCostPerCoreHour; public void setStackTraces(Map stackTraces) { this.stackTraces = stackTraces; @@ -537,23 +819,80 @@ public void setExecutables(Map executables) { this.executables = executables; } - public void setStackTraceEvents(Map stackTraceEvents) { + public void setStackTraceEvents(Map stackTraceEvents) { this.stackTraceEvents = stackTraceEvents; } - public Map getStackTraceEvents() { + public void setHostEventCounts(List hostEventCounts) { + this.hostEventCounts = hostEventCounts; + } + + public Map getStackTraceEvents() { return stackTraceEvents; } - public void setSampleRate(double rate) { + public void setSamplingRate(double rate) { this.samplingRate = rate; } + public double getSamplingRate() { + return samplingRate; + } + + public void setRequestedDuration(Double requestedDuration) { + this.requestedDuration = requestedDuration; + } + + public double getRequestedDuration() { + if (requestedDuration != null) { + return requestedDuration; + } + // If "requested_duration" wasn't specified, we use the time range from the query response. + return end.getEpochSecond() - start.getEpochSecond(); + } + + public void setAwsCostFactor(Double awsCostFactor) { + this.awsCostFactor = awsCostFactor; + } + + public void setCustomCO2PerKWH(Double customCO2PerKWH) { + this.customCO2PerKWH = customCO2PerKWH; + } + + public void setCustomDatacenterPUE(Double customDatacenterPUE) { + this.customDatacenterPUE = customDatacenterPUE; + } + + public void setCustomPerCoreWattX86(Double customPerCoreWattX86) { + this.customPerCoreWattX86 = customPerCoreWattX86; + } + + public void setCustomPerCoreWattARM64(Double customPerCoreWattARM64) { + this.customPerCoreWattARM64 = customPerCoreWattARM64; + } + + public void setCustomCostPerCoreHour(Double customCostPerCoreHour) { + this.customCostPerCoreHour = customCostPerCoreHour; + } + public void setTotalSamples(long totalSamples) { this.totalSamples = totalSamples; } public GetStackTracesResponse build() { + // Merge the TraceEvent data into the StackTraces. 
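A few lines up, getRequestedDuration() falls back to the time range observed in the query response when no requested_duration was supplied. The same logic in isolation, lifted directly from the hunk (only the wrapper class and example instants are added):

    import java.time.Instant;

    public class DurationFallbackSketch {
        // Prefer the explicit requested_duration; otherwise derive the duration
        // in seconds from the query's start and end timestamps.
        static double requestedDuration(Double requested, Instant start, Instant end) {
            if (requested != null) {
                return requested;
            }
            return end.getEpochSecond() - start.getEpochSecond();
        }

        public static void main(String[] args) {
            Instant start = Instant.parse("2023-10-19T15:00:00Z");
            Instant end = Instant.parse("2023-10-19T16:00:00Z");
            System.out.println(requestedDuration(null, start, end));   // 3600.0
            System.out.println(requestedDuration(1800.0, start, end)); // 1800.0
        }
    }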
+ if (stackTraces != null) { + for (Map.Entry entry : stackTraces.entrySet()) { + String stacktraceID = entry.getKey(); + TraceEvent event = stackTraceEvents.get(stacktraceID); + if (event != null) { + StackTrace stackTrace = entry.getValue(); + stackTrace.count = event.count; + stackTrace.annualCO2Tons = event.annualCO2Tons; + stackTrace.annualCostsUSD = event.annualCostsUSD; + } + } + } return new GetStackTracesResponse( stackTraces, stackFrames, @@ -565,4 +904,6 @@ public GetStackTracesResponse build() { ); } } + + record HostEventCount(String hostID, String stacktraceID, int count) {} } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java index 6c4a7d455539f..ca0571a5a634f 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java @@ -16,6 +16,8 @@ import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -24,6 +26,10 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -39,6 +45,7 @@ public class TransportGetStatusAction extends TransportMasterNodeAction 0L) { + return true; + } + } + } + } + return false; } private boolean getValue(ClusterState state, Setting setting) { diff --git a/x-pack/plugin/profiling/src/main/resources/profiling-costs.json.gz b/x-pack/plugin/profiling/src/main/resources/profiling-costs.json.gz new file mode 100644 index 0000000000000..e54b3175c7237 Binary files /dev/null and b/x-pack/plugin/profiling/src/main/resources/profiling-costs.json.gz differ diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CO2CalculatorTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CO2CalculatorTests.java new file mode 100644 index 0000000000000..f2a21bc8b9cf5 --- /dev/null +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CO2CalculatorTests.java @@ -0,0 +1,100 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.profiling; + +import org.elasticsearch.test.ESTestCase; + +import java.util.Map; + +public class CO2CalculatorTests extends ESTestCase { + private static final String HOST_ID_A = "1110256254710195391"; + private static final String HOST_ID_B = "2220256254710195392"; + private static final String HOST_ID_C = "3330256254710195393"; + private static final String HOST_ID_D = "4440256254710195394"; + + public void testCreateFromRegularSource() { + InstanceTypeService instanceTypeService = new InstanceTypeService(); + instanceTypeService.load(); + + // tag::noformat + Map hostsTable = Map.ofEntries( + Map.entry(HOST_ID_A, + // known datacenter and instance type + new HostMetadata(HOST_ID_A, + new InstanceType( + "aws", + "eu-west-1", + "c5n.xlarge" + ), + "" // Doesn't matter if datacenter is known. + ) + ), + Map.entry(HOST_ID_B, + new HostMetadata(HOST_ID_B, + // unknown datacenter, known provider and region, x86_64 + new InstanceType( + "gcp", + "europe-west1", + "" // Doesn't matter for unknown datacenters. + ), + "x86_64" + ) + ), + Map.entry(HOST_ID_C, + new HostMetadata(HOST_ID_C, + // unknown datacenter, known provider and region, aarch64 + new InstanceType( + "azure", + "northcentralus", + "" // Doesn't matter for unknown datacenters. + ), + "aarch64" + ) + ), + Map.entry(HOST_ID_D, + new HostMetadata(HOST_ID_D, + // unknown datacenter, unknown provider and region, aarch64 + new InstanceType( + "on-prem-provider", + "on-prem-region", + "" // Doesn't matter for unknown datacenters. + ), + "aarch64" + ) + ) + ); + // end::noformat + + double samplingDurationInSeconds = 1_800.0d; // 30 minutes + long samples = 100_000L; // 100k samples + double annualCoreHours = CostCalculator.annualCoreHours(samplingDurationInSeconds, samples, 20.0d); + CO2Calculator co2Calculator = new CO2Calculator(instanceTypeService, hostsTable, samplingDurationInSeconds, null, null, null, null); + + checkCO2Calculation(co2Calculator.getAnnualCO2Tons(HOST_ID_A, samples), annualCoreHours, 0.000002213477d); + checkCO2Calculation(co2Calculator.getAnnualCO2Tons(HOST_ID_B, samples), annualCoreHours, 1.1d, 0.00004452d, 7.0d); + checkCO2Calculation(co2Calculator.getAnnualCO2Tons(HOST_ID_C, samples), annualCoreHours, 1.185d, 0.000410608d, 2.8d); + checkCO2Calculation(co2Calculator.getAnnualCO2Tons(HOST_ID_D, samples), annualCoreHours, 1.7d, 0.000379069d, 2.8d); + } + + private void checkCO2Calculation(double calculatedAnnualCO2Tons, double annualCoreHours, double co2Factor) { + double expectedAnnualCO2Tons = annualCoreHours * co2Factor; + assertEquals(expectedAnnualCO2Tons, calculatedAnnualCO2Tons, 0.000000000001d); + } + + private void checkCO2Calculation( + double calculatedAnnualCO2Tons, + double annualCoreHours, + double datacenterPUE, + double co2TonsPerKWH, + double wattsPerCore + ) { + double kiloWattsPerCore = wattsPerCore / 1000.0d; + double expectedAnnualCO2Tons = annualCoreHours * datacenterPUE * co2TonsPerKWH * kiloWattsPerCore; + assertEquals(expectedAnnualCO2Tons, calculatedAnnualCO2Tons, 0.000000000001d); + } +} diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CostCalculatorTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CostCalculatorTests.java new file mode 100644 index 0000000000000..f42ad1188693b --- /dev/null +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CostCalculatorTests.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +import org.elasticsearch.test.ESTestCase; + +import java.util.Map; + +public class CostCalculatorTests extends ESTestCase { + private static final String HOST_ID_A = "1110256254710195391"; + private static final String HOST_ID_B = "2220256254710195392"; + + public void testCreateFromRegularSource() { + InstanceTypeService instanceTypeService = new InstanceTypeService(); + instanceTypeService.load(); + + // tag::noformat + Map hostsTable = Map.ofEntries( + Map.entry(HOST_ID_A, + // known datacenter + new HostMetadata(HOST_ID_A, + new InstanceType( + "aws", + "eu-west-1", + "c5n.xlarge" + ), + "" // Doesn't matter for cost calculation. + ) + ), + Map.entry(HOST_ID_B, + new HostMetadata(HOST_ID_B, + // unknown datacenter + new InstanceType( + "on-prem-provider", + "on-prem-region", + "on-prem-instance-type" + ), + "" // Doesn't matter for cost calculation. + ) + ) + ); + // end::noformat + + double samplingDurationInSeconds = 1_800.0d; // 30 minutes + long samples = 100_000L; // 100k samples + double annualCoreHours = CostCalculator.annualCoreHours(samplingDurationInSeconds, samples, 20.0d); + CostCalculator costCalculator = new CostCalculator(instanceTypeService, hostsTable, samplingDurationInSeconds, null, null); + + // Checks whether the cost calculation is based on the pre-calculated lookup data. + checkCostCalculation(costCalculator.annualCostsUSD(HOST_ID_A, samples), annualCoreHours, 0.061d); + + // Checks whether the cost calculation is based on the default cost factor. + checkCostCalculation(costCalculator.annualCostsUSD(HOST_ID_B, samples), annualCoreHours, 0.0425d); + } + + private void checkCostCalculation(double calculatedAnnualCostsUSD, double annualCoreHours, double costFactor) { + double expectedAnnualCostsUSD = annualCoreHours * costFactor; + assertEquals(expectedAnnualCostsUSD, calculatedAnnualCostsUSD, 0.00000001d); + } +} diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesRequestTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesRequestTests.java index 6c73d2eaf94ab..bb4973e75eec8 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesRequestTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesRequestTests.java @@ -22,20 +22,42 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; +import java.util.List; +import java.util.Set; import static java.util.Collections.emptyList; public class GetStackTracesRequestTests extends ESTestCase { public void testSerialization() throws IOException { - Integer sampleSize = randomBoolean() ? randomIntBetween(0, Integer.MAX_VALUE) : null; + Integer sampleSize = randomIntBetween(1, Integer.MAX_VALUE); + Double requestedDuration = randomBoolean() ? randomDoubleBetween(0.001d, Double.MAX_VALUE, true) : null; + Double awsCostFactor = randomBoolean() ? randomDoubleBetween(0.1d, 5.0d, true) : null; + Double customCO2PerKWH = randomBoolean() ? randomDoubleBetween(0.000001d, 0.001d, true) : null; + Double datacenterPUE = randomBoolean() ? randomDoubleBetween(1.0d, 3.0d, true) : null; + Double perCoreWattX86 = randomBoolean() ? 
randomDoubleBetween(0.01d, 20.0d, true) : null; + Double perCoreWattARM64 = randomBoolean() ? randomDoubleBetween(0.01d, 20.0d, true) : null; + Double customCostPerCoreHour = randomBoolean() ? randomDoubleBetween(0.001d, 1000.0d, true) : null; QueryBuilder query = randomBoolean() ? new BoolQueryBuilder() : null; - GetStackTracesRequest request = new GetStackTracesRequest(sampleSize, query); + GetStackTracesRequest request = new GetStackTracesRequest( + sampleSize, + requestedDuration, + awsCostFactor, + query, + null, + null, + customCO2PerKWH, + datacenterPUE, + perCoreWattX86, + perCoreWattARM64, + customCostPerCoreHour + ); try (BytesStreamOutput out = new BytesStreamOutput()) { request.writeTo(out); try (NamedWriteableAwareStreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), writableRegistry())) { GetStackTracesRequest deserialized = new GetStackTracesRequest(in); assertEquals(sampleSize, deserialized.getSampleSize()); + assertEquals(awsCostFactor, deserialized.getAwsCostFactor()); assertEquals(query, deserialized.getQuery()); } } @@ -46,6 +68,7 @@ public void testParseValidXContent() throws IOException { //tag::noformat .startObject() .field("sample_size", 500) + .field("requested_duration", 100.54d) .startObject("query") .startObject("range") .startObject("@timestamp") @@ -61,11 +84,44 @@ public void testParseValidXContent() throws IOException { request.parseXContent(content); assertEquals(Integer.valueOf(500), request.getSampleSize()); + assertEquals(Double.valueOf(100.54d), request.getRequestedDuration()); // a basic check suffices here assertEquals("@timestamp", ((RangeQueryBuilder) request.getQuery()).fieldName()); } } + public void testParseValidXContentWithCustomIndex() throws IOException { + try (XContentParser content = createParser(XContentFactory.jsonBuilder() + //tag::noformat + .startObject() + .field("sample_size", 2000) + .field("indices", "my-traces") + .field("stacktrace_ids", "stacktraces") + .startObject("query") + .startObject("range") + .startObject("@timestamp") + .field("gte", "2022-10-05") + .endObject() + .endObject() + .endObject() + .endObject() + //end::noformat + )) { + + GetStackTracesRequest request = new GetStackTracesRequest(); + request.parseXContent(content); + + assertEquals(Integer.valueOf(2000), request.getSampleSize()); + assertEquals("my-traces", request.getIndices()); + assertEquals("stacktraces", request.getStackTraceIds()); + // a basic check suffices here + assertEquals("@timestamp", ((RangeQueryBuilder) request.getQuery()).fieldName()); + + // Expect the default values + assertEquals(null, request.getRequestedDuration()); + } + } + public void testParseXContentUnrecognizedField() throws IOException { try (XContentParser content = createParser(XContentFactory.jsonBuilder() //tag::noformat @@ -89,6 +145,90 @@ public void testParseXContentUnrecognizedField() throws IOException { } } + public void testValidateWrongSampleSize() { + GetStackTracesRequest request = new GetStackTracesRequest( + randomIntBetween(Integer.MIN_VALUE, 0), + 1.0d, + 1.0d, + null, + null, + null, + null, + null, + null, + null, + null + ); + List validationErrors = request.validate().validationErrors(); + assertEquals(1, validationErrors.size()); + assertTrue(validationErrors.get(0).contains("[sample_size] must be greater than 0,")); + } + + public void testValidateStacktraceWithoutIndices() { + GetStackTracesRequest request = new GetStackTracesRequest( + 1, + 1.0d, + 1.0d, + null, + null, + randomAlphaOfLength(3), + null, + null, + null, + null, 
+ null + ); + List validationErrors = request.validate().validationErrors(); + assertEquals(1, validationErrors.size()); + assertEquals("[stacktrace_ids] must not be set", validationErrors.get(0)); + } + + public void testValidateIndicesWithoutStacktraces() { + GetStackTracesRequest request = new GetStackTracesRequest( + null, + 1.0d, + 1.0d, + null, + randomAlphaOfLength(5), + randomFrom("", null), + null, + null, + null, + null, + null + ); + List validationErrors = request.validate().validationErrors(); + assertEquals(1, validationErrors.size()); + assertEquals("[stacktrace_ids] is mandatory", validationErrors.get(0)); + } + + public void testConsidersCustomIndicesInRelatedIndices() { + String customIndex = randomAlphaOfLength(5); + GetStackTracesRequest request = new GetStackTracesRequest( + 1, + 1.0d, + 1.0d, + null, + customIndex, + randomAlphaOfLength(3), + null, + null, + null, + null, + null + ); + String[] indices = request.indices(); + assertEquals(4, indices.length); + assertTrue("custom index not contained in indices list", Set.of(indices).contains(customIndex)); + } + + public void testConsidersDefaultIndicesInRelatedIndices() { + String customIndex = randomAlphaOfLength(5); + GetStackTracesRequest request = new GetStackTracesRequest(1, 1.0d, 1.0d, null, null, null, null, null, null, null, null); + String[] indices = request.indices(); + assertEquals(15, indices.length); + } + @Override protected NamedXContentRegistry xContentRegistry() { // to register the query parser diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesResponseTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesResponseTests.java index 566c4d24fc088..7455c2b30e13d 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesResponseTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesResponseTests.java @@ -26,7 +26,15 @@ protected GetStackTracesResponse createTestInstance() { Map stackTraces = randomNullable( Map.of( "QjoLteG7HX3VUUXr-J4kHQ", - new StackTrace(List.of(1083999), List.of("QCCDqjSg3bMK1C4YRK6Tiw"), List.of("QCCDqjSg3bMK1C4YRK6TiwAAAAAAEIpf"), List.of(2)) + new StackTrace( + List.of(1083999), + List.of("QCCDqjSg3bMK1C4YRK6Tiw"), + List.of("QCCDqjSg3bMK1C4YRK6TiwAAAAAAEIpf"), + List.of(2), + 0.3d, + 2.7d, + 1 + ) ) ); int maxInlined = randomInt(5); @@ -42,8 +50,9 @@ protected GetStackTracesResponse createTestInstance() { ) ); Map executables = randomNullable(Map.of("QCCDqjSg3bMK1C4YRK6Tiw", "libc.so.6")); - int totalSamples = randomIntBetween(1, 200); - Map stackTraceEvents = randomNullable(Map.of(randomAlphaOfLength(12), totalSamples)); + long totalSamples = randomLongBetween(1L, 200L); + String stackTraceID = randomAlphaOfLength(12); + Map stackTraceEvents = randomNullable(Map.of(stackTraceID, new TraceEvent(stackTraceID, totalSamples))); return new GetStackTracesResponse(stackTraces, stackFrames, executables, stackTraceEvents, totalFrames, 1.0, totalSamples); } diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/HostMetadataTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/HostMetadataTests.java new file mode 100644 index 0000000000000..0359357004687 --- /dev/null +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/HostMetadataTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; +import java.util.Map; + +public class HostMetadataTests extends ESTestCase { + public void testCreateFromRegularSource() { + final String hostID = "1440256254710195396"; + final String machine = "x86_64"; + final String provider = "aws"; + final String region = "eu-west-1"; + final String instanceType = "md5x.large"; + + // tag::noformat + HostMetadata host = HostMetadata.fromSource( + Map.of( + "host.id", hostID, + "profiling.host.machine", machine, + "profiling.host.tags", Arrays.asList( + "cloud_provider:"+provider, "cloud_environment:qa", "cloud_region:"+region), + "ec2.instance_type", instanceType + ) + ); + // end::noformat + + assertEquals(hostID, host.hostID); + assertEquals(machine, host.profilingHostMachine); + assertEquals(provider, host.instanceType.provider); + assertEquals(region, host.instanceType.region); + assertEquals(instanceType, host.instanceType.name); + } +} diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ResamplerTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ResamplerTests.java index 07d9b60b31ff7..4903fec05261f 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ResamplerTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ResamplerTests.java @@ -30,7 +30,19 @@ public void testNoResamplingNoSampleRateAdjustment() { int requestedSamples = 20_000; int actualTotalSamples = 10_000; - GetStackTracesRequest request = new GetStackTracesRequest(requestedSamples, null); + GetStackTracesRequest request = new GetStackTracesRequest( + requestedSamples, + 1.0d, + 1.0d, + null, + null, + null, + null, + null, + null, + null, + null + ); request.setAdjustSampleCount(false); Resampler resampler = createResampler(request, sampleRate, actualTotalSamples); @@ -45,7 +57,19 @@ public void testNoResamplingButAdjustSampleRate() { int requestedSamples = 20_000; int actualTotalSamples = 10_000; - GetStackTracesRequest request = new GetStackTracesRequest(requestedSamples, null); + GetStackTracesRequest request = new GetStackTracesRequest( + requestedSamples, + 1.0d, + 1.0d, + null, + null, + null, + null, + null, + null, + null, + null + ); request.setAdjustSampleCount(true); Resampler resampler = createResampler(request, sampleRate, actualTotalSamples); @@ -60,7 +84,19 @@ public void testResamplingNoSampleRateAdjustment() { int requestedSamples = 20_000; int actualTotalSamples = 40_000; - GetStackTracesRequest request = new GetStackTracesRequest(requestedSamples, null); + GetStackTracesRequest request = new GetStackTracesRequest( + requestedSamples, + 1.0d, + 1.0d, + null, + null, + null, + null, + null, + null, + null, + null + ); request.setAdjustSampleCount(false); Resampler resampler = createResampler(request, sampleRate, actualTotalSamples); @@ -78,9 +114,18 @@ public void testResamplingNoSampleRateAdjustmentWithQuery() { GetStackTracesRequest request = new GetStackTracesRequest( requestedSamples, + 1.0d, + 1.0d, new BoolQueryBuilder().filter( - new RangeQueryBuilder("@timestamp").lt("2023-10-19 15:33:00").gte("2023-10-19 15:31:52").format("yyyy-MM-dd HH:mm:ss") - ) + new 
RangeQueryBuilder("@timestamp").lt("2023-10-19 15:33:00").gte("2023-09-20 15:31:52").format("yyyy-MM-dd HH:mm:ss") + ), + null, + null, + null, + null, + null, + null, + null ); request.setAdjustSampleCount(false); @@ -96,7 +141,19 @@ public void testResamplingAndSampleRateAdjustment() { int requestedSamples = 20_000; int actualTotalSamples = 40_000; - GetStackTracesRequest request = new GetStackTracesRequest(requestedSamples, null); + GetStackTracesRequest request = new GetStackTracesRequest( + requestedSamples, + 1.0d, + 1.0d, + null, + null, + null, + null, + null, + null, + null, + null + ); request.setAdjustSampleCount(true); Resampler resampler = createResampler(request, sampleRate, actualTotalSamples); diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/RestGetStackTracesActionTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/RestGetStackTracesActionTests.java index 4b6aef4f544f9..171b43e4be4d5 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/RestGetStackTracesActionTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/RestGetStackTracesActionTests.java @@ -39,7 +39,7 @@ public void testPrepareEmptyRequest() { verifyingClient.setExecuteLocallyVerifier((actionType, request) -> { assertThat(request, instanceOf(GetStackTracesRequest.class)); GetStackTracesRequest getStackTracesRequest = (GetStackTracesRequest) request; - assertThat(getStackTracesRequest.getSampleSize(), nullValue()); + assertThat(getStackTracesRequest.getSampleSize(), is(20_000)); // expect the default value assertThat(getStackTracesRequest.getQuery(), nullValue()); executeCalled.set(true); return new GetStackTracesResponse( @@ -48,8 +48,8 @@ public void testPrepareEmptyRequest() { Collections.emptyMap(), Collections.emptyMap(), 0, - 1.0, - 0 + 1.0d, + 0L ); }); RestRequest profilingRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST) @@ -65,7 +65,7 @@ public void testPrepareParameterizedRequest() { verifyingClient.setExecuteLocallyVerifier((actionType, request) -> { assertThat(request, instanceOf(GetStackTracesRequest.class)); GetStackTracesRequest getStackTracesRequest = (GetStackTracesRequest) request; - assertThat(getStackTracesRequest.getSampleSize(), is(10000)); + assertThat(getStackTracesRequest.getSampleSize(), is(10_000)); assertThat(getStackTracesRequest.getQuery(), notNullValue(QueryBuilder.class)); executeCalled.set(true); return new GetStackTracesResponse( @@ -74,8 +74,8 @@ public void testPrepareParameterizedRequest() { Collections.emptyMap(), Collections.emptyMap(), 0, - 0.0, - 0 + 0.0d, + 0L ); }); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST) @@ -83,6 +83,7 @@ public void testPrepareParameterizedRequest() { .withContent(new BytesArray(""" { "sample_size": 10000, + "requested_duration": 3600, "query": { "bool": { "filter": [ diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/StackTraceTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/StackTraceTests.java index 768ff4ae5f647..4765d23bd30d0 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/StackTraceTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/StackTraceTests.java @@ -87,6 +87,9 @@ public void testToXContent() throws IOException { .array("file_ids", "AAAAAAAAAAUAAAAAAAAB3g") 
.array("frame_ids", "AAAAAAAAAAUAAAAAAAAB3gAAAAAAD67u") .array("type_ids", new int[] { 2 }) + .field("annual_co2_tons", 0.3d) + .field("annual_costs_usd", 2.7d) + .field("count", 1) .endObject(); XContentBuilder actualRequest = XContentFactory.contentBuilder(contentType); @@ -94,7 +97,10 @@ public void testToXContent() throws IOException { List.of(1027822), List.of("AAAAAAAAAAUAAAAAAAAB3g"), List.of("AAAAAAAAAAUAAAAAAAAB3gAAAAAAD67u"), - List.of(2) + List.of(2), + 0.3d, + 2.7d, + 1 ); stackTrace.toXContent(actualRequest, ToXContent.EMPTY_PARAMS); @@ -106,7 +112,10 @@ public void testEquality() { List.of(102782), List.of("AAAAAAAAAAUAAAAAAAAB3g"), List.of("AAAAAAAAAAUAAAAAAAAB3gAAAAAAD67u"), - List.of(2) + List.of(2), + 0.3d, + 2.7d, + 1 ); EqualsHashCodeTestUtils.checkEqualsAndHashCode( @@ -115,7 +124,10 @@ public void testEquality() { new ArrayList<>(o.addressOrLines), new ArrayList<>(o.fileIds), new ArrayList<>(o.frameIds), - new ArrayList<>(o.typeIds) + new ArrayList<>(o.typeIds), + 0.3d, + 2.7d, + 1 )) ); } diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphActionTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphActionTests.java index 7b3a572c918de..32735e5db935a 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphActionTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphActionTests.java @@ -41,12 +41,15 @@ public void testCreateFlamegraph() { "fr28zxcZ2UDasxYuu6dV-wAAAAAA0Gra", "fr28zxcZ2UDasxYuu6dV-wAAAAAA-VK9" ), - List.of(3, 3, 3, 3, 3, 3, 3, 3, 3) + List.of(3, 3, 3, 3, 3, 3, 3, 3, 3), + 0.3d, + 2.7d, + 1 ) ), Map.of(), Map.of("fr28zxcZ2UDasxYuu6dV-w", "containerd"), - Map.of("2buqP1GpF-TXYmL4USW8gA", 1), + Map.of("2buqP1GpF-TXYmL4USW8gA", new TraceEvent("2buqP1GpF-TXYmL4USW8gA", 1L)), 9, 1.0d, 1 @@ -55,8 +58,8 @@ public void testCreateFlamegraph() { assertNotNull(response); assertEquals(10, response.getSize()); assertEquals(1.0d, response.getSamplingRate(), 0.001d); - assertEquals(List.of(1, 1, 1, 1, 1, 1, 1, 1, 1, 1), response.getCountInclusive()); - assertEquals(List.of(0, 0, 0, 0, 0, 0, 0, 0, 0, 1), response.getCountExclusive()); + assertEquals(List.of(1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L), response.getCountInclusive()); + assertEquals(List.of(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L), response.getCountExclusive()); assertEquals( List.of( Map.of("174640828", 1), @@ -112,8 +115,8 @@ public void testCreateFlamegraph() { assertEquals(List.of(0, 0, 0, 0, 0, 0, 0, 0, 0, 0), response.getFunctionOffsets()); assertEquals(List.of("", "", "", "", "", "", "", "", "", ""), response.getSourceFileNames()); assertEquals(List.of(0, 0, 0, 0, 0, 0, 0, 0, 0, 0), response.getSourceLines()); - assertEquals(1, response.getSelfCPU()); - assertEquals(10, response.getTotalCPU()); + assertEquals(1L, response.getSelfCPU()); + assertEquals(10L, response.getTotalCPU()); assertEquals(1L, response.getTotalSamples()); } @@ -124,8 +127,8 @@ public void testCreateEmptyFlamegraphWithRootNode() { assertNotNull(response); assertEquals(1, response.getSize()); assertEquals(1.0d, response.getSamplingRate(), 0.001d); - assertEquals(List.of(0), response.getCountInclusive()); - assertEquals(List.of(0), response.getCountExclusive()); + assertEquals(List.of(0L), response.getCountInclusive()); + assertEquals(List.of(0L), response.getCountExclusive()); assertEquals(List.of(Map.of()), 
response.getEdges()); assertEquals(List.of(""), response.getFileIds()); assertEquals(List.of(0), response.getFrameTypes()); @@ -136,8 +139,8 @@ public void testCreateEmptyFlamegraphWithRootNode() { assertEquals(List.of(0), response.getFunctionOffsets()); assertEquals(List.of(""), response.getSourceFileNames()); assertEquals(List.of(0), response.getSourceLines()); - assertEquals(0, response.getSelfCPU()); - assertEquals(0, response.getTotalCPU()); + assertEquals(0L, response.getSelfCPU()); + assertEquals(0L, response.getTotalCPU()); assertEquals(0L, response.getTotalSamples()); } } diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/MetadataAttribute.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/MetadataAttribute.java index c61fe9f240ea7..3b2cb542ceb4c 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/MetadataAttribute.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/MetadataAttribute.java @@ -8,6 +8,8 @@ package org.elasticsearch.xpack.ql.expression; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; @@ -24,8 +26,10 @@ public class MetadataAttribute extends TypedAttribute { tuple(DataTypes.LONG, false), // _version field is not searchable "_index", tuple(DataTypes.KEYWORD, true), - "_id", - tuple(DataTypes.KEYWORD, false) // actually searchable, but fielddata access on the _id field is disallowed by default + IdFieldMapper.NAME, + tuple(DataTypes.KEYWORD, false), // actually searchable, but fielddata access on the _id field is disallowed by default + SourceFieldMapper.NAME, + tuple(DataTypes.SOURCE, false) ); private final boolean searchable; diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java index 291722f42ca94..fd15a69977f02 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java @@ -135,6 +135,7 @@ public String toString() { ); public static final Set ALL_FIELDS = Set.of("*"); + public static final Set INDEX_METADATA_FIELD = Set.of("_index"); public static final String UNMAPPED = "unmapped"; private final Client client; @@ -359,7 +360,7 @@ public void resolveAsMergedMapping( client.fieldCaps( fieldRequest, listener.delegateFailureAndWrap( - (l, response) -> l.onResponse(mergedMappings(typeRegistry, indexWildcard, response, specificValidityVerifier, null)) + (l, response) -> l.onResponse(mergedMappings(typeRegistry, indexWildcard, response, specificValidityVerifier, null, null)) ) ); } @@ -371,14 +372,16 @@ public void resolveAsMergedMapping( Map runtimeMappings, ActionListener listener, BiFunction, InvalidMappedField> specificValidityVerifier, - BiConsumer fieldUpdater - + BiConsumer fieldUpdater, + Set allowedMetadataFields ) { FieldCapabilitiesRequest fieldRequest = createFieldCapsRequest(indexWildcard, fieldNames, includeFrozen, runtimeMappings); client.fieldCaps( fieldRequest, listener.delegateFailureAndWrap( - (l, response) -> l.onResponse(mergedMappings(typeRegistry, indexWildcard, response, specificValidityVerifier, fieldUpdater)) + (l, response) -> l.onResponse( + 
mergedMappings(typeRegistry, indexWildcard, response, specificValidityVerifier, fieldUpdater, allowedMetadataFields) + ) ) ); } @@ -389,7 +392,7 @@ public static IndexResolution mergedMappings( FieldCapabilitiesResponse fieldCapsResponse, BiFunction, InvalidMappedField> specificValidityVerifier ) { - return mergedMappings(typeRegistry, indexPattern, fieldCapsResponse, specificValidityVerifier, null); + return mergedMappings(typeRegistry, indexPattern, fieldCapsResponse, specificValidityVerifier, null, null); } public static IndexResolution mergedMappings( @@ -397,7 +400,8 @@ public static IndexResolution mergedMappings( String indexPattern, FieldCapabilitiesResponse fieldCapsResponse, BiFunction, InvalidMappedField> specificValidityVerifier, - BiConsumer fieldUpdater + BiConsumer fieldUpdater, + Set allowedMetadataFields ) { if (fieldCapsResponse.getIndices().length == 0) { @@ -470,7 +474,8 @@ public static IndexResolution mergedMappings( null, i -> indexPattern, validityVerifier, - fieldUpdater + fieldUpdater, + allowedMetadataFields ); if (indices.size() > 1) { @@ -494,7 +499,7 @@ public static IndexResolution mergedMappings( String indexPattern, FieldCapabilitiesResponse fieldCapsResponse ) { - return mergedMappings(typeRegistry, indexPattern, fieldCapsResponse, (fieldName, types) -> null, null); + return mergedMappings(typeRegistry, indexPattern, fieldCapsResponse, (fieldName, types) -> null, null, null); } private static EsField createField( @@ -662,7 +667,7 @@ public static List separateMappings( FieldCapabilitiesResponse fieldCaps, Map> aliases ) { - return buildIndices(typeRegistry, javaRegex, fieldCaps, aliases, Function.identity(), (s, cap) -> null, null); + return buildIndices(typeRegistry, javaRegex, fieldCaps, aliases, Function.identity(), (s, cap) -> null, null, null); } private static class Fields { @@ -681,7 +686,8 @@ private static List buildIndices( Map> aliases, Function indexNameProcessor, BiFunction, InvalidMappedField> validityVerifier, - BiConsumer fieldUpdater + BiConsumer fieldUpdater, + Set allowedMetadataFields ) { if ((fieldCapsResponse.getIndices() == null || fieldCapsResponse.getIndices().length == 0) @@ -706,8 +712,9 @@ private static List buildIndices( final Map> fieldCaps = fieldCapsResponse.get(); for (Entry> entry : fieldCaps.entrySet()) { String fieldName = entry.getKey(); - // skip metadata field! - if (fieldCapsResponse.isMetadataField(fieldName) == false) { + // skip metadata fields unless they are explicitly allowed + if ((allowedMetadataFields != null && allowedMetadataFields.contains(fieldName)) + || fieldCapsResponse.isMetadataField(fieldName) == false) { sortedFields.put(fieldName, entry.getValue()); } } @@ -718,6 +725,10 @@ private static List buildIndices( final InvalidMappedField invalidField = validityVerifier.apply(fieldName, types); // apply verification for fields belonging to index aliases Map invalidFieldsForAliases = getInvalidFieldsForAliases(fieldName, types, aliases); + // For ESQL there are scenarios where no fields are requested from field_caps and the field_caps response only contains + // the list of indices. To still build a proper "indices" list (even if empty), such metadata fields are accepted here + // but not actually added to each index hierarchy.
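+ // e.g. a caller may pass IndexResolver.INDEX_METADATA_FIELD ({"_index"}, declared above): each index named in the + // field_caps response still gets registered through indices.computeIfAbsent(...) below, while the isMetadataField guard + // keeps the field itself out of every per-index field hierarchy.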
+ boolean isMetadataField = allowedMetadataFields != null && allowedMetadataFields.contains(fieldName); // check each type for (Entry typeEntry : types.entrySet()) { @@ -750,7 +761,8 @@ private static List buildIndices( Fields indexFields = indices.computeIfAbsent(indexName, k -> new Fields()); EsField field = indexFields.flattedMapping.get(fieldName); // create field hierarchy or update it in case of an invalid field - if (field == null || (invalidField != null && (field instanceof InvalidMappedField) == false)) { + if (isMetadataField == false + && (field == null || (invalidField != null && (field instanceof InvalidMappedField) == false))) { createField(typeRegistry, fieldName, indexFields, fieldCaps, invalidField, typeCap); // In evolving mappings, it is possible for a field to be promoted to an object in new indices @@ -777,7 +789,7 @@ private static List buildIndices( for (String index : uniqueAliases) { Fields indexFields = indices.computeIfAbsent(index, k -> new Fields()); EsField field = indexFields.flattedMapping.get(fieldName); - if (field == null && invalidFieldsForAliases.get(index) == null) { + if (isMetadataField == false && field == null && invalidFieldsForAliases.get(index) == null) { createField(typeRegistry, fieldName, indexFields, fieldCaps, invalidField, typeCap); } } diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/type/DataTypes.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/type/DataTypes.java index a080e673fe50a..6aa47f7c817a7 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/type/DataTypes.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/type/DataTypes.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.ql.type; +import org.elasticsearch.index.mapper.SourceFieldMapper; + import java.math.BigInteger; import java.time.ZonedDateTime; import java.util.Collection; @@ -52,6 +54,14 @@ public final class DataTypes { public static final DataType OBJECT = new DataType("object", 0, false, false, false); public static final DataType NESTED = new DataType("nested", 0, false, false, false); //end::noformat + public static final DataType SOURCE = new DataType( + SourceFieldMapper.NAME, + SourceFieldMapper.NAME, + Integer.MAX_VALUE, + false, + false, + false + ); private static final Collection TYPES = Stream.of( UNSUPPORTED, diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/Graphviz.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/Graphviz.java index 9a1b2e9630894..84784540c929e 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/Graphviz.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/Graphviz.java @@ -305,10 +305,6 @@ private static String quoteGraphviz(String value) { return "\"" + value + "\""; } - private static String escapeGraphviz(String value) { - return value.replace("<", "\\<").replace(">", "\\>").replace("\"", "\\\""); - } - private static void indent(StringBuilder sb, int indent) { for (int i = 0; i < indent; i++) { sb.append(" "); diff --git a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorCanMatchIT.java b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorCanMatchIT.java index c5fac43723b70..445aeaa375e11 100644 --- a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorCanMatchIT.java +++ 
b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorCanMatchIT.java @@ -10,7 +10,6 @@ import org.apache.lucene.document.LongPoint; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.PointValues; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.Strings; import org.elasticsearch.index.IndexSettings; @@ -35,6 +34,7 @@ import java.util.Optional; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; @ESIntegTestCase.ClusterScope(maxNumDataNodes = 3) @ESIntegTestCase.SuiteScopeTestCase @@ -114,10 +114,10 @@ public void testCanMatchCoordinator() throws Exception { ensureGreen("time_index"); for (int i = 0; i < 500; i++) { - client().prepareIndex("time_index").setSource("@timestamp", i).setRouting("a").get(); + prepareIndex("time_index").setSource("@timestamp", i).setRouting("a").get(); } for (int i = 500; i < 1000; i++) { - client().prepareIndex("time_index").setSource("@timestamp", i).setRouting("b").get(); + prepareIndex("time_index").setSource("@timestamp", i).setRouting("b").get(); } client().admin().indices().prepareRefresh("time_index").get(); @@ -130,117 +130,129 @@ public void testCanMatchCoordinator() throws Exception { }); // match 2 separate shards with no overlap in queries - SearchResponse response = prepareSearch("time_index").setSearchType(SearchType.QUERY_THEN_FETCH) - .setPreFilterShardSize(1) - .setRankBuilder(new RRFRankBuilder(20, 1)) - .setTrackTotalHits(false) - .setSubSearches( - List.of( - new SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gt(495).lte(499)), - new SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gte(500).lt(505)) + assertResponse( + prepareSearch("time_index").setSearchType(SearchType.QUERY_THEN_FETCH) + .setPreFilterShardSize(1) + .setRankBuilder(new RRFRankBuilder(20, 1)) + .setTrackTotalHits(false) + .setSubSearches( + List.of( + new SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gt(495).lte(499)), + new SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gte(500).lt(505)) + ) ) - ) - .setSize(5) - .get(); - - assertNull(response.getHits().getTotalHits()); - assertEquals(5, response.getHits().getHits().length); - assertEquals(5, response.getSuccessfulShards()); - assertEquals(3, response.getSkippedShards()); + .setSize(5), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(5, response.getHits().getHits().length); + assertEquals(5, response.getSuccessfulShards()); + assertEquals(3, response.getSkippedShards()); + } + ); // match 2 shards with overlap in queries - response = prepareSearch("time_index").setSearchType(SearchType.QUERY_THEN_FETCH) - .setPreFilterShardSize(1) - .setRankBuilder(new RRFRankBuilder(20, 1)) - .setTrackTotalHits(false) - .setSubSearches( - List.of( - new SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gt(495).lte(505)), - new SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gte(497).lt(507)) + assertResponse( + prepareSearch("time_index").setSearchType(SearchType.QUERY_THEN_FETCH) + .setPreFilterShardSize(1) + .setRankBuilder(new RRFRankBuilder(20, 1)) + .setTrackTotalHits(false) + .setSubSearches( + List.of( + new SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gt(495).lte(505)), + new 
SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gte(497).lt(507)) + ) ) - ) - .setSize(5) - .get(); - - assertNull(response.getHits().getTotalHits()); - assertEquals(5, response.getHits().getHits().length); - assertEquals(5, response.getSuccessfulShards()); - assertEquals(3, response.getSkippedShards()); + .setSize(5), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(5, response.getHits().getHits().length); + assertEquals(5, response.getSuccessfulShards()); + assertEquals(3, response.getSkippedShards()); + } + ); // match one shard with one query in range and one query out of range - response = prepareSearch("time_index").setSearchType(SearchType.QUERY_THEN_FETCH) - .setPreFilterShardSize(1) - .setRankBuilder(new RRFRankBuilder(20, 1)) - .setTrackTotalHits(false) - .setSubSearches( - List.of( - new SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gt(501).lte(505)), - new SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gte(10000).lt(10005)) + assertResponse( + prepareSearch("time_index").setSearchType(SearchType.QUERY_THEN_FETCH) + .setPreFilterShardSize(1) + .setRankBuilder(new RRFRankBuilder(20, 1)) + .setTrackTotalHits(false) + .setSubSearches( + List.of( + new SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gt(501).lte(505)), + new SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gte(10000).lt(10005)) + ) ) - ) - .setSize(5) - .get(); - - assertNull(response.getHits().getTotalHits()); - assertEquals(4, response.getHits().getHits().length); - assertEquals(5, response.getSuccessfulShards()); - assertEquals(4, response.getSkippedShards()); + .setSize(5), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(4, response.getHits().getHits().length); + assertEquals(5, response.getSuccessfulShards()); + assertEquals(4, response.getSkippedShards()); + } + ); // match no shards, but still use one to generate a search response - response = prepareSearch("time_index").setSearchType(SearchType.QUERY_THEN_FETCH) - .setPreFilterShardSize(1) - .setRankBuilder(new RRFRankBuilder(20, 1)) - .setTrackTotalHits(false) - .setSubSearches( - List.of( - new SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gt(4000).lte(5000)), - new SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gte(10000).lt(10005)) + assertResponse( + prepareSearch("time_index").setSearchType(SearchType.QUERY_THEN_FETCH) + .setPreFilterShardSize(1) + .setRankBuilder(new RRFRankBuilder(20, 1)) + .setTrackTotalHits(false) + .setSubSearches( + List.of( + new SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gt(4000).lte(5000)), + new SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gte(10000).lt(10005)) + ) ) - ) - .setSize(5) - .get(); - - assertNull(response.getHits().getTotalHits()); - assertEquals(0, response.getHits().getHits().length); - assertEquals(5, response.getSuccessfulShards()); - assertEquals(4, response.getSkippedShards()); + .setSize(5), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(0, response.getHits().getHits().length); + assertEquals(5, response.getSuccessfulShards()); + assertEquals(4, response.getSkippedShards()); + } + ); // match one shard with no overlap in queries - response = prepareSearch("time_index").setSearchType(SearchType.QUERY_THEN_FETCH) - .setPreFilterShardSize(1) - .setRankBuilder(new RRFRankBuilder(20, 1)) - .setTrackTotalHits(false) - .setSubSearches( - List.of( - new 
SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gt(600).lte(605)), - new SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gte(700).lt(705)) + assertResponse( + prepareSearch("time_index").setSearchType(SearchType.QUERY_THEN_FETCH) + .setPreFilterShardSize(1) + .setRankBuilder(new RRFRankBuilder(20, 1)) + .setTrackTotalHits(false) + .setSubSearches( + List.of( + new SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gt(600).lte(605)), + new SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gte(700).lt(705)) + ) ) - ) - .setSize(5) - .get(); - - assertNull(response.getHits().getTotalHits()); - assertEquals(5, response.getHits().getHits().length); - assertEquals(5, response.getSuccessfulShards()); - assertEquals(4, response.getSkippedShards()); + .setSize(5), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(5, response.getHits().getHits().length); + assertEquals(5, response.getSuccessfulShards()); + assertEquals(4, response.getSkippedShards()); + } + ); // match one shard with exact overlap in queries - response = prepareSearch("time_index").setSearchType(SearchType.QUERY_THEN_FETCH) - .setPreFilterShardSize(1) - .setRankBuilder(new RRFRankBuilder(20, 1)) - .setTrackTotalHits(false) - .setSubSearches( - List.of( - new SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gt(600).lte(605)), - new SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gte(600).lt(605)) + assertResponse( + prepareSearch("time_index").setSearchType(SearchType.QUERY_THEN_FETCH) + .setPreFilterShardSize(1) + .setRankBuilder(new RRFRankBuilder(20, 1)) + .setTrackTotalHits(false) + .setSubSearches( + List.of( + new SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gt(600).lte(605)), + new SubSearchSourceBuilder(QueryBuilders.rangeQuery("@timestamp").gte(600).lt(605)) + ) ) - ) - .setSize(5) - .get(); - - assertNull(response.getHits().getTotalHits()); - assertEquals(5, response.getHits().getHits().length); - assertEquals(5, response.getSuccessfulShards()); - assertEquals(4, response.getSkippedShards()); + .setSize(5), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(5, response.getHits().getHits().length); + assertEquals(5, response.getSuccessfulShards()); + assertEquals(4, response.getSkippedShards()); + } + ); } } diff --git a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankMultiShardIT.java b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankMultiShardIT.java index 3db050e071aa7..af465658a0b52 100644 --- a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankMultiShardIT.java +++ b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankMultiShardIT.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.rank.rrf; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilders; @@ -28,6 +27,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; @ESIntegTestCase.ClusterScope(maxNumDataNodes = 3) @ESIntegTestCase.SuiteScopeTestCase @@ -77,9 +77,9 @@ public void setupSuiteScopeCluster() throws Exception { 
assertAcked(prepareCreate("tiny_index").setMapping(builder)); ensureGreen("tiny_index"); - client().prepareIndex("tiny_index").setSource("vector", new float[] { 0.0f }, "text", "term term").get(); - client().prepareIndex("tiny_index").setSource("vector", new float[] { 1.0f }, "text", "other").get(); - client().prepareIndex("tiny_index").setSource("vector", new float[] { 2.0f }, "text", "term").get(); + prepareIndex("tiny_index").setSource("vector", new float[] { 0.0f }, "text", "term term").get(); + prepareIndex("tiny_index").setSource("vector", new float[] { 1.0f }, "text", "other").get(); + prepareIndex("tiny_index").setSource("vector", new float[] { 2.0f }, "text", "term").get(); indicesAdmin().prepareRefresh("tiny_index").get(); @@ -117,20 +117,18 @@ public void setupSuiteScopeCluster() throws Exception { ensureGreen(TimeValue.timeValueSeconds(120), "nrd_index"); for (int doc = 0; doc < 1001; ++doc) { - client().prepareIndex("nrd_index") - .setSource( - "vector_asc", - new float[] { doc }, - "vector_desc", - new float[] { 1000 - doc }, - "int", - doc % 3, - "text0", - "term " + doc, - "text1", - "term " + (1000 - doc) - ) - .get(); + prepareIndex("nrd_index").setSource( + "vector_asc", + new float[] { doc }, + "vector_desc", + new float[] { 1000 - doc }, + "int", + doc % 3, + "text0", + "term " + doc, + "text1", + "term " + (1000 - doc) + ).get(); } indicesAdmin().prepareRefresh("nrd_index").get(); @@ -139,69 +137,72 @@ public void setupSuiteScopeCluster() throws Exception { public void testTotalDocsSmallerThanSize() { float[] queryVector = { 0.0f }; KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector", queryVector, 3, 3, null); - SearchResponse response = prepareSearch("tiny_index").setRankBuilder(new RRFRankBuilder(100, 1)) - .setKnnSearch(List.of(knnSearch)) - .setQuery(QueryBuilders.termQuery("text", "term")) - .addFetchField("vector") - .addFetchField("text") - .get(); - - // we cast to Number when looking at values in vector fields because different xContentTypes may return Float or Double - - assertEquals(3, response.getHits().getHits().length); - - SearchHit hit = response.getHits().getAt(0); - assertEquals(1, hit.getRank()); - assertEquals(0.0, ((Number) hit.field("vector").getValue()).doubleValue(), 0.0); - assertEquals("term term", hit.field("text").getValue()); - - hit = response.getHits().getAt(1); - assertEquals(2, hit.getRank()); - assertEquals(2.0, ((Number) hit.field("vector").getValue()).doubleValue(), 0.0); - assertEquals("term", hit.field("text").getValue()); - - hit = response.getHits().getAt(2); - assertEquals(3, hit.getRank()); - assertEquals(1.0, ((Number) hit.field("vector").getValue()).doubleValue(), 0.0); - assertEquals("other", hit.field("text").getValue()); + assertResponse( + prepareSearch("tiny_index").setRankBuilder(new RRFRankBuilder(100, 1)) + .setKnnSearch(List.of(knnSearch)) + .setQuery(QueryBuilders.termQuery("text", "term")) + .addFetchField("vector") + .addFetchField("text"), + response -> { + // we cast to Number when looking at values in vector fields because different xContentTypes may return Float or Double + assertEquals(3, response.getHits().getHits().length); + + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getRank()); + assertEquals(0.0, ((Number) hit.field("vector").getValue()).doubleValue(), 0.0); + assertEquals("term term", hit.field("text").getValue()); + + hit = response.getHits().getAt(1); + assertEquals(2, hit.getRank()); + assertEquals(2.0, ((Number) hit.field("vector").getValue()).doubleValue(), 0.0); + 
assertEquals("term", hit.field("text").getValue()); + + hit = response.getHits().getAt(2); + assertEquals(3, hit.getRank()); + assertEquals(1.0, ((Number) hit.field("vector").getValue()).doubleValue(), 0.0); + assertEquals("other", hit.field("text").getValue()); + } + ); } public void testBM25AndKnn() { float[] queryVector = { 500.0f }; KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector_asc", queryVector, 101, 1001, null); - SearchResponse response = prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(101, 1)) - .setTrackTotalHits(false) - .setKnnSearch(List.of(knnSearch)) - .setQuery( - QueryBuilders.boolQuery() - .should(QueryBuilders.termQuery("text0", "500").boost(11.0f)) - .should(QueryBuilders.termQuery("text0", "499").boost(10.0f)) - .should(QueryBuilders.termQuery("text0", "498").boost(9.0f)) - .should(QueryBuilders.termQuery("text0", "497").boost(8.0f)) - .should(QueryBuilders.termQuery("text0", "496").boost(7.0f)) - .should(QueryBuilders.termQuery("text0", "495").boost(6.0f)) - .should(QueryBuilders.termQuery("text0", "494").boost(5.0f)) - .should(QueryBuilders.termQuery("text0", "493").boost(4.0f)) - .should(QueryBuilders.termQuery("text0", "492").boost(3.0f)) - .should(QueryBuilders.termQuery("text0", "491").boost(2.0f)) - ) - .addFetchField("vector_asc") - .addFetchField("text0") - .setSize(11) - .get(); - - assertNull(response.getHits().getTotalHits()); - assertEquals(11, response.getHits().getHits().length); - - SearchHit hit = response.getHits().getAt(0); - assertEquals(1, hit.getRank()); - assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); - assertEquals("term 500", hit.field("text0").getValue()); - - Set vectors = Arrays.stream(response.getHits().getHits()) - .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) - .collect(Collectors.toSet()); - assertEquals(Set.of(492.0, 493.0, 494.0, 495.0, 496.0, 497.0, 498.0, 499.0, 500.0, 501.0, 502.0), vectors); + assertResponse( + prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(101, 1)) + .setTrackTotalHits(false) + .setKnnSearch(List.of(knnSearch)) + .setQuery( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("text0", "500").boost(11.0f)) + .should(QueryBuilders.termQuery("text0", "499").boost(10.0f)) + .should(QueryBuilders.termQuery("text0", "498").boost(9.0f)) + .should(QueryBuilders.termQuery("text0", "497").boost(8.0f)) + .should(QueryBuilders.termQuery("text0", "496").boost(7.0f)) + .should(QueryBuilders.termQuery("text0", "495").boost(6.0f)) + .should(QueryBuilders.termQuery("text0", "494").boost(5.0f)) + .should(QueryBuilders.termQuery("text0", "493").boost(4.0f)) + .should(QueryBuilders.termQuery("text0", "492").boost(3.0f)) + .should(QueryBuilders.termQuery("text0", "491").boost(2.0f)) + ) + .addFetchField("vector_asc") + .addFetchField("text0") + .setSize(11), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(11, response.getHits().getHits().length); + + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getRank()); + assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); + assertEquals("term 500", hit.field("text0").getValue()); + + Set vectors = Arrays.stream(response.getHits().getHits()) + .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) + .collect(Collectors.toSet()); + assertEquals(Set.of(492.0, 493.0, 494.0, 495.0, 496.0, 497.0, 498.0, 499.0, 500.0, 501.0, 502.0), vectors); + } + ); } public void testMultipleOnlyKnn() { @@ 
-209,48 +210,50 @@ public void testMultipleOnlyKnn() { float[] queryVectorDesc = { 500.0f }; KnnSearchBuilder knnSearchAsc = new KnnSearchBuilder("vector_asc", queryVectorAsc, 51, 1001, null); KnnSearchBuilder knnSearchDesc = new KnnSearchBuilder("vector_desc", queryVectorDesc, 51, 1001, null); - SearchResponse response = prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(51, 1)) - .setTrackTotalHits(true) - .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc)) - .addFetchField("vector_asc") - .addFetchField("text0") - .setSize(19) - .get(); - - assertEquals(51, response.getHits().getTotalHits().value); - assertEquals(19, response.getHits().getHits().length); - - SearchHit hit = response.getHits().getAt(0); - assertEquals(1, hit.getRank()); - assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); - assertEquals("term 500", hit.field("text0").getValue()); - - Set vectors = Arrays.stream(response.getHits().getHits()) - .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) - .collect(Collectors.toSet()); - assertEquals( - Set.of( - 491.0, - 492.0, - 493.0, - 494.0, - 495.0, - 496.0, - 497.0, - 498.0, - 499.0, - 500.0, - 501.0, - 502.0, - 503.0, - 504.0, - 505.0, - 506.0, - 507.0, - 508.0, - 509.0 - ), - vectors + assertResponse( + prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(51, 1)) + .setTrackTotalHits(true) + .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc)) + .addFetchField("vector_asc") + .addFetchField("text0") + .setSize(19), + response -> { + assertEquals(51, response.getHits().getTotalHits().value); + assertEquals(19, response.getHits().getHits().length); + + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getRank()); + assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); + assertEquals("term 500", hit.field("text0").getValue()); + + Set vectors = Arrays.stream(response.getHits().getHits()) + .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) + .collect(Collectors.toSet()); + assertEquals( + Set.of( + 491.0, + 492.0, + 493.0, + 494.0, + 495.0, + 496.0, + 497.0, + 498.0, + 499.0, + 500.0, + 501.0, + 502.0, + 503.0, + 504.0, + 505.0, + 506.0, + 507.0, + 508.0, + 509.0 + ), + vectors + ); + } ); } @@ -259,124 +262,128 @@ public void testBM25AndMultipleKnn() { float[] queryVectorDesc = { 500.0f }; KnnSearchBuilder knnSearchAsc = new KnnSearchBuilder("vector_asc", queryVectorAsc, 51, 1001, null); KnnSearchBuilder knnSearchDesc = new KnnSearchBuilder("vector_desc", queryVectorDesc, 51, 1001, null); - SearchResponse response = prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(51, 1)) - .setTrackTotalHits(false) - .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc)) - .setQuery( - QueryBuilders.boolQuery() - .should(QueryBuilders.termQuery("text0", "500").boost(10.0f)) - .should(QueryBuilders.termQuery("text0", "499").boost(20.0f)) - .should(QueryBuilders.termQuery("text0", "498").boost(8.0f)) - .should(QueryBuilders.termQuery("text0", "497").boost(7.0f)) - .should(QueryBuilders.termQuery("text0", "496").boost(6.0f)) - .should(QueryBuilders.termQuery("text0", "485").boost(5.0f)) - .should(QueryBuilders.termQuery("text0", "494").boost(4.0f)) - .should(QueryBuilders.termQuery("text0", "506").boost(3.0f)) - .should(QueryBuilders.termQuery("text0", "505").boost(2.0f)) - .should(QueryBuilders.termQuery("text0", "511").boost(9.0f)) - ) - .addFetchField("vector_asc") - .addFetchField("vector_desc") - .addFetchField("text0") - .setSize(19) - .get(); 
- - assertNull(response.getHits().getTotalHits()); - assertEquals(19, response.getHits().getHits().length); - - SearchHit hit = response.getHits().getAt(0); - assertEquals(1, hit.getRank()); - assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); - assertEquals(500.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0); - assertEquals("term 500", hit.field("text0").getValue()); - - hit = response.getHits().getAt(1); - assertEquals(2, hit.getRank()); - assertEquals(499.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); - assertEquals(501.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0); - assertEquals("term 499", hit.field("text0").getValue()); - - Set vectors = Arrays.stream(response.getHits().getHits()) - .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) - .collect(Collectors.toSet()); - assertEquals( - Set.of( - 485.0, - 492.0, - 493.0, - 494.0, - 495.0, - 496.0, - 497.0, - 498.0, - 499.0, - 500.0, - 501.0, - 502.0, - 503.0, - 504.0, - 505.0, - 506.0, - 507.0, - 508.0, - 511.0 - ), - vectors + assertResponse( + prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(51, 1)) + .setTrackTotalHits(false) + .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc)) + .setQuery( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("text0", "500").boost(10.0f)) + .should(QueryBuilders.termQuery("text0", "499").boost(20.0f)) + .should(QueryBuilders.termQuery("text0", "498").boost(8.0f)) + .should(QueryBuilders.termQuery("text0", "497").boost(7.0f)) + .should(QueryBuilders.termQuery("text0", "496").boost(6.0f)) + .should(QueryBuilders.termQuery("text0", "485").boost(5.0f)) + .should(QueryBuilders.termQuery("text0", "494").boost(4.0f)) + .should(QueryBuilders.termQuery("text0", "506").boost(3.0f)) + .should(QueryBuilders.termQuery("text0", "505").boost(2.0f)) + .should(QueryBuilders.termQuery("text0", "511").boost(9.0f)) + ) + .addFetchField("vector_asc") + .addFetchField("vector_desc") + .addFetchField("text0") + .setSize(19), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(19, response.getHits().getHits().length); + + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getRank()); + assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); + assertEquals(500.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0); + assertEquals("term 500", hit.field("text0").getValue()); + + hit = response.getHits().getAt(1); + assertEquals(2, hit.getRank()); + assertEquals(499.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); + assertEquals(501.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0); + assertEquals("term 499", hit.field("text0").getValue()); + + Set vectors = Arrays.stream(response.getHits().getHits()) + .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) + .collect(Collectors.toSet()); + assertEquals( + Set.of( + 485.0, + 492.0, + 493.0, + 494.0, + 495.0, + 496.0, + 497.0, + 498.0, + 499.0, + 500.0, + 501.0, + 502.0, + 503.0, + 504.0, + 505.0, + 506.0, + 507.0, + 508.0, + 511.0 + ), + vectors + ); + } ); } public void testBM25AndKnnWithBucketAggregation() { float[] queryVector = { 500.0f }; KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector_asc", queryVector, 101, 1001, null); - SearchResponse response = prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(101, 1)) - .setTrackTotalHits(true) - .setKnnSearch(List.of(knnSearch)) 
- .setQuery( - QueryBuilders.boolQuery() - .should(QueryBuilders.termQuery("text0", "500").boost(11.0f)) - .should(QueryBuilders.termQuery("text0", "499").boost(10.0f)) - .should(QueryBuilders.termQuery("text0", "498").boost(9.0f)) - .should(QueryBuilders.termQuery("text0", "497").boost(8.0f)) - .should(QueryBuilders.termQuery("text0", "496").boost(7.0f)) - .should(QueryBuilders.termQuery("text0", "495").boost(6.0f)) - .should(QueryBuilders.termQuery("text0", "494").boost(5.0f)) - .should(QueryBuilders.termQuery("text0", "493").boost(4.0f)) - .should(QueryBuilders.termQuery("text0", "492").boost(3.0f)) - .should(QueryBuilders.termQuery("text0", "491").boost(2.0f)) - ) - .addFetchField("vector_asc") - .addFetchField("text0") - .setSize(11) - .addAggregation(AggregationBuilders.terms("sums").field("int")) - .get(); - - assertEquals(101, response.getHits().getTotalHits().value); - assertEquals(11, response.getHits().getHits().length); - - SearchHit hit = response.getHits().getAt(0); - assertEquals(1, hit.getRank()); - assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); - assertEquals("term 500", hit.field("text0").getValue()); - - Set vectors = Arrays.stream(response.getHits().getHits()) - .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) - .collect(Collectors.toSet()); - assertEquals(Set.of(492.0, 493.0, 494.0, 495.0, 496.0, 497.0, 498.0, 499.0, 500.0, 501.0, 502.0), vectors); - - LongTerms aggregation = response.getAggregations().get("sums"); - assertEquals(3, aggregation.getBuckets().size()); - - for (LongTerms.Bucket bucket : aggregation.getBuckets()) { - if (0L == (long) bucket.getKey()) { - assertEquals(34, bucket.getDocCount()); - } else if (1L == (long) bucket.getKey()) { - assertEquals(34, bucket.getDocCount()); - } else if (2L == (long) bucket.getKey()) { - assertEquals(33, bucket.getDocCount()); - } else { - throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]"); + assertResponse( + prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(101, 1)) + .setTrackTotalHits(true) + .setKnnSearch(List.of(knnSearch)) + .setQuery( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("text0", "500").boost(11.0f)) + .should(QueryBuilders.termQuery("text0", "499").boost(10.0f)) + .should(QueryBuilders.termQuery("text0", "498").boost(9.0f)) + .should(QueryBuilders.termQuery("text0", "497").boost(8.0f)) + .should(QueryBuilders.termQuery("text0", "496").boost(7.0f)) + .should(QueryBuilders.termQuery("text0", "495").boost(6.0f)) + .should(QueryBuilders.termQuery("text0", "494").boost(5.0f)) + .should(QueryBuilders.termQuery("text0", "493").boost(4.0f)) + .should(QueryBuilders.termQuery("text0", "492").boost(3.0f)) + .should(QueryBuilders.termQuery("text0", "491").boost(2.0f)) + ) + .addFetchField("vector_asc") + .addFetchField("text0") + .setSize(11) + .addAggregation(AggregationBuilders.terms("sums").field("int")), + response -> { + assertEquals(101, response.getHits().getTotalHits().value); + assertEquals(11, response.getHits().getHits().length); + + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getRank()); + assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); + assertEquals("term 500", hit.field("text0").getValue()); + + Set vectors = Arrays.stream(response.getHits().getHits()) + .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) + .collect(Collectors.toSet()); + assertEquals(Set.of(492.0, 493.0, 494.0, 495.0, 496.0, 497.0, 
498.0, 499.0, 500.0, 501.0, 502.0), vectors); + + LongTerms aggregation = response.getAggregations().get("sums"); + assertEquals(3, aggregation.getBuckets().size()); + + for (LongTerms.Bucket bucket : aggregation.getBuckets()) { + if (0L == (long) bucket.getKey()) { + assertEquals(34, bucket.getDocCount()); + } else if (1L == (long) bucket.getKey()) { + assertEquals(34, bucket.getDocCount()); + } else if (2L == (long) bucket.getKey()) { + assertEquals(33, bucket.getDocCount()); + } else { + throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]"); + } + } } - } + ); } public void testMultipleOnlyKnnWithAggregation() { @@ -384,65 +391,67 @@ public void testMultipleOnlyKnnWithAggregation() { float[] queryVectorDesc = { 500.0f }; KnnSearchBuilder knnSearchAsc = new KnnSearchBuilder("vector_asc", queryVectorAsc, 51, 1001, null); KnnSearchBuilder knnSearchDesc = new KnnSearchBuilder("vector_desc", queryVectorDesc, 51, 1001, null); - SearchResponse response = prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(51, 1)) - .setTrackTotalHits(false) - .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc)) - .addFetchField("vector_asc") - .addFetchField("text0") - .setSize(19) - .addAggregation(AggregationBuilders.terms("sums").field("int")) - .get(); - - assertNull(response.getHits().getTotalHits()); - assertEquals(19, response.getHits().getHits().length); - - SearchHit hit = response.getHits().getAt(0); - assertEquals(1, hit.getRank()); - assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); - assertEquals("term 500", hit.field("text0").getValue()); - - Set vectors = Arrays.stream(response.getHits().getHits()) - .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) - .collect(Collectors.toSet()); - assertEquals( - Set.of( - 491.0, - 492.0, - 493.0, - 494.0, - 495.0, - 496.0, - 497.0, - 498.0, - 499.0, - 500.0, - 501.0, - 502.0, - 503.0, - 504.0, - 505.0, - 506.0, - 507.0, - 508.0, - 509.0 - ), - vectors - ); - - LongTerms aggregation = response.getAggregations().get("sums"); - assertEquals(3, aggregation.getBuckets().size()); - - for (LongTerms.Bucket bucket : aggregation.getBuckets()) { - if (0L == (long) bucket.getKey()) { - assertEquals(17, bucket.getDocCount()); - } else if (1L == (long) bucket.getKey()) { - assertEquals(17, bucket.getDocCount()); - } else if (2L == (long) bucket.getKey()) { - assertEquals(17, bucket.getDocCount()); - } else { - throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]"); + assertResponse( + prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(51, 1)) + .setTrackTotalHits(false) + .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc)) + .addFetchField("vector_asc") + .addFetchField("text0") + .setSize(19) + .addAggregation(AggregationBuilders.terms("sums").field("int")), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(19, response.getHits().getHits().length); + + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getRank()); + assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); + assertEquals("term 500", hit.field("text0").getValue()); + + Set vectors = Arrays.stream(response.getHits().getHits()) + .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) + .collect(Collectors.toSet()); + assertEquals( + Set.of( + 491.0, + 492.0, + 493.0, + 494.0, + 495.0, + 496.0, + 497.0, + 498.0, + 499.0, + 500.0, + 501.0, + 502.0, + 503.0, + 504.0, + 505.0, + 506.0, + 
507.0, + 508.0, + 509.0 + ), + vectors + ); + + LongTerms aggregation = response.getAggregations().get("sums"); + assertEquals(3, aggregation.getBuckets().size()); + + for (LongTerms.Bucket bucket : aggregation.getBuckets()) { + if (0L == (long) bucket.getKey()) { + assertEquals(17, bucket.getDocCount()); + } else if (1L == (long) bucket.getKey()) { + assertEquals(17, bucket.getDocCount()); + } else if (2L == (long) bucket.getKey()) { + assertEquals(17, bucket.getDocCount()); + } else { + throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]"); + } + } } - } + ); } public void testBM25AndMultipleKnnWithAggregation() { @@ -450,94 +459,256 @@ public void testBM25AndMultipleKnnWithAggregation() { float[] queryVectorDesc = { 500.0f }; KnnSearchBuilder knnSearchAsc = new KnnSearchBuilder("vector_asc", queryVectorAsc, 51, 1001, null); KnnSearchBuilder knnSearchDesc = new KnnSearchBuilder("vector_desc", queryVectorDesc, 51, 1001, null); - SearchResponse response = prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(51, 1)) - .setTrackTotalHits(true) - .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc)) - .setQuery( - QueryBuilders.boolQuery() - .should(QueryBuilders.termQuery("text0", "500").boost(10.0f)) - .should(QueryBuilders.termQuery("text0", "499").boost(20.0f)) - .should(QueryBuilders.termQuery("text0", "498").boost(8.0f)) - .should(QueryBuilders.termQuery("text0", "497").boost(7.0f)) - .should(QueryBuilders.termQuery("text0", "496").boost(6.0f)) - .should(QueryBuilders.termQuery("text0", "485").boost(5.0f)) - .should(QueryBuilders.termQuery("text0", "494").boost(4.0f)) - .should(QueryBuilders.termQuery("text0", "506").boost(3.0f)) - .should(QueryBuilders.termQuery("text0", "505").boost(2.0f)) - .should(QueryBuilders.termQuery("text0", "511").boost(9.0f)) - ) - .addFetchField("vector_asc") - .addFetchField("vector_desc") - .addFetchField("text0") - .setSize(19) - .addAggregation(AggregationBuilders.terms("sums").field("int")) - .setStats("search") - .get(); - - assertEquals(51, response.getHits().getTotalHits().value); - assertEquals(19, response.getHits().getHits().length); - - SearchHit hit = response.getHits().getAt(0); - assertEquals(1, hit.getRank()); - assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); - assertEquals(500.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0); - assertEquals("term 500", hit.field("text0").getValue()); - - hit = response.getHits().getAt(1); - assertEquals(2, hit.getRank()); - assertEquals(499.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); - assertEquals(501.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0); - assertEquals("term 499", hit.field("text0").getValue()); - - Set vectors = Arrays.stream(response.getHits().getHits()) - .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) - .collect(Collectors.toSet()); - assertEquals( - Set.of( - 485.0, - 492.0, - 493.0, - 494.0, - 495.0, - 496.0, - 497.0, - 498.0, - 499.0, - 500.0, - 501.0, - 502.0, - 503.0, - 504.0, - 505.0, - 506.0, - 507.0, - 508.0, - 511.0 - ), - vectors + assertResponse( + prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(51, 1)) + .setTrackTotalHits(true) + .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc)) + .setQuery( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("text0", "500").boost(10.0f)) + .should(QueryBuilders.termQuery("text0", "499").boost(20.0f)) + .should(QueryBuilders.termQuery("text0", 
"498").boost(8.0f)) + .should(QueryBuilders.termQuery("text0", "497").boost(7.0f)) + .should(QueryBuilders.termQuery("text0", "496").boost(6.0f)) + .should(QueryBuilders.termQuery("text0", "485").boost(5.0f)) + .should(QueryBuilders.termQuery("text0", "494").boost(4.0f)) + .should(QueryBuilders.termQuery("text0", "506").boost(3.0f)) + .should(QueryBuilders.termQuery("text0", "505").boost(2.0f)) + .should(QueryBuilders.termQuery("text0", "511").boost(9.0f)) + ) + .addFetchField("vector_asc") + .addFetchField("vector_desc") + .addFetchField("text0") + .setSize(19) + .addAggregation(AggregationBuilders.terms("sums").field("int")) + .setStats("search"), + response -> { + assertEquals(51, response.getHits().getTotalHits().value); + assertEquals(19, response.getHits().getHits().length); + + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getRank()); + assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); + assertEquals(500.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0); + assertEquals("term 500", hit.field("text0").getValue()); + + hit = response.getHits().getAt(1); + assertEquals(2, hit.getRank()); + assertEquals(499.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); + assertEquals(501.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0); + assertEquals("term 499", hit.field("text0").getValue()); + + Set vectors = Arrays.stream(response.getHits().getHits()) + .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) + .collect(Collectors.toSet()); + assertEquals( + Set.of( + 485.0, + 492.0, + 493.0, + 494.0, + 495.0, + 496.0, + 497.0, + 498.0, + 499.0, + 500.0, + 501.0, + 502.0, + 503.0, + 504.0, + 505.0, + 506.0, + 507.0, + 508.0, + 511.0 + ), + vectors + ); + + LongTerms aggregation = response.getAggregations().get("sums"); + assertEquals(3, aggregation.getBuckets().size()); + + for (LongTerms.Bucket bucket : aggregation.getBuckets()) { + if (0L == (long) bucket.getKey()) { + assertEquals(17, bucket.getDocCount()); + } else if (1L == (long) bucket.getKey()) { + assertEquals(17, bucket.getDocCount()); + } else if (2L == (long) bucket.getKey()) { + assertEquals(17, bucket.getDocCount()); + } else { + throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]"); + } + } + } ); + } - LongTerms aggregation = response.getAggregations().get("sums"); - assertEquals(3, aggregation.getBuckets().size()); - - for (LongTerms.Bucket bucket : aggregation.getBuckets()) { - if (0L == (long) bucket.getKey()) { - assertEquals(17, bucket.getDocCount()); - } else if (1L == (long) bucket.getKey()) { - assertEquals(17, bucket.getDocCount()); - } else if (2L == (long) bucket.getKey()) { - assertEquals(17, bucket.getDocCount()); - } else { - throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]"); - } + public void testMultiBM25() { + for (SearchType searchType : SearchType.CURRENTLY_SUPPORTED) { + assertResponse( + prepareSearch("nrd_index").setSearchType(searchType) + .setRankBuilder(new RRFRankBuilder(8, 1)) + .setTrackTotalHits(false) + .setSubSearches( + List.of( + new SubSearchSourceBuilder( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("text0", "500").boost(10.0f)) + .should(QueryBuilders.termQuery("text0", "499").boost(9.0f)) + .should(QueryBuilders.termQuery("text0", "498").boost(8.0f)) + .should(QueryBuilders.termQuery("text0", "497").boost(7.0f)) + .should(QueryBuilders.termQuery("text0", "496").boost(6.0f)) + 
.should(QueryBuilders.termQuery("text0", "495").boost(5.0f)) + .should(QueryBuilders.termQuery("text0", "494").boost(4.0f)) + .should(QueryBuilders.termQuery("text0", "492").boost(3.0f)) + .should(QueryBuilders.termQuery("text0", "491").boost(2.0f)) + .should(QueryBuilders.termQuery("text0", "490").boost(1.0f)) + ), + new SubSearchSourceBuilder( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("text1", "508").boost(9.0f)) + .should(QueryBuilders.termQuery("text1", "304").boost(8.0f)) + .should(QueryBuilders.termQuery("text1", "501").boost(7.0f)) + .should(QueryBuilders.termQuery("text1", "504").boost(6.0f)) + .should(QueryBuilders.termQuery("text1", "502").boost(5.0f)) + .should(QueryBuilders.termQuery("text1", "499").boost(4.0f)) + .should(QueryBuilders.termQuery("text1", "800").boost(3.0f)) + .should(QueryBuilders.termQuery("text1", "201").boost(2.0f)) + .should(QueryBuilders.termQuery("text1", "492").boost(1.0f)) + ) + ) + ) + .addFetchField("text0") + .addFetchField("text1") + .setSize(5), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(5, response.getHits().getHits().length); + + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getRank()); + assertEquals("term 492", hit.field("text0").getValue()); + assertEquals("term 508", hit.field("text1").getValue()); + + hit = response.getHits().getAt(1); + assertEquals(2, hit.getRank()); + assertEquals("term 499", hit.field("text0").getValue()); + assertEquals("term 501", hit.field("text1").getValue()); + + hit = response.getHits().getAt(2); + assertEquals(3, hit.getRank()); + assertEquals("term 500", hit.field("text0").getValue()); + assertEquals("term 500", hit.field("text1").getValue()); + + hit = response.getHits().getAt(3); + assertEquals(4, hit.getRank()); + assertEquals("term 498", hit.field("text0").getValue()); + assertEquals("term 502", hit.field("text1").getValue()); + + hit = response.getHits().getAt(4); + assertEquals(5, hit.getRank()); + assertEquals("term 496", hit.field("text0").getValue()); + assertEquals("term 504", hit.field("text1").getValue()); + } + ); } } - public void testMultiBM25() { + public void testMultiBM25WithAggregation() { for (SearchType searchType : SearchType.CURRENTLY_SUPPORTED) { - SearchResponse response = prepareSearch("nrd_index").setSearchType(searchType) - .setRankBuilder(new RRFRankBuilder(8, 1)) + assertResponse( + prepareSearch("nrd_index").setSearchType(searchType) + .setRankBuilder(new RRFRankBuilder(8, 1)) + .setTrackTotalHits(false) + .setSubSearches( + List.of( + new SubSearchSourceBuilder( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("text0", "500").boost(10.0f)) + .should(QueryBuilders.termQuery("text0", "499").boost(9.0f)) + .should(QueryBuilders.termQuery("text0", "498").boost(8.0f)) + .should(QueryBuilders.termQuery("text0", "497").boost(7.0f)) + .should(QueryBuilders.termQuery("text0", "496").boost(6.0f)) + .should(QueryBuilders.termQuery("text0", "495").boost(5.0f)) + .should(QueryBuilders.termQuery("text0", "494").boost(4.0f)) + .should(QueryBuilders.termQuery("text0", "492").boost(3.0f)) + .should(QueryBuilders.termQuery("text0", "491").boost(2.0f)) + .should(QueryBuilders.termQuery("text0", "490").boost(1.0f)) + ), + new SubSearchSourceBuilder( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("text1", "508").boost(9.0f)) + .should(QueryBuilders.termQuery("text1", "304").boost(8.0f)) + .should(QueryBuilders.termQuery("text1", "501").boost(7.0f)) + .should(QueryBuilders.termQuery("text1", 
"504").boost(6.0f)) + .should(QueryBuilders.termQuery("text1", "502").boost(5.0f)) + .should(QueryBuilders.termQuery("text1", "499").boost(4.0f)) + .should(QueryBuilders.termQuery("text1", "801").boost(3.0f)) + .should(QueryBuilders.termQuery("text1", "201").boost(2.0f)) + .should(QueryBuilders.termQuery("text1", "492").boost(1.0f)) + ) + ) + ) + .addFetchField("text0") + .addFetchField("text1") + .setSize(5) + .addAggregation(AggregationBuilders.terms("sums").field("int")), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(5, response.getHits().getHits().length); + + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getRank()); + assertEquals("term 492", hit.field("text0").getValue()); + assertEquals("term 508", hit.field("text1").getValue()); + + hit = response.getHits().getAt(1); + assertEquals(2, hit.getRank()); + assertEquals("term 499", hit.field("text0").getValue()); + assertEquals("term 501", hit.field("text1").getValue()); + + hit = response.getHits().getAt(2); + assertEquals(3, hit.getRank()); + assertEquals("term 500", hit.field("text0").getValue()); + assertEquals("term 500", hit.field("text1").getValue()); + + hit = response.getHits().getAt(3); + assertEquals(4, hit.getRank()); + assertEquals("term 498", hit.field("text0").getValue()); + assertEquals("term 502", hit.field("text1").getValue()); + + hit = response.getHits().getAt(4); + assertEquals(5, hit.getRank()); + assertEquals("term 496", hit.field("text0").getValue()); + assertEquals("term 504", hit.field("text1").getValue()); + + LongTerms aggregation = response.getAggregations().get("sums"); + assertEquals(3, aggregation.getBuckets().size()); + + for (LongTerms.Bucket bucket : aggregation.getBuckets()) { + if (0L == (long) bucket.getKey()) { + assertEquals(5, bucket.getDocCount()); + } else if (1L == (long) bucket.getKey()) { + assertEquals(6, bucket.getDocCount()); + } else if (2L == (long) bucket.getKey()) { + assertEquals(4, bucket.getDocCount()); + } else { + throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]"); + } + } + } + ); + } + } + + public void testMultiBM25AndSingleKnn() { + float[] queryVector = { 500.0f }; + KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector_asc", queryVector, 101, 1001, null); + assertResponse( + prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(101, 1)) .setTrackTotalHits(false) + .setKnnSearch(List.of(knnSearch)) .setSubSearches( List.of( new SubSearchSourceBuilder( @@ -559,54 +730,43 @@ public void testMultiBM25() { .should(QueryBuilders.termQuery("text1", "304").boost(8.0f)) .should(QueryBuilders.termQuery("text1", "501").boost(7.0f)) .should(QueryBuilders.termQuery("text1", "504").boost(6.0f)) - .should(QueryBuilders.termQuery("text1", "502").boost(5.0f)) - .should(QueryBuilders.termQuery("text1", "499").boost(4.0f)) - .should(QueryBuilders.termQuery("text1", "800").boost(3.0f)) - .should(QueryBuilders.termQuery("text1", "201").boost(2.0f)) - .should(QueryBuilders.termQuery("text1", "492").boost(1.0f)) + .should(QueryBuilders.termQuery("text1", "492").boost(5.0f)) + .should(QueryBuilders.termQuery("text1", "502").boost(4.0f)) + .should(QueryBuilders.termQuery("text1", "499").boost(3.0f)) + .should(QueryBuilders.termQuery("text1", "800").boost(2.0f)) + .should(QueryBuilders.termQuery("text1", "201").boost(1.0f)) ) ) ) .addFetchField("text0") .addFetchField("text1") - .setSize(5) - .get(); - - assertNull(response.getHits().getTotalHits()); - assertEquals(5, 
response.getHits().getHits().length); - - SearchHit hit = response.getHits().getAt(0); - assertEquals(1, hit.getRank()); - assertEquals("term 492", hit.field("text0").getValue()); - assertEquals("term 508", hit.field("text1").getValue()); - - hit = response.getHits().getAt(1); - assertEquals(2, hit.getRank()); - assertEquals("term 499", hit.field("text0").getValue()); - assertEquals("term 501", hit.field("text1").getValue()); - - hit = response.getHits().getAt(2); - assertEquals(3, hit.getRank()); - assertEquals("term 500", hit.field("text0").getValue()); - assertEquals("term 500", hit.field("text1").getValue()); - - hit = response.getHits().getAt(3); - assertEquals(4, hit.getRank()); - assertEquals("term 498", hit.field("text0").getValue()); - assertEquals("term 502", hit.field("text1").getValue()); - - hit = response.getHits().getAt(4); - assertEquals(5, hit.getRank()); - assertEquals("term 496", hit.field("text0").getValue()); - assertEquals("term 504", hit.field("text1").getValue()); - } + .addFetchField("vector_asc") + .setSize(5), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(5, response.getHits().getHits().length); + + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getRank()); + assertEquals("term 500", hit.field("text0").getValue()); + assertEquals("term 500", hit.field("text1").getValue()); + assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); + + Set vectors = Arrays.stream(response.getHits().getHits()) + .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) + .collect(Collectors.toSet()); + assertEquals(Set.of(492.0, 496.0, 498.0, 499.0, 500.0), vectors); + } + ); } - public void testMultiBM25WithAggregation() { - for (SearchType searchType : SearchType.CURRENTLY_SUPPORTED) { - SearchResponse response = prepareSearch("nrd_index").setSearchType(searchType) - .setRankBuilder(new RRFRankBuilder(8, 1)) + public void testMultiBM25AndSingleKnnWithAggregation() { + float[] queryVector = { 500.0f }; + KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector_asc", queryVector, 101, 1001, null); + assertResponse( + prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(101, 1)) .setTrackTotalHits(false) + .setKnnSearch(List.of(knnSearch)) .setSubSearches( List.of( new SubSearchSourceBuilder( @@ -628,191 +788,50 @@ public void testMultiBM25WithAggregation() { .should(QueryBuilders.termQuery("text1", "304").boost(8.0f)) .should(QueryBuilders.termQuery("text1", "501").boost(7.0f)) .should(QueryBuilders.termQuery("text1", "504").boost(6.0f)) - .should(QueryBuilders.termQuery("text1", "502").boost(5.0f)) - .should(QueryBuilders.termQuery("text1", "499").boost(4.0f)) - .should(QueryBuilders.termQuery("text1", "801").boost(3.0f)) - .should(QueryBuilders.termQuery("text1", "201").boost(2.0f)) - .should(QueryBuilders.termQuery("text1", "492").boost(1.0f)) + .should(QueryBuilders.termQuery("text1", "492").boost(5.0f)) + .should(QueryBuilders.termQuery("text1", "502").boost(4.0f)) + .should(QueryBuilders.termQuery("text1", "499").boost(3.0f)) + .should(QueryBuilders.termQuery("text1", "800").boost(2.0f)) + .should(QueryBuilders.termQuery("text1", "201").boost(1.0f)) ) ) ) .addFetchField("text0") .addFetchField("text1") + .addFetchField("vector_asc") .setSize(5) - .addAggregation(AggregationBuilders.terms("sums").field("int")) - .get(); - - assertNull(response.getHits().getTotalHits()); - assertEquals(5, response.getHits().getHits().length); - - SearchHit hit = response.getHits().getAt(0); - 
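
[Note on the mechanical change running through this whole diff: `SearchResponse response = request.get()` followed by assertions becomes `assertResponse(request, response -> { ... })`. The point of the helper is lifecycle, not brevity — the response is released after the assertion block runs, even when an assertion throws. Roughly this shape (a simplified sketch, not the actual ElasticsearchAssertions source, which is generic over the request builder):

import java.util.function.Consumer;
import java.util.function.Supplier;

final class AssertResponseSketch {
    interface Releasable { void decRef(); } // stand-in for the real ref-counted response

    static <R extends Releasable> void assertResponse(Supplier<R> execute, Consumer<R> assertions) {
        R response = execute.get();
        try {
            assertions.accept(response); // the test's checks run here
        } finally {
            response.decRef(); // always released -- the leak the old bare .get() pattern allowed
        }
    }
}]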
assertEquals(1, hit.getRank()); - assertEquals("term 492", hit.field("text0").getValue()); - assertEquals("term 508", hit.field("text1").getValue()); - - hit = response.getHits().getAt(1); - assertEquals(2, hit.getRank()); - assertEquals("term 499", hit.field("text0").getValue()); - assertEquals("term 501", hit.field("text1").getValue()); - - hit = response.getHits().getAt(2); - assertEquals(3, hit.getRank()); - assertEquals("term 500", hit.field("text0").getValue()); - assertEquals("term 500", hit.field("text1").getValue()); - - hit = response.getHits().getAt(3); - assertEquals(4, hit.getRank()); - assertEquals("term 498", hit.field("text0").getValue()); - assertEquals("term 502", hit.field("text1").getValue()); - - hit = response.getHits().getAt(4); - assertEquals(5, hit.getRank()); - assertEquals("term 496", hit.field("text0").getValue()); - assertEquals("term 504", hit.field("text1").getValue()); - - LongTerms aggregation = response.getAggregations().get("sums"); - assertEquals(3, aggregation.getBuckets().size()); - - for (LongTerms.Bucket bucket : aggregation.getBuckets()) { - if (0L == (long) bucket.getKey()) { - assertEquals(5, bucket.getDocCount()); - } else if (1L == (long) bucket.getKey()) { - assertEquals(6, bucket.getDocCount()); - } else if (2L == (long) bucket.getKey()) { - assertEquals(4, bucket.getDocCount()); - } else { - throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]"); + .addAggregation(AggregationBuilders.terms("sums").field("int")), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(5, response.getHits().getHits().length); + + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getRank()); + assertEquals("term 500", hit.field("text0").getValue()); + assertEquals("term 500", hit.field("text1").getValue()); + assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); + + Set vectors = Arrays.stream(response.getHits().getHits()) + .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) + .collect(Collectors.toSet()); + assertEquals(Set.of(492.0, 496.0, 498.0, 499.0, 500.0), vectors); + + LongTerms aggregation = response.getAggregations().get("sums"); + assertEquals(3, aggregation.getBuckets().size()); + + for (LongTerms.Bucket bucket : aggregation.getBuckets()) { + if (0L == (long) bucket.getKey()) { + assertEquals(35, bucket.getDocCount()); + } else if (1L == (long) bucket.getKey()) { + assertEquals(35, bucket.getDocCount()); + } else if (2L == (long) bucket.getKey()) { + assertEquals(34, bucket.getDocCount()); + } else { + throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]"); + } } } - } - } - - public void testMultiBM25AndSingleKnn() { - float[] queryVector = { 500.0f }; - KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector_asc", queryVector, 101, 1001, null); - SearchResponse response = prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(101, 1)) - .setTrackTotalHits(false) - .setKnnSearch(List.of(knnSearch)) - .setSubSearches( - List.of( - new SubSearchSourceBuilder( - QueryBuilders.boolQuery() - .should(QueryBuilders.termQuery("text0", "500").boost(10.0f)) - .should(QueryBuilders.termQuery("text0", "499").boost(9.0f)) - .should(QueryBuilders.termQuery("text0", "498").boost(8.0f)) - .should(QueryBuilders.termQuery("text0", "497").boost(7.0f)) - .should(QueryBuilders.termQuery("text0", "496").boost(6.0f)) - .should(QueryBuilders.termQuery("text0", "495").boost(5.0f)) - 
.should(QueryBuilders.termQuery("text0", "494").boost(4.0f)) - .should(QueryBuilders.termQuery("text0", "492").boost(3.0f)) - .should(QueryBuilders.termQuery("text0", "491").boost(2.0f)) - .should(QueryBuilders.termQuery("text0", "490").boost(1.0f)) - ), - new SubSearchSourceBuilder( - QueryBuilders.boolQuery() - .should(QueryBuilders.termQuery("text1", "508").boost(9.0f)) - .should(QueryBuilders.termQuery("text1", "304").boost(8.0f)) - .should(QueryBuilders.termQuery("text1", "501").boost(7.0f)) - .should(QueryBuilders.termQuery("text1", "504").boost(6.0f)) - .should(QueryBuilders.termQuery("text1", "492").boost(5.0f)) - .should(QueryBuilders.termQuery("text1", "502").boost(4.0f)) - .should(QueryBuilders.termQuery("text1", "499").boost(3.0f)) - .should(QueryBuilders.termQuery("text1", "800").boost(2.0f)) - .should(QueryBuilders.termQuery("text1", "201").boost(1.0f)) - ) - ) - ) - .addFetchField("text0") - .addFetchField("text1") - .addFetchField("vector_asc") - .setSize(5) - .get(); - - assertNull(response.getHits().getTotalHits()); - assertEquals(5, response.getHits().getHits().length); - - SearchHit hit = response.getHits().getAt(0); - assertEquals(1, hit.getRank()); - assertEquals("term 500", hit.field("text0").getValue()); - assertEquals("term 500", hit.field("text1").getValue()); - assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); - - Set vectors = Arrays.stream(response.getHits().getHits()) - .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) - .collect(Collectors.toSet()); - assertEquals(Set.of(492.0, 496.0, 498.0, 499.0, 500.0), vectors); - } - - public void testMultiBM25AndSingleKnnWithAggregation() { - float[] queryVector = { 500.0f }; - KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector_asc", queryVector, 101, 1001, null); - SearchResponse response = prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(101, 1)) - .setTrackTotalHits(false) - .setKnnSearch(List.of(knnSearch)) - .setSubSearches( - List.of( - new SubSearchSourceBuilder( - QueryBuilders.boolQuery() - .should(QueryBuilders.termQuery("text0", "500").boost(10.0f)) - .should(QueryBuilders.termQuery("text0", "499").boost(9.0f)) - .should(QueryBuilders.termQuery("text0", "498").boost(8.0f)) - .should(QueryBuilders.termQuery("text0", "497").boost(7.0f)) - .should(QueryBuilders.termQuery("text0", "496").boost(6.0f)) - .should(QueryBuilders.termQuery("text0", "495").boost(5.0f)) - .should(QueryBuilders.termQuery("text0", "494").boost(4.0f)) - .should(QueryBuilders.termQuery("text0", "492").boost(3.0f)) - .should(QueryBuilders.termQuery("text0", "491").boost(2.0f)) - .should(QueryBuilders.termQuery("text0", "490").boost(1.0f)) - ), - new SubSearchSourceBuilder( - QueryBuilders.boolQuery() - .should(QueryBuilders.termQuery("text1", "508").boost(9.0f)) - .should(QueryBuilders.termQuery("text1", "304").boost(8.0f)) - .should(QueryBuilders.termQuery("text1", "501").boost(7.0f)) - .should(QueryBuilders.termQuery("text1", "504").boost(6.0f)) - .should(QueryBuilders.termQuery("text1", "492").boost(5.0f)) - .should(QueryBuilders.termQuery("text1", "502").boost(4.0f)) - .should(QueryBuilders.termQuery("text1", "499").boost(3.0f)) - .should(QueryBuilders.termQuery("text1", "800").boost(2.0f)) - .should(QueryBuilders.termQuery("text1", "201").boost(1.0f)) - ) - ) - ) - .addFetchField("text0") - .addFetchField("text1") - .addFetchField("vector_asc") - .setSize(5) - .addAggregation(AggregationBuilders.terms("sums").field("int")) - .get(); - - 
assertNull(response.getHits().getTotalHits()); - assertEquals(5, response.getHits().getHits().length); - - SearchHit hit = response.getHits().getAt(0); - assertEquals(1, hit.getRank()); - assertEquals("term 500", hit.field("text0").getValue()); - assertEquals("term 500", hit.field("text1").getValue()); - assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); - - Set vectors = Arrays.stream(response.getHits().getHits()) - .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) - .collect(Collectors.toSet()); - assertEquals(Set.of(492.0, 496.0, 498.0, 499.0, 500.0), vectors); - - LongTerms aggregation = response.getAggregations().get("sums"); - assertEquals(3, aggregation.getBuckets().size()); - - for (LongTerms.Bucket bucket : aggregation.getBuckets()) { - if (0L == (long) bucket.getKey()) { - assertEquals(35, bucket.getDocCount()); - } else if (1L == (long) bucket.getKey()) { - assertEquals(35, bucket.getDocCount()); - } else if (2L == (long) bucket.getKey()) { - assertEquals(34, bucket.getDocCount()); - } else { - throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]"); - } - } + ); } public void testMultiBM25AndMultipleKnn() { @@ -820,59 +839,61 @@ public void testMultiBM25AndMultipleKnn() { float[] queryVectorDesc = { 500.0f }; KnnSearchBuilder knnSearchAsc = new KnnSearchBuilder("vector_asc", queryVectorAsc, 101, 1001, null); KnnSearchBuilder knnSearchDesc = new KnnSearchBuilder("vector_desc", queryVectorDesc, 101, 1001, null); - SearchResponse response = prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(101, 1)) - .setTrackTotalHits(false) - .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc)) - .setSubSearches( - List.of( - new SubSearchSourceBuilder( - QueryBuilders.boolQuery() - .should(QueryBuilders.termQuery("text0", "500").boost(10.0f)) - .should(QueryBuilders.termQuery("text0", "499").boost(9.0f)) - .should(QueryBuilders.termQuery("text0", "498").boost(8.0f)) - .should(QueryBuilders.termQuery("text0", "497").boost(7.0f)) - .should(QueryBuilders.termQuery("text0", "496").boost(6.0f)) - .should(QueryBuilders.termQuery("text0", "495").boost(5.0f)) - .should(QueryBuilders.termQuery("text0", "494").boost(4.0f)) - .should(QueryBuilders.termQuery("text0", "492").boost(3.0f)) - .should(QueryBuilders.termQuery("text0", "491").boost(2.0f)) - .should(QueryBuilders.termQuery("text0", "490").boost(1.0f)) - ), - new SubSearchSourceBuilder( - QueryBuilders.boolQuery() - .should(QueryBuilders.termQuery("text1", "508").boost(9.0f)) - .should(QueryBuilders.termQuery("text1", "304").boost(8.0f)) - .should(QueryBuilders.termQuery("text1", "501").boost(7.0f)) - .should(QueryBuilders.termQuery("text1", "504").boost(6.0f)) - .should(QueryBuilders.termQuery("text1", "492").boost(5.0f)) - .should(QueryBuilders.termQuery("text1", "502").boost(4.0f)) - .should(QueryBuilders.termQuery("text1", "499").boost(3.0f)) - .should(QueryBuilders.termQuery("text1", "800").boost(2.0f)) - .should(QueryBuilders.termQuery("text1", "201").boost(1.0f)) + assertResponse( + prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(101, 1)) + .setTrackTotalHits(false) + .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc)) + .setSubSearches( + List.of( + new SubSearchSourceBuilder( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("text0", "500").boost(10.0f)) + .should(QueryBuilders.termQuery("text0", "499").boost(9.0f)) + .should(QueryBuilders.termQuery("text0", "498").boost(8.0f)) + .should(QueryBuilders.termQuery("text0", 
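
[Note on the jump from 5/6/4 to 35/35/34 in testMultiBM25AndSingleKnnWithAggregation: once a knn retriever joins the hybrid query, its candidates feed the aggregation too. Assuming k = 101 around query vector {500} retrieves exactly docs 450..550, the term queries add only 696, 200 and 799 outside that window, so 104 documents split mod 3 into the asserted counts. A quick check under those assumptions:

import java.util.Map;
import java.util.TreeMap;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class KnnBucketCountSketch {
    public static void main(String[] args) {
        Map<Integer, Long> buckets = IntStream.concat(
                IntStream.rangeClosed(450, 550), // the 101 knn candidates nearest to 500
                IntStream.of(696, 200, 799))     // term-query matches outside the knn window
            .boxed()
            .collect(Collectors.groupingBy(doc -> doc % 3, TreeMap::new, Collectors.counting()));
        System.out.println(buckets); // {0=35, 1=35, 2=34} -- the asserted doc counts
    }
}

The same model explains the 34/34/33 counts later in testBM25AndKnnWithBucketAggregation, where all the bm25 matches fall inside the knn window and only the 101 candidates remain.]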
"497").boost(7.0f)) + .should(QueryBuilders.termQuery("text0", "496").boost(6.0f)) + .should(QueryBuilders.termQuery("text0", "495").boost(5.0f)) + .should(QueryBuilders.termQuery("text0", "494").boost(4.0f)) + .should(QueryBuilders.termQuery("text0", "492").boost(3.0f)) + .should(QueryBuilders.termQuery("text0", "491").boost(2.0f)) + .should(QueryBuilders.termQuery("text0", "490").boost(1.0f)) + ), + new SubSearchSourceBuilder( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("text1", "508").boost(9.0f)) + .should(QueryBuilders.termQuery("text1", "304").boost(8.0f)) + .should(QueryBuilders.termQuery("text1", "501").boost(7.0f)) + .should(QueryBuilders.termQuery("text1", "504").boost(6.0f)) + .should(QueryBuilders.termQuery("text1", "492").boost(5.0f)) + .should(QueryBuilders.termQuery("text1", "502").boost(4.0f)) + .should(QueryBuilders.termQuery("text1", "499").boost(3.0f)) + .should(QueryBuilders.termQuery("text1", "800").boost(2.0f)) + .should(QueryBuilders.termQuery("text1", "201").boost(1.0f)) + ) ) ) - ) - .addFetchField("text0") - .addFetchField("text1") - .addFetchField("vector_asc") - .addFetchField("vector_desc") - .setSize(5) - .get(); - - assertNull(response.getHits().getTotalHits()); - assertEquals(5, response.getHits().getHits().length); - - SearchHit hit = response.getHits().getAt(0); - assertEquals(1, hit.getRank()); - assertEquals("term 500", hit.field("text0").getValue()); - assertEquals("term 500", hit.field("text1").getValue()); - assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); - assertEquals(500.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0); - - Set vectors = Arrays.stream(response.getHits().getHits()) - .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) - .collect(Collectors.toSet()); - assertEquals(Set.of(492.0, 498.0, 499.0, 500.0, 501.0), vectors); + .addFetchField("text0") + .addFetchField("text1") + .addFetchField("vector_asc") + .addFetchField("vector_desc") + .setSize(5), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(5, response.getHits().getHits().length); + + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getRank()); + assertEquals("term 500", hit.field("text0").getValue()); + assertEquals("term 500", hit.field("text1").getValue()); + assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); + assertEquals(500.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0); + + Set vectors = Arrays.stream(response.getHits().getHits()) + .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) + .collect(Collectors.toSet()); + assertEquals(Set.of(492.0, 498.0, 499.0, 500.0, 501.0), vectors); + } + ); } public void testMultiBM25AndMultipleKnnWithAggregation() { @@ -880,74 +901,76 @@ public void testMultiBM25AndMultipleKnnWithAggregation() { float[] queryVectorDesc = { 500.0f }; KnnSearchBuilder knnSearchAsc = new KnnSearchBuilder("vector_asc", queryVectorAsc, 101, 1001, null); KnnSearchBuilder knnSearchDesc = new KnnSearchBuilder("vector_desc", queryVectorDesc, 101, 1001, null); - SearchResponse response = prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(101, 1)) - .setTrackTotalHits(false) - .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc)) - .setSubSearches( - List.of( - new SubSearchSourceBuilder( - QueryBuilders.boolQuery() - .should(QueryBuilders.termQuery("text0", "500").boost(10.0f)) - .should(QueryBuilders.termQuery("text0", "499").boost(9.0f)) - 
.should(QueryBuilders.termQuery("text0", "498").boost(8.0f)) - .should(QueryBuilders.termQuery("text0", "497").boost(7.0f)) - .should(QueryBuilders.termQuery("text0", "496").boost(6.0f)) - .should(QueryBuilders.termQuery("text0", "495").boost(5.0f)) - .should(QueryBuilders.termQuery("text0", "494").boost(4.0f)) - .should(QueryBuilders.termQuery("text0", "492").boost(3.0f)) - .should(QueryBuilders.termQuery("text0", "491").boost(2.0f)) - .should(QueryBuilders.termQuery("text0", "490").boost(1.0f)) - ), - new SubSearchSourceBuilder( - QueryBuilders.boolQuery() - .should(QueryBuilders.termQuery("text1", "508").boost(9.0f)) - .should(QueryBuilders.termQuery("text1", "304").boost(8.0f)) - .should(QueryBuilders.termQuery("text1", "501").boost(7.0f)) - .should(QueryBuilders.termQuery("text1", "504").boost(6.0f)) - .should(QueryBuilders.termQuery("text1", "492").boost(5.0f)) - .should(QueryBuilders.termQuery("text1", "502").boost(4.0f)) - .should(QueryBuilders.termQuery("text1", "499").boost(3.0f)) - .should(QueryBuilders.termQuery("text1", "800").boost(2.0f)) - .should(QueryBuilders.termQuery("text1", "201").boost(1.0f)) + assertResponse( + prepareSearch("nrd_index").setRankBuilder(new RRFRankBuilder(101, 1)) + .setTrackTotalHits(false) + .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc)) + .setSubSearches( + List.of( + new SubSearchSourceBuilder( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("text0", "500").boost(10.0f)) + .should(QueryBuilders.termQuery("text0", "499").boost(9.0f)) + .should(QueryBuilders.termQuery("text0", "498").boost(8.0f)) + .should(QueryBuilders.termQuery("text0", "497").boost(7.0f)) + .should(QueryBuilders.termQuery("text0", "496").boost(6.0f)) + .should(QueryBuilders.termQuery("text0", "495").boost(5.0f)) + .should(QueryBuilders.termQuery("text0", "494").boost(4.0f)) + .should(QueryBuilders.termQuery("text0", "492").boost(3.0f)) + .should(QueryBuilders.termQuery("text0", "491").boost(2.0f)) + .should(QueryBuilders.termQuery("text0", "490").boost(1.0f)) + ), + new SubSearchSourceBuilder( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("text1", "508").boost(9.0f)) + .should(QueryBuilders.termQuery("text1", "304").boost(8.0f)) + .should(QueryBuilders.termQuery("text1", "501").boost(7.0f)) + .should(QueryBuilders.termQuery("text1", "504").boost(6.0f)) + .should(QueryBuilders.termQuery("text1", "492").boost(5.0f)) + .should(QueryBuilders.termQuery("text1", "502").boost(4.0f)) + .should(QueryBuilders.termQuery("text1", "499").boost(3.0f)) + .should(QueryBuilders.termQuery("text1", "800").boost(2.0f)) + .should(QueryBuilders.termQuery("text1", "201").boost(1.0f)) + ) ) ) - ) - .addFetchField("text0") - .addFetchField("text1") - .addFetchField("vector_asc") - .addFetchField("vector_desc") - .setSize(5) - .addAggregation(AggregationBuilders.terms("sums").field("int")) - .get(); - - assertNull(response.getHits().getTotalHits()); - assertEquals(5, response.getHits().getHits().length); - - SearchHit hit = response.getHits().getAt(0); - assertEquals(1, hit.getRank()); - assertEquals("term 500", hit.field("text0").getValue()); - assertEquals("term 500", hit.field("text1").getValue()); - assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); - assertEquals(500.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0); - - Set vectors = Arrays.stream(response.getHits().getHits()) - .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) - .collect(Collectors.toSet()); - assertEquals(Set.of(492.0, 
498.0, 499.0, 500.0, 501.0), vectors); - - LongTerms aggregation = response.getAggregations().get("sums"); - assertEquals(3, aggregation.getBuckets().size()); - - for (LongTerms.Bucket bucket : aggregation.getBuckets()) { - if (0L == (long) bucket.getKey()) { - assertEquals(35, bucket.getDocCount()); - } else if (1L == (long) bucket.getKey()) { - assertEquals(35, bucket.getDocCount()); - } else if (2L == (long) bucket.getKey()) { - assertEquals(34, bucket.getDocCount()); - } else { - throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]"); + .addFetchField("text0") + .addFetchField("text1") + .addFetchField("vector_asc") + .addFetchField("vector_desc") + .setSize(5) + .addAggregation(AggregationBuilders.terms("sums").field("int")), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(5, response.getHits().getHits().length); + + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getRank()); + assertEquals("term 500", hit.field("text0").getValue()); + assertEquals("term 500", hit.field("text1").getValue()); + assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); + assertEquals(500.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0); + + Set vectors = Arrays.stream(response.getHits().getHits()) + .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) + .collect(Collectors.toSet()); + assertEquals(Set.of(492.0, 498.0, 499.0, 500.0, 501.0), vectors); + + LongTerms aggregation = response.getAggregations().get("sums"); + assertEquals(3, aggregation.getBuckets().size()); + + for (LongTerms.Bucket bucket : aggregation.getBuckets()) { + if (0L == (long) bucket.getKey()) { + assertEquals(35, bucket.getDocCount()); + } else if (1L == (long) bucket.getKey()) { + assertEquals(35, bucket.getDocCount()); + } else if (2L == (long) bucket.getKey()) { + assertEquals(34, bucket.getDocCount()); + } else { + throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]"); + } + } } - } + ); } } diff --git a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardCanMatchIT.java b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardCanMatchIT.java index 0e51958ea164e..084ccc88bee33 100644 --- a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardCanMatchIT.java +++ b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardCanMatchIT.java @@ -9,7 +9,6 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -31,6 +30,7 @@ import java.util.List; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; @ESIntegTestCase.ClusterScope(maxNumDataNodes = 3) @ESIntegTestCase.SuiteScopeTestCase @@ -129,13 +129,13 @@ public void testCanMatchShard() throws IOException { int shardB = -1; for (int i = 0; i < 10; i++) { - DocWriteResponse ir = client().prepareIndex("value_index").setSource("value", "" + i).setRouting("a").get(); + DocWriteResponse ir = prepareIndex("value_index").setSource("value", "" + i).setRouting("a").get(); int a = 
ir.getShardId().id(); assertTrue(shardA == a || shardA == -1); shardA = a; } for (int i = 10; i < 20; i++) { - DocWriteResponse ir = client().prepareIndex("value_index").setSource("value", "" + i).setRouting("b").get(); + DocWriteResponse ir = prepareIndex("value_index").setSource("value", "" + i).setRouting("b").get(); int b = ir.getShardId().id(); assertTrue(shardB == b || shardB == -1); shardB = b; @@ -144,98 +144,108 @@ public void testCanMatchShard() throws IOException { indicesAdmin().prepareRefresh("value_index").get(); // match 2 separate shard with no overlap in queries - SearchResponse response = prepareSearch("value_index").setSearchType(SearchType.QUERY_THEN_FETCH) - .setPreFilterShardSize(1) - .setRankBuilder(new RRFRankBuilder(20, 1)) - .setTrackTotalHits(false) - .setSubSearches( - List.of( - new SubSearchSourceBuilder(new SkipShardPlugin.SkipShardQueryBuilder(shardA, shardB, "value", "9")), - new SubSearchSourceBuilder(new SkipShardPlugin.SkipShardQueryBuilder(shardA, shardB, "value", "19")) + assertResponse( + prepareSearch("value_index").setSearchType(SearchType.QUERY_THEN_FETCH) + .setPreFilterShardSize(1) + .setRankBuilder(new RRFRankBuilder(20, 1)) + .setTrackTotalHits(false) + .setSubSearches( + List.of( + new SubSearchSourceBuilder(new SkipShardPlugin.SkipShardQueryBuilder(shardA, shardB, "value", "9")), + new SubSearchSourceBuilder(new SkipShardPlugin.SkipShardQueryBuilder(shardA, shardB, "value", "19")) + ) ) - ) - .setSize(5) - .get(); - - assertNull(response.getHits().getTotalHits()); - assertEquals(2, response.getHits().getHits().length); - assertEquals(5, response.getSuccessfulShards()); - assertEquals(3, response.getSkippedShards()); + .setSize(5), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(2, response.getHits().getHits().length); + assertEquals(5, response.getSuccessfulShards()); + assertEquals(3, response.getSkippedShards()); + } + ); // match one shard with one query and do not match the other shard with one query - response = prepareSearch("value_index").setSearchType(SearchType.QUERY_THEN_FETCH) - .setPreFilterShardSize(1) - .setRankBuilder(new RRFRankBuilder(20, 1)) - .setTrackTotalHits(false) - .setSubSearches( - List.of( - new SubSearchSourceBuilder(new SkipShardPlugin.SkipShardQueryBuilder(shardA, shardB, "value", "30")), - new SubSearchSourceBuilder(new SkipShardPlugin.SkipShardQueryBuilder(shardA, shardB, "value", "19")) + assertResponse( + prepareSearch("value_index").setSearchType(SearchType.QUERY_THEN_FETCH) + .setPreFilterShardSize(1) + .setRankBuilder(new RRFRankBuilder(20, 1)) + .setTrackTotalHits(false) + .setSubSearches( + List.of( + new SubSearchSourceBuilder(new SkipShardPlugin.SkipShardQueryBuilder(shardA, shardB, "value", "30")), + new SubSearchSourceBuilder(new SkipShardPlugin.SkipShardQueryBuilder(shardA, shardB, "value", "19")) + ) ) - ) - .setSize(5) - .get(); - - assertNull(response.getHits().getTotalHits()); - assertEquals(1, response.getHits().getHits().length); - assertEquals(5, response.getSuccessfulShards()); - assertEquals(4, response.getSkippedShards()); + .setSize(5), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(1, response.getHits().getHits().length); + assertEquals(5, response.getSuccessfulShards()); + assertEquals(4, response.getSkippedShards()); + } + ); // match no shards, but still use one to generate a search response - response = prepareSearch("value_index").setSearchType(SearchType.QUERY_THEN_FETCH) - .setPreFilterShardSize(1) - 
.setRankBuilder(new RRFRankBuilder(20, 1)) - .setTrackTotalHits(false) - .setSubSearches( - List.of( - new SubSearchSourceBuilder(new SkipShardPlugin.SkipShardQueryBuilder(shardA, shardB, "value", "30")), - new SubSearchSourceBuilder(new SkipShardPlugin.SkipShardQueryBuilder(shardA, shardB, "value", "40")) + assertResponse( + prepareSearch("value_index").setSearchType(SearchType.QUERY_THEN_FETCH) + .setPreFilterShardSize(1) + .setRankBuilder(new RRFRankBuilder(20, 1)) + .setTrackTotalHits(false) + .setSubSearches( + List.of( + new SubSearchSourceBuilder(new SkipShardPlugin.SkipShardQueryBuilder(shardA, shardB, "value", "30")), + new SubSearchSourceBuilder(new SkipShardPlugin.SkipShardQueryBuilder(shardA, shardB, "value", "40")) + ) ) - ) - .setSize(5) - .get(); - - assertNull(response.getHits().getTotalHits()); - assertEquals(0, response.getHits().getHits().length); - assertEquals(5, response.getSuccessfulShards()); - assertEquals(4, response.getSkippedShards()); + .setSize(5), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(0, response.getHits().getHits().length); + assertEquals(5, response.getSuccessfulShards()); + assertEquals(4, response.getSkippedShards()); + } + ); // match the same shard for both queries - response = prepareSearch("value_index").setSearchType(SearchType.QUERY_THEN_FETCH) - .setPreFilterShardSize(1) - .setRankBuilder(new RRFRankBuilder(20, 1)) - .setTrackTotalHits(false) - .setSubSearches( - List.of( - new SubSearchSourceBuilder(new SkipShardPlugin.SkipShardQueryBuilder(shardA, shardB, "value", "15")), - new SubSearchSourceBuilder(new SkipShardPlugin.SkipShardQueryBuilder(shardA, shardB, "value", "16")) + assertResponse( + prepareSearch("value_index").setSearchType(SearchType.QUERY_THEN_FETCH) + .setPreFilterShardSize(1) + .setRankBuilder(new RRFRankBuilder(20, 1)) + .setTrackTotalHits(false) + .setSubSearches( + List.of( + new SubSearchSourceBuilder(new SkipShardPlugin.SkipShardQueryBuilder(shardA, shardB, "value", "15")), + new SubSearchSourceBuilder(new SkipShardPlugin.SkipShardQueryBuilder(shardA, shardB, "value", "16")) + ) ) - ) - .setSize(5) - .get(); - - assertNull(response.getHits().getTotalHits()); - assertEquals(2, response.getHits().getHits().length); - assertEquals(5, response.getSuccessfulShards()); - assertEquals(4, response.getSkippedShards()); + .setSize(5), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(2, response.getHits().getHits().length); + assertEquals(5, response.getSuccessfulShards()); + assertEquals(4, response.getSkippedShards()); + } + ); // match one shard with the exact same query - response = prepareSearch("value_index").setSearchType(SearchType.QUERY_THEN_FETCH) - .setPreFilterShardSize(1) - .setRankBuilder(new RRFRankBuilder(20, 1)) - .setTrackTotalHits(false) - .setSubSearches( - List.of( - new SubSearchSourceBuilder(new SkipShardPlugin.SkipShardQueryBuilder(shardA, shardB, "value", "8")), - new SubSearchSourceBuilder(new SkipShardPlugin.SkipShardQueryBuilder(shardA, shardB, "value", "8")) + assertResponse( + prepareSearch("value_index").setSearchType(SearchType.QUERY_THEN_FETCH) + .setPreFilterShardSize(1) + .setRankBuilder(new RRFRankBuilder(20, 1)) + .setTrackTotalHits(false) + .setSubSearches( + List.of( + new SubSearchSourceBuilder(new SkipShardPlugin.SkipShardQueryBuilder(shardA, shardB, "value", "8")), + new SubSearchSourceBuilder(new SkipShardPlugin.SkipShardQueryBuilder(shardA, shardB, "value", "8")) + ) ) - ) - .setSize(5) - .get(); - - 
assertNull(response.getHits().getTotalHits()); - assertEquals(1, response.getHits().getHits().length); - assertEquals(5, response.getSuccessfulShards()); - assertEquals(4, response.getSkippedShards()); + .setSize(5), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(1, response.getHits().getHits().length); + assertEquals(5, response.getSuccessfulShards()); + assertEquals(4, response.getSkippedShards()); + } + ); } } diff --git a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankSingleShardIT.java b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankSingleShardIT.java index 46884a140ff4d..3a82f697acc9d 100644 --- a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankSingleShardIT.java +++ b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankSingleShardIT.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.rank.rrf; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; @@ -29,6 +28,8 @@ import java.util.Set; import java.util.stream.Collectors; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; + public class RRFRankSingleShardIT extends ESSingleNodeTestCase { @Override @@ -62,9 +63,9 @@ public void setupIndices() throws Exception { createIndex("tiny_index", indexSettings, builder); ensureGreen("tiny_index"); - client().prepareIndex("tiny_index").setSource("vector", new float[] { 0.0f }, "text", "term term").get(); - client().prepareIndex("tiny_index").setSource("vector", new float[] { 1.0f }, "text", "other").get(); - client().prepareIndex("tiny_index").setSource("vector", new float[] { 2.0f }, "text", "term").get(); + prepareIndex("tiny_index").setSource("vector", new float[] { 0.0f }, "text", "term term").get(); + prepareIndex("tiny_index").setSource("vector", new float[] { 1.0f }, "text", "other").get(); + prepareIndex("tiny_index").setSource("vector", new float[] { 2.0f }, "text", "term").get(); client().admin().indices().prepareRefresh("tiny_index").get(); @@ -102,20 +103,18 @@ public void setupIndices() throws Exception { ensureGreen(TimeValue.timeValueSeconds(120), "nrd_index"); for (int doc = 0; doc < 1001; ++doc) { - client().prepareIndex("nrd_index") - .setSource( - "vector_asc", - new float[] { doc }, - "vector_desc", - new float[] { 1000 - doc }, - "int", - doc % 3, - "text0", - "term " + doc, - "text1", - "term " + (1000 - doc) - ) - .get(); + prepareIndex("nrd_index").setSource( + "vector_asc", + new float[] { doc }, + "vector_desc", + new float[] { 1000 - doc }, + "int", + doc % 3, + "text0", + "term " + doc, + "text1", + "term " + (1000 - doc) + ).get(); } client().admin().indices().prepareRefresh("nrd_index").get(); @@ -124,71 +123,75 @@ public void setupIndices() throws Exception { public void testTotalDocsSmallerThanSize() { float[] queryVector = { 0.0f }; KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector", queryVector, 3, 3, null); - SearchResponse response = client().prepareSearch("tiny_index") - .setRankBuilder(new RRFRankBuilder(100, 1)) - .setKnnSearch(List.of(knnSearch)) - .setQuery(QueryBuilders.termQuery("text", "term")) - .addFetchField("vector") - .addFetchField("text") - .get(); - - // we cast to Number when looking at values in vector fields because different xContentTypes may return 
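
[Note on the can-match assertions in RRFRankShardCanMatchIT: they all follow one rule of thumb. With setPreFilterShardSize(1) the coordinating node runs a can-match round first; a shard is skipped when none of the sub-searches can match it; skipped shards still count toward successfulShards; and — as the "match no shards" comment above says — at least one shard is always kept so a search response can be produced. A toy model of that bookkeeping (an assumption-level sketch, not the server's implementation):

import java.util.Set;

public class CanMatchSketch {
    static int skippedShards(int totalShards, Set<Integer> matchableShards) {
        int kept = Math.max(1, matchableShards.size()); // keep one shard even when nothing matches
        return totalShards - kept;
    }

    public static void main(String[] args) {
        int shardA = 0, shardB = 1;                                   // hypothetical shard ids
        System.out.println(skippedShards(5, Set.of(shardA, shardB))); // 3: the two queries hit different shards
        System.out.println(skippedShards(5, Set.of(shardB)));         // 4: only one shard can match
        System.out.println(skippedShards(5, Set.of()));               // 4: none can match, one kept anyway
    }
}]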
Float or Double - - assertEquals(3, response.getHits().getHits().length); - - SearchHit hit = response.getHits().getAt(0); - assertEquals(1, hit.getRank()); - assertEquals(0.0, ((Number) hit.field("vector").getValue()).doubleValue(), 0.0); - assertEquals("term term", hit.field("text").getValue()); - - hit = response.getHits().getAt(1); - assertEquals(2, hit.getRank()); - assertEquals(2.0, ((Number) hit.field("vector").getValue()).doubleValue(), 0.0); - assertEquals("term", hit.field("text").getValue()); - - hit = response.getHits().getAt(2); - assertEquals(3, hit.getRank()); - assertEquals(1.0, ((Number) hit.field("vector").getValue()).doubleValue(), 0.0); - assertEquals("other", hit.field("text").getValue()); + + assertResponse( + client().prepareSearch("tiny_index") + .setRankBuilder(new RRFRankBuilder(100, 1)) + .setKnnSearch(List.of(knnSearch)) + .setQuery(QueryBuilders.termQuery("text", "term")) + .addFetchField("vector") + .addFetchField("text"), + response -> { + // we cast to Number when looking at values in vector fields because different xContentTypes may return Float or Double + assertEquals(3, response.getHits().getHits().length); + + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getRank()); + assertEquals(0.0, ((Number) hit.field("vector").getValue()).doubleValue(), 0.0); + assertEquals("term term", hit.field("text").getValue()); + + hit = response.getHits().getAt(1); + assertEquals(2, hit.getRank()); + assertEquals(2.0, ((Number) hit.field("vector").getValue()).doubleValue(), 0.0); + assertEquals("term", hit.field("text").getValue()); + + hit = response.getHits().getAt(2); + assertEquals(3, hit.getRank()); + assertEquals(1.0, ((Number) hit.field("vector").getValue()).doubleValue(), 0.0); + assertEquals("other", hit.field("text").getValue()); + } + ); } public void testBM25AndKnn() { float[] queryVector = { 500.0f }; KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector_asc", queryVector, 101, 1001, null); - SearchResponse response = client().prepareSearch("nrd_index") - .setRankBuilder(new RRFRankBuilder(101, 1)) - .setTrackTotalHits(false) - .setKnnSearch(List.of(knnSearch)) - .setQuery( - QueryBuilders.boolQuery() - .should(QueryBuilders.termQuery("text0", "500").boost(11.0f)) - .should(QueryBuilders.termQuery("text0", "499").boost(10.0f)) - .should(QueryBuilders.termQuery("text0", "498").boost(9.0f)) - .should(QueryBuilders.termQuery("text0", "497").boost(8.0f)) - .should(QueryBuilders.termQuery("text0", "496").boost(7.0f)) - .should(QueryBuilders.termQuery("text0", "495").boost(6.0f)) - .should(QueryBuilders.termQuery("text0", "494").boost(5.0f)) - .should(QueryBuilders.termQuery("text0", "493").boost(4.0f)) - .should(QueryBuilders.termQuery("text0", "492").boost(3.0f)) - .should(QueryBuilders.termQuery("text0", "491").boost(2.0f)) - ) - .addFetchField("vector_asc") - .addFetchField("text0") - .setSize(11) - .get(); - - assertNull(response.getHits().getTotalHits()); - assertEquals(11, response.getHits().getHits().length); - - SearchHit hit = response.getHits().getAt(0); - assertEquals(1, hit.getRank()); - assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); - assertEquals("term 500", hit.field("text0").getValue()); - - Set vectors = Arrays.stream(response.getHits().getHits()) - .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) - .collect(Collectors.toSet()); - assertEquals(Set.of(492.0, 493.0, 494.0, 495.0, 496.0, 497.0, 498.0, 499.0, 500.0, 501.0, 502.0), vectors); + assertResponse( + 
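
[Note on the `((Number) ...).doubleValue()` pattern the comment above explains: depending on which xContent type the response round-trips through, a stored float can be parsed back as either Float or Double, so Number is the only safe supertype to assert through. An illustration — the concrete type-per-format mapping here is my assumption:

public class NumberCastSketch {
    public static void main(String[] args) {
        Object parsedAsDouble = Double.valueOf(0.0);  // e.g. what a JSON parser typically returns
        Object parsedAsFloat = Float.valueOf(0.0f);   // e.g. what a width-preserving binary format may return
        // Casting straight to Double throws ClassCastException for the Float
        // case and vice versa; widening through Number works for both.
        System.out.println(((Number) parsedAsDouble).doubleValue()
            == ((Number) parsedAsFloat).doubleValue()); // true
    }
}]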
client().prepareSearch("nrd_index") + .setRankBuilder(new RRFRankBuilder(101, 1)) + .setTrackTotalHits(false) + .setKnnSearch(List.of(knnSearch)) + .setQuery( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("text0", "500").boost(11.0f)) + .should(QueryBuilders.termQuery("text0", "499").boost(10.0f)) + .should(QueryBuilders.termQuery("text0", "498").boost(9.0f)) + .should(QueryBuilders.termQuery("text0", "497").boost(8.0f)) + .should(QueryBuilders.termQuery("text0", "496").boost(7.0f)) + .should(QueryBuilders.termQuery("text0", "495").boost(6.0f)) + .should(QueryBuilders.termQuery("text0", "494").boost(5.0f)) + .should(QueryBuilders.termQuery("text0", "493").boost(4.0f)) + .should(QueryBuilders.termQuery("text0", "492").boost(3.0f)) + .should(QueryBuilders.termQuery("text0", "491").boost(2.0f)) + ) + .addFetchField("vector_asc") + .addFetchField("text0") + .setSize(11), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(11, response.getHits().getHits().length); + + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getRank()); + assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); + assertEquals("term 500", hit.field("text0").getValue()); + + Set vectors = Arrays.stream(response.getHits().getHits()) + .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) + .collect(Collectors.toSet()); + assertEquals(Set.of(492.0, 493.0, 494.0, 495.0, 496.0, 497.0, 498.0, 499.0, 500.0, 501.0, 502.0), vectors); + } + ); } public void testMultipleOnlyKnn() { @@ -196,49 +199,51 @@ public void testMultipleOnlyKnn() { float[] queryVectorDesc = { 500.0f }; KnnSearchBuilder knnSearchAsc = new KnnSearchBuilder("vector_asc", queryVectorAsc, 51, 1001, null); KnnSearchBuilder knnSearchDesc = new KnnSearchBuilder("vector_desc", queryVectorDesc, 51, 1001, null); - SearchResponse response = client().prepareSearch("nrd_index") - .setRankBuilder(new RRFRankBuilder(51, 1)) - .setTrackTotalHits(true) - .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc)) - .addFetchField("vector_asc") - .addFetchField("text0") - .setSize(19) - .get(); - - assertEquals(51, response.getHits().getTotalHits().value); - assertEquals(19, response.getHits().getHits().length); - - SearchHit hit = response.getHits().getAt(0); - assertEquals(1, hit.getRank()); - assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); - assertEquals("term 500", hit.field("text0").getValue()); - - Set vectors = Arrays.stream(response.getHits().getHits()) - .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) - .collect(Collectors.toSet()); - assertEquals( - Set.of( - 491.0, - 492.0, - 493.0, - 494.0, - 495.0, - 496.0, - 497.0, - 498.0, - 499.0, - 500.0, - 501.0, - 502.0, - 503.0, - 504.0, - 505.0, - 506.0, - 507.0, - 508.0, - 509.0 - ), - vectors + assertResponse( + client().prepareSearch("nrd_index") + .setRankBuilder(new RRFRankBuilder(51, 1)) + .setTrackTotalHits(true) + .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc)) + .addFetchField("vector_asc") + .addFetchField("text0") + .setSize(19), + response -> { + assertEquals(51, response.getHits().getTotalHits().value); + assertEquals(19, response.getHits().getHits().length); + + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getRank()); + assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); + assertEquals("term 500", hit.field("text0").getValue()); + + Set vectors = Arrays.stream(response.getHits().getHits()) + 
.map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) + .collect(Collectors.toSet()); + assertEquals( + Set.of( + 491.0, + 492.0, + 493.0, + 494.0, + 495.0, + 496.0, + 497.0, + 498.0, + 499.0, + 500.0, + 501.0, + 502.0, + 503.0, + 504.0, + 505.0, + 506.0, + 507.0, + 508.0, + 509.0 + ), + vectors + ); + } ); } @@ -247,126 +252,130 @@ public void testBM25AndMultipleKnn() { float[] queryVectorDesc = { 500.0f }; KnnSearchBuilder knnSearchAsc = new KnnSearchBuilder("vector_asc", queryVectorAsc, 51, 1001, null); KnnSearchBuilder knnSearchDesc = new KnnSearchBuilder("vector_desc", queryVectorDesc, 51, 1001, null); - SearchResponse response = client().prepareSearch("nrd_index") - .setRankBuilder(new RRFRankBuilder(51, 1)) - .setTrackTotalHits(false) - .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc)) - .setQuery( - QueryBuilders.boolQuery() - .should(QueryBuilders.termQuery("text0", "500").boost(10.0f)) - .should(QueryBuilders.termQuery("text0", "499").boost(20.0f)) - .should(QueryBuilders.termQuery("text0", "498").boost(8.0f)) - .should(QueryBuilders.termQuery("text0", "497").boost(7.0f)) - .should(QueryBuilders.termQuery("text0", "496").boost(6.0f)) - .should(QueryBuilders.termQuery("text0", "485").boost(5.0f)) - .should(QueryBuilders.termQuery("text0", "494").boost(4.0f)) - .should(QueryBuilders.termQuery("text0", "506").boost(3.0f)) - .should(QueryBuilders.termQuery("text0", "505").boost(2.0f)) - .should(QueryBuilders.termQuery("text0", "511").boost(9.0f)) - ) - .addFetchField("vector_asc") - .addFetchField("vector_desc") - .addFetchField("text0") - .setSize(19) - .get(); - - assertNull(response.getHits().getTotalHits()); - assertEquals(19, response.getHits().getHits().length); - - SearchHit hit = response.getHits().getAt(0); - assertEquals(1, hit.getRank()); - assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); - assertEquals(500.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0); - assertEquals("term 500", hit.field("text0").getValue()); - - hit = response.getHits().getAt(1); - assertEquals(2, hit.getRank()); - assertEquals(499.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); - assertEquals(501.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0); - assertEquals("term 499", hit.field("text0").getValue()); - - Set vectors = Arrays.stream(response.getHits().getHits()) - .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) - .collect(Collectors.toSet()); - assertEquals( - Set.of( - 485.0, - 492.0, - 493.0, - 494.0, - 495.0, - 496.0, - 497.0, - 498.0, - 499.0, - 500.0, - 501.0, - 502.0, - 503.0, - 504.0, - 505.0, - 506.0, - 507.0, - 508.0, - 511.0 - ), - vectors + assertResponse( + client().prepareSearch("nrd_index") + .setRankBuilder(new RRFRankBuilder(51, 1)) + .setTrackTotalHits(false) + .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc)) + .setQuery( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("text0", "500").boost(10.0f)) + .should(QueryBuilders.termQuery("text0", "499").boost(20.0f)) + .should(QueryBuilders.termQuery("text0", "498").boost(8.0f)) + .should(QueryBuilders.termQuery("text0", "497").boost(7.0f)) + .should(QueryBuilders.termQuery("text0", "496").boost(6.0f)) + .should(QueryBuilders.termQuery("text0", "485").boost(5.0f)) + .should(QueryBuilders.termQuery("text0", "494").boost(4.0f)) + .should(QueryBuilders.termQuery("text0", "506").boost(3.0f)) + .should(QueryBuilders.termQuery("text0", "505").boost(2.0f)) + 
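
[Note on testMultipleOnlyKnn's totalHits == 51: it doubles as a consistency check on the fixture. vector_asc stores the doc id and vector_desc stores 1000 - doc id, so for query vector {500} both 51-candidate searches retrieve exactly docs 475..525 and their union adds nothing. Confirming the window arithmetic:

public class KnnWindowSketch {
    public static void main(String[] args) {
        // The 51 nearest neighbours of 500 on the 0..1000 line, for either field:
        // doc 500 itself plus 25 docs on each side.
        long window = java.util.stream.IntStream.rangeClosed(475, 525).count();
        System.out.println(window); // 51 -- matching the asserted total hits
    }
}]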
.should(QueryBuilders.termQuery("text0", "511").boost(9.0f)) + ) + .addFetchField("vector_asc") + .addFetchField("vector_desc") + .addFetchField("text0") + .setSize(19), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(19, response.getHits().getHits().length); + + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getRank()); + assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); + assertEquals(500.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0); + assertEquals("term 500", hit.field("text0").getValue()); + + hit = response.getHits().getAt(1); + assertEquals(2, hit.getRank()); + assertEquals(499.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); + assertEquals(501.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0); + assertEquals("term 499", hit.field("text0").getValue()); + + Set vectors = Arrays.stream(response.getHits().getHits()) + .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) + .collect(Collectors.toSet()); + assertEquals( + Set.of( + 485.0, + 492.0, + 493.0, + 494.0, + 495.0, + 496.0, + 497.0, + 498.0, + 499.0, + 500.0, + 501.0, + 502.0, + 503.0, + 504.0, + 505.0, + 506.0, + 507.0, + 508.0, + 511.0 + ), + vectors + ); + } ); } public void testBM25AndKnnWithBucketAggregation() { float[] queryVector = { 500.0f }; KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector_asc", queryVector, 101, 1001, null); - SearchResponse response = client().prepareSearch("nrd_index") - .setRankBuilder(new RRFRankBuilder(101, 1)) - .setTrackTotalHits(true) - .setKnnSearch(List.of(knnSearch)) - .setQuery( - QueryBuilders.boolQuery() - .should(QueryBuilders.termQuery("text0", "500").boost(11.0f)) - .should(QueryBuilders.termQuery("text0", "499").boost(10.0f)) - .should(QueryBuilders.termQuery("text0", "498").boost(9.0f)) - .should(QueryBuilders.termQuery("text0", "497").boost(8.0f)) - .should(QueryBuilders.termQuery("text0", "496").boost(7.0f)) - .should(QueryBuilders.termQuery("text0", "495").boost(6.0f)) - .should(QueryBuilders.termQuery("text0", "494").boost(5.0f)) - .should(QueryBuilders.termQuery("text0", "493").boost(4.0f)) - .should(QueryBuilders.termQuery("text0", "492").boost(3.0f)) - .should(QueryBuilders.termQuery("text0", "491").boost(2.0f)) - ) - .addFetchField("vector_asc") - .addFetchField("text0") - .setSize(11) - .addAggregation(AggregationBuilders.terms("sums").field("int")) - .get(); - - assertEquals(101, response.getHits().getTotalHits().value); - assertEquals(11, response.getHits().getHits().length); - - SearchHit hit = response.getHits().getAt(0); - assertEquals(1, hit.getRank()); - assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); - assertEquals("term 500", hit.field("text0").getValue()); - - Set vectors = Arrays.stream(response.getHits().getHits()) - .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) - .collect(Collectors.toSet()); - assertEquals(Set.of(492.0, 493.0, 494.0, 495.0, 496.0, 497.0, 498.0, 499.0, 500.0, 501.0, 502.0), vectors); - - LongTerms aggregation = response.getAggregations().get("sums"); - assertEquals(3, aggregation.getBuckets().size()); - - for (LongTerms.Bucket bucket : aggregation.getBuckets()) { - if (0L == (long) bucket.getKey()) { - assertEquals(34, bucket.getDocCount()); - } else if (1L == (long) bucket.getKey()) { - assertEquals(34, bucket.getDocCount()); - } else if (2L == (long) bucket.getKey()) { - assertEquals(33, 
bucket.getDocCount()); - } else { - throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]"); + assertResponse( + client().prepareSearch("nrd_index") + .setRankBuilder(new RRFRankBuilder(101, 1)) + .setTrackTotalHits(true) + .setKnnSearch(List.of(knnSearch)) + .setQuery( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("text0", "500").boost(11.0f)) + .should(QueryBuilders.termQuery("text0", "499").boost(10.0f)) + .should(QueryBuilders.termQuery("text0", "498").boost(9.0f)) + .should(QueryBuilders.termQuery("text0", "497").boost(8.0f)) + .should(QueryBuilders.termQuery("text0", "496").boost(7.0f)) + .should(QueryBuilders.termQuery("text0", "495").boost(6.0f)) + .should(QueryBuilders.termQuery("text0", "494").boost(5.0f)) + .should(QueryBuilders.termQuery("text0", "493").boost(4.0f)) + .should(QueryBuilders.termQuery("text0", "492").boost(3.0f)) + .should(QueryBuilders.termQuery("text0", "491").boost(2.0f)) + ) + .addFetchField("vector_asc") + .addFetchField("text0") + .setSize(11) + .addAggregation(AggregationBuilders.terms("sums").field("int")), + response -> { + assertEquals(101, response.getHits().getTotalHits().value); + assertEquals(11, response.getHits().getHits().length); + + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getRank()); + assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); + assertEquals("term 500", hit.field("text0").getValue()); + + Set vectors = Arrays.stream(response.getHits().getHits()) + .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) + .collect(Collectors.toSet()); + assertEquals(Set.of(492.0, 493.0, 494.0, 495.0, 496.0, 497.0, 498.0, 499.0, 500.0, 501.0, 502.0), vectors); + + LongTerms aggregation = response.getAggregations().get("sums"); + assertEquals(3, aggregation.getBuckets().size()); + + for (LongTerms.Bucket bucket : aggregation.getBuckets()) { + if (0L == (long) bucket.getKey()) { + assertEquals(34, bucket.getDocCount()); + } else if (1L == (long) bucket.getKey()) { + assertEquals(34, bucket.getDocCount()); + } else if (2L == (long) bucket.getKey()) { + assertEquals(33, bucket.getDocCount()); + } else { + throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]"); + } + } } - } + ); } public void testMultipleOnlyKnnWithAggregation() { @@ -374,66 +383,68 @@ public void testMultipleOnlyKnnWithAggregation() { float[] queryVectorDesc = { 500.0f }; KnnSearchBuilder knnSearchAsc = new KnnSearchBuilder("vector_asc", queryVectorAsc, 51, 1001, null); KnnSearchBuilder knnSearchDesc = new KnnSearchBuilder("vector_desc", queryVectorDesc, 51, 1001, null); - SearchResponse response = client().prepareSearch("nrd_index") - .setRankBuilder(new RRFRankBuilder(51, 1)) - .setTrackTotalHits(false) - .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc)) - .addFetchField("vector_asc") - .addFetchField("text0") - .setSize(19) - .addAggregation(AggregationBuilders.terms("sums").field("int")) - .get(); - - assertNull(response.getHits().getTotalHits()); - assertEquals(19, response.getHits().getHits().length); - - SearchHit hit = response.getHits().getAt(0); - assertEquals(1, hit.getRank()); - assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); - assertEquals("term 500", hit.field("text0").getValue()); - - Set vectors = Arrays.stream(response.getHits().getHits()) - .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) - .collect(Collectors.toSet()); - assertEquals( - Set.of( - 491.0, - 492.0, 
- 493.0, - 494.0, - 495.0, - 496.0, - 497.0, - 498.0, - 499.0, - 500.0, - 501.0, - 502.0, - 503.0, - 504.0, - 505.0, - 506.0, - 507.0, - 508.0, - 509.0 - ), - vectors - ); - - LongTerms aggregation = response.getAggregations().get("sums"); - assertEquals(3, aggregation.getBuckets().size()); - - for (LongTerms.Bucket bucket : aggregation.getBuckets()) { - if (0L == (long) bucket.getKey()) { - assertEquals(17, bucket.getDocCount()); - } else if (1L == (long) bucket.getKey()) { - assertEquals(17, bucket.getDocCount()); - } else if (2L == (long) bucket.getKey()) { - assertEquals(17, bucket.getDocCount()); - } else { - throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]"); + assertResponse( + client().prepareSearch("nrd_index") + .setRankBuilder(new RRFRankBuilder(51, 1)) + .setTrackTotalHits(false) + .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc)) + .addFetchField("vector_asc") + .addFetchField("text0") + .setSize(19) + .addAggregation(AggregationBuilders.terms("sums").field("int")), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(19, response.getHits().getHits().length); + + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getRank()); + assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); + assertEquals("term 500", hit.field("text0").getValue()); + + Set vectors = Arrays.stream(response.getHits().getHits()) + .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) + .collect(Collectors.toSet()); + assertEquals( + Set.of( + 491.0, + 492.0, + 493.0, + 494.0, + 495.0, + 496.0, + 497.0, + 498.0, + 499.0, + 500.0, + 501.0, + 502.0, + 503.0, + 504.0, + 505.0, + 506.0, + 507.0, + 508.0, + 509.0 + ), + vectors + ); + + LongTerms aggregation = response.getAggregations().get("sums"); + assertEquals(3, aggregation.getBuckets().size()); + + for (LongTerms.Bucket bucket : aggregation.getBuckets()) { + if (0L == (long) bucket.getKey()) { + assertEquals(17, bucket.getDocCount()); + } else if (1L == (long) bucket.getKey()) { + assertEquals(17, bucket.getDocCount()); + } else if (2L == (long) bucket.getKey()) { + assertEquals(17, bucket.getDocCount()); + } else { + throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]"); + } + } } - } + ); } public void testBM25AndMultipleKnnWithAggregation() { @@ -441,96 +452,260 @@ public void testBM25AndMultipleKnnWithAggregation() { float[] queryVectorDesc = { 500.0f }; KnnSearchBuilder knnSearchAsc = new KnnSearchBuilder("vector_asc", queryVectorAsc, 51, 1001, null); KnnSearchBuilder knnSearchDesc = new KnnSearchBuilder("vector_desc", queryVectorDesc, 51, 1001, null); - SearchResponse response = client().prepareSearch("nrd_index") - .setRankBuilder(new RRFRankBuilder(51, 1)) - .setTrackTotalHits(true) - .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc)) - .setQuery( - QueryBuilders.boolQuery() - .should(QueryBuilders.termQuery("text0", "500").boost(10.0f)) - .should(QueryBuilders.termQuery("text0", "499").boost(20.0f)) - .should(QueryBuilders.termQuery("text0", "498").boost(8.0f)) - .should(QueryBuilders.termQuery("text0", "497").boost(7.0f)) - .should(QueryBuilders.termQuery("text0", "496").boost(6.0f)) - .should(QueryBuilders.termQuery("text0", "485").boost(5.0f)) - .should(QueryBuilders.termQuery("text0", "494").boost(4.0f)) - .should(QueryBuilders.termQuery("text0", "506").boost(3.0f)) - .should(QueryBuilders.termQuery("text0", "505").boost(2.0f)) - .should(QueryBuilders.termQuery("text0", 
"511").boost(9.0f)) - ) - .addFetchField("vector_asc") - .addFetchField("vector_desc") - .addFetchField("text0") - .setSize(19) - .addAggregation(AggregationBuilders.terms("sums").field("int")) - .setStats("search") - .get(); - - assertEquals(51, response.getHits().getTotalHits().value); - assertEquals(19, response.getHits().getHits().length); - - SearchHit hit = response.getHits().getAt(0); - assertEquals(1, hit.getRank()); - assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); - assertEquals(500.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0); - assertEquals("term 500", hit.field("text0").getValue()); - - hit = response.getHits().getAt(1); - assertEquals(2, hit.getRank()); - assertEquals(499.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); - assertEquals(501.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0); - assertEquals("term 499", hit.field("text0").getValue()); - - Set vectors = Arrays.stream(response.getHits().getHits()) - .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) - .collect(Collectors.toSet()); - assertEquals( - Set.of( - 485.0, - 492.0, - 493.0, - 494.0, - 495.0, - 496.0, - 497.0, - 498.0, - 499.0, - 500.0, - 501.0, - 502.0, - 503.0, - 504.0, - 505.0, - 506.0, - 507.0, - 508.0, - 511.0 - ), - vectors + assertResponse( + client().prepareSearch("nrd_index") + .setRankBuilder(new RRFRankBuilder(51, 1)) + .setTrackTotalHits(true) + .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc)) + .setQuery( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("text0", "500").boost(10.0f)) + .should(QueryBuilders.termQuery("text0", "499").boost(20.0f)) + .should(QueryBuilders.termQuery("text0", "498").boost(8.0f)) + .should(QueryBuilders.termQuery("text0", "497").boost(7.0f)) + .should(QueryBuilders.termQuery("text0", "496").boost(6.0f)) + .should(QueryBuilders.termQuery("text0", "485").boost(5.0f)) + .should(QueryBuilders.termQuery("text0", "494").boost(4.0f)) + .should(QueryBuilders.termQuery("text0", "506").boost(3.0f)) + .should(QueryBuilders.termQuery("text0", "505").boost(2.0f)) + .should(QueryBuilders.termQuery("text0", "511").boost(9.0f)) + ) + .addFetchField("vector_asc") + .addFetchField("vector_desc") + .addFetchField("text0") + .setSize(19) + .addAggregation(AggregationBuilders.terms("sums").field("int")) + .setStats("search"), + response -> { + assertEquals(51, response.getHits().getTotalHits().value); + assertEquals(19, response.getHits().getHits().length); + + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getRank()); + assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); + assertEquals(500.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0); + assertEquals("term 500", hit.field("text0").getValue()); + + hit = response.getHits().getAt(1); + assertEquals(2, hit.getRank()); + assertEquals(499.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0); + assertEquals(501.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0); + assertEquals("term 499", hit.field("text0").getValue()); + + Set vectors = Arrays.stream(response.getHits().getHits()) + .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue()) + .collect(Collectors.toSet()); + assertEquals( + Set.of( + 485.0, + 492.0, + 493.0, + 494.0, + 495.0, + 496.0, + 497.0, + 498.0, + 499.0, + 500.0, + 501.0, + 502.0, + 503.0, + 504.0, + 505.0, + 506.0, + 507.0, + 508.0, + 511.0 + ), + vectors + ); + 
+                LongTerms aggregation = response.getAggregations().get("sums");
+                assertEquals(3, aggregation.getBuckets().size());
+
+                for (LongTerms.Bucket bucket : aggregation.getBuckets()) {
+                    if (0L == (long) bucket.getKey()) {
+                        assertEquals(17, bucket.getDocCount());
+                    } else if (1L == (long) bucket.getKey()) {
+                        assertEquals(17, bucket.getDocCount());
+                    } else if (2L == (long) bucket.getKey()) {
+                        assertEquals(17, bucket.getDocCount());
+                    } else {
+                        throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]");
+                    }
+                }
+            }
         );
+    }
 
-        LongTerms aggregation = response.getAggregations().get("sums");
-        assertEquals(3, aggregation.getBuckets().size());
-
-        for (LongTerms.Bucket bucket : aggregation.getBuckets()) {
-            if (0L == (long) bucket.getKey()) {
-                assertEquals(17, bucket.getDocCount());
-            } else if (1L == (long) bucket.getKey()) {
-                assertEquals(17, bucket.getDocCount());
-            } else if (2L == (long) bucket.getKey()) {
-                assertEquals(17, bucket.getDocCount());
-            } else {
-                throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]");
-            }
+    public void testMultiBM25() {
+        for (SearchType searchType : SearchType.CURRENTLY_SUPPORTED) {
+            assertResponse(
+                client().prepareSearch("nrd_index")
+                    .setSearchType(searchType)
+                    .setRankBuilder(new RRFRankBuilder(8, 1))
+                    .setTrackTotalHits(false)
+                    .setSubSearches(
+                        List.of(
+                            new SubSearchSourceBuilder(
+                                QueryBuilders.boolQuery()
+                                    .should(QueryBuilders.termQuery("text0", "500").boost(10.0f))
+                                    .should(QueryBuilders.termQuery("text0", "499").boost(9.0f))
+                                    .should(QueryBuilders.termQuery("text0", "498").boost(8.0f))
+                                    .should(QueryBuilders.termQuery("text0", "497").boost(7.0f))
+                                    .should(QueryBuilders.termQuery("text0", "496").boost(6.0f))
+                                    .should(QueryBuilders.termQuery("text0", "495").boost(5.0f))
+                                    .should(QueryBuilders.termQuery("text0", "494").boost(4.0f))
+                                    .should(QueryBuilders.termQuery("text0", "492").boost(3.0f))
+                                    .should(QueryBuilders.termQuery("text0", "491").boost(2.0f))
+                                    .should(QueryBuilders.termQuery("text0", "490").boost(1.0f))
+                            ),
+                            new SubSearchSourceBuilder(
+                                QueryBuilders.boolQuery()
+                                    .should(QueryBuilders.termQuery("text1", "508").boost(9.0f))
+                                    .should(QueryBuilders.termQuery("text1", "304").boost(8.0f))
+                                    .should(QueryBuilders.termQuery("text1", "501").boost(7.0f))
+                                    .should(QueryBuilders.termQuery("text1", "504").boost(6.0f))
+                                    .should(QueryBuilders.termQuery("text1", "502").boost(5.0f))
+                                    .should(QueryBuilders.termQuery("text1", "499").boost(4.0f))
+                                    .should(QueryBuilders.termQuery("text1", "800").boost(3.0f))
+                                    .should(QueryBuilders.termQuery("text1", "201").boost(2.0f))
+                                    .should(QueryBuilders.termQuery("text1", "492").boost(1.0f))
+                            )
+                        )
+                    )
+                    .addFetchField("text0")
+                    .addFetchField("text1")
+                    .setSize(5),
+                response -> {
+                    assertNull(response.getHits().getTotalHits());
+                    assertEquals(5, response.getHits().getHits().length);
+
+                    SearchHit hit = response.getHits().getAt(0);
+                    assertEquals(1, hit.getRank());
+                    assertEquals("term 492", hit.field("text0").getValue());
+                    assertEquals("term 508", hit.field("text1").getValue());
+
+                    hit = response.getHits().getAt(1);
+                    assertEquals(2, hit.getRank());
+                    assertEquals("term 499", hit.field("text0").getValue());
+                    assertEquals("term 501", hit.field("text1").getValue());
+
+                    hit = response.getHits().getAt(2);
+                    assertEquals(3, hit.getRank());
+                    assertEquals("term 500", hit.field("text0").getValue());
+                    assertEquals("term 500", hit.field("text1").getValue());
+
+                    hit = response.getHits().getAt(3);
+                    assertEquals(4, hit.getRank());
assertEquals("term 498", hit.field("text0").getValue()); + assertEquals("term 502", hit.field("text1").getValue()); + + hit = response.getHits().getAt(4); + assertEquals(5, hit.getRank()); + assertEquals("term 496", hit.field("text0").getValue()); + assertEquals("term 504", hit.field("text1").getValue()); + } + ); } } - public void testMultiBM25() { + public void testMultiBM25WithAggregation() { for (SearchType searchType : SearchType.CURRENTLY_SUPPORTED) { - SearchResponse response = client().prepareSearch("nrd_index") - .setSearchType(searchType) - .setRankBuilder(new RRFRankBuilder(8, 1)) + assertResponse( + client().prepareSearch("nrd_index") + .setSearchType(searchType) + .setRankBuilder(new RRFRankBuilder(8, 1)) + .setTrackTotalHits(false) + .setSubSearches( + List.of( + new SubSearchSourceBuilder( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("text0", "500").boost(10.0f)) + .should(QueryBuilders.termQuery("text0", "499").boost(9.0f)) + .should(QueryBuilders.termQuery("text0", "498").boost(8.0f)) + .should(QueryBuilders.termQuery("text0", "497").boost(7.0f)) + .should(QueryBuilders.termQuery("text0", "496").boost(6.0f)) + .should(QueryBuilders.termQuery("text0", "495").boost(5.0f)) + .should(QueryBuilders.termQuery("text0", "494").boost(4.0f)) + .should(QueryBuilders.termQuery("text0", "492").boost(3.0f)) + .should(QueryBuilders.termQuery("text0", "491").boost(2.0f)) + .should(QueryBuilders.termQuery("text0", "490").boost(1.0f)) + ), + new SubSearchSourceBuilder( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("text1", "508").boost(9.0f)) + .should(QueryBuilders.termQuery("text1", "304").boost(8.0f)) + .should(QueryBuilders.termQuery("text1", "501").boost(7.0f)) + .should(QueryBuilders.termQuery("text1", "504").boost(6.0f)) + .should(QueryBuilders.termQuery("text1", "502").boost(5.0f)) + .should(QueryBuilders.termQuery("text1", "499").boost(4.0f)) + .should(QueryBuilders.termQuery("text1", "801").boost(3.0f)) + .should(QueryBuilders.termQuery("text1", "201").boost(2.0f)) + .should(QueryBuilders.termQuery("text1", "492").boost(1.0f)) + ) + ) + ) + .addFetchField("text0") + .addFetchField("text1") + .setSize(5) + .addAggregation(AggregationBuilders.terms("sums").field("int")), + response -> { + assertNull(response.getHits().getTotalHits()); + assertEquals(5, response.getHits().getHits().length); + + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getRank()); + assertEquals("term 492", hit.field("text0").getValue()); + assertEquals("term 508", hit.field("text1").getValue()); + + hit = response.getHits().getAt(1); + assertEquals(2, hit.getRank()); + assertEquals("term 499", hit.field("text0").getValue()); + assertEquals("term 501", hit.field("text1").getValue()); + + hit = response.getHits().getAt(2); + assertEquals(3, hit.getRank()); + assertEquals("term 500", hit.field("text0").getValue()); + assertEquals("term 500", hit.field("text1").getValue()); + + hit = response.getHits().getAt(3); + assertEquals(4, hit.getRank()); + assertEquals("term 498", hit.field("text0").getValue()); + assertEquals("term 502", hit.field("text1").getValue()); + + hit = response.getHits().getAt(4); + assertEquals(5, hit.getRank()); + assertEquals("term 496", hit.field("text0").getValue()); + assertEquals("term 504", hit.field("text1").getValue()); + + LongTerms aggregation = response.getAggregations().get("sums"); + assertEquals(3, aggregation.getBuckets().size()); + + for (LongTerms.Bucket bucket : aggregation.getBuckets()) { + if (0L == (long) bucket.getKey()) 
+                            assertEquals(5, bucket.getDocCount());
+                        } else if (1L == (long) bucket.getKey()) {
+                            assertEquals(6, bucket.getDocCount());
+                        } else if (2L == (long) bucket.getKey()) {
+                            assertEquals(4, bucket.getDocCount());
+                        } else {
+                            throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]");
+                        }
+                    }
+                }
+            );
+        }
+    }
+
+    public void testMultiBM25AndSingleKnn() {
+        float[] queryVector = { 500.0f };
+        KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector_asc", queryVector, 101, 1001, null);
+        assertResponse(
+            client().prepareSearch("nrd_index")
+                .setRankBuilder(new RRFRankBuilder(101, 1))
                 .setTrackTotalHits(false)
+                .setKnnSearch(List.of(knnSearch))
                 .setSubSearches(
                     List.of(
                         new SubSearchSourceBuilder(
@@ -552,55 +727,44 @@ public void testMultiBM25() {
                             .should(QueryBuilders.termQuery("text1", "304").boost(8.0f))
                             .should(QueryBuilders.termQuery("text1", "501").boost(7.0f))
                             .should(QueryBuilders.termQuery("text1", "504").boost(6.0f))
-                            .should(QueryBuilders.termQuery("text1", "502").boost(5.0f))
-                            .should(QueryBuilders.termQuery("text1", "499").boost(4.0f))
-                            .should(QueryBuilders.termQuery("text1", "800").boost(3.0f))
-                            .should(QueryBuilders.termQuery("text1", "201").boost(2.0f))
-                            .should(QueryBuilders.termQuery("text1", "492").boost(1.0f))
+                            .should(QueryBuilders.termQuery("text1", "492").boost(5.0f))
+                            .should(QueryBuilders.termQuery("text1", "502").boost(4.0f))
+                            .should(QueryBuilders.termQuery("text1", "499").boost(3.0f))
+                            .should(QueryBuilders.termQuery("text1", "800").boost(2.0f))
+                            .should(QueryBuilders.termQuery("text1", "201").boost(1.0f))
                         )
                     )
                 )
                 .addFetchField("text0")
                 .addFetchField("text1")
-                .setSize(5)
-                .get();
-
-        assertNull(response.getHits().getTotalHits());
-        assertEquals(5, response.getHits().getHits().length);
-
-        SearchHit hit = response.getHits().getAt(0);
-        assertEquals(1, hit.getRank());
-        assertEquals("term 492", hit.field("text0").getValue());
-        assertEquals("term 508", hit.field("text1").getValue());
-
-        hit = response.getHits().getAt(1);
-        assertEquals(2, hit.getRank());
-        assertEquals("term 499", hit.field("text0").getValue());
-        assertEquals("term 501", hit.field("text1").getValue());
-
-        hit = response.getHits().getAt(2);
-        assertEquals(3, hit.getRank());
-        assertEquals("term 500", hit.field("text0").getValue());
-        assertEquals("term 500", hit.field("text1").getValue());
-
-        hit = response.getHits().getAt(3);
-        assertEquals(4, hit.getRank());
-        assertEquals("term 498", hit.field("text0").getValue());
-        assertEquals("term 502", hit.field("text1").getValue());
-
-        hit = response.getHits().getAt(4);
-        assertEquals(5, hit.getRank());
-        assertEquals("term 496", hit.field("text0").getValue());
-        assertEquals("term 504", hit.field("text1").getValue());
-    }
+                .addFetchField("vector_asc")
+                .setSize(5),
+            response -> {
+                assertNull(response.getHits().getTotalHits());
+                assertEquals(5, response.getHits().getHits().length);
+
+                SearchHit hit = response.getHits().getAt(0);
+                assertEquals(1, hit.getRank());
+                assertEquals("term 500", hit.field("text0").getValue());
+                assertEquals("term 500", hit.field("text1").getValue());
+                assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0);
+
+                Set<Double> vectors = Arrays.stream(response.getHits().getHits())
+                    .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue())
+                    .collect(Collectors.toSet());
+                assertEquals(Set.of(492.0, 496.0, 498.0, 499.0, 500.0), vectors);
+            }
+        );
     }
 
-    public void testMultiBM25WithAggregation() {
-        for (SearchType searchType : SearchType.CURRENTLY_SUPPORTED) {
-            SearchResponse response = client().prepareSearch("nrd_index")
-                .setSearchType(searchType)
-                .setRankBuilder(new RRFRankBuilder(8, 1))
+    public void testMultiBM25AndSingleKnnWithAggregation() {
+        float[] queryVector = { 500.0f };
+        KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector_asc", queryVector, 101, 1001, null);
+        assertResponse(
+            client().prepareSearch("nrd_index")
+                .setRankBuilder(new RRFRankBuilder(101, 1))
                 .setTrackTotalHits(false)
+                .setKnnSearch(List.of(knnSearch))
                 .setSubSearches(
                     List.of(
                         new SubSearchSourceBuilder(
@@ -622,193 +786,50 @@ public void testMultiBM25WithAggregation() {
                             .should(QueryBuilders.termQuery("text1", "304").boost(8.0f))
                             .should(QueryBuilders.termQuery("text1", "501").boost(7.0f))
                             .should(QueryBuilders.termQuery("text1", "504").boost(6.0f))
-                            .should(QueryBuilders.termQuery("text1", "502").boost(5.0f))
-                            .should(QueryBuilders.termQuery("text1", "499").boost(4.0f))
-                            .should(QueryBuilders.termQuery("text1", "801").boost(3.0f))
-                            .should(QueryBuilders.termQuery("text1", "201").boost(2.0f))
-                            .should(QueryBuilders.termQuery("text1", "492").boost(1.0f))
+                            .should(QueryBuilders.termQuery("text1", "492").boost(5.0f))
+                            .should(QueryBuilders.termQuery("text1", "502").boost(4.0f))
+                            .should(QueryBuilders.termQuery("text1", "499").boost(3.0f))
+                            .should(QueryBuilders.termQuery("text1", "800").boost(2.0f))
+                            .should(QueryBuilders.termQuery("text1", "201").boost(1.0f))
                         )
                    )
                 )
                 .addFetchField("text0")
                 .addFetchField("text1")
+                .addFetchField("vector_asc")
                 .setSize(5)
-                .addAggregation(AggregationBuilders.terms("sums").field("int"))
-                .get();
-
-            assertNull(response.getHits().getTotalHits());
-            assertEquals(5, response.getHits().getHits().length);
-
-            SearchHit hit = response.getHits().getAt(0);
-            assertEquals(1, hit.getRank());
-            assertEquals("term 492", hit.field("text0").getValue());
-            assertEquals("term 508", hit.field("text1").getValue());
-
-            hit = response.getHits().getAt(1);
-            assertEquals(2, hit.getRank());
-            assertEquals("term 499", hit.field("text0").getValue());
-            assertEquals("term 501", hit.field("text1").getValue());
-
-            hit = response.getHits().getAt(2);
-            assertEquals(3, hit.getRank());
-            assertEquals("term 500", hit.field("text0").getValue());
-            assertEquals("term 500", hit.field("text1").getValue());
-
-            hit = response.getHits().getAt(3);
-            assertEquals(4, hit.getRank());
-            assertEquals("term 498", hit.field("text0").getValue());
-            assertEquals("term 502", hit.field("text1").getValue());
-
-            hit = response.getHits().getAt(4);
-            assertEquals(5, hit.getRank());
-            assertEquals("term 496", hit.field("text0").getValue());
-            assertEquals("term 504", hit.field("text1").getValue());
-
-            LongTerms aggregation = response.getAggregations().get("sums");
-            assertEquals(3, aggregation.getBuckets().size());
-
-            for (LongTerms.Bucket bucket : aggregation.getBuckets()) {
-                if (0L == (long) bucket.getKey()) {
-                    assertEquals(5, bucket.getDocCount());
-                } else if (1L == (long) bucket.getKey()) {
-                    assertEquals(6, bucket.getDocCount());
-                } else if (2L == (long) bucket.getKey()) {
-                    assertEquals(4, bucket.getDocCount());
-                } else {
-                    throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]");
+                .addAggregation(AggregationBuilders.terms("sums").field("int")),
+            response -> {
+                assertNull(response.getHits().getTotalHits());
+                assertEquals(5, response.getHits().getHits().length);
+
+                SearchHit hit = response.getHits().getAt(0);
+                assertEquals(1, hit.getRank());
+                assertEquals("term 500", hit.field("text0").getValue());
+                assertEquals("term 500", hit.field("text1").getValue());
+                assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0);
+
+                Set<Double> vectors = Arrays.stream(response.getHits().getHits())
+                    .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue())
+                    .collect(Collectors.toSet());
+                assertEquals(Set.of(492.0, 496.0, 498.0, 499.0, 500.0), vectors);
+
+                LongTerms aggregation = response.getAggregations().get("sums");
+                assertEquals(3, aggregation.getBuckets().size());
+
+                for (LongTerms.Bucket bucket : aggregation.getBuckets()) {
+                    if (0L == (long) bucket.getKey()) {
+                        assertEquals(35, bucket.getDocCount());
+                    } else if (1L == (long) bucket.getKey()) {
+                        assertEquals(35, bucket.getDocCount());
+                    } else if (2L == (long) bucket.getKey()) {
+                        assertEquals(34, bucket.getDocCount());
+                    } else {
+                        throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]");
+                    }
                 }
             }
-        }
+        );
     }
 
-    public void testMultiBM25AndSingleKnn() {
-        float[] queryVector = { 500.0f };
-        KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector_asc", queryVector, 101, 1001, null);
-        SearchResponse response = client().prepareSearch("nrd_index")
-            .setRankBuilder(new RRFRankBuilder(101, 1))
-            .setTrackTotalHits(false)
-            .setKnnSearch(List.of(knnSearch))
-            .setSubSearches(
-                List.of(
-                    new SubSearchSourceBuilder(
-                        QueryBuilders.boolQuery()
-                            .should(QueryBuilders.termQuery("text0", "500").boost(10.0f))
-                            .should(QueryBuilders.termQuery("text0", "499").boost(9.0f))
-                            .should(QueryBuilders.termQuery("text0", "498").boost(8.0f))
-                            .should(QueryBuilders.termQuery("text0", "497").boost(7.0f))
-                            .should(QueryBuilders.termQuery("text0", "496").boost(6.0f))
-                            .should(QueryBuilders.termQuery("text0", "495").boost(5.0f))
-                            .should(QueryBuilders.termQuery("text0", "494").boost(4.0f))
-                            .should(QueryBuilders.termQuery("text0", "492").boost(3.0f))
-                            .should(QueryBuilders.termQuery("text0", "491").boost(2.0f))
-                            .should(QueryBuilders.termQuery("text0", "490").boost(1.0f))
-                    ),
-                    new SubSearchSourceBuilder(
-                        QueryBuilders.boolQuery()
-                            .should(QueryBuilders.termQuery("text1", "508").boost(9.0f))
-                            .should(QueryBuilders.termQuery("text1", "304").boost(8.0f))
-                            .should(QueryBuilders.termQuery("text1", "501").boost(7.0f))
-                            .should(QueryBuilders.termQuery("text1", "504").boost(6.0f))
-                            .should(QueryBuilders.termQuery("text1", "492").boost(5.0f))
-                            .should(QueryBuilders.termQuery("text1", "502").boost(4.0f))
-                            .should(QueryBuilders.termQuery("text1", "499").boost(3.0f))
-                            .should(QueryBuilders.termQuery("text1", "800").boost(2.0f))
-                            .should(QueryBuilders.termQuery("text1", "201").boost(1.0f))
-                    )
-                )
-            )
-            .addFetchField("text0")
-            .addFetchField("text1")
-            .addFetchField("vector_asc")
-            .setSize(5)
-            .get();
-
-        assertNull(response.getHits().getTotalHits());
-        assertEquals(5, response.getHits().getHits().length);
-
-        SearchHit hit = response.getHits().getAt(0);
-        assertEquals(1, hit.getRank());
-        assertEquals("term 500", hit.field("text0").getValue());
-        assertEquals("term 500", hit.field("text1").getValue());
-        assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0);
-
-        Set<Double> vectors = Arrays.stream(response.getHits().getHits())
-            .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue())
-            .collect(Collectors.toSet());
-        assertEquals(Set.of(492.0, 496.0, 498.0, 499.0, 500.0), vectors);
-    }
-
-    public void testMultiBM25AndSingleKnnWithAggregation() {
-        float[] queryVector = { 500.0f };
-        KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector_asc", queryVector, 101, 1001, null);
-        SearchResponse response = client().prepareSearch("nrd_index")
-            .setRankBuilder(new RRFRankBuilder(101, 1))
-            .setTrackTotalHits(false)
-            .setKnnSearch(List.of(knnSearch))
-            .setSubSearches(
-                List.of(
-                    new SubSearchSourceBuilder(
-                        QueryBuilders.boolQuery()
-                            .should(QueryBuilders.termQuery("text0", "500").boost(10.0f))
-                            .should(QueryBuilders.termQuery("text0", "499").boost(9.0f))
-                            .should(QueryBuilders.termQuery("text0", "498").boost(8.0f))
-                            .should(QueryBuilders.termQuery("text0", "497").boost(7.0f))
-                            .should(QueryBuilders.termQuery("text0", "496").boost(6.0f))
-                            .should(QueryBuilders.termQuery("text0", "495").boost(5.0f))
-                            .should(QueryBuilders.termQuery("text0", "494").boost(4.0f))
-                            .should(QueryBuilders.termQuery("text0", "492").boost(3.0f))
-                            .should(QueryBuilders.termQuery("text0", "491").boost(2.0f))
-                            .should(QueryBuilders.termQuery("text0", "490").boost(1.0f))
-                    ),
-                    new SubSearchSourceBuilder(
-                        QueryBuilders.boolQuery()
-                            .should(QueryBuilders.termQuery("text1", "508").boost(9.0f))
-                            .should(QueryBuilders.termQuery("text1", "304").boost(8.0f))
-                            .should(QueryBuilders.termQuery("text1", "501").boost(7.0f))
-                            .should(QueryBuilders.termQuery("text1", "504").boost(6.0f))
-                            .should(QueryBuilders.termQuery("text1", "492").boost(5.0f))
-                            .should(QueryBuilders.termQuery("text1", "502").boost(4.0f))
-                            .should(QueryBuilders.termQuery("text1", "499").boost(3.0f))
-                            .should(QueryBuilders.termQuery("text1", "800").boost(2.0f))
-                            .should(QueryBuilders.termQuery("text1", "201").boost(1.0f))
-                    )
-                )
-            )
-            .addFetchField("text0")
-            .addFetchField("text1")
-            .addFetchField("vector_asc")
-            .setSize(5)
-            .addAggregation(AggregationBuilders.terms("sums").field("int"))
-            .get();
-
-        assertNull(response.getHits().getTotalHits());
-        assertEquals(5, response.getHits().getHits().length);
-
-        SearchHit hit = response.getHits().getAt(0);
-        assertEquals(1, hit.getRank());
-        assertEquals("term 500", hit.field("text0").getValue());
-        assertEquals("term 500", hit.field("text1").getValue());
-        assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0);
-
-        Set<Double> vectors = Arrays.stream(response.getHits().getHits())
-            .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue())
-            .collect(Collectors.toSet());
-        assertEquals(Set.of(492.0, 496.0, 498.0, 499.0, 500.0), vectors);
-
-        LongTerms aggregation = response.getAggregations().get("sums");
-        assertEquals(3, aggregation.getBuckets().size());
-
-        for (LongTerms.Bucket bucket : aggregation.getBuckets()) {
-            if (0L == (long) bucket.getKey()) {
-                assertEquals(35, bucket.getDocCount());
-            } else if (1L == (long) bucket.getKey()) {
-                assertEquals(35, bucket.getDocCount());
-            } else if (2L == (long) bucket.getKey()) {
-                assertEquals(34, bucket.getDocCount());
-            } else {
-                throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]");
-            }
-        }
+        );
     }
 
     public void testMultiBM25AndMultipleKnn() {
@@ -816,60 +837,62 @@ public void testMultiBM25AndMultipleKnn() {
         float[] queryVectorDesc = { 500.0f };
         KnnSearchBuilder knnSearchAsc = new KnnSearchBuilder("vector_asc", queryVectorAsc, 101, 1001, null);
         KnnSearchBuilder knnSearchDesc = new KnnSearchBuilder("vector_desc", queryVectorDesc, 101, 1001, null);
-        SearchResponse response = client().prepareSearch("nrd_index")
-            .setRankBuilder(new RRFRankBuilder(101, 1))
-            .setTrackTotalHits(false)
-            .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc))
-            .setSubSearches(
-                List.of(
-                    new SubSearchSourceBuilder(
-                        QueryBuilders.boolQuery()
-                            .should(QueryBuilders.termQuery("text0", "500").boost(10.0f))
-                            .should(QueryBuilders.termQuery("text0", "499").boost(9.0f))
-                            .should(QueryBuilders.termQuery("text0", "498").boost(8.0f))
-                            .should(QueryBuilders.termQuery("text0", "497").boost(7.0f))
-                            .should(QueryBuilders.termQuery("text0", "496").boost(6.0f))
-                            .should(QueryBuilders.termQuery("text0", "495").boost(5.0f))
-                            .should(QueryBuilders.termQuery("text0", "494").boost(4.0f))
-                            .should(QueryBuilders.termQuery("text0", "492").boost(3.0f))
-                            .should(QueryBuilders.termQuery("text0", "491").boost(2.0f))
-                            .should(QueryBuilders.termQuery("text0", "490").boost(1.0f))
-                    ),
-                    new SubSearchSourceBuilder(
-                        QueryBuilders.boolQuery()
-                            .should(QueryBuilders.termQuery("text1", "508").boost(9.0f))
-                            .should(QueryBuilders.termQuery("text1", "304").boost(8.0f))
-                            .should(QueryBuilders.termQuery("text1", "501").boost(7.0f))
-                            .should(QueryBuilders.termQuery("text1", "504").boost(6.0f))
-                            .should(QueryBuilders.termQuery("text1", "492").boost(5.0f))
-                            .should(QueryBuilders.termQuery("text1", "502").boost(4.0f))
-                            .should(QueryBuilders.termQuery("text1", "499").boost(3.0f))
-                            .should(QueryBuilders.termQuery("text1", "800").boost(2.0f))
-                            .should(QueryBuilders.termQuery("text1", "201").boost(1.0f))
+        assertResponse(
+            client().prepareSearch("nrd_index")
+                .setRankBuilder(new RRFRankBuilder(101, 1))
+                .setTrackTotalHits(false)
+                .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc))
+                .setSubSearches(
+                    List.of(
+                        new SubSearchSourceBuilder(
+                            QueryBuilders.boolQuery()
+                                .should(QueryBuilders.termQuery("text0", "500").boost(10.0f))
+                                .should(QueryBuilders.termQuery("text0", "499").boost(9.0f))
+                                .should(QueryBuilders.termQuery("text0", "498").boost(8.0f))
+                                .should(QueryBuilders.termQuery("text0", "497").boost(7.0f))
+                                .should(QueryBuilders.termQuery("text0", "496").boost(6.0f))
+                                .should(QueryBuilders.termQuery("text0", "495").boost(5.0f))
+                                .should(QueryBuilders.termQuery("text0", "494").boost(4.0f))
+                                .should(QueryBuilders.termQuery("text0", "492").boost(3.0f))
+                                .should(QueryBuilders.termQuery("text0", "491").boost(2.0f))
+                                .should(QueryBuilders.termQuery("text0", "490").boost(1.0f))
+                        ),
+                        new SubSearchSourceBuilder(
+                            QueryBuilders.boolQuery()
+                                .should(QueryBuilders.termQuery("text1", "508").boost(9.0f))
+                                .should(QueryBuilders.termQuery("text1", "304").boost(8.0f))
+                                .should(QueryBuilders.termQuery("text1", "501").boost(7.0f))
+                                .should(QueryBuilders.termQuery("text1", "504").boost(6.0f))
+                                .should(QueryBuilders.termQuery("text1", "492").boost(5.0f))
+                                .should(QueryBuilders.termQuery("text1", "502").boost(4.0f))
+                                .should(QueryBuilders.termQuery("text1", "499").boost(3.0f))
+                                .should(QueryBuilders.termQuery("text1", "800").boost(2.0f))
+                                .should(QueryBuilders.termQuery("text1", "201").boost(1.0f))
+                        )
                     )
                 )
-            )
-            .addFetchField("text0")
-            .addFetchField("text1")
-            .addFetchField("vector_asc")
-            .addFetchField("vector_desc")
-            .setSize(5)
-            .get();
-
-        assertNull(response.getHits().getTotalHits());
-        assertEquals(5, response.getHits().getHits().length);
-
-        SearchHit hit = response.getHits().getAt(0);
-        assertEquals(1, hit.getRank());
-        assertEquals("term 500", hit.field("text0").getValue());
-        assertEquals("term 500", hit.field("text1").getValue());
-        assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0);
-        assertEquals(500.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0);
-
-        Set<Double> vectors = Arrays.stream(response.getHits().getHits())
-            .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue())
-            .collect(Collectors.toSet());
-        assertEquals(Set.of(492.0, 498.0, 499.0, 500.0, 501.0), vectors);
+                .addFetchField("text0")
+                .addFetchField("text1")
+                .addFetchField("vector_asc")
+                .addFetchField("vector_desc")
+                .setSize(5),
+            response -> {
+                assertNull(response.getHits().getTotalHits());
+                assertEquals(5, response.getHits().getHits().length);
+
+                SearchHit hit = response.getHits().getAt(0);
+                assertEquals(1, hit.getRank());
+                assertEquals("term 500", hit.field("text0").getValue());
+                assertEquals("term 500", hit.field("text1").getValue());
+                assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0);
+                assertEquals(500.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0);
+
+                Set<Double> vectors = Arrays.stream(response.getHits().getHits())
+                    .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue())
+                    .collect(Collectors.toSet());
+                assertEquals(Set.of(492.0, 498.0, 499.0, 500.0, 501.0), vectors);
+            }
+        );
     }
 
     public void testMultiBM25AndMultipleKnnWithAggregation() {
@@ -877,75 +900,77 @@ public void testMultiBM25AndMultipleKnnWithAggregation() {
         float[] queryVectorDesc = { 500.0f };
         KnnSearchBuilder knnSearchAsc = new KnnSearchBuilder("vector_asc", queryVectorAsc, 101, 1001, null);
         KnnSearchBuilder knnSearchDesc = new KnnSearchBuilder("vector_desc", queryVectorDesc, 101, 1001, null);
-        SearchResponse response = client().prepareSearch("nrd_index")
-            .setRankBuilder(new RRFRankBuilder(101, 1))
-            .setTrackTotalHits(false)
-            .setKnnSearch(List.of(knnSearchAsc, knnSearchDesc))
-            .setSubSearches(
-                List.of(
-                    new SubSearchSourceBuilder(
-                        QueryBuilders.boolQuery()
-                            .should(QueryBuilders.termQuery("text0", "500").boost(10.0f))
-                            .should(QueryBuilders.termQuery("text0", "499").boost(9.0f))
-                            .should(QueryBuilders.termQuery("text0", "498").boost(8.0f))
-                            .should(QueryBuilders.termQuery("text0", "497").boost(7.0f))
-                            .should(QueryBuilders.termQuery("text0", "496").boost(6.0f))
+                            .should(QueryBuilders.termQuery("text0", "495").boost(5.0f))
+                            .should(QueryBuilders.termQuery("text0", "494").boost(4.0f))
+                            .should(QueryBuilders.termQuery("text0", "492").boost(3.0f))
+                            .should(QueryBuilders.termQuery("text0", "491").boost(2.0f))
+                            .should(QueryBuilders.termQuery("text0", "490").boost(1.0f))
+                        ),
+                        new SubSearchSourceBuilder(
+                            QueryBuilders.boolQuery()
+                                .should(QueryBuilders.termQuery("text1", "508").boost(9.0f))
+                                .should(QueryBuilders.termQuery("text1", "304").boost(8.0f))
+                                .should(QueryBuilders.termQuery("text1", "501").boost(7.0f))
+                                .should(QueryBuilders.termQuery("text1", "504").boost(6.0f))
+                                .should(QueryBuilders.termQuery("text1", "492").boost(5.0f))
+                                .should(QueryBuilders.termQuery("text1", "502").boost(4.0f))
+                                .should(QueryBuilders.termQuery("text1", "499").boost(3.0f))
+                                .should(QueryBuilders.termQuery("text1", "800").boost(2.0f))
+                                .should(QueryBuilders.termQuery("text1", "201").boost(1.0f))
+                        )
                     )
                 )
-            )
-            .addFetchField("text0")
-            .addFetchField("text1")
-            .addFetchField("vector_asc")
-            .addFetchField("vector_desc")
-            .setSize(5)
-            .addAggregation(AggregationBuilders.terms("sums").field("int"))
-            .get();
-
-        assertNull(response.getHits().getTotalHits());
-        assertEquals(5, response.getHits().getHits().length);
-
-        SearchHit hit = response.getHits().getAt(0);
-        assertEquals(1, hit.getRank());
-        assertEquals("term 500", hit.field("text0").getValue());
-        assertEquals("term 500", hit.field("text1").getValue());
-        assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0);
-        assertEquals(500.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0);
-
-        Set<Double> vectors = Arrays.stream(response.getHits().getHits())
-            .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue())
-            .collect(Collectors.toSet());
-        assertEquals(Set.of(492.0, 498.0, 499.0, 500.0, 501.0), vectors);
-
-        LongTerms aggregation = response.getAggregations().get("sums");
-        assertEquals(3, aggregation.getBuckets().size());
-
-        for (LongTerms.Bucket bucket : aggregation.getBuckets()) {
-            if (0L == (long) bucket.getKey()) {
-                assertEquals(35, bucket.getDocCount());
-            } else if (1L == (long) bucket.getKey()) {
-                assertEquals(35, bucket.getDocCount());
-            } else if (2L == (long) bucket.getKey()) {
-                assertEquals(34, bucket.getDocCount());
-            } else {
-                throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]");
+                .addFetchField("text0")
+                .addFetchField("text1")
+                .addFetchField("vector_asc")
+                .addFetchField("vector_desc")
+                .setSize(5)
+                .addAggregation(AggregationBuilders.terms("sums").field("int")),
+            response -> {
+                assertNull(response.getHits().getTotalHits());
+                assertEquals(5, response.getHits().getHits().length);
+
+                SearchHit hit = response.getHits().getAt(0);
+                assertEquals(1, hit.getRank());
+                assertEquals("term 500", hit.field("text0").getValue());
+                assertEquals("term 500", hit.field("text1").getValue());
+                assertEquals(500.0, ((Number) hit.field("vector_asc").getValue()).doubleValue(), 0.0);
+                assertEquals(500.0, ((Number) hit.field("vector_desc").getValue()).doubleValue(), 0.0);
+
+                Set<Double> vectors = Arrays.stream(response.getHits().getHits())
+                    .map(h -> ((Number) h.field("vector_asc").getValue()).doubleValue())
+                    .collect(Collectors.toSet());
+                assertEquals(Set.of(492.0, 498.0, 499.0, 500.0, 501.0), vectors);
+
+                LongTerms aggregation = response.getAggregations().get("sums");
+                assertEquals(3, aggregation.getBuckets().size());
+
+                for (LongTerms.Bucket bucket : aggregation.getBuckets()) {
+                    if (0L == (long) bucket.getKey()) {
+                        assertEquals(35, bucket.getDocCount());
+                    } else if (1L == (long) bucket.getKey()) {
+                        assertEquals(35, bucket.getDocCount());
+                    } else if (2L == (long) bucket.getKey()) {
+                        assertEquals(34, bucket.getDocCount());
+                    } else {
+                        throw new IllegalArgumentException("unexpected bucket key [" + bucket.getKey() + "]");
+                    }
+                }
             }
-        }
+        );
     }
 }
diff --git a/x-pack/plugin/repositories-metering-api/qa/s3/build.gradle b/x-pack/plugin/repositories-metering-api/qa/s3/build.gradle
index 05dbe3d6bd8c7..5f2bf66f31b21 100644
--- a/x-pack/plugin/repositories-metering-api/qa/s3/build.gradle
+++ b/x-pack/plugin/repositories-metering-api/qa/s3/build.gradle
@@ -1,13 +1,12 @@
 import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE
 import org.elasticsearch.gradle.internal.info.BuildParams
 
-apply plugin: 'elasticsearch.legacy-java-rest-test'
+apply plugin: 'elasticsearch.internal-java-rest-test'
 apply plugin: 'elasticsearch.rest-resources'
 
-final Project fixture = project(':test:fixtures:s3-fixture')
-
 dependencies {
   javaRestTestImplementation(testArtifact(project(xpackModule('repositories-metering-api'))))
+  javaRestTestImplementation project(':test:fixtures:s3-fixture')
 }
 
 restResources {
@@ -33,38 +32,14 @@ if (!s3AccessKey && !s3SecretKey && !s3Bucket && !s3BasePath) {
   throw new IllegalArgumentException("not all options specified to run against external S3 service are present")
 }
 
-if (useFixture) {
-  apply plugin: 'elasticsearch.test.fixtures'
-  testFixtures.useFixture(fixture.path, 's3-fixture-repositories-metering')
-}
-
 tasks.named("javaRestTest").configure {
+  usesDefaultDistribution()
+  systemProperty("tests.use.fixture", Boolean.toString(useFixture))
   systemProperty 'test.s3.bucket', s3Bucket
-  nonInputProperties.systemProperty 'test.s3.base_path', s3BasePath ? s3BasePath + "_repositories_metering" + BuildParams.testSeed : 'base_path'
-}
-
-testClusters.matching { it.name == "javaRestTest" }.configureEach {
-  testDistribution = 'DEFAULT'
-
-  keystore 's3.client.repositories_metering.access_key', s3AccessKey
-  keystore 's3.client.repositories_metering.secret_key', s3SecretKey
-
-  if (useFixture) {
-    def fixtureAddress = { fixtureName ->
-      assert useFixture: 'closure should not be used without a fixture'
-      int ephemeralPort = fixture.postProcessFixture.ext."test.fixtures.${fixtureName}.tcp.80"
-      assert ephemeralPort > 0
-      '127.0.0.1:' + ephemeralPort
-    }
-    setting 's3.client.repositories_metering.protocol', 'http'
-    setting 's3.client.repositories_metering.endpoint', { "${-> fixtureAddress('s3-fixture-repositories-metering')}" }, IGNORE_VALUE
-
-  } else {
-    println "Using an external service to test " + project.name
-  }
-  setting 'xpack.security.enabled', 'false'
+  systemProperty("s3AccessKey", s3AccessKey)
+  systemProperty("s3SecretKey", s3SecretKey)
+  nonInputProperties.systemProperty 'test.s3.base_path', s3BasePath ? s3BasePath + "_repositories_metering" + BuildParams.testSeed : 'base_path_integration_tests'
 }
-
 tasks.register("s3ThirdPartyTest").configure { dependsOn "javaRestTest" }
diff --git a/x-pack/plugin/repositories-metering-api/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/repositories/metering/s3/S3RepositoriesMeteringIT.java b/x-pack/plugin/repositories-metering-api/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/repositories/metering/s3/S3RepositoriesMeteringIT.java
index 00dcd8738ed16..b3f1d9fb0b6cf 100644
--- a/x-pack/plugin/repositories-metering-api/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/repositories/metering/s3/S3RepositoriesMeteringIT.java
+++ b/x-pack/plugin/repositories-metering-api/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/repositories/metering/s3/S3RepositoriesMeteringIT.java
@@ -6,14 +6,43 @@
  */
 package org.elasticsearch.xpack.repositories.metering.s3;
 
+import fixture.s3.S3HttpFixture;
+
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.test.cluster.local.distribution.DistributionType;
 import org.elasticsearch.xpack.repositories.metering.AbstractRepositoriesMeteringAPIRestTestCase;
+import org.junit.ClassRule;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
 
 import java.util.List;
 import java.util.Map;
 
 public class S3RepositoriesMeteringIT extends AbstractRepositoriesMeteringAPIRestTestCase {
+    static final boolean USE_FIXTURE = Boolean.parseBoolean(System.getProperty("tests.use.fixture", "true"));
+
+    public static final S3HttpFixture s3Fixture = new S3HttpFixture(USE_FIXTURE);
+
+    public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
+        .distribution(DistributionType.DEFAULT)
+        .keystore("s3.client.repositories_metering.access_key", System.getProperty("s3AccessKey"))
+        .keystore("s3.client.repositories_metering.secret_key", System.getProperty("s3SecretKey"))
+        .setting("xpack.license.self_generated.type", "trial")
+        .setting("s3.client.repositories_metering.protocol", () -> "http", (n) -> USE_FIXTURE)
+        .setting("s3.client.repositories_metering.endpoint", s3Fixture::getAddress, (n) -> USE_FIXTURE)
+        .setting("xpack.security.enabled", "false")
+        .build();
+
+    @ClassRule
+    public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(cluster);
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+
     @Override
     protected String repositoryType() {
         return "s3";
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java
index aac47ca1d2ea1..68b5b8953ccb7 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java
@@ -15,8 +15,8 @@
 import org.elasticsearch.action.bulk.BulkAction;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkResponse;
-import org.elasticsearch.action.search.SearchAction;
 import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.TransportSearchAction;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.client.internal.ParentTaskAssigningClient;
 import org.elasticsearch.common.scheduler.SchedulerEngine;
@@ -130,7 +130,7 @@ protected void doNextSearch(long waitTimeInNanos, ActionListener<SearchResponse> nextPhase) {
             job.getHeaders(),
             ClientHelper.ROLLUP_ORIGIN,
             client,
-            SearchAction.INSTANCE,
+            TransportSearchAction.TYPE,
             buildSearchRequest(),
             nextPhase
         );
diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java
index 0cc37f7ed3945..44f5f51668ea3 100644
--- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java
@@ -205,9 +205,14 @@ public void testRolledMissingAggs() throws Exception {
             msearch,
             InternalAggregationTestCase.emptyReduceContextBuilder()
         );
-        assertNotNull(response);
-        Aggregations responseAggs = response.getAggregations();
-        assertThat(responseAggs.asList().size(), equalTo(0));
+        try {
+            assertNotNull(response);
+            Aggregations responseAggs = response.getAggregations();
+            assertThat(responseAggs.asList().size(), equalTo(0));
+        } finally {
+            // this SearchResponse is not a mock, so must be decRef'd
+            response.decRef();
+        }
     }
 
     public void testMissingRolledIndex() {
@@ -237,6 +242,7 @@ public void testVerifyNormal() throws Exception {
         SearchResponse response = mock(SearchResponse.class);
         MultiSearchResponse.Item item = new MultiSearchResponse.Item(response, null);
 
+        // this SearchResponse is a mock, so does not need a decRef call
         SearchResponse finalResponse = RollupResponseTranslator.verifyResponse(item);
         assertThat(finalResponse, equalTo(response));
     }
@@ -280,15 +286,20 @@ public void testTranslateRollup() throws Exception {
         when(response.getAggregations()).thenReturn(mockAggs);
         MultiSearchResponse.Item item = new MultiSearchResponse.Item(response, null);
 
+        // this is not a mock, so needs to be decRef'd
         SearchResponse finalResponse = RollupResponseTranslator.translateResponse(
             new MultiSearchResponse.Item[] { item },
             InternalAggregationTestCase.emptyReduceContextBuilder()
         );
-        assertNotNull(finalResponse);
-        Aggregations responseAggs = finalResponse.getAggregations();
-        assertNotNull(finalResponse);
-        Avg avg = responseAggs.get("foo");
-        assertThat(avg.getValue(), equalTo(5.0));
+        try {
+            assertNotNull(finalResponse);
+            Aggregations responseAggs = finalResponse.getAggregations();
+            assertNotNull(finalResponse);
+            Avg avg = responseAggs.get("foo");
+            assertThat(avg.getValue(), equalTo(5.0));
+        } finally {
+            finalResponse.decRef();
+        }
     }
 
     public void testTranslateMissingRollup() {
@@ -409,6 +420,7 @@ public void testSimpleReduction() throws Exception {
         MultiSearchResponse.Item[] msearch = new MultiSearchResponse.Item[] { unrolledResponse, rolledResponse };
 
+        // this SearchResponse is not a mock, so needs a decRef
         SearchResponse response = RollupResponseTranslator.combineResponses(
             msearch,
             InternalAggregationTestCase.emptyReduceContextBuilder(
@@ -416,11 +428,15 @@
                 .addAggregator(new MaxAggregationBuilder("foo." + RollupField.COUNT_FIELD))
             )
         );
-        assertNotNull(response);
-        Aggregations responseAggs = response.getAggregations();
-        assertNotNull(responseAggs);
-        Avg avg = responseAggs.get("foo");
-        assertThat(avg.getValue(), equalTo(5.0));
+        try {
+            assertNotNull(response);
+            Aggregations responseAggs = response.getAggregations();
+            assertNotNull(responseAggs);
+            Avg avg = responseAggs.get("foo");
+            assertThat(avg.getValue(), equalTo(5.0));
+        } finally {
+            response.decRef();
+        }
     }
 
     public void testUnsupported() throws IOException {
diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java
index 21789ecc5e0ba..32b9c2df962a9 100644
--- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java
@@ -678,13 +678,17 @@ public void testLiveOnlyProcess() throws Exception {
         SearchResponse response = mock(SearchResponse.class);
         MultiSearchResponse.Item item = new MultiSearchResponse.Item(response, null);
         MultiSearchResponse msearchResponse = new MultiSearchResponse(new MultiSearchResponse.Item[] { item }, 1);
-
-        SearchResponse r = TransportRollupSearchAction.processResponses(
-            result,
-            msearchResponse,
-            InternalAggregationTestCase.emptyReduceContextBuilder()
-        );
-        assertThat(r, equalTo(response));
+        try {
+            // a mock SearchResponse, so does not need to be decRef'd
+            SearchResponse r = TransportRollupSearchAction.processResponses(
+                result,
+                msearchResponse,
+                InternalAggregationTestCase.emptyReduceContextBuilder()
+            );
+            assertThat(r, equalTo(response));
+        } finally {
+            msearchResponse.decRef();
+        }
     }
 
     public void testRollupOnly() throws Exception {
@@ -737,17 +741,24 @@
         when(response.getAggregations()).thenReturn(mockAggs);
         MultiSearchResponse.Item item = new MultiSearchResponse.Item(response, null);
         MultiSearchResponse msearchResponse = new MultiSearchResponse(new MultiSearchResponse.Item[] { item }, 1);
-
-        SearchResponse r = TransportRollupSearchAction.processResponses(
-            result,
-            msearchResponse,
-            InternalAggregationTestCase.emptyReduceContextBuilder()
-        );
-
-        assertNotNull(r);
-        Aggregations responseAggs = r.getAggregations();
-        Avg avg = responseAggs.get("foo");
-        assertThat(avg.getValue(), IsEqual.equalTo(5.0));
+        try {
+            SearchResponse r = TransportRollupSearchAction.processResponses(
+                result,
+                msearchResponse,
+                InternalAggregationTestCase.emptyReduceContextBuilder()
+            );
+            try {
+                assertNotNull(r);
+                Aggregations responseAggs = r.getAggregations();
+                Avg avg = responseAggs.get("foo");
+                assertThat(avg.getValue(), IsEqual.equalTo(5.0));
+            } finally {
+                // this SearchResponse is not a mock, so we decRef
+                r.decRef();
+            }
+        } finally {
+            msearchResponse.decRef();
+        }
     }
 
     public void testTooManyRollups() throws IOException {
@@ -788,16 +799,19 @@ public void testEmptyMsearch() {
             Collections.emptySet()
         );
         MultiSearchResponse msearchResponse = new MultiSearchResponse(new MultiSearchResponse.Item[0], 1);
-
-        RuntimeException e = expectThrows(
-            RuntimeException.class,
-            () -> TransportRollupSearchAction.processResponses(
-                result,
-                msearchResponse,
-                InternalAggregationTestCase.emptyReduceContextBuilder()
-            )
-        );
-        assertThat(e.getMessage(), equalTo("MSearch response was empty, cannot unroll RollupSearch results"));
+        try {
+            RuntimeException e = expectThrows(
+                RuntimeException.class,
+                () -> TransportRollupSearchAction.processResponses(
+                    result,
+                    msearchResponse,
+                    InternalAggregationTestCase.emptyReduceContextBuilder()
+                )
+            );
+            assertThat(e.getMessage(), equalTo("MSearch response was empty, cannot unroll RollupSearch results"));
+        } finally {
+            msearchResponse.decRef();
+        }
     }
 
     public void testBoth() throws Exception {
@@ -864,25 +878,30 @@
         when(responseWithout.getAggregations()).thenReturn(mockAggsWithout);
         MultiSearchResponse.Item rolledResponse = new MultiSearchResponse.Item(responseWithout, null);
 
-        MultiSearchResponse msearchResponse = new MultiSearchResponse(
+        final MultiSearchResponse msearchResponse = new MultiSearchResponse(
             new MultiSearchResponse.Item[] { unrolledResponse, rolledResponse },
             123
         );
-
-        SearchResponse response = TransportRollupSearchAction.processResponses(
-            separateIndices,
-            msearchResponse,
-            InternalAggregationTestCase.emptyReduceContextBuilder(
-                new AggregatorFactories.Builder().addAggregator(new MaxAggregationBuilder("foo"))
-                    .addAggregator(new MaxAggregationBuilder("foo." + RollupField.COUNT_FIELD))
-            )
-        );
-
-        assertNotNull(response);
-        Aggregations responseAggs = response.getAggregations();
-        assertNotNull(responseAggs);
-        Avg avg = responseAggs.get("foo");
-        assertThat(avg.getValue(), IsEqual.equalTo(5.0));
-
+        try {
+            SearchResponse response = TransportRollupSearchAction.processResponses(
+                separateIndices,
+                msearchResponse,
+                InternalAggregationTestCase.emptyReduceContextBuilder(
+                    new AggregatorFactories.Builder().addAggregator(new MaxAggregationBuilder("foo"))
+                        .addAggregator(new MaxAggregationBuilder("foo." + RollupField.COUNT_FIELD))
+                )
+            );
+            try {
+                assertNotNull(response);
+                Aggregations responseAggs = response.getAggregations();
+                assertNotNull(responseAggs);
+                Avg avg = responseAggs.get("foo");
+                assertThat(avg.getValue(), IsEqual.equalTo(5.0));
+            } finally {
+                response.decRef();
+            }
+        } finally {
+            msearchResponse.decRef();
+        }
     }
 }
diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java
index 6cba6bcfb5071..6fb40541330b2 100644
--- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java
@@ -191,18 +191,6 @@ private static class NonEmptyRollupIndexer extends RollupIndexer {
         final BiConsumer<IndexerState, Map<String, Object>> saveStateCheck;
         private CountDownLatch latch;
 
-        NonEmptyRollupIndexer(
-            ThreadPool threadPool,
-            RollupJob job,
-            AtomicReference<IndexerState> initialState,
-            Map<String, Object> initialPosition,
-            Function<SearchRequest, SearchResponse> searchFunction,
-            Function<BulkRequest, BulkResponse> bulkFunction,
-            Consumer<Exception> failureConsumer
-        ) {
-            this(threadPool, job, initialState, initialPosition, searchFunction, bulkFunction, failureConsumer, (i, m) -> {});
-        }
-
         NonEmptyRollupIndexer(
             ThreadPool threadPool,
             RollupJob job,
diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java
index 5befaafba0f8a..7fcde59f73088 100644
--- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java
@@ -12,9 +12,9 @@
 import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
 import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
-import org.elasticsearch.action.search.SearchAction;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.action.search.TransportSearchAction;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.common.scheduler.SchedulerEngine;
 import org.elasticsearch.common.settings.Settings;
@@ -619,7 +619,7 @@ public void testTriggerWithoutHeaders() throws Exception {
             ((ActionListener<SearchResponse>) invocationOnMock.getArguments()[2]).onResponse(r);
 
             return null;
-        }).when(client).execute(eq(SearchAction.INSTANCE), any(), any());
+        }).when(client).execute(eq(TransportSearchAction.TYPE), any(), any());
 
         SchedulerEngine schedulerEngine = mock(SchedulerEngine.class);
         TaskId taskId = new TaskId("node", 123);
@@ -728,7 +728,7 @@ public void testTriggerWithHeaders() throws Exception {
             ((ActionListener<SearchResponse>) invocationOnMock.getArguments()[2]).onResponse(r);
 
             return null;
-        }).when(client).execute(eq(SearchAction.INSTANCE), any(), any());
+        }).when(client).execute(eq(TransportSearchAction.TYPE), any(), any());
 
         SchedulerEngine schedulerEngine = mock(SchedulerEngine.class);
         TaskId taskId = new TaskId("node", 123);
@@ -838,7 +838,7 @@ public void testSaveStateChangesIDScheme() throws Exception {
             ((ActionListener<SearchResponse>) invocationOnMock.getArguments()[2]).onResponse(r);
 
             return null;
-        }).when(client).execute(eq(SearchAction.INSTANCE), any(), any());
+        }).when(client).execute(eq(TransportSearchAction.TYPE), any(), any());
 
         SchedulerEngine schedulerEngine = mock(SchedulerEngine.class);
         RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPED, null);
diff --git a/x-pack/plugin/search-business-rules/src/internalClusterTest/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java b/x-pack/plugin/search-business-rules/src/internalClusterTest/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java
index 9012946abc686..9a28e11ee1cbb 100644
--- a/x-pack/plugin/search-business-rules/src/internalClusterTest/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java
+++ b/x-pack/plugin/search-business-rules/src/internalClusterTest/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java
@@ -9,7 +9,6 @@
 import org.apache.lucene.search.Explanation;
 import org.elasticsearch.action.admin.indices.alias.Alias;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.SearchType;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.query.Operator;
@@ -32,6 +31,7 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFourthHit;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThirdHit;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId;
@@ -71,16 +71,16 @@ public void testPinnedPromotions() throws Exception {
         for (int i = 0; i < numRelevantDocs; i++) {
             if (i % 2 == 0) {
                 // add lower-scoring text
-                client().prepareIndex("test").setId(Integer.toString(i)).setSource("field1", "the quick brown fox").get();
+                prepareIndex("test").setId(Integer.toString(i)).setSource("field1", "the quick brown fox").get();
             } else {
                 // add higher-scoring text
-                client().prepareIndex("test").setId(Integer.toString(i)).setSource("field1", "red fox").get();
+                prepareIndex("test").setId(Integer.toString(i)).setSource("field1", "red fox").get();
             }
         }
         // Add docs with no relevance
         int numIrrelevantDocs = randomIntBetween(1, 10);
         for (int i = numRelevantDocs; i <= numRelevantDocs + numIrrelevantDocs; i++) {
-            client().prepareIndex("test").setId(Integer.toString(i)).setSource("field1", "irrelevant").get();
+            prepareIndex("test").setId(Integer.toString(i)).setSource("field1", "irrelevant").get();
         }
         refresh();
@@ -113,50 +113,45 @@ private void assertPinnedPromotions(PinnedQueryBuilder pqb, LinkedHashSet<String> pins, int iter, int numRelevantDocs) {
         int from = randomIntBetween(0, numRelevantDocs);
         int size = randomIntBetween(10, 100);
-        SearchResponse searchResponse = prepareSearch().setQuery(pqb)
-            .setTrackTotalHits(true)
-            .setSize(size)
-            .setFrom(from)
-            .setSearchType(DFS_QUERY_THEN_FETCH)
-            .get();
-
-        long numHits = searchResponse.getHits().getTotalHits().value;
-        assertThat(numHits, lessThanOrEqualTo((long) numRelevantDocs + pins.size()));
-
-        // Check pins are sorted by increasing score, (unlike organic, there are no duplicate scores)
-        float lastScore = Float.MAX_VALUE;
-        SearchHit[] hits = searchResponse.getHits().getHits();
-        for (int hitNumber = 0; hitNumber < Math.min(hits.length, pins.size() - from); hitNumber++) {
-            assertThat("Hit " + hitNumber + " in iter " + iter + " wrong" + pins, hits[hitNumber].getScore(), lessThan(lastScore));
-            lastScore = hits[hitNumber].getScore();
-        }
-        // Check that the pins appear in the requested order (globalHitNumber is cursor independent of from and size window used)
-        int globalHitNumber = 0;
-        for (String id : pins) {
-            if (globalHitNumber < size && globalHitNumber >= from) {
-                assertThat(
-                    "Hit " + globalHitNumber + " in iter " + iter + " wrong" + pins,
-                    hits[globalHitNumber - from].getId(),
-                    equalTo(id)
-                );
-            }
-            globalHitNumber++;
-        }
-        // Test the organic hits are sorted by text relevance
-        boolean highScoresExhausted = false;
-        for (; globalHitNumber < hits.length + from; globalHitNumber++) {
-            if (globalHitNumber >= from) {
-                int id = Integer.parseInt(hits[globalHitNumber - from].getId());
-                if (id % 2 == 0) {
-                    highScoresExhausted = true;
-                } else {
-                    assertFalse("All odd IDs should have scored higher than even IDs in organic results", highScoresExhausted);
+        assertResponse(
+            prepareSearch().setQuery(pqb).setTrackTotalHits(true).setSize(size).setFrom(from).setSearchType(DFS_QUERY_THEN_FETCH),
+            response -> {
+                long numHits = response.getHits().getTotalHits().value;
+                assertThat(numHits, lessThanOrEqualTo((long) numRelevantDocs + pins.size()));
+
+                // Check pins are sorted by increasing score, (unlike organic, there are no duplicate scores)
+                float lastScore = Float.MAX_VALUE;
+                SearchHit[] hits = response.getHits().getHits();
+                for (int hitNumber = 0; hitNumber < Math.min(hits.length, pins.size() - from); hitNumber++) {
+                    assertThat("Hit " + hitNumber + " in iter " + iter + " wrong" + pins, hits[hitNumber].getScore(), lessThan(lastScore));
+                    lastScore = hits[hitNumber].getScore();
+                }
+                // Check that the pins appear in the requested order (globalHitNumber is cursor independent of from and size window used)
+                int globalHitNumber = 0;
+                for (String id : pins) {
+                    if (globalHitNumber < size && globalHitNumber >= from) {
+                        assertThat(
+                            "Hit " + globalHitNumber + " in iter " + iter + " wrong" + pins,
+                            hits[globalHitNumber - from].getId(),
+                            equalTo(id)
+                        );
+                    }
+                    globalHitNumber++;
+                }
+                // Test the organic hits are sorted by text relevance
+                boolean highScoresExhausted = false;
+                for (; globalHitNumber < hits.length + from; globalHitNumber++) {
+                    if (globalHitNumber >= from) {
+                        int id = Integer.parseInt(hits[globalHitNumber - from].getId());
+                        if (id % 2 == 0) {
+                            highScoresExhausted = true;
+                        } else {
+                            assertFalse("All odd IDs should have scored higher than even IDs in organic results", highScoresExhausted);
+                        }
+                    }
                }
-            }
-
-        }
-
+            }
+        );
     }
 
     /**
@@ -182,8 +177,8 @@ public void testExhaustiveScoring() throws Exception {
             ).setSettings(Settings.builder().put(indexSettings()).put("index.number_of_shards", 1))
         );
 
-        client().prepareIndex("test").setId("1").setSource("field1", "foo").get();
-        client().prepareIndex("test").setId("2").setSource("field1", "foo", "field2", "foo").get();
+        prepareIndex("test").setId("1").setSource("field1", "foo").get();
+        prepareIndex("test").setId("2").setSource("field1", "foo", "field2", "foo").get();
 
         refresh();
 
@@ -193,10 +188,10 @@
     }
 
     private void assertExhaustiveScoring(PinnedQueryBuilder pqb) {
-        SearchResponse searchResponse = prepareSearch().setQuery(pqb).setTrackTotalHits(true).setSearchType(DFS_QUERY_THEN_FETCH).get();
-
-        long numHits = searchResponse.getHits().getTotalHits().value;
-        assertThat(numHits, equalTo(2L));
+        assertResponse(prepareSearch().setQuery(pqb).setTrackTotalHits(true).setSearchType(DFS_QUERY_THEN_FETCH), response -> {
+            long numHits = response.getHits().getTotalHits().value;
+            assertThat(numHits, equalTo(2L));
+        });
     }
 
     public void testExplain() throws Exception {
@@ -215,10 +210,10 @@
             )
         );
         ensureGreen();
-        client().prepareIndex("test").setId("1").setSource("field1", "the quick brown fox").get();
-        client().prepareIndex("test").setId("2").setSource("field1", "pinned").get();
-        client().prepareIndex("test").setId("3").setSource("field1", "irrelevant").get();
-        client().prepareIndex("test").setId("4").setSource("field1", "slow brown cat").get();
+        prepareIndex("test").setId("1").setSource("field1", "the quick brown fox").get();
+        prepareIndex("test").setId("2").setSource("field1", "pinned").get();
+        prepareIndex("test").setId("3").setSource("field1", "irrelevant").get();
+        prepareIndex("test").setId("4").setSource("field1", "slow brown cat").get();
         refresh();
 
         QueryBuilder organicQuery = QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR);
@@ -227,19 +222,19 @@
     }
 
     private void assertExplain(PinnedQueryBuilder pqb) {
-        SearchResponse searchResponse = prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setQuery(pqb).setExplain(true).get();
-        assertHitCount(searchResponse, 3);
-        assertFirstHit(searchResponse, hasId("2"));
-        assertSecondHit(searchResponse, hasId("1"));
-        assertThirdHit(searchResponse, hasId("4"));
-
-        Explanation pinnedExplanation = searchResponse.getHits().getAt(0).getExplanation();
-        assertThat(pinnedExplanation, notNullValue());
-        assertThat(pinnedExplanation.isMatch(), equalTo(true));
-        assertThat(pinnedExplanation.getDetails().length, equalTo(1));
-        assertThat(pinnedExplanation.getDetails()[0].isMatch(), equalTo(true));
assertThat(pinnedExplanation.getDetails()[0].getDescription(), containsString("ConstantScore")); - + assertResponse(prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setQuery(pqb).setExplain(true), searchResponse -> { + assertHitCount(searchResponse, 3); + assertFirstHit(searchResponse, hasId("2")); + assertSecondHit(searchResponse, hasId("1")); + assertThirdHit(searchResponse, hasId("4")); + + Explanation pinnedExplanation = searchResponse.getHits().getAt(0).getExplanation(); + assertThat(pinnedExplanation, notNullValue()); + assertThat(pinnedExplanation.isMatch(), equalTo(true)); + assertThat(pinnedExplanation.getDetails().length, equalTo(1)); + assertThat(pinnedExplanation.getDetails()[0].isMatch(), equalTo(true)); + assertThat(pinnedExplanation.getDetails()[0].getDescription(), containsString("ConstantScore")); + }); } public void testHighlight() throws Exception { @@ -259,7 +254,7 @@ public void testHighlight() throws Exception { ) ); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("field1", "the quick brown fox").get(); + prepareIndex("test").setId("1").setSource("field1", "the quick brown fox").get(); refresh(); QueryBuilder organicQuery = QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR); @@ -271,16 +266,16 @@ private void assertHighlight(PinnedQueryBuilder pqb) { HighlightBuilder testHighlighter = new HighlightBuilder(); testHighlighter.field("field1"); - SearchResponse searchResponse = prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setQuery(pqb) - .highlighter(testHighlighter) - .setExplain(true) - .get(); - assertHitCount(searchResponse, 1); - Map highlights = searchResponse.getHits().getHits()[0].getHighlightFields(); - assertThat(highlights.size(), equalTo(1)); - HighlightField highlight = highlights.get("field1"); - assertThat(highlight.fragments()[0].toString(), equalTo("the quick brown fox")); + assertResponse( + prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setQuery(pqb).highlighter(testHighlighter).setExplain(true), + searchResponse -> { + assertHitCount(searchResponse, 1); + Map highlights = searchResponse.getHits().getHits()[0].getHighlightFields(); + assertThat(highlights.size(), equalTo(1)); + HighlightField highlight = highlights.get("field1"); + assertThat(highlight.fragments()[0].toString(), equalTo("the quick brown fox")); + } + ); } public void testMultiIndexDocs() throws Exception { @@ -314,12 +309,12 @@ public void testMultiIndexDocs() throws Exception { ).setSettings(Settings.builder().put(indexSettings()).put("index.number_of_shards", randomIntBetween(2, 5))) ); - client().prepareIndex("test1").setId("a").setSource("field1", "1a bar").get(); - client().prepareIndex("test1").setId("b").setSource("field1", "1b bar").get(); - client().prepareIndex("test1").setId("c").setSource("field1", "1c bar").get(); - client().prepareIndex("test2").setId("a").setSource("field1", "2a bar").get(); - client().prepareIndex("test2").setId("b").setSource("field1", "2b bar").get(); - client().prepareIndex("test2").setId("c").setSource("field1", "2c foo").get(); + prepareIndex("test1").setId("a").setSource("field1", "1a bar").get(); + prepareIndex("test1").setId("b").setSource("field1", "1b bar").get(); + prepareIndex("test1").setId("c").setSource("field1", "1c bar").get(); + prepareIndex("test2").setId("a").setSource("field1", "2a bar").get(); + prepareIndex("test2").setId("b").setSource("field1", "2b bar").get(); + prepareIndex("test2").setId("c").setSource("field1", "2c foo").get(); 
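Note: these hunks also replace client().prepareIndex(index) with a bare prepareIndex(index). Presumably that is a convenience method on the shared integration-test base class; a plausible shape, offered as an assumption of this note rather than code taken from the patch:

    // Hypothetical sketch of the base-class helper the call sites now lean on:
    protected IndexRequestBuilder prepareIndex(String index) {
        return client().prepareIndex(index); // same builder, less boilerplate per call site
    }

Call sites then read prepareIndex("test1").setId("a").setSource("field1", "1a bar").get(), exactly as in the hunks above.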
refresh(); @@ -330,13 +325,13 @@ public void testMultiIndexDocs() throws Exception { new Item("test1", "b") ); - SearchResponse searchResponse = prepareSearch().setQuery(pqb).setTrackTotalHits(true).setSearchType(DFS_QUERY_THEN_FETCH).get(); - - assertHitCount(searchResponse, 4); - assertFirstHit(searchResponse, both(hasIndex("test2")).and(hasId("a"))); - assertSecondHit(searchResponse, both(hasIndex("test1")).and(hasId("a"))); - assertThirdHit(searchResponse, both(hasIndex("test1")).and(hasId("b"))); - assertFourthHit(searchResponse, both(hasIndex("test2")).and(hasId("c"))); + assertResponse(prepareSearch().setQuery(pqb).setTrackTotalHits(true).setSearchType(DFS_QUERY_THEN_FETCH), searchResponse -> { + assertHitCount(searchResponse, 4); + assertFirstHit(searchResponse, both(hasIndex("test2")).and(hasId("a"))); + assertSecondHit(searchResponse, both(hasIndex("test1")).and(hasId("a"))); + assertThirdHit(searchResponse, both(hasIndex("test1")).and(hasId("b"))); + assertFourthHit(searchResponse, both(hasIndex("test2")).and(hasId("c"))); + }); } public void testMultiIndexWithAliases() throws Exception { @@ -357,9 +352,9 @@ public void testMultiIndexWithAliases() throws Exception { .addAlias(new Alias("test-alias")) ); - client().prepareIndex("test").setId("a").setSource("field1", "document a").get(); - client().prepareIndex("test").setId("b").setSource("field1", "document b").get(); - client().prepareIndex("test").setId("c").setSource("field1", "document c").get(); + prepareIndex("test").setId("a").setSource("field1", "document a").get(); + prepareIndex("test").setId("b").setSource("field1", "document b").get(); + prepareIndex("test").setId("c").setSource("field1", "document c").get(); refresh(); @@ -370,12 +365,12 @@ public void testMultiIndexWithAliases() throws Exception { new Item("test", "a") ); - SearchResponse searchResponse = prepareSearch().setQuery(pqb).setTrackTotalHits(true).setSearchType(DFS_QUERY_THEN_FETCH).get(); - - assertHitCount(searchResponse, 3); - assertFirstHit(searchResponse, both(hasIndex("test")).and(hasId("b"))); - assertSecondHit(searchResponse, both(hasIndex("test")).and(hasId("a"))); - assertThirdHit(searchResponse, both(hasIndex("test")).and(hasId("c"))); + assertResponse(prepareSearch().setQuery(pqb).setTrackTotalHits(true).setSearchType(DFS_QUERY_THEN_FETCH), searchResponse -> { + assertHitCount(searchResponse, 3); + assertFirstHit(searchResponse, both(hasIndex("test")).and(hasId("b"))); + assertSecondHit(searchResponse, both(hasIndex("test")).and(hasId("a"))); + assertThirdHit(searchResponse, both(hasIndex("test")).and(hasId("c"))); + }); } public void testMultiIndexWithAliasesAndDuplicateIds() throws Exception { @@ -411,11 +406,11 @@ public void testMultiIndexWithAliasesAndDuplicateIds() throws Exception { ).setSettings(Settings.builder().put(indexSettings()).put("index.number_of_shards", randomIntBetween(2, 5))).addAlias(alias) ); - client().prepareIndex("test1").setId("a").setSource("field1", "document a").get(); - client().prepareIndex("test1").setId("b").setSource("field1", "document b").get(); - client().prepareIndex("test1").setId("c").setSource("field1", "document c").get(); + prepareIndex("test1").setId("a").setSource("field1", "document a").get(); + prepareIndex("test1").setId("b").setSource("field1", "document b").get(); + prepareIndex("test1").setId("c").setSource("field1", "document c").get(); - client().prepareIndex("test2").setId("a").setSource("field1", "document a").get(); + prepareIndex("test2").setId("a").setSource("field1", "document 
a").get(); refresh(); @@ -428,12 +423,12 @@ public void testMultiIndexWithAliasesAndDuplicateIds() throws Exception { new Item("test-alias", "a") ); - SearchResponse searchResponse = prepareSearch().setQuery(pqb).setTrackTotalHits(true).setSearchType(DFS_QUERY_THEN_FETCH).get(); - - assertHitCount(searchResponse, 4); - assertFirstHit(searchResponse, both(hasIndex("test1")).and(hasId("b"))); - assertSecondHit(searchResponse, hasId("a")); - assertThirdHit(searchResponse, hasId("a")); - assertFourthHit(searchResponse, both(hasIndex("test1")).and(hasId("c"))); + assertResponse(prepareSearch().setQuery(pqb).setTrackTotalHits(true).setSearchType(DFS_QUERY_THEN_FETCH), searchResponse -> { + assertHitCount(searchResponse, 4); + assertFirstHit(searchResponse, both(hasIndex("test1")).and(hasId("b"))); + assertSecondHit(searchResponse, hasId("a")); + assertThirdHit(searchResponse, hasId("a")); + assertFourthHit(searchResponse, both(hasIndex("test1")).and(hasId("c"))); + }); } } diff --git a/x-pack/plugin/searchable-snapshots/qa/s3/build.gradle b/x-pack/plugin/searchable-snapshots/qa/s3/build.gradle index 01a2dc065ec69..8919ddc6d29fd 100644 --- a/x-pack/plugin/searchable-snapshots/qa/s3/build.gradle +++ b/x-pack/plugin/searchable-snapshots/qa/s3/build.gradle @@ -1,13 +1,13 @@ import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE import org.elasticsearch.gradle.internal.info.BuildParams -apply plugin: 'elasticsearch.legacy-java-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.rest-resources' -final Project fixture = project(':test:fixtures:s3-fixture') - dependencies { javaRestTestImplementation(testArtifact(project(xpackModule('searchable-snapshots')))) + javaRestTestImplementation project(":test:framework") + javaRestTestImplementation project(':test:fixtures:s3-fixture') } restResources { @@ -33,42 +33,14 @@ if (!s3AccessKey && !s3SecretKey && !s3Bucket && !s3BasePath) { throw new IllegalArgumentException("not all options specified to run against external S3 service are present") } -if (useFixture) { - apply plugin: 'elasticsearch.test.fixtures' - testFixtures.useFixture(fixture.path, 's3-fixture-other') -} - tasks.named("javaRestTest").configure { + usesDefaultDistribution() + systemProperty("tests.use.fixture", Boolean.toString(useFixture)) systemProperty 'test.s3.bucket', s3Bucket - nonInputProperties.systemProperty 'test.s3.base_path', s3BasePath ? 
s3BasePath + "_searchable_snapshots_tests" + BuildParams.testSeed : 'base_path' -} - -testClusters.matching { it.name == "javaRestTest" }.configureEach { - testDistribution = 'DEFAULT' - - keystore 's3.client.searchable_snapshots.access_key', s3AccessKey - keystore 's3.client.searchable_snapshots.secret_key', s3SecretKey - setting 'xpack.license.self_generated.type', 'trial' - - if (useFixture) { - def fixtureAddress = { fixtureName -> - assert useFixture: 'closure should not be used without a fixture' - int ephemeralPort = fixture.postProcessFixture.ext."test.fixtures.${fixtureName}.tcp.80" - assert ephemeralPort > 0 - '127.0.0.1:' + ephemeralPort - } - setting 's3.client.searchable_snapshots.protocol', 'http' - setting 's3.client.searchable_snapshots.endpoint', { "${-> fixtureAddress('s3-fixture-other')}" }, IGNORE_VALUE - - } else { - println "Using an external service to test " + project.name - } - - setting 'xpack.searchable.snapshot.shared_cache.size', '16MB' - setting 'xpack.searchable.snapshot.shared_cache.region_size', '256KB' - setting 'xpack.searchable_snapshots.cache_fetch_async_thread_pool.keep_alive', '0ms' + systemProperty("s3AccessKey", s3AccessKey) + systemProperty("s3SecretKey", s3SecretKey) - setting 'xpack.security.enabled', 'false' + nonInputProperties.systemProperty 'test.s3.base_path', s3BasePath ? s3BasePath + "_searchable_snapshots_tests" + BuildParams.testSeed : 'base_path_integration_tests' } tasks.register("s3ThirdPartyTest") { diff --git a/x-pack/plugin/searchable-snapshots/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/s3/S3SearchableSnapshotsIT.java b/x-pack/plugin/searchable-snapshots/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/s3/S3SearchableSnapshotsIT.java index 8f59a2b47c3c0..93ac3df92f47b 100644 --- a/x-pack/plugin/searchable-snapshots/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/s3/S3SearchableSnapshotsIT.java +++ b/x-pack/plugin/searchable-snapshots/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/s3/S3SearchableSnapshotsIT.java @@ -6,13 +6,39 @@ */ package org.elasticsearch.xpack.searchablesnapshots.s3; +import fixture.s3.S3HttpFixture; + import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.xpack.searchablesnapshots.AbstractSearchableSnapshotsRestTestCase; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.not; public class S3SearchableSnapshotsIT extends AbstractSearchableSnapshotsRestTestCase { + static final boolean USE_FIXTURE = Boolean.parseBoolean(System.getProperty("tests.use.fixture", "true")); + + public static final S3HttpFixture s3Fixture = new S3HttpFixture(USE_FIXTURE); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .keystore("s3.client.searchable_snapshots.access_key", System.getProperty("s3AccessKey")) + .keystore("s3.client.searchable_snapshots.secret_key", System.getProperty("s3SecretKey")) + .setting("xpack.license.self_generated.type", "trial") + .setting("s3.client.searchable_snapshots.protocol", () -> "http", (n) -> USE_FIXTURE) + .setting("s3.client.searchable_snapshots.endpoint", s3Fixture::getAddress, (n) -> USE_FIXTURE) + 
.setting("xpack.searchable.snapshot.shared_cache.size", "16MB") + .setting("xpack.searchable.snapshot.shared_cache.region_size", "256KB") + .setting("xpack.searchable_snapshots.cache_fetch_async_thread_pool.keep_alive", "0ms") + .setting("xpack.security.enabled", "false") + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(cluster); @Override protected String writeRepositoryType() { @@ -29,4 +55,9 @@ protected Settings writeRepositorySettings() { return Settings.builder().put("client", "searchable_snapshots").put("bucket", bucket).put("base_path", basePath).build(); } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java index b07d307f105c0..d3bb435dc03ab 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java @@ -203,7 +203,7 @@ protected void populateIndex(String indexName, int maxIndexRequests) throws Inte // This index does not permit dynamic fields, so we can only use defined field names final String key = indexName.equals(SearchableSnapshots.SNAPSHOT_BLOB_CACHE_INDEX) ? "type" : "foo"; for (int i = between(10, maxIndexRequests); i >= 0; i--) { - indexRequestBuilders.add(client().prepareIndex(indexName).setSource(key, randomBoolean() ? "bar" : "baz")); + indexRequestBuilders.add(prepareIndex(indexName).setSource(key, randomBoolean() ? "bar" : "baz")); } indexRandom(true, true, indexRequestBuilders); refresh(indexName); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/ClusterStateApplierOrderingTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/ClusterStateApplierOrderingTests.java index 3b4c4739311a4..82ded22603ef1 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/ClusterStateApplierOrderingTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/ClusterStateApplierOrderingTests.java @@ -49,7 +49,7 @@ public void testRepositoriesServiceClusterStateApplierIsCalledBeforeIndicesClust final List indexRequestBuilders = new ArrayList<>(); for (int i = between(10, 10_000); i >= 0; i--) { - indexRequestBuilders.add(client().prepareIndex(indexName).setSource("foo", randomBoolean() ? "bar" : "baz")); + indexRequestBuilders.add(prepareIndex(indexName).setSource("foo", randomBoolean() ? 
"bar" : "baz")); } indexRandom(true, true, indexRequestBuilders); refresh(indexName); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java index ee1ce56528361..daf61ff9a4931 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java @@ -430,8 +430,8 @@ public void testRequestCacheOnFrozen() throws Exception { ); indexRandom( true, - client().prepareIndex("test-index").setSource("f", "2014-03-10T00:00:00.000Z"), - client().prepareIndex("test-index").setSource("f", "2014-05-13T00:00:00.000Z") + prepareIndex("test-index").setSource("f", "2014-03-10T00:00:00.000Z"), + prepareIndex("test-index").setSource("f", "2014-05-13T00:00:00.000Z") ); ensureSearchable("test-index"); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java index 39e476107a0d6..0551ac3007f10 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java @@ -7,12 +7,12 @@ package org.elasticsearch.xpack.searchablesnapshots; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.ClosePointInTimeAction; import org.elasticsearch.action.search.ClosePointInTimeRequest; -import org.elasticsearch.action.search.OpenPointInTimeAction; import org.elasticsearch.action.search.OpenPointInTimeRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.search.TransportClosePointInTimeAction; +import org.elasticsearch.action.search.TransportOpenPointInTimeAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; @@ -49,7 +49,7 @@ public void testSearcherId() throws Exception { final List indexRequestBuilders = new ArrayList<>(); final int docCount = between(0, 100); for (int i = 0; i < docCount; i++) { - indexRequestBuilders.add(client().prepareIndex(indexName).setSource("created_date", "2011-02-02")); + indexRequestBuilders.add(prepareIndex(indexName).setSource("created_date", "2011-02-02")); } indexRandom(true, false, indexRequestBuilders); assertThat( @@ -116,7 +116,7 @@ public void testRetryPointInTime() throws Exception { final List indexRequestBuilders = new ArrayList<>(); final int docCount = between(0, 100); for (int i = 0; i < docCount; i++) { - indexRequestBuilders.add(client().prepareIndex(indexName).setSource("created_date", "2011-02-02")); + indexRequestBuilders.add(prepareIndex(indexName).setSource("created_date", "2011-02-02")); } indexRandom(true, false, indexRequestBuilders); assertThat( @@ -142,7 +142,7 @@ public void 
testRetryPointInTime() throws Exception { final OpenPointInTimeRequest openRequest = new OpenPointInTimeRequest(indexName).indicesOptions( IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED ).keepAlive(TimeValue.timeValueMinutes(2)); - final String pitId = client().execute(OpenPointInTimeAction.INSTANCE, openRequest).actionGet().getPointInTimeId(); + final String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openRequest).actionGet().getPointInTimeId(); try { SearchResponse resp = prepareSearch().setIndices(indexName) .setPreference(null) @@ -169,7 +169,7 @@ public void testRetryPointInTime() throws Exception { assertThat(resp.pointInTimeId(), equalTo(pitId)); assertHitCount(resp, docCount); } finally { - client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(pitId)).actionGet(); + client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet(); } } } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java index 6249324f46e79..32c031f80177d 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java @@ -12,10 +12,10 @@ import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchShardsAction; import org.elasticsearch.action.search.SearchShardsGroup; import org.elasticsearch.action.search.SearchShardsRequest; import org.elasticsearch.action.search.SearchShardsResponse; +import org.elasticsearch.action.search.TransportSearchShardsAction; import org.elasticsearch.blobcache.shared.SharedBlobCacheService; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -209,7 +209,8 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying ); if (includeIndexCoveringSearchRangeInSearchRequest) { - SearchShardsResponse searchShardsResponse = client().execute(SearchShardsAction.INSTANCE, searchShardsRequest).actionGet(); + SearchShardsResponse searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest) + .actionGet(); assertThat(searchShardsResponse.getGroups().size(), equalTo(totalShards)); List> partitionedBySkipped = searchShardsResponse.getGroups() .stream() @@ -227,7 +228,7 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying } else { SearchShardsResponse searchShardsResponse = null; try { - searchShardsResponse = client().execute(SearchShardsAction.INSTANCE, searchShardsRequest).actionGet(); + searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest).actionGet(); } catch (SearchPhaseExecutionException e) { // ignore as this is expected to happen } @@ -290,7 +291,8 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying null ); - SearchShardsResponse searchShardsResponse = 
client().execute(SearchShardsAction.INSTANCE, searchShardsRequest).actionGet(); + SearchShardsResponse searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest) + .actionGet(); assertThat(searchShardsResponse.getGroups().size(), equalTo(totalShards)); List> partitionedBySkipped = searchShardsResponse.getGroups() .stream() @@ -324,7 +326,7 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying SearchShardsResponse searchShardsResponse = null; try { - searchShardsResponse = client().execute(SearchShardsAction.INSTANCE, searchShardsRequest).actionGet(); + searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest).actionGet(); } catch (SearchPhaseExecutionException e) { // ignore as this is what should happen } @@ -356,7 +358,7 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying null ); - SearchShardsResponse searchShardsResponse = client().execute(SearchShardsAction.INSTANCE, searchShardsRequest) + SearchShardsResponse searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest) .actionGet(); assertThat(searchShardsResponse.getGroups().size(), equalTo(indexOutsideSearchRangeShardCount)); List> partitionedBySkipped = searchShardsResponse.getGroups() @@ -469,7 +471,7 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() null ); - SearchShardsResponse searchShardsResponse = client().execute(SearchShardsAction.INSTANCE, searchShardsRequest).actionGet(); + SearchShardsResponse searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest).actionGet(); assertThat(searchShardsResponse.getGroups().size(), equalTo(totalShards)); List> partitionedBySkipped = searchShardsResponse.getGroups() .stream() @@ -535,7 +537,7 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() null ); - SearchShardsResponse searchShardsResponse = client().execute(SearchShardsAction.INSTANCE, searchShardsRequest).actionGet(); + SearchShardsResponse searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest).actionGet(); assertThat(searchShardsResponse.getGroups().size(), equalTo(totalShards)); List> partitionedBySkipped = searchShardsResponse.getGroups() .stream() @@ -637,7 +639,7 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo { SearchShardsResponse searchShardsResponse = null; try { - client().execute(SearchShardsAction.INSTANCE, searchShardsRequest).actionGet(); + client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest).actionGet(); } catch (SearchPhaseExecutionException e) { // ignore as this is expected to happen } @@ -678,7 +680,7 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo { SearchShardsResponse searchShardsResponse = null; try { - client().execute(SearchShardsAction.INSTANCE, searchShardsRequest).actionGet(); + client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest).actionGet(); } catch (SearchPhaseExecutionException e) { // ignore as this is expected to happen } @@ -724,18 +726,17 @@ private void indexDocumentsWithTimestampWithinDate(String indexName, int docCoun final List indexRequestBuilders = new ArrayList<>(); for (int i = 0; i < docCount; i++) { indexRequestBuilders.add( - client().prepareIndex(indexName) - .setSource( - DataStream.TIMESTAMP_FIELD_NAME, - String.format( - Locale.ROOT, - timestampTemplate, - between(0, 
23), - between(0, 59), - between(0, 59), - randomLongBetween(0, 999999999L) - ) + prepareIndex(indexName).setSource( + DataStream.TIMESTAMP_FIELD_NAME, + String.format( + Locale.ROOT, + timestampTemplate, + between(0, 23), + between(0, 59), + between(0, 59), + randomLongBetween(0, 999999999L) ) + ) ); } indexRandom(true, false, indexRequestBuilders); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java index 81ea36e88628c..c3f5e44ae32a0 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java @@ -365,7 +365,7 @@ public void testCanMountSnapshotTakenWhileConcurrentlyIndexing() throws Exceptio final Thread indexingThead = new Thread(() -> { final List indexRequestBuilders = new ArrayList<>(); for (int i = between(10, 10_000); i >= 0; i--) { - indexRequestBuilders.add(client().prepareIndex(indexName).setSource("foo", randomBoolean() ? "bar" : "baz")); + indexRequestBuilders.add(prepareIndex(indexName).setSource("foo", randomBoolean() ? "bar" : "baz")); } try { safeAwait(cyclicBarrier); @@ -427,7 +427,7 @@ public void testMaxRestoreBytesPerSecIsUsed() throws Exception { true, false, IntStream.range(0, nbDocs) - .mapToObj(i -> client().prepareIndex(indexName).setSource("foo", randomAlphaOfLength(1048))) + .mapToObj(i -> prepareIndex(indexName).setSource("foo", randomAlphaOfLength(1048))) .collect(Collectors.toList()) ); refresh(indexName); @@ -699,18 +699,17 @@ public void testSnapshotMountedIndexWithTimestampsRecordsTimestampRangeInIndexMe final int docCount = between(0, 1000); for (int i = 0; i < docCount; i++) { indexRequestBuilders.add( - client().prepareIndex(indexName) - .setSource( - DataStream.TIMESTAMP_FIELD_NAME, - String.format( - Locale.ROOT, - "2020-11-26T%02d:%02d:%02d.%09dZ", - between(0, 23), - between(0, 59), - between(0, 59), - randomLongBetween(0, 999999999L) - ) + prepareIndex(indexName).setSource( + DataStream.TIMESTAMP_FIELD_NAME, + String.format( + Locale.ROOT, + "2020-11-26T%02d:%02d:%02d.%09dZ", + between(0, 23), + between(0, 59), + between(0, 59), + randomLongBetween(0, 999999999L) ) + ) ); } indexRandom(true, false, indexRequestBuilders); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java index 9847eb101531c..b5ebf1104a195 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java @@ -132,7 +132,7 @@ public void testBlobStoreCache() throws Exception { for (int i = numberOfDocs; i > 0; i--) { XContentBuilder builder = XContentFactory.smileBuilder(); builder.startObject().field("text", 
randomRealisticUnicodeOfCodepointLengthBetween(5, 50)).field("num", i).endObject(); - indexRequestBuilders.add(client().prepareIndex(indexName).setSource(builder)); + indexRequestBuilders.add(prepareIndex(indexName).setSource(builder)); } indexRandom(true, true, true, indexRequestBuilders); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java index 68b702469d138..c89df1edfa100 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java @@ -366,7 +366,7 @@ private Map> mountRandomIndicesWithCache(String re builder.field("int_" + j, randomInt()); } builder.endObject(); - indexRequestBuilders.add(client().prepareIndex(indexName).setSource(builder)); + indexRequestBuilders.add(prepareIndex(indexName).setSource(builder)); } indexRandom(true, indexRequestBuilders); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java index 3892ef6ea4640..7e6981a4594e5 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java @@ -137,7 +137,7 @@ public void testConcurrentPrewarming() throws Exception { if (nbDocs > 0) { final BulkRequestBuilder bulkRequest = client().prepareBulk(); for (int i = 0; i < nbDocs; i++) { - bulkRequest.add(client().prepareIndex(indexName).setSource("foo", randomBoolean() ? "bar" : "baz")); + bulkRequest.add(prepareIndex(indexName).setSource("foo", randomBoolean() ? 
"bar" : "baz")); } final BulkResponse bulkResponse = bulkRequest.get(); assertThat(bulkResponse.hasFailures(), is(false)); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java index 0d1bc7eec94bc..3858b087f4d3a 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.test.BackgroundIndexer; +import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xpack.searchablesnapshots.BaseFrozenSearchableSnapshotsIntegTestCase; import org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots; import org.elasticsearch.xpack.searchablesnapshots.action.ClearSearchableSnapshotsCacheAction; @@ -27,7 +28,6 @@ import org.elasticsearch.xpack.searchablesnapshots.action.cache.TransportSearchableSnapshotsNodeCachesStatsAction.NodesCachesStatsResponse; import org.elasticsearch.xpack.searchablesnapshots.action.cache.TransportSearchableSnapshotsNodeCachesStatsAction.NodesRequest; -import java.util.Locale; import java.util.stream.Collectors; import static java.util.stream.Collectors.toSet; @@ -38,6 +38,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class NodesCachesStatsIntegTests extends BaseFrozenSearchableSnapshotsIntegTestCase { public void testNodesCachesStats() throws Exception { @@ -46,7 +47,7 @@ public void testNodesCachesStats() throws Exception { // since this test verifies the cache stats on specific nodes ensureStableCluster(nodeNames.length); - final String index = getTestName().toLowerCase(Locale.ROOT); + final String index = randomIdentifier(); createIndex(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()); final int nbDocs = randomIntBetween(1_000, 10_000); diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotEnableAllocationDecider.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotEnableAllocationDecider.java index 52be65bb783c5..9fa76c681bafd 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotEnableAllocationDecider.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotEnableAllocationDecider.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.searchablesnapshots.allocation.decider; -import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; @@ -18,6 +17,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import 
org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.UpdateForV9; public class SearchableSnapshotEnableAllocationDecider extends AllocationDecider { @@ -28,6 +28,7 @@ public class SearchableSnapshotEnableAllocationDecider extends AllocationDecider * ongoing is determined by cluster.routing.allocation.enable=primaries. Notice that other values for that setting except "all" mean * that no searchable snapshots are allocated anyway. */ + @UpdateForV9 // xpack.searchable.snapshot.allocate_on_rolling_restart was only temporary, remove it in the next major public static final Setting SEARCHABLE_SNAPSHOTS_ALLOCATE_ON_ROLLING_RESTART = Setting.boolSetting( "xpack.searchable.snapshot.allocate_on_rolling_restart", false, @@ -36,11 +37,6 @@ public class SearchableSnapshotEnableAllocationDecider extends AllocationDecider Setting.Property.Deprecated ); - static { - // TODO xpack.searchable.snapshot.allocate_on_rolling_restart was only temporary, remove it in the next major - assert Version.CURRENT.major == Version.V_7_17_0.major + 1; - } - private volatile EnableAllocationDecider.Allocation enableAllocation; private volatile boolean allocateOnRollingRestart; diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java index 1b09ea10d36a8..64508e1d49959 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java @@ -16,15 +16,15 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.search.ClosePointInTimeAction; import org.elasticsearch.action.search.ClosePointInTimeRequest; import org.elasticsearch.action.search.ClosePointInTimeResponse; -import org.elasticsearch.action.search.OpenPointInTimeAction; import org.elasticsearch.action.search.OpenPointInTimeRequest; import org.elasticsearch.action.search.OpenPointInTimeResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportClosePointInTimeAction; +import org.elasticsearch.action.search.TransportOpenPointInTimeAction; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; @@ -435,7 +435,7 @@ public void run() { if (pointIntTimeId == null) { final OpenPointInTimeRequest openRequest = new OpenPointInTimeRequest(SNAPSHOT_BLOB_CACHE_INDEX); openRequest.keepAlive(keepAlive); - clientWithOrigin.execute(OpenPointInTimeAction.INSTANCE, openRequest, new ActionListener<>() { + clientWithOrigin.execute(TransportOpenPointInTimeAction.TYPE, openRequest, new ActionListener<>() { @Override public void onResponse(OpenPointInTimeResponse response) { logger.trace("periodic maintenance task initialized with point-in-time id [{}]", 
response.getPointInTimeId()); @@ -476,7 +476,7 @@ public void onFailure(Exception e) { searchSource.pointInTimeBuilder(pointInTime); final SearchRequest searchRequest = new SearchRequest(); searchRequest.source(searchSource); - clientWithOrigin.execute(SearchAction.INSTANCE, searchRequest, new ActionListener<>() { + clientWithOrigin.execute(TransportSearchAction.TYPE, searchRequest, new ActionListener<>() { @Override public void onResponse(SearchResponse response) { if (searchAfter == null) { @@ -652,21 +652,25 @@ private void complete(@Nullable Exception failure) { final String pitId = pointIntTimeId; if (Strings.hasLength(pitId)) { final ClosePointInTimeRequest closeRequest = new ClosePointInTimeRequest(pitId); - clientWithOrigin.execute(ClosePointInTimeAction.INSTANCE, closeRequest, ActionListener.runAfter(new ActionListener<>() { - @Override - public void onResponse(ClosePointInTimeResponse response) { - if (response.isSucceeded()) { - logger.debug("periodic maintenance task successfully closed point-in-time id [{}]", pitId); - } else { - logger.debug("point-in-time id [{}] not found", pitId); + clientWithOrigin.execute( + TransportClosePointInTimeAction.TYPE, + closeRequest, + ActionListener.runAfter(new ActionListener<>() { + @Override + public void onResponse(ClosePointInTimeResponse response) { + if (response.isSucceeded()) { + logger.debug("periodic maintenance task successfully closed point-in-time id [{}]", pitId); + } else { + logger.debug("point-in-time id [{}] not found", pitId); + } } - } - @Override - public void onFailure(Exception e) { - logger.warn(() -> "failed to close point-in-time id [" + pitId + "]", e); - } - }, () -> Releasables.close(releasable))); + @Override + public void onFailure(Exception e) { + logger.warn(() -> "failed to close point-in-time id [" + pitId + "]", e); + } + }, () -> Releasables.close(releasable)) + ); waitForRelease = true; } } finally { diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheService.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheService.java index 586621082adbf..448e1e02d889e 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheService.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheService.java @@ -124,7 +124,7 @@ public CachedBlob get( assert Thread.currentThread().getName().contains('[' + ThreadPool.Names.SYSTEM_READ + ']') == false : "must not block [" + Thread.currentThread().getName() + "] for a cache read"; - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); getAsync(repository, snapshotId, indexId, shardId, name, range, future); try { return future.actionGet(5, TimeUnit.SECONDS); diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/common/CacheFile.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/common/CacheFile.java index 01dc78bbc8931..33d65d9f248d8 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/common/CacheFile.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/common/CacheFile.java @@ -352,7 
+352,7 @@ public Future populateAndRead( final RangeMissingHandler writer, final Executor executor ) { - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); Releasable decrementRef = null; try { final FileChannelReference reference = acquireFileChannelReference(); @@ -404,7 +404,7 @@ public void onFailure(Exception e) { */ @Nullable public Future readIfAvailableOrPending(final ByteRange rangeToRead, final RangeAvailableHandler reader) { - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); Releasable decrementRef = null; try { final FileChannelReference reference = acquireFileChannelReference(); diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/CacheService.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/CacheService.java index db56addb434c7..6e480a21d507a 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/CacheService.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/CacheService.java @@ -347,7 +347,7 @@ public void markShardAsEvictedInCache(String snapshotUUID, String snapshotIndexN if (allowShardsEvictions) { final ShardEviction shardEviction = new ShardEviction(snapshotUUID, snapshotIndexName, shardId); pendingShardsEvictions.computeIfAbsent(shardEviction, shard -> { - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); threadPool.generic().execute(new AbstractRunnable() { @Override protected void doRun() { diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectory.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectory.java index 7203b45e86efa..b56cd28e9dc6c 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectory.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectory.java @@ -506,13 +506,12 @@ private void prewarmCache(ActionListener listener, Supplier cance CachedBlobContainerIndexInput cachedIndexInput = (CachedBlobContainerIndexInput) input; final AtomicBoolean alreadyCached = new AtomicBoolean(); - try (var fileListener = new RefCountingListener(ActionListener.runBefore(completionListener.acquire().map(v -> { + try (var fileListener = new RefCountingListener(ActionListener.runBefore(completionListener.acquire(v -> { if (alreadyCached.get()) { recoveryState.markIndexFileAsReused(file.physicalName()); } else { recoveryState.getIndex().addRecoveredFromSnapshotBytesToFile(file.physicalName(), file.length()); } - return v; }), () -> IOUtils.closeWhileHandlingException(cachedIndexInput)))) { if (cachedIndexInput.getPersistentCacheInitialLength() == file.length()) { alreadyCached.set(true); @@ -527,7 +526,7 @@ private void prewarmCache(ActionListener listener, Supplier cance for (int p = 0; p < file.numberOfParts(); p++) { final int part = p; - prewarmTaskRunner.enqueueTask(fileListener.acquire().map(releasable -> { + prewarmTaskRunner.enqueueTask(fileListener.acquire(releasable -> { try 
(releasable) { var fileName = file.physicalName(); final long startTimeInNanos = statsCurrentTimeNanosSupplier.getAsLong(); @@ -543,7 +542,6 @@ private void prewarmCache(ActionListener listener, Supplier cance prefetchedPartBytes ); } - return null; } })); } diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheServiceTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheServiceTests.java index 828b349466a3b..b5e476ecbf25f 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheServiceTests.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheServiceTests.java @@ -103,13 +103,13 @@ public void testGetWhenServiceNotStarted() { BlobStoreCacheService blobCacheService = new BlobStoreCacheService(null, mockClient, SNAPSHOT_BLOB_CACHE_INDEX); blobCacheService.start(); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); blobCacheService.getAsync(repository, snapshotId, indexId, shardId, fileName, range, future); assertThat(future.actionGet(), equalTo(CachedBlob.CACHE_MISS)); blobCacheService.stop(); - future = PlainActionFuture.newFuture(); + future = new PlainActionFuture<>(); blobCacheService.getAsync(repository, snapshotId, indexId, shardId, fileName, range, future); assertThat(future.actionGet(), equalTo(CachedBlob.CACHE_NOT_READY)); } @@ -135,13 +135,13 @@ public void testPutWhenServiceNotStarted() { BlobStoreCacheService blobCacheService = new BlobStoreCacheService(null, mockClient, SNAPSHOT_BLOB_CACHE_INDEX); blobCacheService.start(); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); blobCacheService.putAsync(repository, snapshotId, indexId, shardId, fileName, range, BytesArray.EMPTY, 0L, future); assertThat(future.actionGet(), nullValue()); blobCacheService.stop(); - future = PlainActionFuture.newFuture(); + future = new PlainActionFuture<>(); blobCacheService.putAsync(repository, snapshotId, indexId, shardId, fileName, range, BytesArray.EMPTY, 0L, future); IllegalStateException exception = expectThrows(IllegalStateException.class, future::actionGet); assertThat(exception.getMessage(), containsString("Blob cache service is closed")); @@ -177,7 +177,7 @@ public void testWaitForInFlightCacheFillsToComplete() throws Exception { final List> futures = new ArrayList<>(nbThreads); for (int i = 0; i < nbThreads; i++) { - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); threadPool.generic() .execute( () -> blobCacheService.putAsync(repository, snapshotId, indexId, shardId, fileName, range, BytesArray.EMPTY, 0L, future) diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryStatsTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryStatsTests.java index 25049cf5791bd..90101eca2573a 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryStatsTests.java +++ 
b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryStatsTests.java @@ -694,7 +694,7 @@ protected IndexInputStats createIndexInputStats(long numFiles, long totalSize, l ); DiscoveryNode targetNode = DiscoveryNodeUtils.create("local"); RecoveryState recoveryState = new SearchableSnapshotRecoveryState(shardRouting, targetNode, null); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); final boolean loaded = directory.loadSnapshot(recoveryState, () -> false, future); future.get(); assertThat("Failed to load snapshot", loaded, is(true)); diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryTests.java index 97e5ad3ffec4a..9c36d7b762871 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryTests.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryTests.java @@ -619,7 +619,7 @@ protected void assertSnapshotOrGenericThread() { final SnapshotId snapshotId = new SnapshotId("_snapshot", UUIDs.randomBase64UUID(random())); final IndexId indexId = new IndexId(indexSettings.getIndex().getName(), UUIDs.randomBase64UUID(random())); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); threadPool.generic().submit(() -> { IndexShardSnapshotStatus snapshotStatus = IndexShardSnapshotStatus.newInitializing(null); repository.snapshotShard( @@ -674,7 +674,7 @@ protected void assertSnapshotOrGenericThread() { sharedBlobCacheService ) ) { - final PlainActionFuture f = PlainActionFuture.newFuture(); + final PlainActionFuture f = new PlainActionFuture<>(); final boolean loaded = snapshotDirectory.loadSnapshot(recoveryState, store::isClosing, f); try { f.get(); @@ -779,7 +779,7 @@ public void testClearCache() throws Exception { ) ) { final RecoveryState recoveryState = createRecoveryState(randomBoolean()); - final PlainActionFuture f = PlainActionFuture.newFuture(); + final PlainActionFuture f = new PlainActionFuture<>(); final boolean loaded = directory.loadSnapshot(recoveryState, () -> false, f); f.get(); assertThat("Failed to load snapshot", loaded, is(true)); diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/CachedBlobContainerIndexInputTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/CachedBlobContainerIndexInputTests.java index 1495b6c5a99e2..86726289309e3 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/CachedBlobContainerIndexInputTests.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/CachedBlobContainerIndexInputTests.java @@ -126,7 +126,7 @@ public void testRandomReads() throws Exception { ) ) { RecoveryState recoveryState = createRecoveryState(recoveryFinalizedDone); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); final boolean loaded = 
directory.loadSnapshot(recoveryState, () -> false, future); if (randomBoolean()) { // randomly wait for pre-warm before running the below reads @@ -231,7 +231,7 @@ public void testThrowsEOFException() throws Exception { ) ) { RecoveryState recoveryState = createRecoveryState(randomBoolean()); - final PlainActionFuture f = PlainActionFuture.newFuture(); + final PlainActionFuture f = new PlainActionFuture<>(); final boolean loaded = searchableSnapshotDirectory.loadSnapshot(recoveryState, () -> false, f); try { f.get(); diff --git a/x-pack/plugin/security/build.gradle b/x-pack/plugin/security/build.gradle index 509d4d5012f52..acb802743586c 100644 --- a/x-pack/plugin/security/build.gradle +++ b/x-pack/plugin/security/build.gradle @@ -57,7 +57,7 @@ dependencies { api "org.opensaml:opensaml-storage-impl:${versions.opensaml}" api "net.shibboleth.utilities:java-support:8.4.0" api "com.google.code.findbugs:jsr305:3.0.2" - api "org.apache.santuario:xmlsec:2.3.2" + api "org.apache.santuario:xmlsec:2.3.4" api "io.dropwizard.metrics:metrics-core:4.1.4" api ( "org.cryptacular:cryptacular:1.2.5") { exclude group: 'org.bouncycastle' diff --git a/x-pack/plugin/security/cli/build.gradle b/x-pack/plugin/security/cli/build.gradle index 3e98dfe60ea20..dcf3c7305dbc7 100644 --- a/x-pack/plugin/security/cli/build.gradle +++ b/x-pack/plugin/security/cli/build.gradle @@ -10,8 +10,9 @@ base { dependencies { compileOnly project(":server") compileOnly project(path: xpackModule('core')) - api "org.bouncycastle:bcpkix-jdk15on:${versions.bouncycastle}" - api "org.bouncycastle:bcprov-jdk15on:${versions.bouncycastle}" + api "org.bouncycastle:bcpkix-jdk18on:${versions.bouncycastle}" + api "org.bouncycastle:bcprov-jdk18on:${versions.bouncycastle}" + api "org.bouncycastle:bcutil-jdk18on:${versions.bouncycastle}" testImplementation("com.google.jimfs:jimfs:${versions.jimfs}") { // this is provided by the runtime classpath, from the security project exclude group: "com.google.guava", module: "guava" @@ -35,14 +36,6 @@ tasks.named("test").configure { systemProperty 'tests.security.manager', 'false' // the main code under test runs without the SecurityManager } -tasks.named("thirdPartyAudit").configure { - ignoreMissingClasses( - // Used in org.bouncycastle.pqc.crypto.qtesla.QTeslaKeyEncodingTests - 'junit.framework.Assert', - 'junit.framework.TestCase' - ) -} - if (BuildParams.inFipsJvm) { tasks.named("test").configure { enabled = false diff --git a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateGenerateToolTests.java b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateGenerateToolTests.java index 695222d31a9e3..1faabcfd46fdb 100644 --- a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateGenerateToolTests.java +++ b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateGenerateToolTests.java @@ -87,7 +87,6 @@ /** * Unit tests for the tool used to simplify SSL certificate generation */ -// TODO baz - fix this to work in intellij+java9, its complaining about java.sql.Date not being on the classpath public class CertificateGenerateToolTests extends ESTestCase { private FileSystem jimfs; @@ -515,8 +514,8 @@ private void assertSubjAltNames(GeneralNames subjAltNames, CertificateInformatio assertThat(seq.getObjectAt(1), instanceOf(DLTaggedObject.class)); DLTaggedObject taggedName = (DLTaggedObject) seq.getObjectAt(1); assertThat(taggedName.getTagNo(), equalTo(0)); - 
assertThat(taggedName.getObject(), instanceOf(ASN1String.class)); - assertThat(taggedName.getObject().toString(), is(in(certInfo.commonNames))); + assertThat(taggedName.getBaseObject(), instanceOf(ASN1String.class)); + assertThat(taggedName.getBaseObject().toString(), is(in(certInfo.commonNames))); } else { fail("unknown general name with tag " + generalName.getTagNo()); } diff --git a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java index d10fb7ec85c0c..a0484de419fe7 100644 --- a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java +++ b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java @@ -970,8 +970,8 @@ private void assertSubjAltNames(GeneralNames subjAltNames, CertificateInformatio assertThat(seq.getObjectAt(0).toString(), equalTo(CN_OID)); assertThat(seq.getObjectAt(1), instanceOf(ASN1TaggedObject.class)); ASN1TaggedObject tagged = (ASN1TaggedObject) seq.getObjectAt(1); - assertThat(tagged.getObject(), instanceOf(ASN1String.class)); - assertThat(tagged.getObject().toString(), is(in(certInfo.commonNames))); + assertThat(tagged.getBaseObject(), instanceOf(ASN1String.class)); + assertThat(tagged.getBaseObject().toString(), is(in(certInfo.commonNames))); } else { fail("unknown general name with tag " + generalName.getTagNo()); } diff --git a/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/resources/jwk/README.md b/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/resources/jwk/README.md index a36129e2a13ed..daf62f92bb0fa 100644 --- a/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/resources/jwk/README.md +++ b/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/resources/jwk/README.md @@ -6,7 +6,7 @@ These files are created by running the tests in `JwtRealmGenerateTests`. Those tests generate the yaml settings, the keystore settings and the JWK Sets -for each sample reaml +for each sample realm. Copy the output from the test output into the applicable file (you may wish to run it through `jq` first in order to make it more readable). 
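The certificate-tool test hunks above track the API rename that came with the BouncyCastle jdk18on upgrade in the CLI build file: ASN1TaggedObject#getObject() is replaced by getBaseObject(), which returns the wrapped ASN.1 value. A minimal, self-contained sketch of the new accessor against bcprov-jdk18on; the class name, tag number, and "node01" payload here are illustrative only, not taken from the tests:

import org.bouncycastle.asn1.ASN1String;
import org.bouncycastle.asn1.ASN1TaggedObject;
import org.bouncycastle.asn1.DERTaggedObject;
import org.bouncycastle.asn1.DERUTF8String;

public class TaggedObjectAccessorSketch {
    public static void main(String[] args) {
        // An explicitly [0]-tagged UTF8String, similar in shape to the otherName
        // SAN entries the certificate tools assert on.
        ASN1TaggedObject tagged = new DERTaggedObject(true, 0, new DERUTF8String("node01"));
        // jdk15on style: tagged.getObject(); jdk18on style: tagged.getBaseObject()
        if (tagged.getBaseObject() instanceof ASN1String value) {
            System.out.println(value.getString()); // prints "node01"
        }
    }
}

The new bcutil-jdk18on dependency in the same build file belongs to this upgrade as well: the jdk18on line splits the shared ASN.1 and utility classes into that separate artifact, which bcpkix depends on.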
diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index 9f490792d800f..bdfb3bc14286c 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -123,6 +123,11 @@ public class Constants { "cluster:admin/xpack/ccr/auto_follow_pattern/put", "cluster:admin/xpack/ccr/pause_follow", "cluster:admin/xpack/ccr/resume_follow", + "cluster:admin/xpack/connector/delete", + "cluster:admin/xpack/connector/get", + "cluster:admin/xpack/connector/list", + "cluster:admin/xpack/connector/put", + "cluster:admin/xpack/connector/sync_job/post", "cluster:admin/xpack/deprecation/info", "cluster:admin/xpack/deprecation/nodes/info", "cluster:admin/xpack/enrich/delete", @@ -294,6 +299,7 @@ public class Constants { "cluster:monitor/ccr/follow_info", "cluster:monitor/ccr/follow_stats", "cluster:monitor/ccr/stats", + "cluster:monitor/data_stream/lifecycle/stats", "cluster:monitor/eql/async/status", "cluster:monitor/fetch/health/info", "cluster:monitor/health", @@ -302,6 +308,7 @@ public class Constants { "cluster:monitor/update/health/info", "cluster:monitor/ingest/geoip/stats", "cluster:monitor/main", + "cluster:monitor/nodes/data_tier_usage", "cluster:monitor/nodes/hot_threads", "cluster:monitor/nodes/info", "cluster:monitor/nodes/stats", @@ -521,6 +528,7 @@ public class Constants { "indices:data/read/xpack/rollup/get/index/caps", "indices:data/read/xpack/rollup/search", "indices:data/read/xpack/termsenum/list", + "indices:data/write/simulate/bulk", "indices:data/write/bulk", "indices:data/write/bulk[s]", "indices:data/write/bulk_shard_operations[s]", diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/QueryApiKeyIT.java b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/QueryApiKeyIT.java index 456fcbd80cf12..f79077ae3a550 100644 --- a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/QueryApiKeyIT.java +++ b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/QueryApiKeyIT.java @@ -129,7 +129,7 @@ public void testQuery() throws IOException { { "query": { "prefix": {"api_key_hash": "{PBKDF2}10000$"} } }"""); // Search for fields that are not allowed in Query DSL but used internally by the service itself - final String fieldName = randomFrom("doc_type", "api_key_invalidated"); + final String fieldName = randomFrom("doc_type", "api_key_invalidated", "invalidation_time"); assertQueryError(API_KEY_ADMIN_AUTH_HEADER, 400, Strings.format(""" { "query": { "term": {"%s": "%s"} } }""", fieldName, randomAlphaOfLengthBetween(3, 8))); @@ -164,6 +164,7 @@ public void testQuery() throws IOException { final String queryString = randomFrom(""" {"query": { "term": {"name": "temporary-key-1"} } }""", Strings.format(""" {"query":{"bool":{"must":[{"term":{"name":{"value":"temporary-key-1"}}},\ + {"range": {"invalidation": {"lte": "now"}}}, {"term":{"invalidated":{"value":"%s"}}}]}}} """, randomBoolean())); @@ -176,6 +177,7 @@ public void testQuery() throws IOException { 
assertThat(apiKeys.get(0).get("name"), equalTo("temporary-key-1")); assertThat(apiKeys.get(0).get("id"), equalTo(invalidatedApiKeyId1)); assertThat(apiKeys.get(0).get("invalidated"), is(true)); + assertThat(apiKeys.get(0).get("invalidation"), notNullValue()); } apiKeys.forEach(k -> assertThat(k, not(hasKey("_sort")))); }); @@ -386,6 +388,12 @@ public void testExistsQuery() throws IOException, InterruptedException { // Create an invalidated API key createAndInvalidateApiKey("test-exists-4", authHeader); + // Get the invalidated API key + assertQuery(authHeader, """ + {"query": {"exists": {"field": "invalidation" }}}""", apiKeys -> { + assertThat(apiKeys.stream().map(k -> (String) k.get("name")).toList(), containsInAnyOrder("test-exists-4")); + }); + // Ensure the short-lived key is expired final long elapsed = Instant.now().toEpochMilli() - startTime; if (elapsed < 10) { @@ -402,6 +410,11 @@ public void testExistsQuery() throws IOException, InterruptedException { "invalidated": false } }, + "must_not": { + "exists": { + "field": "invalidation" + } + }, "should": [ { "range": { diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/GetApiKeysRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/GetApiKeysRestIT.java index 0996463e9adb9..e9dc00acf3211 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/GetApiKeysRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/GetApiKeysRestIT.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.security.apikey; +import org.apache.http.client.methods.HttpDelete; import org.apache.http.client.methods.HttpGet; import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Request; @@ -20,6 +21,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.security.action.apikey.ApiKey; import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyResponse; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.security.SecurityOnTrialLicenseRestTestCase; import org.junit.Before; @@ -37,6 +39,8 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.emptyArray; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; public class GetApiKeysRestIT extends SecurityOnTrialLicenseRestTestCase { private static final SecureString END_USER_PASSWORD = new SecureString("end-user-password".toCharArray()); @@ -194,6 +198,41 @@ public void testGetApiKeysWithActiveOnlyFlagAndMultipleUsers() throws Exception ); } + public void testInvalidateApiKey() throws Exception { + final String apiKeyId0 = createApiKey(MANAGE_SECURITY_USER, "key-2"); + + Request request = new Request(HttpGet.METHOD_NAME, "/_security/api_key/"); + setUserForRequest(request, MANAGE_SECURITY_USER); + GetApiKeyResponse getApiKeyResponse = GetApiKeyResponse.fromXContent(getParser(client().performRequest(request))); + + assertThat(getApiKeyResponse.getApiKeyInfos().length, equalTo(1)); + ApiKey apiKey = getApiKeyResponse.getApiKeyInfos()[0]; + assertThat(apiKey.isInvalidated(), equalTo(false)); + assertThat(apiKey.getInvalidation(), nullValue()); + 
assertThat(apiKey.getId(), equalTo(apiKeyId0)); + + request = new Request(HttpDelete.METHOD_NAME, "/_security/api_key/"); + setUserForRequest(request, MANAGE_SECURITY_USER); + request.setJsonEntity(XContentTestUtils.convertToXContent(Map.of("ids", List.of(apiKeyId0)), XContentType.JSON).utf8ToString()); + + InvalidateApiKeyResponse invalidateApiKeyResponse = InvalidateApiKeyResponse.fromXContent( + getParser(client().performRequest(request)) + ); + + assertThat(invalidateApiKeyResponse.getInvalidatedApiKeys().size(), equalTo(1)); + assertThat(invalidateApiKeyResponse.getInvalidatedApiKeys().get(0), equalTo(apiKey.getId())); + + request = new Request(HttpGet.METHOD_NAME, "/_security/api_key/"); + setUserForRequest(request, MANAGE_SECURITY_USER); + getApiKeyResponse = GetApiKeyResponse.fromXContent(getParser(client().performRequest(request))); + + assertThat(getApiKeyResponse.getApiKeyInfos().length, equalTo(1)); + apiKey = getApiKeyResponse.getApiKeyInfos()[0]; + assertThat(apiKey.isInvalidated(), equalTo(true)); + assertThat(apiKey.getInvalidation(), notNullValue()); + assertThat(apiKey.getId(), equalTo(apiKeyId0)); + } + private GetApiKeyResponse getApiKeysWithRequestParams(Map requestParams) throws IOException { return getApiKeysWithRequestParams(MANAGE_SECURITY_USER, requestParams); } diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/crossclusteraccess/CrossClusterAccessHeadersForCcsRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/crossclusteraccess/CrossClusterAccessHeadersForCcsRestIT.java index 9788042ac5ece..97b52a699749e 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/crossclusteraccess/CrossClusterAccessHeadersForCcsRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/crossclusteraccess/CrossClusterAccessHeadersForCcsRestIT.java @@ -16,13 +16,13 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchShardsAction; import org.elasticsearch.action.search.SearchShardsRequest; import org.elasticsearch.action.search.SearchShardsResponse; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchShardsAction; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.cluster.ClusterName; @@ -1035,9 +1035,9 @@ private void expectActionsAndHeadersForCluster( ) throws IOException { final Set expectedActions = new HashSet<>(); if (minimizeRoundtrips) { - expectedActions.add(SearchAction.NAME); + expectedActions.add(TransportSearchAction.TYPE.name()); } else { - expectedActions.add(SearchShardsAction.NAME); + expectedActions.add(TransportSearchShardsAction.TYPE.name()); } if (false == useProxyMode) { expectedActions.add(RemoteClusterNodesAction.TYPE.name()); @@ -1066,7 +1066,7 @@ private void expectActionsAndHeadersForCluster( ); assertThat(actualCrossClusterAccessSubjectInfo, 
equalTo(expectedCrossClusterAccessSubjectInfo)); } - case SearchAction.NAME, SearchShardsAction.NAME -> { + case TransportSearchAction.NAME, TransportSearchShardsAction.NAME -> { assertContainsHeadersExpectedForCrossClusterAccess(actual.headers()); assertContainsCrossClusterAccessCredentialsHeader(encodedCredential, actual); final var actualCrossClusterAccessSubjectInfo = CrossClusterAccessSubjectInfo.decode( @@ -1132,7 +1132,7 @@ private static MockTransportService startTransport( } ); service.registerRequestHandler( - SearchShardsAction.NAME, + TransportSearchShardsAction.TYPE.name(), EsExecutors.DIRECT_EXECUTOR_SERVICE, SearchShardsRequest::new, (request, channel, task) -> { @@ -1143,7 +1143,7 @@ private static MockTransportService startTransport( } ); service.registerRequestHandler( - SearchAction.NAME, + TransportSearchAction.TYPE.name(), EsExecutors.DIRECT_EXECUTOR_SERVICE, SearchRequest::new, (request, channel, task) -> { diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithRemoteIndicesPrivilegesRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithRemoteIndicesPrivilegesRestIT.java index 9fc236ff88597..4e3a520678f70 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithRemoteIndicesPrivilegesRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithRemoteIndicesPrivilegesRestIT.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.security.role; -import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; @@ -101,7 +101,7 @@ public void testRemoteIndexPrivileges() throws IOException { ); final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(searchRequest)); assertEquals(403, e.getResponse().getStatusLine().getStatusCode()); - assertThat(e.getMessage(), containsString("action [" + SearchAction.NAME + "] is unauthorized for user")); + assertThat(e.getMessage(), containsString("action [" + TransportSearchAction.TYPE.name() + "] is unauthorized for user")); // Add local privileges and check local authorization works putRoleRequest = new Request("PUT", "_security/role/" + REMOTE_SEARCH_ROLE); diff --git a/x-pack/plugin/security/qa/smoke-test-all-realms/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/JwtRealmAuthIT.java b/x-pack/plugin/security/qa/smoke-test-all-realms/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/JwtRealmAuthIT.java index fb761079dc8e6..0c0218c51bacc 100644 --- a/x-pack/plugin/security/qa/smoke-test-all-realms/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/JwtRealmAuthIT.java +++ b/x-pack/plugin/security/qa/smoke-test-all-realms/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/JwtRealmAuthIT.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import org.elasticsearch.xpack.security.authc.jwt.JwtRealm; import java.io.IOException; @@ -34,7 +35,7 @@ public void testAuthenticationUsingJwtRealm() throws IOException { final 
RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder() .addHeader( JwtRealm.HEADER_CLIENT_AUTHENTICATION, - JwtRealm.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME + " " + HEADER_CLIENT_SECRET + JwtRealmSettings.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME + " " + HEADER_CLIENT_SECRET ) .addHeader(JwtRealm.HEADER_END_USER_AUTHENTICATION, JwtRealm.HEADER_END_USER_AUTHENTICATION_SCHEME + " " + HEADER_JWT); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/BulkUpdateTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/BulkUpdateTests.java index 14e9056569533..4c49b7a0a3c14 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/BulkUpdateTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/BulkUpdateTests.java @@ -43,7 +43,7 @@ public Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { public void testThatBulkUpdateDoesNotLoseFields() { assertEquals( DocWriteResponse.Result.CREATED, - client().prepareIndex("index1").setSource("{\"test\": \"test\"}", XContentType.JSON).setId("1").get().getResult() + prepareIndex("index1").setSource("{\"test\": \"test\"}", XContentType.JSON).setId("1").get().getResult() ); GetResponse getResponse = client().prepareGet("index1", "1").get(); assertEquals("test", getResponse.getSource().get("test")); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java index 4b7a9f46431a5..9a68ac06d4d19 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java @@ -72,6 +72,7 @@ import java.util.concurrent.TimeUnit; import java.util.function.Supplier; +import static org.elasticsearch.cluster.metadata.ClusterChangedEventUtils.indicesCreated; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.backingIndexEqualTo; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.anyOf; @@ -151,11 +152,11 @@ public void testDownsamplingAuthorized() throws Exception { Set witnessedDownsamplingIndices = new HashSet<>(); clusterService().addListener(event -> { - if (event.indicesCreated().contains(firstRoundDownsamplingIndex) + if (indicesCreated(event).contains(firstRoundDownsamplingIndex) || event.indicesDeleted().stream().anyMatch(index -> index.getName().equals(firstRoundDownsamplingIndex))) { witnessedDownsamplingIndices.add(firstRoundDownsamplingIndex); } - if (event.indicesCreated().contains(secondRoundDownsamplingIndex)) { + if (indicesCreated(event).contains(secondRoundDownsamplingIndex)) { witnessedDownsamplingIndices.add(secondRoundDownsamplingIndex); } }); @@ -203,7 +204,7 @@ public void testSystemDataStreamConfigurationWithDownsampling() throws Exception Set witnessedDownsamplingIndices = new HashSet<>(); clusterService().addListener(event -> { - if (event.indicesCreated().contains(secondRoundDownsamplingIndex)) { + if (indicesCreated(event).contains(secondRoundDownsamplingIndex)) { witnessedDownsamplingIndices.add(secondRoundDownsamplingIndex); } }); @@ -244,7 +245,7 @@ private Map 
collectErrorsFromStoreAsMap() { Map indicesAndErrors = new HashMap<>(); for (DataStreamLifecycleService lifecycleService : lifecycleServices) { DataStreamLifecycleErrorStore errorStore = lifecycleService.getErrorStore(); - List allIndices = errorStore.getAllIndices(); + Set allIndices = errorStore.getAllIndices(); for (var index : allIndices) { ErrorEntry error = errorStore.getError(index); if (error != null) { @@ -336,16 +337,12 @@ private void putComposableIndexTemplate( ) { PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); request.indexTemplate( - new ComposableIndexTemplate( - patterns, - new Template(settings, mappings, null, lifecycle), - null, - null, - null, - metadata, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(patterns) + .template(new Template(settings, mappings, null, lifecycle)) + .metadata(metadata) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client.execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); } @@ -440,15 +437,11 @@ public Collection getSystemDataStreamDescriptors() { SYSTEM_DATA_STREAM_NAME, "a system data stream for testing", SystemDataStreamDescriptor.Type.EXTERNAL, - new ComposableIndexTemplate( - List.of(SYSTEM_DATA_STREAM_NAME), - new Template(settings.build(), getTSDBMappings(), null, LIFECYCLE), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate() - ), + ComposableIndexTemplate.builder() + .indexPatterns(List.of(SYSTEM_DATA_STREAM_NAME)) + .template(new Template(settings.build(), getTSDBMappings(), null, LIFECYCLE)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(), Map.of(), Collections.singletonList("test"), new ExecutorNames( diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleServiceRuntimeSecurityIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleServiceRuntimeSecurityIT.java index 93cd03060842b..f5349cac99ed7 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleServiceRuntimeSecurityIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleServiceRuntimeSecurityIT.java @@ -48,6 +48,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.concurrent.ExecutionException; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.backingIndexEqualTo; @@ -168,7 +169,7 @@ private Map collectErrorsFromStoreAsMap() { Map indicesAndErrors = new HashMap<>(); for (DataStreamLifecycleService lifecycleService : lifecycleServices) { DataStreamLifecycleErrorStore errorStore = lifecycleService.getErrorStore(); - List allIndices = errorStore.getAllIndices(); + Set allIndices = errorStore.getAllIndices(); for (var index : allIndices) { ErrorEntry error = errorStore.getError(index); if (error != null) { @@ -217,16 +218,12 @@ private static void putComposableIndexTemplate( ) throws IOException { PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); request.indexTemplate( - new ComposableIndexTemplate( - patterns, - new Template(settings, mappings == null ? 
null : CompressedXContent.fromJSON(mappings), null, lifecycle), - null, - null, - null, - metadata, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(patterns) + .template(new Template(settings, mappings == null ? null : CompressedXContent.fromJSON(mappings), null, lifecycle)) + .metadata(metadata) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); } @@ -260,15 +257,11 @@ public Collection getSystemDataStreamDescriptors() { SYSTEM_DATA_STREAM_NAME, "a system data stream for testing", SystemDataStreamDescriptor.Type.EXTERNAL, - new ComposableIndexTemplate( - List.of(SYSTEM_DATA_STREAM_NAME), - new Template(Settings.EMPTY, null, null, DataStreamLifecycle.newBuilder().dataRetention(0).build()), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate() - ), + ComposableIndexTemplate.builder() + .indexPatterns(List.of(SYSTEM_DATA_STREAM_NAME)) + .template(new Template(Settings.EMPTY, null, null, DataStreamLifecycle.newBuilder().dataRetention(0).build())) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(), Map.of(), Collections.singletonList("test"), new ExecutorNames(ThreadPool.Names.SYSTEM_CRITICAL_READ, ThreadPool.Names.SYSTEM_READ, ThreadPool.Names.SYSTEM_WRITE) diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamSecurityIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamSecurityIT.java index 58d33fc221b21..1e67ae572e4ff 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamSecurityIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamSecurityIT.java @@ -60,16 +60,10 @@ public void testRemoveGhostReference() throws Exception { var putTemplateRequest = new PutComposableIndexTemplateAction.Request("id"); putTemplateRequest.indexTemplate( - new ComposableIndexTemplate( - List.of("logs-*"), - null, - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs-*")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); assertAcked(client.execute(PutComposableIndexTemplateAction.INSTANCE, putTemplateRequest).actionGet()); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java index 3a0fb370ac0d2..7aeaccf63bab4 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.get.MultiGetResponse; -import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.update.UpdateResponse; @@ -26,6 +25,7 @@ import static 
org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.NONE; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; @@ -80,10 +80,10 @@ public void testDateMathExpressionsCanBeAuthorized() throws Exception { SearchResponse searchResponse = client.prepareSearch(expression).setQuery(QueryBuilders.matchAllQuery()).get(); assertThat(searchResponse.getHits().getTotalHits().value, is(1L)); - MultiSearchResponse multiSearchResponse = client.prepareMultiSearch() - .add(client.prepareSearch(expression).setQuery(QueryBuilders.matchAllQuery()).request()) - .get(); - assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L)); + assertResponse( + client.prepareMultiSearch().add(client.prepareSearch(expression).setQuery(QueryBuilders.matchAllQuery()).request()), + multiSearchResponse -> assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L)) + ); UpdateResponse updateResponse = client.prepareUpdate(expression, response.getId()) .setDoc(Requests.INDEX_CONTENT_TYPE, "new", "field") diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java index 7c33c69460768..0e8cb486ffb2d 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java @@ -48,7 +48,6 @@ import static org.elasticsearch.test.SecuritySettingsSource.TEST_PASSWORD_HASHED; import static org.elasticsearch.test.SecuritySettingsSourceField.TEST_PASSWORD; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.equalTo; @@ -482,7 +481,4 @@ private void assertCacheState(String index, long expectedHits, long expectedMiss ); } - private void clearCache() { - assertNoFailures(client().admin().indices().prepareClearCache(DLS_INDEX, FLS_INDEX, INDEX).setRequestCache(true).get()); - } } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentAndFieldLevelSecurityTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentAndFieldLevelSecurityTests.java index 9714805e3db96..164d28216ea93 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentAndFieldLevelSecurityTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentAndFieldLevelSecurityTests.java @@ -110,8 +110,8 @@ public Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { public void testSimpleQuery() { assertAcked(indicesAdmin().prepareCreate("test").setMapping("id", "type=keyword", "field1", "type=text", "field2", "type=text")); - client().prepareIndex("test").setId("1").setSource("id", 
"1", "field1", "value1").setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex("test").setId("2").setSource("id", "2", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("id", "1", "field1", "value1").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("2").setSource("id", "2", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); SearchResponse response = client().filterWithHeader( Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) @@ -148,7 +148,7 @@ public void testUpdatesAreRejected() { .setMapping("id", "type=keyword", "field1", "type=text", "field2", "type=text") .setSettings(indexSettings(1, 0)) ); - client().prepareIndex(indexName).setId("1").setSource("id", "1", "field1", "value1").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(indexName).setId("1").setSource("id", "1", "field1", "value1").setRefreshPolicy(IMMEDIATE).get(); ElasticsearchSecurityException exception = expectThrows(ElasticsearchSecurityException.class, () -> { client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) @@ -178,8 +178,8 @@ public void testUpdatesAreRejected() { public void testDLSIsAppliedBeforeFLS() { assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=text", "field2", "type=text")); - client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value1").setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex("test").setId("2").setSource("field1", "value2", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value1").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("2").setSource("field1", "value2", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); SearchResponse response = client().filterWithHeader( Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD)) @@ -203,8 +203,8 @@ public void testQueryCache() { .setSettings(Settings.builder().put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true)) .setMapping("id", "type=keyword", "field1", "type=text", "field2", "type=text") ); - client().prepareIndex("test").setId("1").setSource("id", "1", "field1", "value1").setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex("test").setId("2").setSource("id", "2", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("id", "1", "field1", "value1").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("2").setSource("id", "2", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); // Both users have the same role query, but user3 has access to field2 and not field1, which should result in zero hits: int max = scaledRandomIntBetween(4, 32); @@ -256,8 +256,8 @@ public void testQueryCache() { public void testGetMappingsIsFiltered() { assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=text", "field2", "type=text")); - client().prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex("test").setId("2").setSource("field2", "value2").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("2").setSource("field2", "value2").setRefreshPolicy(IMMEDIATE).get(); { GetMappingsResponse getMappingsResponse = 
client().filterWithHeader( @@ -290,8 +290,8 @@ public void testGetMappingsIsFiltered() { public void testGetIndexMappingsIsFiltered() { assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=text", "field2", "type=text")); - client().prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex("test").setId("2").setSource("field2", "value2").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("2").setSource("field2", "value2").setRefreshPolicy(IMMEDIATE).get(); { GetIndexResponse getIndexResponse = client().filterWithHeader( @@ -321,8 +321,8 @@ public void testGetIndexMappingsIsFiltered() { public void testGetFieldMappingsIsFiltered() { assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=text", "field2", "type=text")); - client().prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex("test").setId("2").setSource("field2", "value2").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("2").setSource("field2", "value2").setRefreshPolicy(IMMEDIATE).get(); { GetFieldMappingsResponse getFieldMappingsResponse = client().filterWithHeader( @@ -364,8 +364,8 @@ public void testGetFieldMappingsIsFiltered() { public void testFieldCapabilitiesIsFiltered() { assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=text", "field2", "type=text")); - client().prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex("test").setId("2").setSource("field2", "value2").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("2").setSource("field2", "value2").setRefreshPolicy(IMMEDIATE).get(); { FieldCapabilitiesRequest fieldCapabilitiesRequest = new FieldCapabilitiesRequest().fields("*").indices("test"); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityFeatureUsageTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityFeatureUsageTests.java index 2eca81ab9c2fb..116e94cafcadf 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityFeatureUsageTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityFeatureUsageTests.java @@ -85,9 +85,9 @@ public Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { public void testDlsFeatureUsageTracking() throws Exception { assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=text", "field2", "type=text", "field3", "type=text")); - client().prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex("test").setId("2").setSource("field2", "value2").setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex("test").setId("3").setSource("field3", "value3").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("2").setSource("field2", 
"value2").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("3").setSource("field3", "value3").setRefreshPolicy(IMMEDIATE).get(); SearchResponse response = internalCluster().coordOnlyNodeClient() .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) @@ -105,8 +105,8 @@ public void testDlsFeatureUsageTracking() throws Exception { public void testDlsFlsFeatureUsageNotTracked() { assertAcked(indicesAdmin().prepareCreate("test").setMapping("id", "type=keyword", "field1", "type=text", "field2", "type=text")); - client().prepareIndex("test").setId("1").setSource("id", "1", "field1", "value1").setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex("test").setId("2").setSource("id", "2", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("id", "1", "field1", "value1").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("2").setSource("id", "2", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); // Running a search with user2 (which has role3 without DLS/FLS) should not trigger feature tracking. SearchResponse response = internalCluster().coordOnlyNodeClient() diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityRandomTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityRandomTests.java index b5e5183df086d..61126810e3df1 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityRandomTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityRandomTests.java @@ -98,7 +98,7 @@ public void testDuelWithAliasFilters() throws Exception { IndicesAliasesRequestBuilder builder = indicesAdmin().prepareAliases(); for (int i = 1; i <= numberOfRoles; i++) { String value = "value" + i; - requests.add(client().prepareIndex("test").setId(value).setSource("field1", value)); + requests.add(prepareIndex("test").setId(value).setSource("field1", value)); builder.addAlias("test", "alias" + i, QueryBuilders.termQuery("field1", value)); } indexRandom(true, requests); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java index a76b043737375..e42fab4708b8a 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java @@ -14,11 +14,10 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.get.MultiGetResponse; -import org.elasticsearch.action.search.ClosePointInTimeAction; import org.elasticsearch.action.search.ClosePointInTimeRequest; -import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportClosePointInTimeAction; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; import org.elasticsearch.action.termvectors.TermVectorsRequest; @@ -94,6 +93,7 @@ import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; @@ -204,9 +204,9 @@ public Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { public void testSimpleQuery() throws Exception { assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=text", "field2", "type=text", "field3", "type=text")); - client().prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex("test").setId("2").setSource("field2", "value2").setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex("test").setId("3").setSource("field3", "value3").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("2").setSource("field2", "value2").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("3").setSource("field3", "value3").setRefreshPolicy(IMMEDIATE).get(); assertSearchHitsWithoutFailures( client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) @@ -236,9 +236,9 @@ public void testSimpleQuery() throws Exception { public void testGetApi() throws Exception { assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=text", "field2", "type=text", "field3", "type=text")); - client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); - client().prepareIndex("test").setId("2").setSource("field2", "value2").get(); - client().prepareIndex("test").setId("3").setSource("field3", "value3").get(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); + prepareIndex("test").setId("2").setSource("field2", "value2").get(); + prepareIndex("test").setId("3").setSource("field3", "value3").get(); // test documents users can see boolean realtime = randomBoolean(); @@ -302,8 +302,8 @@ public void testRealtimeGetApi() { final boolean realtime = true; final boolean refresh = false; - client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); - client().prepareIndex("test").setId("2").setSource("field2", "value2").get(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); + prepareIndex("test").setId("2").setSource("field2", "value2").get(); // do a realtime get beforehand to flip an internal translog flag so that subsequent realtime gets are // served from the translog (this first one is NOT, it internally forces a refresh of the index) client().prepareMultiGet().add("test", "1").add("test", "2").setRealtime(realtime).setRefresh(refresh).get(); @@ -311,13 +311,11 @@ public void testRealtimeGetApi() { // updates don't change the doc visibility for users // but updates populate the translog and the DLS filter must apply to the translog operations as well if (randomBoolean()) { - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource("field1", "value1", "field3", "value3") .setRefreshPolicy(WriteRequest.RefreshPolicy.NONE) .get(); - 
client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource("field2", "value2", "field3", "value3") .setRefreshPolicy(WriteRequest.RefreshPolicy.NONE) .get(); @@ -369,9 +367,9 @@ public void testRealtimeGetApi() { public void testMGetApi() throws Exception { assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=text", "field2", "type=text", "field3", "type=text")); - client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); - client().prepareIndex("test").setId("2").setSource("field2", "value2").get(); - client().prepareIndex("test").setId("3").setSource("field3", "value3").get(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); + prepareIndex("test").setId("2").setSource("field2", "value2").get(); + prepareIndex("test").setId("3").setSource("field3", "value3").get(); boolean realtime = randomBoolean(); MultiGetResponse response = client().filterWithHeader( @@ -443,76 +441,89 @@ public void testMSearch() throws Exception { .setMapping("field1", "type=text", "field2", "type=text", "field3", "type=text", "id", "type=integer") ); - client().prepareIndex("test1").setId("1").setSource("field1", "value1", "id", 1).get(); - client().prepareIndex("test1").setId("2").setSource("field2", "value2", "id", 2).get(); - client().prepareIndex("test1").setId("3").setSource("field3", "value3", "id", 3).get(); - client().prepareIndex("test2").setId("1").setSource("field1", "value1", "id", 1).get(); - client().prepareIndex("test2").setId("2").setSource("field2", "value2", "id", 2).get(); - client().prepareIndex("test2").setId("3").setSource("field3", "value3", "id", 3).get(); + prepareIndex("test1").setId("1").setSource("field1", "value1", "id", 1).get(); + prepareIndex("test1").setId("2").setSource("field2", "value2", "id", 2).get(); + prepareIndex("test1").setId("3").setSource("field3", "value3", "id", 3).get(); + prepareIndex("test2").setId("1").setSource("field1", "value1", "id", 1).get(); + prepareIndex("test2").setId("2").setSource("field2", "value2", "id", 2).get(); + prepareIndex("test2").setId("3").setSource("field3", "value3", "id", 3).get(); indicesAdmin().prepareRefresh("test1", "test2").get(); - MultiSearchResponse response = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) - ) - .prepareMultiSearch() - .add(prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery())) - .add(prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery())) - .get(); - assertFalse(response.getResponses()[0].isFailure()); - assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L)); - assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2)); - assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); - assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("id"), is(1)); - - assertFalse(response.getResponses()[1].isFailure()); - assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits().value, is(1L)); - assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2)); - assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); - assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("id"), is(1)); - - response = 
client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - .prepareMultiSearch() - .add(prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery())) - .add(prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery())) - .get(); - assertFalse(response.getResponses()[0].isFailure()); - assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L)); - assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2)); - assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2")); - assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("id"), is(2)); - - assertFalse(response.getResponses()[1].isFailure()); - assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits().value, is(1L)); - assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2)); - assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2")); - assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("id"), is(2)); - - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) - .prepareMultiSearch() - .add( - prepareSearch("test1").addSort(SortBuilders.fieldSort("id").sortMode(SortMode.MIN)).setQuery(QueryBuilders.matchAllQuery()) - ) - .add( - prepareSearch("test2").addSort(SortBuilders.fieldSort("id").sortMode(SortMode.MIN)).setQuery(QueryBuilders.matchAllQuery()) - ) - .get(); - assertFalse(response.getResponses()[0].isFailure()); - assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(2L)); - assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2)); - assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); - assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("id"), is(1)); - assertThat(response.getResponses()[0].getResponse().getHits().getAt(1).getSourceAsMap().size(), is(2)); - assertThat(response.getResponses()[0].getResponse().getHits().getAt(1).getSourceAsMap().get("field2"), is("value2")); - assertThat(response.getResponses()[0].getResponse().getHits().getAt(1).getSourceAsMap().get("id"), is(2)); - - assertFalse(response.getResponses()[1].isFailure()); - assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits().value, is(2L)); - assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2)); - assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); - assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("id"), is(1)); - assertThat(response.getResponses()[1].getResponse().getHits().getAt(1).getSourceAsMap().size(), is(2)); - assertThat(response.getResponses()[1].getResponse().getHits().getAt(1).getSourceAsMap().get("field2"), is("value2")); - assertThat(response.getResponses()[1].getResponse().getHits().getAt(1).getSourceAsMap().get("id"), is(2)); + { + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareMultiSearch() + 
.add(prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery())) + .add(prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery())), + response -> { + assertFalse(response.getResponses()[0].isFailure()); + assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("id"), is(1)); + + assertFalse(response.getResponses()[1].isFailure()); + assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits().value, is(1L)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("id"), is(1)); + } + ); + } + { + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareMultiSearch() + .add(prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery())) + .add(prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery())), + response -> { + assertFalse(response.getResponses()[0].isFailure()); + assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2")); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("id"), is(2)); + + assertFalse(response.getResponses()[1].isFailure()); + assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits().value, is(1L)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2")); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("id"), is(2)); + } + ); + } + { + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareMultiSearch() + .add( + prepareSearch("test1").addSort(SortBuilders.fieldSort("id").sortMode(SortMode.MIN)) + .setQuery(QueryBuilders.matchAllQuery()) + ) + .add( + prepareSearch("test2").addSort(SortBuilders.fieldSort("id").sortMode(SortMode.MIN)) + .setQuery(QueryBuilders.matchAllQuery()) + ), + response -> { + assertFalse(response.getResponses()[0].isFailure()); + assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(2L)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("id"), is(1)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(1).getSourceAsMap().size(), is(2)); + 
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(1).getSourceAsMap().get("field2"), is("value2"));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(1).getSourceAsMap().get("id"), is(2));
+
+                    assertFalse(response.getResponses()[1].isFailure());
+                    assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits().value, is(2L));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("id"), is(1));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(1).getSourceAsMap().size(), is(2));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(1).getSourceAsMap().get("field2"), is("value2"));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(1).getSourceAsMap().get("id"), is(2));
+                }
+            );
+        }
     }
 
     public void testPercolateQueryWithIndexedDocWithDLS() {
@@ -521,14 +532,12 @@ public void testPercolateQueryWithIndexedDocWithDLS() {
             .setMapping("message", "type=text", "query", "type=percolator", "field1", "type=text", "field2", "type=text")
         );
         assertAcked(indicesAdmin().prepareCreate("doc_index").setMapping("message", "type=text", "field1", "type=text"));
-        client().prepareIndex("query_index")
-            .setId("1")
+        prepareIndex("query_index").setId("1")
             .setSource("""
                 {"field1": "value1", "field2": "value2", "query": {"match": {"message": "bonsai tree"}}}""", XContentType.JSON)
             .setRefreshPolicy(IMMEDIATE)
             .get();
-        client().prepareIndex("doc_index")
-            .setId("1")
+        prepareIndex("doc_index").setId("1")
            .setSource("{\"field1\": \"value1\", \"message\": \"A new bonsai tree in the office\"}", XContentType.JSON)
            .setRefreshPolicy(IMMEDIATE)
            .get();
@@ -565,14 +574,12 @@ public void testGeoQueryWithIndexedShapeWithDLS() {
             indicesAdmin().prepareCreate("shape_index")
                 .setMapping("shape_field", "type=shape", "field1", "type=text", "field2", "type=text")
         );
-        client().prepareIndex("search_index")
-            .setId("1")
+        prepareIndex("search_index").setId("1")
            .setSource("""
                {"field1": "value1", "field2": "value2", "search_field": { "type": "point", "coordinates":[1, 1] }}""", XContentType.JSON)
            .setRefreshPolicy(IMMEDIATE)
            .get();
-        client().prepareIndex("shape_index")
-            .setId("1")
+        prepareIndex("shape_index").setId("1")
            .setSource("""
                {"field1": "value1", "shape_field": { "type": "envelope", "coordinates": [[0, 2], [2, 0]]}}""", XContentType.JSON)
            .setRefreshPolicy(IMMEDIATE)
            .get();
@@ -631,38 +638,28 @@ public void testTermsLookupOnIndexWithDLS() {
             indicesAdmin().prepareCreate("lookup_index")
                 .setMapping("lookup_field", "type=keyword", "field1", "type=text", "field2", "type=text")
         );
-        client().prepareIndex("search_index")
-            .setId("1")
+        prepareIndex("search_index").setId("1")
            .setSource("field1", "value1", "search_field", List.of("value1", "value2", "value3"))
            .setRefreshPolicy(IMMEDIATE)
            .get();
-        client().prepareIndex("search_index")
-            .setId("2")
+        prepareIndex("search_index").setId("2")
            .setSource("field1", "value1", "field2", "value2", "search_field", List.of("value1", "value2"))
            .setRefreshPolicy(IMMEDIATE)
            .get();
-        client().prepareIndex("search_index")
-            .setId("3")
+        prepareIndex("search_index").setId("3")
            .setSource("field1", "value1", "field2", "value1", "search_field", "value1")
            .setRefreshPolicy(IMMEDIATE)
            .get();
-        client().prepareIndex("search_index")
-            .setId("4")
-            .setSource("field2", "value2", "search_field", "value1")
-            .setRefreshPolicy(IMMEDIATE)
-            .get();
-        client().prepareIndex("search_index")
-            .setId("5")
+        prepareIndex("search_index").setId("4").setSource("field2", "value2", "search_field", "value1").setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("search_index").setId("5")
            .setSource("field2", "value2", "search_field", List.of("value1", "value2"))
            .setRefreshPolicy(IMMEDIATE)
            .get();
-        client().prepareIndex("lookup_index")
-            .setId("1")
+        prepareIndex("lookup_index").setId("1")
            .setSource("field1", "value1", "field2", "value1", "lookup_field", List.of("value1", "value2"))
            .setRefreshPolicy(IMMEDIATE)
            .get();
-        client().prepareIndex("lookup_index")
-            .setId("2")
+        prepareIndex("lookup_index").setId("2")
            .setSource("field1", "value2", "field2", "value2", "lookup_field", List.of("value2"))
            .setRefreshPolicy(IMMEDIATE)
            .get();
@@ -732,9 +729,9 @@ public void testTVApi() throws Exception {
                 "type=text,term_vector=with_positions_offsets_payloads"
             )
         );
-        client().prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get();
-        client().prepareIndex("test").setId("2").setSource("field2", "value2").setRefreshPolicy(IMMEDIATE).get();
-        client().prepareIndex("test").setId("3").setSource("field3", "value3").setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("test").setId("2").setSource("field2", "value2").setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("test").setId("3").setSource("field3", "value3").setRefreshPolicy(IMMEDIATE).get();
 
         boolean realtime = randomBoolean();
         TermVectorsResponse response = client().filterWithHeader(
@@ -795,9 +792,9 @@ public void testMTVApi() throws Exception {
                 "type=text,term_vector=with_positions_offsets_payloads"
             )
         );
-        client().prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get();
-        client().prepareIndex("test").setId("2").setSource("field2", "value2").setRefreshPolicy(IMMEDIATE).get();
-        client().prepareIndex("test").setId("3").setSource("field3", "value3").setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("test").setId("2").setSource("field2", "value2").setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("test").setId("3").setSource("field3", "value3").setRefreshPolicy(IMMEDIATE).get();
 
         boolean realtime = randomBoolean();
         MultiTermVectorsResponse response = client().filterWithHeader(
@@ -864,8 +861,8 @@ public void testKnnSearch() throws Exception {
 
         assertAcked(indicesAdmin().prepareCreate("test").setSettings(indexSettings).setMapping(builder));
         for (int i = 0; i < 5; i++) {
-            client().prepareIndex("test").setSource("field1", "value1", "other", "valueA", "vector", new float[] { i, i, i }).get();
-            client().prepareIndex("test").setSource("field2", "value2", "other", "valueB", "vector", new float[] { i, i, i }).get();
+            prepareIndex("test").setSource("field1", "value1", "other", "valueA", "vector", new float[] { i, i, i }).get();
+            prepareIndex("test").setSource("field2", "value2", "other", "valueB", "vector", new float[] { i, i, i }).get();
         }
         indicesAdmin().prepareRefresh("test").get();
 
@@ -917,9 +914,9 @@ public void testGlobalAggregation() throws Exception {
             indicesAdmin().prepareCreate("test")
                 .setMapping("field1", "type=text", "field2", "type=text,fielddata=true", "field3", "type=text")
         );
-        client().prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get();
-        client().prepareIndex("test").setId("2").setSource("field2", "value2").setRefreshPolicy(IMMEDIATE).get();
-        client().prepareIndex("test").setId("3").setSource("field3", "value3").setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("test").setId("2").setSource("field2", "value2").setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("test").setId("3").setSource("field3", "value3").setRefreshPolicy(IMMEDIATE).get();
 
         SearchResponse response = prepareSearch("test").addAggregation(
             AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("field2").field("field2"))
@@ -997,7 +994,7 @@ public void testParentChild() throws Exception {
         ensureGreen();
 
         // index simple data
-        client().prepareIndex("test").setId("p1").setSource("join_field", "parent", "field1", "value1").get();
+        prepareIndex("test").setId("p1").setSource("join_field", "parent", "field1", "value1").get();
 
         Map<String, Object> source = new HashMap<>();
         source.put("field2", "value2");
@@ -1006,14 +1003,14 @@
         joinField.put("name", "child");
         joinField.put("parent", "p1");
         source.put("join_field", joinField);
-        client().prepareIndex("test").setId("c1").setSource(source).setRouting("p1").get();
+        prepareIndex("test").setId("c1").setSource(source).setRouting("p1").get();
         source.put("id", "c2");
-        client().prepareIndex("test").setId("c2").setSource(source).setRouting("p1").get();
+        prepareIndex("test").setId("c2").setSource(source).setRouting("p1").get();
         source = new HashMap<>();
         source.put("field3", "value3");
         source.put("join_field", joinField);
         source.put("id", "c3");
-        client().prepareIndex("test").setId("c3").setSource(source).setRouting("p1").get();
+        prepareIndex("test").setId("c3").setSource(source).setRouting("p1").get();
         refresh();
         verifyParentChild();
     }
@@ -1087,12 +1084,12 @@ public void testScroll() throws Exception {
         final int numInVisible = scaledRandomIntBetween(2, 10);
         int id = 1;
         for (int i = 0; i < numVisible; i++) {
-            client().prepareIndex("test").setId(String.valueOf(id++)).setSource("field1", "value1").get();
+            prepareIndex("test").setId(String.valueOf(id++)).setSource("field1", "value1").get();
         }
 
         for (int i = 0; i < numInVisible; i++) {
-            client().prepareIndex("test").setId(String.valueOf(id++)).setSource("field2", "value2").get();
-            client().prepareIndex("test").setId(String.valueOf(id++)).setSource("field3", "value3").get();
+            prepareIndex("test").setId(String.valueOf(id++)).setSource("field2", "value2").get();
+            prepareIndex("test").setId(String.valueOf(id++)).setSource("field3", "value3").get();
         }
 
         refresh();
@@ -1138,12 +1135,12 @@ public void testReaderId() throws Exception {
         final int numInvisible = scaledRandomIntBetween(2, 10);
         int id = 1;
         for (int i = 0; i < numVisible; i++) {
-            client().prepareIndex("test").setId(String.valueOf(id++)).setSource("field1", "value1").get();
+            prepareIndex("test").setId(String.valueOf(id++)).setSource("field1", "value1").get();
        }
 
        for (int i = 0; i < numInvisible; i++) {
-            client().prepareIndex("test").setId(String.valueOf(id++)).setSource("field2", "value2").get();
-            client().prepareIndex("test").setId(String.valueOf(id++)).setSource("field3", "value3").get();
+            prepareIndex("test").setId(String.valueOf(id++)).setSource("field2", "value2").get();
+            prepareIndex("test").setId(String.valueOf(id++)).setSource("field3", "value3").get();
        }
 
        refresh();
@@ -1166,7 +1163,7 @@ public void testReaderId() throws Exception {
                 assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
             }
         } finally {
-            client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(response.pointInTimeId())).actionGet();
+            client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(response.pointInTimeId())).actionGet();
         }
     }
 
@@ -1176,9 +1173,9 @@
             .setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true))
             .setMapping("field1", "type=text", "field2", "type=text", "field3", "type=text")
         );
-        client().prepareIndex("test").setId("1").setSource("field1", "value1").get();
-        client().prepareIndex("test").setId("2").setSource("field2", "value2").get();
-        client().prepareIndex("test").setId("3").setSource("field3", "value3").get();
+        prepareIndex("test").setId("1").setSource("field1", "value1").get();
+        prepareIndex("test").setId("2").setSource("field2", "value2").get();
+        prepareIndex("test").setId("3").setSource("field3", "value3").get();
         refresh();
 
         int max = scaledRandomIntBetween(4, 32);
@@ -1210,7 +1207,7 @@ public void testUpdateApiIsBlocked() throws Exception {
         assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=text", "field2", "type=text"));
-        client().prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get();
 
         // With document level security enabled the update is not allowed:
         try {
@@ -1252,8 +1249,7 @@ public void testUpdateApiIsBlocked() throws Exception {
 
     public void testNestedInnerHits() throws Exception {
         assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=text", "nested_field", "type=nested"));
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
            .setSource(
                jsonBuilder().startObject()
                    .field("field1", "value1")
@@ -1268,8 +1264,7 @@ public void testNestedInnerHits() throws Exception {
                    .endObject()
            )
            .get();
-        client().prepareIndex("test")
-            .setId("2")
+        prepareIndex("test").setId("2")
            .setSource(
                jsonBuilder().startObject()
                    .field("field1", "value2")
@@ -1314,8 +1309,7 @@ public void testSuggesters() throws Exception {
             .setMapping("field1", "type=text", "suggest_field1", "type=text", "suggest_field2", "type=completion")
         );
 
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
            .setSource(
                jsonBuilder().startObject()
                    .field("field1", "value1")
@@ -1327,8 +1321,7 @@ public void testSuggesters() throws Exception {
            )
            .get();
         // A document that is always included by role query of both roles:
-        client().prepareIndex("test")
-            .setId("2")
+        prepareIndex("test").setId("2")
            .setSource(jsonBuilder().startObject().field("field1", "value1").field("field2", "value2").endObject())
            .get();
         refresh("test");
@@ -1425,13 +1418,11 @@ public void testProfile() throws Exception {
             .setMapping("field1", "type=text", "other_field", "type=text")
         );
 
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
            .setSource(jsonBuilder().startObject().field("field1", "value1").field("other_field", "value").endObject())
            .get();
         // A document that is always included by role query of both roles:
-        client().prepareIndex("test")
-            .setId("2")
+        prepareIndex("test").setId("2")
            .setSource(jsonBuilder().startObject().field("field1", "value1").field("field2", "value2").endObject())
            .get();
         refresh("test");
diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityFeatureUsageTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityFeatureUsageTests.java
index 415bcedb9d68a..29ec9c809ebf2 100644
--- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityFeatureUsageTests.java
+++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityFeatureUsageTests.java
@@ -73,8 +73,8 @@ public Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
 
     public void testFlsFeatureUsageTracking() throws Exception {
         assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=text", "field2", "type=text"));
-        client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value1").setRefreshPolicy(IMMEDIATE).get();
-        client().prepareIndex("test").setId("2").setSource("field1", "value2", "field2", "value2").setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value1").setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("test").setId("2").setSource("field1", "value2", "field2", "value2").setRefreshPolicy(IMMEDIATE).get();
 
         assertHitCount(
             internalCluster().coordOnlyNodeClient()
diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityRandomTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityRandomTests.java
index f9bd893ea3653..40672bf597b8c 100644
--- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityRandomTests.java
+++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityRandomTests.java
@@ -157,7 +157,7 @@ public void testRandom() {
             doc.put(field, "value");
         }
         assertAcked(indicesAdmin().prepareCreate("test").setMapping(fieldMappers));
-        client().prepareIndex("test").setId("1").setSource(doc).setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("test").setId("1").setSource(doc).setRefreshPolicy(IMMEDIATE).get();
 
         for (String allowedField : allowedFields) {
             logger.info("Checking allowed field [{}]", allowedField);
@@ -190,7 +190,7 @@ public void testDuel() throws Exception {
         for (int i = 1; i <= numDocs; i++) {
             String field = randomFrom("field1", "field2", "field3");
             String value = "value";
-            requests.add(client().prepareIndex("test").setId(value).setSource("id", Integer.toString(i), field, value));
+            requests.add(prepareIndex("test").setId(value).setSource("id", Integer.toString(i), field, value));
         }
         indexRandom(true, requests);
 
diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java
index d5d48440c34ea..9c962095b3229 100644
--- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java
+++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java
@@ -12,15 +12,14 @@
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.get.GetResponse;
 import org.elasticsearch.action.get.MultiGetResponse;
-import org.elasticsearch.action.search.ClosePointInTimeAction;
 import org.elasticsearch.action.search.ClosePointInTimeRequest;
-import org.elasticsearch.action.search.MultiSearchResponse;
-import org.elasticsearch.action.search.OpenPointInTimeAction;
 import org.elasticsearch.action.search.OpenPointInTimeRequest;
 import org.elasticsearch.action.search.OpenPointInTimeResponse;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.SearchRequestBuilder;
 import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.TransportClosePointInTimeAction;
+import org.elasticsearch.action.search.TransportOpenPointInTimeAction;
 import org.elasticsearch.action.support.WriteRequest;
 import org.elasticsearch.action.termvectors.MultiTermVectorsResponse;
 import org.elasticsearch.action.termvectors.TermVectorsRequest;
@@ -81,6 +80,7 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures;
 import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER;
 import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
@@ -229,8 +229,7 @@ public void testQuery() {
             indicesAdmin().prepareCreate("test")
                 .setMapping("field1", "type=text", "field2", "type=text", "field3", "type=text", "alias", "type=alias,path=field1")
         );
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
            .setSource("field1", "value1", "field2", "value2", "field3", "value3")
            .setRefreshPolicy(IMMEDIATE)
            .get();
@@ -415,8 +414,7 @@ public void testKnnSearch() throws IOException {
             .endObject();
         assertAcked(indicesAdmin().prepareCreate("test").setMapping(builder));
 
-        client().prepareIndex("test")
-            .setSource("field1", "value1", "field2", "value2", "vector", new float[] { 0.0f, 0.0f, 0.0f })
+        prepareIndex("test").setSource("field1", "value1", "field2", "value2", "vector", new float[] { 0.0f, 0.0f, 0.0f })
            .setRefreshPolicy(IMMEDIATE)
            .get();
@@ -475,9 +473,9 @@ public void testKnnSearch() throws IOException {
     public void testPercolateQueryWithIndexedDocWithFLS() {
         assertAcked(indicesAdmin().prepareCreate("query_index").setMapping("query", "type=percolator", "field2", "type=text"));
         assertAcked(indicesAdmin().prepareCreate("doc_index").setMapping("field2", "type=text", "field1", "type=text"));
-        client().prepareIndex("query_index").setId("1").setSource("""
+        prepareIndex("query_index").setId("1").setSource("""
            {"query": {"match": {"field2": "bonsai tree"}}}""", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get();
-        client().prepareIndex("doc_index").setId("1").setSource("""
+        prepareIndex("doc_index").setId("1").setSource("""
            {"field1": "value1", "field2": "A new bonsai tree in the office"}""", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get();
         QueryBuilder percolateQuery = new PercolateQueryBuilder("query", "doc_index", "1", null, null, null);
         // user7 sees everything
@@ -517,21 +515,21 @@ public void testPercolateQueryWithIndexedDocWithFLS() {
     public void testGeoQueryWithIndexedShapeWithFLS() {
         assertAcked(indicesAdmin().prepareCreate("search_index").setMapping("field", "type=shape", "other", "type=shape"));
         assertAcked(indicesAdmin().prepareCreate("shape_index").setMapping("field", "type=shape", "other", "type=shape"));
-        client().prepareIndex("search_index").setId("1").setSource("""
+        prepareIndex("search_index").setId("1").setSource("""
            {
              "field": {
                "type": "point",
                "coordinates": [ 1, 1 ]
              }
            }""", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get();
-        client().prepareIndex("search_index").setId("2").setSource("""
+        prepareIndex("search_index").setId("2").setSource("""
            {
              "other": {
                "type": "point",
                "coordinates": [ 1, 1 ]
              }
            }""", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get();
-        client().prepareIndex("shape_index").setId("1").setSource("""
+        prepareIndex("shape_index").setId("1").setSource("""
            {
              "field": {
                "type": "envelope",
@@ -548,7 +546,7 @@ public void testGeoQueryWithIndexedShapeWithFLS() {
                ]
              }
            }""", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get();
-        client().prepareIndex("shape_index").setId("2").setSource("""
+        prepareIndex("shape_index").setId("2").setSource("""
            {
              "other": {
                "type": "envelope",
@@ -617,19 +615,17 @@ public void testTermsLookupOnIndexWithFLS() {
         assertAcked(indicesAdmin().prepareCreate("search_index").setMapping("field", "type=keyword", "other", "type=text"));
         assertAcked(indicesAdmin().prepareCreate("lookup_index").setMapping("field", "type=keyword", "other", "type=text"));
-        client().prepareIndex("search_index").setId("1").setSource("field", List.of("value1", "value2")).setRefreshPolicy(IMMEDIATE).get();
-        client().prepareIndex("search_index")
-            .setId("2")
+        prepareIndex("search_index").setId("1").setSource("field", List.of("value1", "value2")).setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("search_index").setId("2")
            .setSource("field", "value1", "other", List.of("value1", "value2"))
            .setRefreshPolicy(IMMEDIATE)
            .get();
-        client().prepareIndex("search_index")
-            .setId("3")
+        prepareIndex("search_index").setId("3")
            .setSource("field", "value3", "other", List.of("value1", "value2"))
            .setRefreshPolicy(IMMEDIATE)
            .get();
-        client().prepareIndex("lookup_index").setId("1").setSource("field", List.of("value1", "value2")).setRefreshPolicy(IMMEDIATE).get();
-        client().prepareIndex("lookup_index").setId("2").setSource("other", "value2", "field", "value2").setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("lookup_index").setId("1").setSource("field", List.of("value1", "value2")).setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("lookup_index").setId("2").setSource("other", "value2", "field", "value2").setRefreshPolicy(IMMEDIATE).get();
 
         // user sees the terms doc field
         assertSearchHitsWithoutFailures(
@@ -664,7 +660,7 @@ public void testTermsLookupOnIndexWithFLS() {
     public void testGetApi() throws Exception {
         assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=text", "field2", "type=text", "field3", "type=text"));
 
-        client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2", "field3", "value3").get();
+        prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2", "field3", "value3").get();
 
         boolean realtime = randomBoolean();
         // user1 is granted access to field1 only:
@@ -762,7 +758,7 @@ public void testRealtimeGetApi() {
         final boolean realtime = true;
         final boolean refresh = false;
 
-        client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2", "field3", "value3").get();
+        prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2", "field3", "value3").get();
         // do a realtime get beforehand to flip an internal translog flag so that subsequent realtime gets are
         // served from the translog (this first one is NOT, it internally forces a refresh of the index)
         client().prepareGet("test", "1").setRealtime(realtime).setRefresh(refresh).get();
@@ -770,8 +766,7 @@ public void testRealtimeGetApi() {
         // updates don't change the doc visibility for users
         // but updates populate the translog and the FLS filter must apply to the translog operations too
         if (randomBoolean()) {
-            client().prepareIndex("test")
-                .setId("1")
+            prepareIndex("test").setId("1")
                .setSource("field1", "value1", "field2", "value2", "field3", "value3")
                .setRefreshPolicy(WriteRequest.RefreshPolicy.NONE)
                .get();
@@ -817,7 +812,7 @@ public void testRealtimeGetApi() {
     public void testMGetApi() throws Exception {
         assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=text", "field2", "type=text", "field3", "type=text"));
 
-        client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2", "field3", "value3").get();
+        prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2", "field3", "value3").get();
 
         boolean realtime = randomBoolean();
         // user1 is granted access to field1 only:
@@ -925,137 +920,169 @@ public void testMSearchApi() throws Exception {
         assertAcked(indicesAdmin().prepareCreate("test1").setMapping("field1", "type=text", "field2", "type=text", "field3", "type=text"));
         assertAcked(indicesAdmin().prepareCreate("test2").setMapping("field1", "type=text", "field2", "type=text", "field3", "type=text"));
 
-        client().prepareIndex("test1").setId("1").setSource("field1", "value1", "field2", "value2", "field3", "value3").get();
-        client().prepareIndex("test2").setId("1").setSource("field1", "value1", "field2", "value2", "field3", "value3").get();
+        prepareIndex("test1").setId("1").setSource("field1", "value1", "field2", "value2", "field3", "value3").get();
+        prepareIndex("test2").setId("1").setSource("field1", "value1", "field2", "value2", "field3", "value3").get();
         indicesAdmin().prepareRefresh("test1", "test2").get();
 
         // user1 is granted access to field1 only
-        MultiSearchResponse response = client().filterWithHeader(
-            Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))
-        )
-            .prepareMultiSearch()
-            .add(prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery()))
-            .add(prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery()))
-            .get();
-        assertFalse(response.getResponses()[0].isFailure());
-        assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(1));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
-        assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits().value, is(1L));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(1));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
-
-        // user2 is granted access to field2 only
-        response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD)))
-            .prepareMultiSearch()
-            .add(prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery()))
-            .add(prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery()))
-            .get();
-        assertFalse(response.getResponses()[0].isFailure());
-        assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(1));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
-        assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits().value, is(1L));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(1));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
-
-        // user3 is granted access to field1 and field2
-        response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD)))
-            .prepareMultiSearch()
-            .add(prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery()))
-            .add(prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery()))
-            .get();
-        assertFalse(response.getResponses()[0].isFailure());
-        assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
-        assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits().value, is(1L));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
-
-        // user4 is granted access to no fields, so the search response does say the doc exist, but no fields are returned
-        response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD)))
-            .prepareMultiSearch()
-            .add(prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery()))
-            .add(prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery()))
-            .get();
-        assertFalse(response.getResponses()[0].isFailure());
-        assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(0));
-        assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits().value, is(1L));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(0));
-
-        // user5 has no field level security configured, so all fields are returned
-        response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD)))
-            .prepareMultiSearch()
-            .add(prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery()))
-            .add(prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery()))
-            .get();
-        assertFalse(response.getResponses()[0].isFailure());
-        assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(3));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field3"), is("value3"));
-        assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits().value, is(1L));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(3));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field3"), is("value3"));
-
-        // user6 has access to field*
-        response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD)))
-            .prepareMultiSearch()
-            .add(prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery()))
-            .add(prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery()))
-            .get();
-        assertFalse(response.getResponses()[0].isFailure());
-        assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(3));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field3"), is("value3"));
-        assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits().value, is(1L));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(3));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field3"), is("value3"));
-
-        // user7 has roles with field level security and without field level security
-        response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user7", USERS_PASSWD)))
-            .prepareMultiSearch()
-            .add(prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery()))
-            .add(prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery()))
-            .get();
-        assertFalse(response.getResponses()[0].isFailure());
-        assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(3));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field3"), is("value3"));
-        assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits().value, is(1L));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(3));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field3"), is("value3"));
+        {
+            assertResponse(
+                client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)))
+                    .prepareMultiSearch()
+                    .add(prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery()))
+                    .add(prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery())),
+                response -> {
+                    assertFalse(response.getResponses()[0].isFailure());
+                    assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(1));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits().value, is(1L));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(1));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
+                }
+            );
+        }
 
-        // user8 has roles with field level security with access to field1 and field2
-        response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user8", USERS_PASSWD)))
-            .prepareMultiSearch()
-            .add(prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery()))
-            .add(prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery()))
-            .get();
-        assertFalse(response.getResponses()[0].isFailure());
-        assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
-        assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
-        assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits().value, is(1L));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
-        assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
+        {
+            // user2 is granted access to field2 only
+            assertResponse(
+                client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD)))
+                    .prepareMultiSearch()
+                    .add(prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery()))
+                    .add(prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery())),
+                response -> {
+                    assertFalse(response.getResponses()[0].isFailure());
+                    assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(1));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits().value, is(1L));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(1));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
+                }
+            );
+        }
+        {
+            // user3 is granted access to field1 and field2
+            assertResponse(
+                client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD)))
+                    .prepareMultiSearch()
+                    .add(prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery()))
+                    .add(prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery())),
+                response -> {
+                    assertFalse(response.getResponses()[0].isFailure());
+                    assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits().value, is(1L));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
+                }
+            );
+        }
+        {
+            // user4 is granted access to no fields, so the search response does say the doc exists, but no fields are returned
+            assertResponse(
+                client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD)))
+                    .prepareMultiSearch()
+                    .add(prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery()))
+                    .add(prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery())),
+                response -> {
+                    assertFalse(response.getResponses()[0].isFailure());
+                    assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(0));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits().value, is(1L));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(0));
+                }
+            );
+        }
+        {
+            // user5 has no field level security configured, so all fields are returned
+            assertResponse(
+                client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD)))
+                    .prepareMultiSearch()
+                    .add(prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery()))
+                    .add(prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery())),
+                response -> {
+                    assertFalse(response.getResponses()[0].isFailure());
+                    assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(3));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field3"), is("value3"));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits().value, is(1L));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(3));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field3"), is("value3"));
+                }
+            );
+        }
+        {
+            // user6 has access to field*
+            assertResponse(
+                client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD)))
+                    .prepareMultiSearch()
+                    .add(prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery()))
+                    .add(prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery())),
+                response -> {
+                    assertFalse(response.getResponses()[0].isFailure());
+                    assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(3));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field3"), is("value3"));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits().value, is(1L));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(3));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field3"), is("value3"));
+                }
+            );
+        }
+        {
+            // user7 has roles with field level security and without field level security
+            assertResponse(
+                client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user7", USERS_PASSWD)))
+                    .prepareMultiSearch()
+                    .add(prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery()))
+                    .add(prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery())),
+                response -> {
+                    assertFalse(response.getResponses()[0].isFailure());
+                    assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(3));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field3"), is("value3"));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits().value, is(1L));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(3));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field3"), is("value3"));
+                }
+            );
+        }
+        {
+            // user8 has roles with field level security with access to field1 and field2
+            assertResponse(
+                client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user8", USERS_PASSWD)))
+                    .prepareMultiSearch()
+                    .add(prepareSearch("test1").setQuery(QueryBuilders.matchAllQuery()))
+                    .add(prepareSearch("test2").setQuery(QueryBuilders.matchAllQuery())),
+                response -> {
+                    assertFalse(response.getResponses()[0].isFailure());
+                    assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits().value, is(1L));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
+                    assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits().value, is(1L));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
+                    assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2"));
+                }
+            );
+        }
     }
 
     public void testScroll() throws Exception {
@@ -1067,10 +1094,7 @@ public void testScroll() throws Exception {
         final int numDocs = scaledRandomIntBetween(2, 10);
         for (int i = 0; i < numDocs; i++) {
-            client().prepareIndex("test")
-                .setId(String.valueOf(i))
-                .setSource("field1", "value1", "field2", "value2", "field3", "value3")
-                .get();
+            prepareIndex("test").setId(String.valueOf(i)).setSource("field1", "value1", "field2", "value2", "field3", "value3").get();
         }
         refresh("test");
 
@@ -1113,7 +1137,7 @@ static String openPointInTime(String userName, TimeValue keepAlive, String... in
         OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).keepAlive(keepAlive);
         final OpenPointInTimeResponse response = client().filterWithHeader(
             Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue(userName, USERS_PASSWD))
-        ).execute(OpenPointInTimeAction.INSTANCE, request).actionGet();
+        ).execute(TransportOpenPointInTimeAction.TYPE, request).actionGet();
         return response.getPointInTimeId();
     }
 
@@ -1126,10 +1150,7 @@ public void testPointInTimeId() throws Exception {
         final int numDocs = scaledRandomIntBetween(2, 10);
         for (int i = 0; i < numDocs; i++) {
-            client().prepareIndex("test")
-                .setId(String.valueOf(i))
-                .setSource("field1", "value1", "field2", "value2", "field3", "value3")
-                .get();
+            prepareIndex("test").setId(String.valueOf(i)).setSource("field1", "value1", "field2", "value2", "field3", "value3").get();
         }
         refresh("test");
 
@@ -1153,7 +1174,7 @@ public void testPointInTimeId() throws Exception {
                 assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1"), is("value1"));
             }
         } finally {
-            client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(pitId)).actionGet();
+            client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet();
         }
     }
 
@@ -1163,8 +1184,7 @@ public void testQueryCache() throws Exception {
             .setSettings(Settings.builder().put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true))
             .setMapping("field1", "type=text", "field2", "type=text", "field3", "type=text")
         );
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
            .setSource("field1", "value1", "field2", "value2", "field3", "value3")
            .setRefreshPolicy(IMMEDIATE)
            .get();
@@ -1203,7 +1223,7 @@ public void testScrollWithQueryCache() {
         final int numDocs = scaledRandomIntBetween(2, 4);
         for (int i = 0; i < numDocs; i++) {
-            client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", "value1", "field2", "value2").get();
+            prepareIndex("test").setId(String.valueOf(i)).setSource("field1", "value1", "field2", "value2").get();
         }
         refresh("test");
 
@@ -1304,7 +1324,7 @@ public void testRequestCache() throws Exception {
             .setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true))
             .setMapping("field1", "type=text", "field2", "type=text", "field3", "type=text")
         );
-        client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get();
 
         int max = scaledRandomIntBetween(4, 32);
         for (int i = 0; i < max; i++) {
@@ -1345,8 +1365,7 @@ public void testFields() throws Exception {
                 "type=alias,path=field1"
             )
         );
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
            .setSource("field1", "value1", "field2", "value2", "field3", "value3")
            .setRefreshPolicy(IMMEDIATE)
            .get();
@@ -1453,8 +1472,7 @@ public void testFields() throws Exception {
 
     public void testSource() throws Exception {
         assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=text", "field2", "type=text", "field3", "type=text"));
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
            .setSource("field1", "value1", "field2", "value2", "field3", "value3")
            .setRefreshPolicy(IMMEDIATE)
            .get();
@@ -1527,7 +1545,7 @@ public void testSort() {
         assertAcked(
             indicesAdmin().prepareCreate("test").setMapping("field1", "type=long", "field2", "type=long", "alias", "type=alias,path=field1")
         );
-        client().prepareIndex("test").setId("1").setSource("field1", 1d, "field2", 2d).setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("test").setId("1").setSource("field1", 1d, "field2", 2d).setRefreshPolicy(IMMEDIATE).get();
 
         // user1 is granted to use field1, so it is included in the sort_values
         SearchResponse response = client().filterWithHeader(
@@ -1576,8 +1594,7 @@ public void testHighlighting() {
             indicesAdmin().prepareCreate("test")
                 .setMapping("field1", "type=text", "field2", "type=text", "field3", "type=text", "alias", "type=alias,path=field1")
         );
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
            .setSource("field1", "value1", "field2", "value2", "field3", "value3")
            .setRefreshPolicy(IMMEDIATE)
            .get();
@@ -1626,7 +1643,7 @@ public void testAggs() {
             indicesAdmin().prepareCreate("test")
                 .setMapping("field1", "type=text,fielddata=true", "field2", "type=text,fielddata=true", "alias", "type=alias,path=field1")
         );
-        client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get();
 
         // user1 is authorized to use field1, so buckets are include for a term agg on field1
         SearchResponse response = client().filterWithHeader(
@@ -1682,8 +1699,7 @@ public void testTVApi() throws Exception {
                 "type=text,term_vector=with_positions_offsets_payloads"
             )
         );
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
            .setSource("field1", "value1", "field2", "value2", "field3", "value3")
            .setRefreshPolicy(IMMEDIATE)
            .get();
@@ -1772,8 +1788,7 @@ public void testMTVApi() throws Exception {
                 "type=text,term_vector=with_positions_offsets_payloads"
            )
        );
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
            .setSource("field1", "value1", "field2", "value2", "field3", "value3")
            .setRefreshPolicy(IMMEDIATE)
            .get();
@@ -1881,18 +1896,18 @@ public void testParentChild() throws Exception {
         ensureGreen();
 
         // index simple data
-        client().prepareIndex("test").setId("p1").setSource("join_field", "parent").get();
+        prepareIndex("test").setId("p1").setSource("join_field", "parent").get();
 
         Map<String, Object> source = new HashMap<>();
         source.put("field1", "red");
         Map<String, Object> joinField = new HashMap<>();
         joinField.put("name", "child");
         joinField.put("parent", "p1");
         source.put("join_field", joinField);
-        client().prepareIndex("test").setId("c1").setSource(source).setRouting("p1").get();
+        prepareIndex("test").setId("c1").setSource(source).setRouting("p1").get();
         source = new HashMap<>();
         source.put("field1", "yellow");
         source.put("join_field", joinField);
-        client().prepareIndex("test").setId("c2").setSource(source).setRouting("p1").get();
+        prepareIndex("test").setId("c2").setSource(source).setRouting("p1").get();
         refresh();
         verifyParentChild();
     }
@@ -1931,7 +1946,7 @@ private void verifyParentChild() {
 
     public void testUpdateApiIsBlocked() throws Exception {
         assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=text", "field2", "type=text"));
-        client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value1").setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value1").setRefreshPolicy(IMMEDIATE).get();
 
         // With field level security enabled the update is not allowed:
         try {
@@ -1973,7 +1988,7 @@ public void testQuery_withRoleWithFieldWildcards() {
         assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=text", "field2", "type=text"));
-        client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get();
 
         // user6 has access to all fields, so the query should match with the document:
         SearchResponse response = client().filterWithHeader(
@@ -2000,8 +2015,7 @@ public void testExistQuery() {
             .setMapping("field1", "type=text", "field2", "type=text", "field3", "type=text", "alias", "type=alias,path=field1")
         );
 
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
            .setSource("field1", "value1", "field2", "value2", "field3", "value3")
            .setRefreshPolicy(IMMEDIATE)
            .get();
@@ -2083,13 +2097,11 @@ public void testLookupRuntimeFields() throws Exception {
         assertAcked(
             indicesAdmin().prepareCreate("hosts").setMapping("field1", "type=keyword", "field2", "type=text", "field3", "type=text")
         );
-        client().prepareIndex("hosts")
-            .setId("1")
+        prepareIndex("hosts").setId("1")
            .setSource("field1", "192.168.1.1", "field2", "windows", "field3", "canada")
            .setRefreshPolicy(IMMEDIATE)
            .get();
-        client().prepareIndex("hosts")
-            .setId("2")
+        prepareIndex("hosts").setId("2")
            .setSource("field1", "192.168.1.2", "field2", "macos", "field3", "us")
            .setRefreshPolicy(IMMEDIATE)
            .get();
@@ -2099,13 +2111,11 @@ public void testLookupRuntimeFields() throws Exception {
             .setMapping("field1", "type=keyword", "field2", "type=text", "field3", "type=date,format=yyyy-MM-dd")
         );
 
-        client().prepareIndex("logs")
-            .setId("1")
+        prepareIndex("logs").setId("1")
            .setSource("field1", "192.168.1.1", "field2", "out of memory", "field3", "2021-01-20")
            .setRefreshPolicy(IMMEDIATE)
            .get();
-        client().prepareIndex("logs")
-            .setId("2")
+        prepareIndex("logs").setId("2")
            .setSource("field1", "192.168.1.2", "field2", "authentication fails", "field3", "2021-01-21")
            .setRefreshPolicy(IMMEDIATE)
            .get();
diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/IndicesPermissionsWithAliasesWildcardsAndRegexsTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/IndicesPermissionsWithAliasesWildcardsAndRegexsTests.java
index 8420f8fd8d481..0e799589409f8 100644
--- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/IndicesPermissionsWithAliasesWildcardsAndRegexsTests.java
+++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/IndicesPermissionsWithAliasesWildcardsAndRegexsTests.java
@@ -98,8 +98,7 @@ public void testGetResolveWildcardsRegexs() throws Exception {
             .addAlias(new Alias("my_alias"))
             .addAlias(new Alias("an_alias"))
         );
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
            .setSource("field1", "value1", "field2", "value2", "field3", "value3")
            .setRefreshPolicy(IMMEDIATE)
            .get();
@@ -130,8 +129,7 @@ public void testSearchResolveWildcardsRegexs() throws Exception {
             .addAlias(new Alias("my_alias"))
             .addAlias(new Alias("an_alias"))
         );
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
            .setSource("field1", "value1", "field2", "value2", "field3", "value3")
            .setRefreshPolicy(IMMEDIATE)
            .get();
@@ -197,8 +195,7 @@ public void testSearchResolveDataStreams() throws Exception {
         assertAcked(indicesAdmin().aliases(aliasesRequest).actionGet());
 
         String value = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(System.currentTimeMillis());
-        client().prepareIndex("test")
-            .setCreate(true)
+        prepareIndex("test").setCreate(true)
            .setId("1")
            .setSource(DEFAULT_TIMESTAMP_FIELD, value, "field1", "value1", "field2", "value2", "field3", "value3")
            .setRefreshPolicy(IMMEDIATE)
@@ -255,16 +252,11 @@ public void testSearchResolveDataStreams() throws Exception {
     private void putComposableIndexTemplate(String id, List<String> patterns) throws IOException {
         PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id);
         request.indexTemplate(
-            new ComposableIndexTemplate(
-                patterns,
-                new Template(null, null, null),
-                null,
-                null,
-                null,
-                null,
-                new ComposableIndexTemplate.DataStreamTemplate(),
-                null
-            )
+            ComposableIndexTemplate.builder()
+                .indexPatterns(patterns)
+                .template(new Template(null, null, null))
+                .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate())
+                .build()
         );
         client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet();
     }
diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java
index e2cce37789ffb..afe9e68716579 100644
--- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java
+++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java
@@ -63,7 +63,7 @@ public String configUsersRoles() {
     public void testFieldMappings() throws Exception {
         final String index = "logstash-20-12-2015";
         final String field = "foo";
-        indexRandom(true, client().prepareIndex().setIndex(index).setSource(field, "bar"));
+        indexRandom(true, prepareIndex(index).setSource(field, "bar"));
 
         GetFieldMappingsResponse response = indicesAdmin().prepareGetFieldMappings()
             .addIndices("logstash-*")
@@ -85,7 +85,7 @@ public void testValidateQuery() throws Exception {
         final String index = "logstash-20-12-2015";
         final String type = "event";
         final String field = "foo";
-        indexRandom(true, client().prepareIndex().setIndex(index).setSource(field, "bar"));
+        indexRandom(true, prepareIndex(index).setSource(field, "bar"));
 
         ValidateQueryResponse response = indicesAdmin().prepareValidateQuery(index).setQuery(QueryBuilders.termQuery(field, "bar")).get();
         assertThat(response.isValid(), is(true));
@@ -100,7 +100,7 @@ public void testSearchAndMSearch() throws Exception {
         final String index = "logstash-20-12-2015";
         final String type = "event";
         final String field = "foo";
-        indexRandom(true, client().prepareIndex().setIndex(index).setSource(field, "bar"));
+        indexRandom(true, prepareIndex(index).setSource(field, "bar"));
 
         SearchResponse response = prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()).get();
         final long hits = response.getHits().getTotalHits().value;
@@ -110,22 +110,31 @@ public void testSearchAndMSearch() throws Exception {
         ).prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()).get();
         assertEquals(response.getHits().getTotalHits().value, hits);
 
+        final long multiHits;
         MultiSearchResponse multiSearchResponse = client().prepareMultiSearch()
             .add(prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()))
             .get();
-        final long multiHits = multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits().value;
-        assertThat(hits, greaterThan(0L));
+        try {
+            multiHits = multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits().value;
+            assertThat(hits, greaterThan(0L));
+        } finally {
+            multiSearchResponse.decRef();
+        }
 
         multiSearchResponse = client().filterWithHeader(
             singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue("kibana_user", USERS_PASSWD))
         ).prepareMultiSearch().add(prepareSearch(index).setQuery(QueryBuilders.matchAllQuery())).get();
-        assertEquals(multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits().value, multiHits);
+        try {
+            assertEquals(multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits().value, multiHits);
+        } finally {
+            multiSearchResponse.decRef();
+        }
     }
 
     public void testGetIndex() throws Exception {
         final String index = "logstash-20-12-2015";
         final String type = "event";
         final String field = "foo";
-        indexRandom(true, client().prepareIndex().setIndex(index).setSource(field, "bar"));
+        indexRandom(true, prepareIndex(index).setSource(field, "bar"));
 
         GetIndexResponse response = indicesAdmin().prepareGetIndex().setIndices(index).get();
         assertThat(response.getIndices(), arrayContaining(index));
@@ -140,7 +149,7 @@ public void testGetMappings() throws Exception {
         final String index = "logstash-20-12-2015";
         final String type = "_doc";
         final String field = "foo";
-        indexRandom(true, client().prepareIndex().setIndex(index).setSource(field, "bar"));
+        indexRandom(true, prepareIndex(index).setSource(field, "bar"));
 
         GetMappingsResponse response = client().filterWithHeader(
             singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue("kibana_user", USERS_PASSWD))
diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java
index c9f987b1981a9..2e5d92839d3f7 100644
--- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java
+++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java
@@ -170,16 +170,20 @@ public void testSingleRole() throws Exception {
             .add(client.prepareSearch("test"))
             .add(client.prepareSearch("test1"))
             .get();
-        MultiSearchResponse.Item[] items = msearchResponse.getResponses();
-        assertThat(items.length, is(2));
-        assertThat(items[0].isFailure(), is(false));
-        searchResponse = items[0].getResponse();
-        assertNoFailures(searchResponse);
-        assertHitCount(searchResponse, 1);
-        assertThat(items[1].isFailure(), is(false));
-        searchResponse = items[1].getResponse();
-        assertNoFailures(searchResponse);
-        assertHitCount(searchResponse, 1);
+        try {
+            MultiSearchResponse.Item[] items = msearchResponse.getResponses();
+            assertThat(items.length, is(2));
+            assertThat(items[0].isFailure(), is(false));
+            searchResponse = items[0].getResponse();
+            assertNoFailures(searchResponse);
+            assertHitCount(searchResponse, 1);
+            assertThat(items[1].isFailure(), is(false));
+            searchResponse = items[1].getResponse();
+            assertNoFailures(searchResponse);
+            assertHitCount(searchResponse, 1);
+        } finally {
+            msearchResponse.decRef();
+        }
     }
 
     public void testMonitorRestrictedWildcards() throws Exception {
@@ -303,7 +307,7 @@ public void testMultiNamesWorkCorrectly() {
             .addAlias(new Alias("alias1").filter(QueryBuilders.termQuery("field1", "public")))
         );
 
-        client().prepareIndex("index1").setId("1").setSource("field1", "private").setRefreshPolicy(IMMEDIATE).get();
+        prepareIndex("index1").setId("1").setSource("field1", "private").setRefreshPolicy(IMMEDIATE).get();
 
         final Client userAClient = client().filterWithHeader(
             Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_a", USERS_PASSWD))
diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityCachePermissionTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityCachePermissionTests.java
index d2e3907204654..30f8507325a7e 100644
--- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityCachePermissionTests.java
+++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityCachePermissionTests.java
@@ -49,7 +49,7 @@ public void loadData() {
     public void testThatTermsFilterQueryDoesntLeakData() {
         SearchResponse response = prepareSearch("data").setQuery(
             QueryBuilders.constantScoreQuery(QueryBuilders.termsLookupQuery("token", new TermsLookup("tokens", "1", "tokens")))
-        ).execute().actionGet();
+        ).get();
         assertThat(response.isTimedOut(), is(false));
         assertThat(response.getHits().getHits().length, is(1));
 
@@ -65,8 +65,7 @@ public void testThatTermsFilterQueryDoesntLeakData() {
                 .setQuery(
                     QueryBuilders.constantScoreQuery(QueryBuilders.termsLookupQuery("token", new TermsLookup("tokens", "1", "tokens")))
                 )
-                .execute()
-                .actionGet();
+                .get();
             fail("search phase exception should have been thrown! response was:\n" + response.toString());
         } catch (ElasticsearchSecurityException e) {
             assertThat(e.toString(), containsString("ElasticsearchSecurityException: action"));
diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityClearScrollTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityClearScrollTests.java
index ca826be904771..434e478545ffe 100644
--- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityClearScrollTests.java
+++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityClearScrollTests.java
@@ -28,6 +28,7 @@
 import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER;
 import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
 import static org.hamcrest.Matchers.containsString;
@@ -68,9 +69,7 @@ protected String configRoles() {
     public void indexRandomDocuments() {
         BulkRequestBuilder bulkRequestBuilder = client().prepareBulk().setRefreshPolicy(IMMEDIATE);
         for (int i = 0; i < randomIntBetween(10, 50); i++) {
-            bulkRequestBuilder.add(
-                client().prepareIndex("index").setId(String.valueOf(i)).setSource("{ \"foo\" : \"bar\" }", XContentType.JSON)
-            );
+            bulkRequestBuilder.add(prepareIndex("index").setId(String.valueOf(i)).setSource("{ \"foo\" : \"bar\" }", XContentType.JSON));
         }
         BulkResponse bulkItemResponses = bulkRequestBuilder.get();
         assertThat(bulkItemResponses.hasFailures(), is(false));
@@ -80,8 +79,8 @@ public void indexRandomDocuments() {
         for (int i = 0; i < count; i++) {
multiSearchRequestBuilder.add(prepareSearch("index").setScroll("10m").setSize(1)); } - MultiSearchResponse multiSearchResponse = multiSearchRequestBuilder.get(); - scrollIds = getScrollIds(multiSearchResponse); + scrollIds = new ArrayList<>(); + assertResponse(multiSearchRequestBuilder, multiSearchResponse -> scrollIds.addAll(getScrollIds(multiSearchResponse))); } @After diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java index c3016a810c27f..e5a1ff867302c 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java @@ -31,7 +31,7 @@ protected int minimumNumberOfShards() { public void testShrinkIndex() throws Exception { final int randomNumberOfDocs = scaledRandomIntBetween(2, 12); for (int i = 0; i < randomNumberOfDocs; i++) { - client().prepareIndex("bigindex").setSource("foo", "bar").get(); + prepareIndex("bigindex").setSource("foo", "bar").get(); } Map dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes(); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java index bfb030d5f9b0f..3dfe440d56831 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -254,13 +254,17 @@ private void awaitApiKeysRemoverCompletion() throws Exception { } } + private Client authorizedClient() { + return client().filterWithHeader( + Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING)) + ); + } + public void testCreateApiKey() throws Exception { // Get an instant without nanoseconds as the expiration has millisecond precision final Instant start = Instant.ofEpochMilli(Instant.now().toEpochMilli()); final RoleDescriptor descriptor = new RoleDescriptor("role", new String[] { "monitor" }, null, null); - Client client = client().filterWithHeader( - Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING)) - ); + Client client = authorizedClient(); final CreateApiKeyResponse response = new CreateApiKeyRequestBuilder(client).setName("test key") .setExpiration(TimeValue.timeValueHours(TimeUnit.DAYS.toHours(7L))) .setRoleDescriptors(Collections.singletonList(descriptor)) @@ -317,9 +321,7 @@ public void testMultipleApiKeysCanHaveSameName() { List responses = new ArrayList<>(); for (int i = 0; i < noOfApiKeys; i++) { final RoleDescriptor descriptor = new RoleDescriptor("role", new String[] { "monitor" }, null, null); - Client client = client().filterWithHeader( - Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING)) - ); + Client client = authorizedClient(); final CreateApiKeyResponse response = new CreateApiKeyRequestBuilder(client).setName(keyName) .setExpiration(null) .setRoleDescriptors(Collections.singletonList(descriptor)) @@ -337,9 +339,7 @@ public void 
testMultipleApiKeysCanHaveSameName() { } public void testCreateApiKeyWithoutNameWillFail() { - Client client = client().filterWithHeader( - Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING)) - ); + Client client = authorizedClient(); final ActionRequestValidationException e = expectThrows( ActionRequestValidationException.class, () -> new CreateApiKeyRequestBuilder(client).setRefreshPolicy(randomFrom(NONE, WAIT_UNTIL, IMMEDIATE)).get() @@ -350,9 +350,7 @@ public void testCreateApiKeyWithoutNameWillFail() { public void testInvalidateApiKeysForRealm() throws InterruptedException, ExecutionException { int noOfApiKeys = randomIntBetween(3, 5); List responses = createApiKeys(noOfApiKeys, null).v1(); - Client client = client().filterWithHeader( - Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING)) - ); + Client client = authorizedClient(); PlainActionFuture listener = new PlainActionFuture<>(); client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingRealmName("file"), listener); InvalidateApiKeyResponse invalidateResponse = listener.get(); @@ -362,9 +360,7 @@ public void testInvalidateApiKeysForRealm() throws InterruptedException, Executi public void testInvalidateApiKeysForUser() throws Exception { int noOfApiKeys = randomIntBetween(3, 5); List responses = createApiKeys(noOfApiKeys, null).v1(); - Client client = client().filterWithHeader( - Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING)) - ); + Client client = authorizedClient(); PlainActionFuture listener = new PlainActionFuture<>(); client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingUserName(ES_TEST_ROOT_USER), listener); InvalidateApiKeyResponse invalidateResponse = listener.get(); @@ -373,9 +369,7 @@ public void testInvalidateApiKeysForUser() throws Exception { public void testInvalidateApiKeysForRealmAndUser() throws InterruptedException, ExecutionException { List responses = createApiKeys(1, null).v1(); - Client client = client().filterWithHeader( - Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING)) - ); + Client client = authorizedClient(); PlainActionFuture listener = new PlainActionFuture<>(); client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingRealmAndUserName("file", ES_TEST_ROOT_USER), listener); InvalidateApiKeyResponse invalidateResponse = listener.get(); @@ -384,9 +378,7 @@ public void testInvalidateApiKeysForRealmAndUser() throws InterruptedException, public void testInvalidateApiKeysForApiKeyId() throws InterruptedException, ExecutionException { List responses = createApiKeys(1, null).v1(); - Client client = client().filterWithHeader( - Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING)) - ); + Client client = authorizedClient(); PlainActionFuture listener = new PlainActionFuture<>(); client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(responses.get(0).getId(), false), listener); InvalidateApiKeyResponse invalidateResponse = listener.get(); @@ -395,9 +387,7 @@ public void testInvalidateApiKeysForApiKeyId() throws InterruptedException, Exec public void testInvalidateApiKeysForApiKeyName() throws InterruptedException, ExecutionException { List responses = createApiKeys(1, null).v1(); - Client client = 
client().filterWithHeader(
-            Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING))
-        );
+        Client client = authorizedClient();
         PlainActionFuture<InvalidateApiKeyResponse> listener = new PlainActionFuture<>();
         client.execute(
             InvalidateApiKeyAction.INSTANCE,
@@ -437,9 +427,7 @@ public void testInvalidateApiKeyWillClearApiKeyCache() throws IOException, Execu
         }

         // Invalidate the first key
-        Client client = client().filterWithHeader(
-            Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING))
-        );
+        Client client = authorizedClient();
         PlainActionFuture<InvalidateApiKeyResponse> listener = new PlainActionFuture<>();
         client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(apiKey1.v1(), false), listener);
         InvalidateApiKeyResponse invalidateResponse = listener.get();
@@ -463,6 +451,65 @@ public void testInvalidateApiKeyWillClearApiKeyCache() throws IOException, Execu
         assertThat(e.getResponse().getStatusLine().getStatusCode(), is(RestStatus.UNAUTHORIZED.getStatus()));
     }

+    public void testDynamicDeletionInterval() throws Exception {
+        try {
+            // Set the retention period to 1 ms and the delete interval to 1 hour
+            long deleteIntervalMs = 3600_000;
+            Settings.Builder builder = Settings.builder();
+            builder.put(ApiKeyService.DELETE_RETENTION_PERIOD.getKey(), TimeValue.timeValueMillis(1));
+            builder.put(ApiKeyService.DELETE_INTERVAL.getKey(), TimeValue.timeValueMillis(deleteIntervalMs));
+            updateClusterSettings(builder);
+
+            // Create API keys to test
+            List<CreateApiKeyResponse> responses = createApiKeys(3, null).v1();
+            String[] apiKeyIds = responses.stream().map(CreateApiKeyResponse::getId).toArray(String[]::new);
+
+            // Invalidate one API key to trigger run of inactive remover (will run once and then after DELETE_INTERVAL)
+            PlainActionFuture<InvalidateApiKeyResponse> listener = new PlainActionFuture<>();
+            Client client = authorizedClient();
+            client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(apiKeyIds[0], false), listener);
+            verifyInvalidateResponse(1, Collections.singletonList(responses.get(0)), listener.get());
+            awaitApiKeysRemoverCompletion();
+            refreshSecurityIndex();
+
+            // Get API keys to make sure the remover didn't remove any yet
+            assertThat(getAllApiKeyInfo(client, false).length, equalTo(3));
+
+            // Invalidate another key
+            listener = new PlainActionFuture<>();
+            client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(apiKeyIds[1], false), listener);
+            verifyInvalidateResponse(1, Collections.singletonList(responses.get(1)), listener.get());
+            awaitApiKeysRemoverCompletion();
+            refreshSecurityIndex();
+
+            // Get API keys to make sure the remover didn't remove any yet (nothing should be removed because of the long DELETE_INTERVAL)
+            assertThat(getAllApiKeyInfo(client, false).length, equalTo(3));
+
+            // Update DELETE_INTERVAL to 0 ms
+            builder = Settings.builder();
+            deleteIntervalMs = 0;
+            builder.put(ApiKeyService.DELETE_INTERVAL.getKey(), TimeValue.timeValueMillis(deleteIntervalMs));
+            updateClusterSettings(builder);
+
+            // Invalidate another key
+            listener = new PlainActionFuture<>();
+            client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(apiKeyIds[2], false), listener);
+            verifyInvalidateResponse(1, Collections.singletonList(responses.get(2)), listener.get());
+            awaitApiKeysRemoverCompletion();
+            refreshSecurityIndex();
+
+            // Make sure all keys except the last invalidated one are deleted.
+            // There is a (tiny) risk that the remover runs after the invalidation and therefore deletes the key that was just
+            // invalidated, so 0 or 1 keys can be returned from the get API.
+            assertThat(getAllApiKeyInfo(client, false).length, in(Set.of(0, 1)));
+        } finally {
+            final Settings.Builder builder = Settings.builder();
+            builder.putNull(ApiKeyService.DELETE_INTERVAL.getKey());
+            builder.putNull(ApiKeyService.DELETE_RETENTION_PERIOD.getKey());
+            updateClusterSettings(builder);
+        }
+    }
+
     private void verifyInvalidateResponse(
         int noOfApiKeys,
         List<CreateApiKeyResponse> responses,
@@ -510,7 +557,7 @@ private void setRetentionPeriod(boolean clear) {

     private void doTestInvalidKeysImmediatelyDeletedByRemover(String namePrefix) throws Exception {
         assertThat(deleteRetentionPeriodDays, equalTo(0L));

-        Client client = waitForExpiredApiKeysRemoverTriggerReadyAndGetClient().filterWithHeader(
+        Client client = waitForInactiveApiKeysRemoverTriggerReadyAndGetClient().filterWithHeader(
             Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING))
         );
@@ -562,7 +609,7 @@ private void doTestInvalidKeysImmediatelyDeletedByRemover(String namePrefix) thr
             is((apiKeyInvalidatedButNotYetDeletedByExpiredApiKeysRemover) ? 3 : 2)
         );

-        client = waitForExpiredApiKeysRemoverTriggerReadyAndGetClient().filterWithHeader(
+        client = waitForInactiveApiKeysRemoverTriggerReadyAndGetClient().filterWithHeader(
             Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING))
         );
@@ -605,7 +652,7 @@ private void doTestInvalidKeysImmediatelyDeletedByRemover(String namePrefix) thr
         );
     }

-    private Client waitForExpiredApiKeysRemoverTriggerReadyAndGetClient() throws Exception {
+    private Client waitForInactiveApiKeysRemoverTriggerReadyAndGetClient() throws Exception {
         String nodeWithMostRecentRun = null;
         long apiKeyLastTrigger = -1L;
         for (String nodeName : internalCluster().getNodeNames()) {
@@ -625,7 +672,7 @@ private Client waitForExpiredApiKeysRemoverTriggerReadyAndGetClient() throws Exc
     private void doTestDeletionBehaviorWhenKeysBecomeInvalidBeforeAndAfterRetentionPeriod(String namePrefix) throws Exception {
         assertThat(deleteRetentionPeriodDays, greaterThan(0L));

-        Client client = waitForExpiredApiKeysRemoverTriggerReadyAndGetClient().filterWithHeader(
+        Client client = waitForInactiveApiKeysRemoverTriggerReadyAndGetClient().filterWithHeader(
             Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING))
         );
@@ -774,9 +821,7 @@ public void testActiveApiKeysWithNoExpirationNeverGetDeletedByRemover() throws E
         final Tuple<List<CreateApiKeyResponse>, List<Map<String, Object>>> tuple = createApiKeys(2, null);
         List<CreateApiKeyResponse> responses = tuple.v1();

-        Client client = client().filterWithHeader(
-            Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING))
-        );
+        Client client = authorizedClient();
         PlainActionFuture<InvalidateApiKeyResponse> listener = new PlainActionFuture<>();
         // trigger expired keys remover
         client.execute(InvalidateApiKeyAction.INSTANCE, InvalidateApiKeyRequest.usingApiKeyId(responses.get(1).getId(), false), listener);
@@ -809,9 +854,7 @@ public void testGetApiKeysForRealm() throws InterruptedException, ExecutionExcep
         int noOfApiKeys = randomIntBetween(3, 5);
         final Tuple<List<CreateApiKeyResponse>, List<Map<String, Object>>> tuple = createApiKeys(noOfApiKeys, null);
         List<CreateApiKeyResponse> responses = tuple.v1();
-        Client client = client().filterWithHeader(
-            Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING))
-        );
+        Client client = authorizedClient();
         boolean invalidate = randomBoolean();
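// ----------------------------------------------------------------------------
// [Editorial sketch, not part of the diff] testDynamicDeletionInterval above
// relies on a tighten-then-restore pattern for cluster settings: override the
// remover's settings for the duration of the test, then putNull() them in a
// finally block so the defaults come back for later tests. Distilled shape,
// assuming an ESIntegTestCase subclass with updateClusterSettings and the
// ApiKeyService setting keys shown in the hunk:

try {
    // Keep invalidated keys for only 1 ms, but let the remover run at most once per hour.
    updateClusterSettings(
        Settings.builder()
            .put(ApiKeyService.DELETE_RETENTION_PERIOD.getKey(), TimeValue.timeValueMillis(1))
            .put(ApiKeyService.DELETE_INTERVAL.getKey(), TimeValue.timeValueHours(1))
    );
    // ... invalidate keys and assert on what the remover deleted ...
} finally {
    // A null value removes the override, falling back to the default.
    updateClusterSettings(
        Settings.builder()
            .putNull(ApiKeyService.DELETE_INTERVAL.getKey())
            .putNull(ApiKeyService.DELETE_RETENTION_PERIOD.getKey())
    );
}
// ----------------------------------------------------------------------------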
List invalidatedApiKeyIds = null; Set expectedValidKeyIds = null; @@ -857,9 +900,7 @@ public void testGetApiKeysForUser() throws Exception { int noOfApiKeys = randomIntBetween(3, 5); final Tuple, List>> tuple = createApiKeys(noOfApiKeys, null); List responses = tuple.v1(); - Client client = client().filterWithHeader( - Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING)) - ); + Client client = authorizedClient(); final boolean withLimitedBy = randomBoolean(); PlainActionFuture listener = new PlainActionFuture<>(); client.execute( @@ -883,9 +924,7 @@ public void testGetApiKeysForUser() throws Exception { public void testGetApiKeysForRealmAndUser() throws InterruptedException, ExecutionException, IOException { final Tuple, List>> tuple = createApiKeys(1, null); List responses = tuple.v1(); - Client client = client().filterWithHeader( - Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING)) - ); + Client client = authorizedClient(); final boolean withLimitedBy = randomBoolean(); PlainActionFuture listener = new PlainActionFuture<>(); client.execute( @@ -909,9 +948,7 @@ public void testGetApiKeysForRealmAndUser() throws InterruptedException, Executi public void testGetApiKeysForApiKeyId() throws InterruptedException, ExecutionException, IOException { final Tuple, List>> tuple = createApiKeys(1, null); List responses = tuple.v1(); - Client client = client().filterWithHeader( - Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING)) - ); + Client client = authorizedClient(); final boolean withLimitedBy = randomBoolean(); PlainActionFuture listener = new PlainActionFuture<>(); client.execute( @@ -1595,9 +1632,7 @@ public void testApiKeyWithManageOwnPrivilegeIsAbleToInvalidateItselfButNotAnyOth } public void testDerivedKeys() throws ExecutionException, InterruptedException { - Client client = client().filterWithHeader( - Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING)) - ); + Client client = authorizedClient(); final CreateApiKeyResponse response = new CreateApiKeyRequestBuilder(client).setName("key-1") .setRoleDescriptors( Collections.singletonList(new RoleDescriptor("role", new String[] { "manage_api_key", "manage_token" }, null, null)) @@ -1744,9 +1779,7 @@ public void testCreationAndAuthenticationReturns429WhenThreadPoolIsSaturated() t final ApiKeyService apiKeyService = internalCluster().getInstance(ApiKeyService.class, nodeName); final RoleDescriptor descriptor = new RoleDescriptor("auth_only", new String[] {}, null, null); - final Client client = client().filterWithHeader( - Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING)) - ); + final Client client = authorizedClient(); final CreateApiKeyResponse createApiKeyResponse = new CreateApiKeyRequestBuilder(client).setName("auth only key") .setRoleDescriptors(Collections.singletonList(descriptor)) .setMetadata(ApiKeyTests.randomMetadata()) @@ -2858,9 +2891,7 @@ private ServiceWithNodeName getServiceWithNodeName() { private record ServiceWithNodeName(ApiKeyService service, String nodeName) {} private Tuple createApiKeyAndAuthenticateWithIt() throws IOException { - Client client = client().filterWithHeader( - Collections.singletonMap("Authorization", basicAuthHeaderValue(ES_TEST_ROOT_USER, TEST_PASSWORD_SECURE_STRING)) - ); + Client client = 
authorizedClient(); final CreateApiKeyResponse createApiKeyResponse = new CreateApiKeyRequestBuilder(client).setName("test key") .setMetadata(ApiKeyTests.randomMetadata()) diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java index dcc98a8e6df7d..9323299a4d9c5 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.query.QueryBuilders; @@ -64,8 +65,10 @@ import org.elasticsearch.xpack.core.security.action.token.CreateTokenResponse; import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateResponse; import org.elasticsearch.xpack.core.security.action.user.PutUserAction; import org.elasticsearch.xpack.core.security.action.user.PutUserRequest; +import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; @@ -99,6 +102,7 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; public class ApiKeySingleNodeTests extends SecuritySingleNodeTestCase { @@ -108,6 +112,19 @@ protected Settings nodeSettings() { Settings.Builder builder = Settings.builder().put(super.nodeSettings()); builder.put(XPackSettings.API_KEY_SERVICE_ENABLED_SETTING.getKey(), true); builder.put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true); + builder.put("xpack.security.authc.realms.jwt.jwt1.order", 2) + .put("xpack.security.authc.realms.jwt.jwt1.allowed_audiences", "https://audience.example.com/") + .put("xpack.security.authc.realms.jwt.jwt1.allowed_issuer", "https://issuer.example.com/") + .put( + "xpack.security.authc.realms.jwt.jwt1.pkc_jwkset_path", + getDataPath("/org/elasticsearch/xpack/security/authc/apikey/rsa-public-jwkset.json") + ) + .put("xpack.security.authc.realms.jwt.jwt1.client_authentication.type", "NONE") + .put("xpack.security.authc.realms.jwt.jwt1.claims.name", "name") + .put("xpack.security.authc.realms.jwt.jwt1.claims.dn", "dn") + .put("xpack.security.authc.realms.jwt.jwt1.claims.groups", "roles") + .put("xpack.security.authc.realms.jwt.jwt1.claims.principal", "sub") + .put("xpack.security.authc.realms.jwt.jwt1.claims.mail", "mail"); return builder.build(); } @@ -402,6 +419,106 @@ public void testGrantApiKeyForUserWithRunAs() throws IOException { ); } + public void testGrantAPIKeyFromTokens() throws IOException { + final String jwtToken; + try (var in = 
getDataInputStream("/org/elasticsearch/xpack/security/authc/apikey/serialized-signed-RS256-jwt.txt")) {
+            jwtToken = new String(in.readAllBytes(), StandardCharsets.UTF_8);
+        }
+        getSecurityClient().putRole(new RoleDescriptor("user1_role", new String[] { "manage_token" }, null, null));
+        String role_mapping_rules = """
+            {
+                "enabled": true,
+                "roles": "user1_role",
+                "rules": {
+                    "all": [
+                        {
+                            "field": {
+                                "realm.name": "jwt1"
+                            }
+                        },
+                        {
+                            "field": {
+                                "username": "user1"
+                            }
+                        }
+                    ]
+                }
+            }
+            """;
+        getSecurityClient().putRoleMapping(
+            "user1_role_mapping",
+            XContentHelper.convertToMap(XContentType.JSON.xContent(), role_mapping_rules, true)
+        );
+        // grant API Key for regular ES access tokens (itself created from JWT credentials)
+        {
+            // get ES access token from JWT
+            final TestSecurityClient.OAuth2Token oAuth2Token = getSecurityClient(
+                RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "Bearer " + jwtToken).build()
+            ).createTokenWithClientCredentialsGrant();
+            String apiKeyName = randomAlphaOfLength(8);
+            GrantApiKeyRequest grantApiKeyRequest = new GrantApiKeyRequest();
+            grantApiKeyRequest.getGrant().setType("access_token");
+            grantApiKeyRequest.getGrant().setAccessToken(new SecureString(oAuth2Token.accessToken().toCharArray()));
+            grantApiKeyRequest.setRefreshPolicy(randomFrom(IMMEDIATE, WAIT_UNTIL));
+            grantApiKeyRequest.getApiKeyRequest().setName(apiKeyName);
+            CreateApiKeyResponse createApiKeyResponse = client().execute(GrantApiKeyAction.INSTANCE, grantApiKeyRequest).actionGet();
+            // use the API Key to check it's legit
+            assertThat(createApiKeyResponse.getName(), is(apiKeyName));
+            assertThat(createApiKeyResponse.getId(), notNullValue());
+            assertThat(createApiKeyResponse.getKey(), notNullValue());
+            final String apiKeyId = createApiKeyResponse.getId();
+            final String base64ApiKeyKeyValue = Base64.getEncoder()
+                .encodeToString((apiKeyId + ":" + createApiKeyResponse.getKey()).getBytes(StandardCharsets.UTF_8));
+            AuthenticateResponse authenticateResponse = client().filterWithHeader(
+                Collections.singletonMap("Authorization", "ApiKey " + base64ApiKeyKeyValue)
+            ).execute(AuthenticateAction.INSTANCE, AuthenticateRequest.INSTANCE).actionGet();
+            assertThat(authenticateResponse.authentication().getEffectiveSubject().getUser().principal(), is("user1"));
+            assertThat(authenticateResponse.authentication().getAuthenticationType(), is(Authentication.AuthenticationType.API_KEY));
+            // BUT client_authentication is not supported with the ES access token
+            {
+                GrantApiKeyRequest wrongGrantApiKeyRequest = new GrantApiKeyRequest();
+                wrongGrantApiKeyRequest.setRefreshPolicy(randomFrom(IMMEDIATE, WAIT_UNTIL, NONE));
+                wrongGrantApiKeyRequest.getApiKeyRequest().setName(randomAlphaOfLength(8));
+                wrongGrantApiKeyRequest.getGrant().setType("access_token");
+                wrongGrantApiKeyRequest.getGrant().setAccessToken(new SecureString(oAuth2Token.accessToken().toCharArray()));
+                wrongGrantApiKeyRequest.getGrant()
+                    .setClientAuthentication(new Grant.ClientAuthentication(new SecureString("whatever".toCharArray())));
+                ElasticsearchSecurityException e = expectThrows(
+                    ElasticsearchSecurityException.class,
+                    () -> client().execute(GrantApiKeyAction.INSTANCE, wrongGrantApiKeyRequest).actionGet()
+                );
+                assertThat(e.getMessage(), containsString("[client_authentication] not supported with the supplied access_token type"));
+            }
+        }
+        // grant API Key for JWT token
+        {
+            String apiKeyName = randomAlphaOfLength(8);
+            GrantApiKeyRequest grantApiKeyRequest = new GrantApiKeyRequest();
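// ----------------------------------------------------------------------------
// [Editorial sketch, not part of the diff] Both blocks of the new
// testGrantAPIKeyFromTokens follow the same grant flow: wrap a bearer
// credential (an ES access token in the first block, a raw JWT in the second)
// in a GrantApiKeyRequest of type "access_token", create the key, then
// authenticate with the "ApiKey <base64(id:key)>" header. Distilled below;
// bearerToken and the key name are invented placeholders:

GrantApiKeyRequest grant = new GrantApiKeyRequest();
grant.getGrant().setType("access_token");
grant.getGrant().setAccessToken(new SecureString(bearerToken.toCharArray()));
grant.getApiKeyRequest().setName("sketch-key");
grant.setRefreshPolicy(IMMEDIATE);
CreateApiKeyResponse created = client().execute(GrantApiKeyAction.INSTANCE, grant).actionGet();
String authHeader = "ApiKey "
    + Base64.getEncoder().encodeToString((created.getId() + ":" + created.getKey()).getBytes(StandardCharsets.UTF_8));
// A client carrying this header now authenticates as the user behind the granted token:
AuthenticateResponse authenticated = client().filterWithHeader(Map.of("Authorization", authHeader))
    .execute(AuthenticateAction.INSTANCE, AuthenticateRequest.INSTANCE)
    .actionGet();
// ----------------------------------------------------------------------------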
grantApiKeyRequest.getGrant().setType("access_token"); + grantApiKeyRequest.getGrant().setAccessToken(new SecureString(jwtToken.toCharArray())); + grantApiKeyRequest.setRefreshPolicy(randomFrom(IMMEDIATE, WAIT_UNTIL)); + grantApiKeyRequest.getApiKeyRequest().setName(apiKeyName); + // client authentication is ignored for JWTs that don't require it + if (randomBoolean()) { + grantApiKeyRequest.getGrant() + .setClientAuthentication(new Grant.ClientAuthentication(new SecureString("whatever".toCharArray()))); + } + CreateApiKeyResponse createApiKeyResponse = client().execute(GrantApiKeyAction.INSTANCE, grantApiKeyRequest).actionGet(); + // use the API Key to check it's legit + assertThat(createApiKeyResponse.getName(), is(apiKeyName)); + assertThat(createApiKeyResponse.getId(), notNullValue()); + assertThat(createApiKeyResponse.getKey(), notNullValue()); + final String apiKeyId = createApiKeyResponse.getId(); + final String base64ApiKeyKeyValue = Base64.getEncoder() + .encodeToString((apiKeyId + ":" + createApiKeyResponse.getKey()).getBytes(StandardCharsets.UTF_8)); + AuthenticateResponse authenticateResponse = client().filterWithHeader( + Collections.singletonMap("Authorization", "ApiKey " + base64ApiKeyKeyValue) + ).execute(AuthenticateAction.INSTANCE, AuthenticateRequest.INSTANCE).actionGet(); + assertThat(authenticateResponse.authentication().getEffectiveSubject().getUser().principal(), is("user1")); + assertThat(authenticateResponse.authentication().getAuthenticationType(), is(Authentication.AuthenticationType.API_KEY)); + } + } + public void testInvalidateApiKeyWillRecordTimestamp() { CreateApiKeyRequest createApiKeyRequest = new CreateApiKeyRequest( randomAlphaOfLengthBetween(3, 8), @@ -635,8 +752,7 @@ public void testCannotCreateDerivedCrossClusterApiKey() throws IOException { ) ) .setRefreshPolicy(randomFrom(NONE, WAIT_UNTIL, IMMEDIATE)) - .execute() - .actionGet(); + .get(); final String encoded = Base64.getEncoder() .encodeToString( (createAdminKeyResponse.getId() + ":" + createAdminKeyResponse.getKey().toString()).getBytes(StandardCharsets.UTF_8) diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java index d3229b390fc87..cb07cd76a5faa 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java @@ -316,7 +316,7 @@ private void testAddUserAndRoleThenAuth(String username, String roleName) { createIndex("idx"); ensureGreen("idx"); // Index a document with the default test user - client().prepareIndex("idx").setId("1").setSource("body", "foo").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("idx").setId("1").setSource("body", "foo").setRefreshPolicy(IMMEDIATE).get(); String token = basicAuthHeaderValue(username, new SecureString("s3krit-password")); SearchResponse searchResp = client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx").get(); @@ -339,7 +339,7 @@ public void testUpdatingUserAndAuthentication() throws Exception { createIndex("idx"); ensureGreen("idx"); // Index a document with the default test user - client().prepareIndex("idx").setId("1").setSource("body", "foo").setRefreshPolicy(IMMEDIATE).get(); + 
prepareIndex("idx").setId("1").setSource("body", "foo").setRefreshPolicy(IMMEDIATE).get(); String token = basicAuthHeaderValue("joe", new SecureString("s3krit-password")); SearchResponse searchResp = client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx").get(); @@ -373,7 +373,7 @@ public void testCreateDeleteAuthenticate() { createIndex("idx"); ensureGreen("idx"); // Index a document with the default test user - client().prepareIndex("idx").setId("1").setSource("body", "foo").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("idx").setId("1").setSource("body", "foo").setRefreshPolicy(IMMEDIATE).get(); String token = basicAuthHeaderValue("joe", new SecureString("s3krit-password")); SearchResponse searchResp = client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx").get(); @@ -684,7 +684,7 @@ public void testCannotCreateUserWithInvalidCharactersInName() throws Exception { } public void testUsersAndRolesDoNotInterfereWithIndicesStats() throws Exception { - client().prepareIndex("foo").setSource("ignore", "me").get(); + prepareIndex("foo").setSource("ignore", "me").get(); if (randomBoolean()) { preparePutUser("joe", "s3krit-password", hasher, SecuritySettingsSource.TEST_ROLE).get(); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java index d84b93fa6f638..c9b43afd4322d 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java @@ -16,12 +16,14 @@ import com.nimbusds.jwt.SignedJWT; import org.apache.http.HttpEntity; +import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentHelper; @@ -34,7 +36,17 @@ import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.action.Grant; +import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; +import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; +import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyRequest; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateResponse; +import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtAuthenticationToken; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import 
org.elasticsearch.xpack.security.LocalStateSecurity; import org.elasticsearch.xpack.security.Security; import org.elasticsearch.xpack.security.authc.Realms; @@ -44,17 +56,22 @@ import java.text.ParseException; import java.time.Instant; import java.time.temporal.ChronoUnit; +import java.util.Base64; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.WAIT_UNTIL; import static org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings.CLIENT_AUTH_SHARED_SECRET_ROTATION_GRACE_PERIOD; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; public class JwtRealmSingleNodeTests extends SecuritySingleNodeTestCase { @@ -134,6 +151,76 @@ protected boolean addMockHttpTransport() { return false; } + @TestLogging(value = "org.elasticsearch.xpack.security.authc.jwt:DEBUG", reason = "failures can be very difficult to troubleshoot") + public void testGrantApiKeyForJWT() throws Exception { + final JWTClaimsSet.Builder jwtClaims = new JWTClaimsSet.Builder(); + final String subject; + final String sharedSecret; + // id_token or access_token + if (randomBoolean()) { + subject = "me"; + // JWT "id_token" valid for jwt0 + jwtClaims.audience("es-01") + .issuer("my-issuer-01") + .subject(subject) + .claim("groups", "admin") + .issueTime(Date.from(Instant.now())) + .expirationTime(Date.from(Instant.now().plusSeconds(600))) + .build(); + sharedSecret = jwt0SharedSecret; + } else { + subject = "me@example.com"; + // JWT "access_token" valid for jwt2 + jwtClaims.audience("es-03") + .issuer("my-issuer-03") + .subject("user-03") + .claim("groups", "admin") + .claim("email", subject) + .issueTime(Date.from(Instant.now())) + .expirationTime(Date.from(Instant.now().plusSeconds(300))); + sharedSecret = jwt2SharedSecret; + } + { + // JWT is valid but the client authentication is NOT + GrantApiKeyRequest grantApiKeyRequest = getGrantApiKeyForJWT(getSignedJWT(jwtClaims.build()), randomFrom("WRONG", null)); + ElasticsearchSecurityException e = expectThrows( + ElasticsearchSecurityException.class, + () -> client().execute(GrantApiKeyAction.INSTANCE, grantApiKeyRequest).actionGet() + ); + assertThat(e.getMessage(), containsString("unable to authenticate user")); + } + { + // both JWT and client authentication are valid + GrantApiKeyRequest grantApiKeyRequest = getGrantApiKeyForJWT(getSignedJWT(jwtClaims.build()), sharedSecret); + CreateApiKeyResponse createApiKeyResponse = client().execute(GrantApiKeyAction.INSTANCE, grantApiKeyRequest).actionGet(); + assertThat(createApiKeyResponse.getId(), notNullValue()); + assertThat(createApiKeyResponse.getKey(), notNullValue()); + assertThat(createApiKeyResponse.getName(), is(grantApiKeyRequest.getApiKeyRequest().getName())); + final String base64ApiKeyKeyValue = Base64.getEncoder() + .encodeToString((createApiKeyResponse.getId() + ":" + createApiKeyResponse.getKey()).getBytes(StandardCharsets.UTF_8)); + AuthenticateResponse authenticateResponse = client().filterWithHeader(Map.of("Authorization", "ApiKey " + base64ApiKeyKeyValue)) + .execute(AuthenticateAction.INSTANCE, 
AuthenticateRequest.INSTANCE) + .actionGet(); + assertThat(authenticateResponse.authentication().getEffectiveSubject().getUser().principal(), is(subject)); + assertThat(authenticateResponse.authentication().getAuthenticationType(), is(Authentication.AuthenticationType.API_KEY)); + } + { + // client authentication is valid but the JWT is not + final SignedJWT wrongJWT; + if (randomBoolean()) { + wrongJWT = getSignedJWT(jwtClaims.build(), ("wrong key that's longer than 256 bits").getBytes(StandardCharsets.UTF_8)); + } else { + wrongJWT = getSignedJWT(jwtClaims.audience("wrong audience claim value").build()); + } + GrantApiKeyRequest grantApiKeyRequest = getGrantApiKeyForJWT(wrongJWT, sharedSecret); + ElasticsearchSecurityException e = expectThrows( + ElasticsearchSecurityException.class, + () -> client().execute(GrantApiKeyAction.INSTANCE, grantApiKeyRequest).actionGet() + ); + assertThat(e.getMessage(), containsString("unable to authenticate user")); + } + } + @SuppressWarnings("unchecked") public void testInvalidJWTDoesNotFallbackToAnonymousAccess() throws Exception { // anonymous access works when no valid Bearer @@ -327,73 +414,101 @@ public void testClientSecretRotation() throws Exception { 200, client.performRequest(getRequest(getSignedJWT(jwt2Claims.build()), jwt2SharedSecret)).getStatusLine().getStatusCode() ); - // update the secret in the secure settings - final MockSecureSettings newSecureSettings = new MockSecureSettings(); - newSecureSettings.setString( - "xpack.security.authc.realms.jwt." + realm0.name() + ".client_authentication.shared_secret", - "realm0updatedSecret" - ); - newSecureSettings.setString( - "xpack.security.authc.realms.jwt." + realm1.name() + ".client_authentication.shared_secret", - "realm1updatedSecret" - ); - newSecureSettings.setString( - "xpack.security.authc.realms.jwt." + realm2.name() + ".client_authentication.shared_secret", - "realm2updatedSecret" - ); - // reload settings final PluginsService plugins = getInstanceFromNode(PluginsService.class); final LocalStateSecurity localStateSecurity = plugins.filterPlugins(LocalStateSecurity.class).findFirst().get(); - for (Plugin p : localStateSecurity.plugins()) { - if (p instanceof Security securityPlugin) { - Settings.Builder newSettingsBuilder = Settings.builder().setSecureSettings(newSecureSettings); - securityPlugin.reload(newSettingsBuilder.build()); + // update the secret in the secure settings + try { + final MockSecureSettings newSecureSettings = new MockSecureSettings(); + newSecureSettings.setString( + "xpack.security.authc.realms.jwt." + realm0.name() + ".client_authentication.shared_secret", + "realm0updatedSecret" + ); + newSecureSettings.setString( + "xpack.security.authc.realms.jwt." + realm1.name() + ".client_authentication.shared_secret", + "realm1updatedSecret" + ); + newSecureSettings.setString( + "xpack.security.authc.realms.jwt." 
+ realm2.name() + ".client_authentication.shared_secret", + "realm2updatedSecret" + ); + // reload settings + for (Plugin p : localStateSecurity.plugins()) { + if (p instanceof Security securityPlugin) { + Settings.Builder newSettingsBuilder = Settings.builder().setSecureSettings(newSecureSettings); + securityPlugin.reload(newSettingsBuilder.build()); + } + } + // ensure the old value still works for realm 0 (default grace period) + assertEquals( + 200, + client.performRequest(getRequest(getSignedJWT(jwt0Claims.build()), jwt0SharedSecret)).getStatusLine().getStatusCode() + ); + assertEquals( + 200, + client.performRequest(getRequest(getSignedJWT(jwt0Claims.build()), "realm0updatedSecret")).getStatusLine().getStatusCode() + ); + // ensure the old value still works for realm 1 (explicit grace period) + assertEquals( + 200, + client.performRequest(getRequest(getSignedJWT(jwt1Claims.build()), jwt1SharedSecret)).getStatusLine().getStatusCode() + ); + assertEquals( + 200, + client.performRequest(getRequest(getSignedJWT(jwt1Claims.build()), "realm1updatedSecret")).getStatusLine().getStatusCode() + ); + // ensure the old value does not work for realm 2 (no grace period) + ResponseException exception = expectThrows( + ResponseException.class, + () -> client.performRequest(getRequest(getSignedJWT(jwt2Claims.build()), jwt2SharedSecret)).getStatusLine().getStatusCode() + ); + assertEquals(401, exception.getResponse().getStatusLine().getStatusCode()); + assertEquals( + 200, + client.performRequest(getRequest(getSignedJWT(jwt2Claims.build()), "realm2updatedSecret")).getStatusLine().getStatusCode() + ); + } finally { + // update them back to their original values + final MockSecureSettings newSecureSettings = new MockSecureSettings(); + newSecureSettings.setString( + "xpack.security.authc.realms.jwt." + realm0.name() + ".client_authentication.shared_secret", + jwt0SharedSecret + ); + newSecureSettings.setString( + "xpack.security.authc.realms.jwt." + realm1.name() + ".client_authentication.shared_secret", + jwt1SharedSecret + ); + newSecureSettings.setString( + "xpack.security.authc.realms.jwt." 
+ realm2.name() + ".client_authentication.shared_secret", + jwt2SharedSecret + ); + // reload settings + for (Plugin p : localStateSecurity.plugins()) { + if (p instanceof Security securityPlugin) { + Settings.Builder newSettingsBuilder = Settings.builder().setSecureSettings(newSecureSettings); + securityPlugin.reload(newSettingsBuilder.build()); + } } } - // ensure the old value still works for realm 0 (default grace period) - assertEquals( - 200, - client.performRequest(getRequest(getSignedJWT(jwt0Claims.build()), jwt0SharedSecret)).getStatusLine().getStatusCode() - ); - assertEquals( - 200, - client.performRequest(getRequest(getSignedJWT(jwt0Claims.build()), "realm0updatedSecret")).getStatusLine().getStatusCode() - ); - // ensure the old value still works for realm 1 (explicit grace period) - assertEquals( - 200, - client.performRequest(getRequest(getSignedJWT(jwt1Claims.build()), jwt1SharedSecret)).getStatusLine().getStatusCode() - ); - assertEquals( - 200, - client.performRequest(getRequest(getSignedJWT(jwt1Claims.build()), "realm1updatedSecret")).getStatusLine().getStatusCode() - ); - // ensure the old value does not work for realm 2 (no grace period) - ResponseException exception = expectThrows( - ResponseException.class, - () -> client.performRequest(getRequest(getSignedJWT(jwt2Claims.build()), jwt2SharedSecret)).getStatusLine().getStatusCode() - ); - assertEquals(401, exception.getResponse().getStatusLine().getStatusCode()); - assertEquals( - 200, - client.performRequest(getRequest(getSignedJWT(jwt2Claims.build()), "realm2updatedSecret")).getStatusLine().getStatusCode() - ); } - private SignedJWT getSignedJWT(JWTClaimsSet claimsSet) throws Exception { + private SignedJWT getSignedJWT(JWTClaimsSet claimsSet, byte[] hmacKeyBytes) throws Exception { JWSHeader jwtHeader = new JWSHeader.Builder(JWSAlgorithm.HS256).build(); - OctetSequenceKey.Builder jwt0signer = new OctetSequenceKey.Builder(jwtHmacKey.getBytes(StandardCharsets.UTF_8)); + OctetSequenceKey.Builder jwt0signer = new OctetSequenceKey.Builder(hmacKeyBytes); jwt0signer.algorithm(JWSAlgorithm.HS256); SignedJWT jwt = new SignedJWT(jwtHeader, claimsSet); jwt.sign(new MACSigner(jwt0signer.build())); return jwt; } - private Request getRequest(SignedJWT jwt, String shardSecret) { + private SignedJWT getSignedJWT(JWTClaimsSet claimsSet) throws Exception { + return getSignedJWT(claimsSet, jwtHmacKey.getBytes(StandardCharsets.UTF_8)); + } + + private Request getRequest(SignedJWT jwt, String sharedSecret) { Request request = new Request("GET", "/_security/_authenticate"); RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); options.addHeader("Authorization", "Bearer " + jwt.serialize()); - options.addHeader("ES-Client-Authentication", "SharedSecret " + shardSecret); + options.addHeader("ES-Client-Authentication", "SharedSecret " + sharedSecret); request.setOptions(options); return request; } @@ -446,9 +561,22 @@ private ThreadContext prepareThreadContext(SignedJWT signedJWT, String clientSec if (clientSecret != null) { threadContext.putHeader( JwtRealm.HEADER_CLIENT_AUTHENTICATION, - JwtRealm.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME + " " + clientSecret + JwtRealmSettings.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME + " " + clientSecret ); } return threadContext; } + + private static GrantApiKeyRequest getGrantApiKeyForJWT(SignedJWT signedJWT, String sharedSecret) { + GrantApiKeyRequest grantApiKeyRequest = new GrantApiKeyRequest(); + grantApiKeyRequest.getGrant().setType("access_token"); + 
grantApiKeyRequest.getGrant().setAccessToken(new SecureString(signedJWT.serialize().toCharArray())); + if (sharedSecret != null) { + grantApiKeyRequest.getGrant() + .setClientAuthentication(new Grant.ClientAuthentication("SharedSecret", new SecureString(sharedSecret.toCharArray()))); + } + grantApiKeyRequest.setRefreshPolicy(randomFrom(IMMEDIATE, WAIT_UNTIL)); + grantApiKeyRequest.getApiKeyRequest().setName(randomAlphaOfLength(8)); + return grantApiKeyRequest; + } } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java index 6220fc2ae2c2c..a5399e3699bb8 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java @@ -10,11 +10,10 @@ import org.elasticsearch.action.get.GetAction; import org.elasticsearch.action.get.MultiGetAction; import org.elasticsearch.action.get.MultiGetResponse; -import org.elasticsearch.action.search.MultiSearchResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.termvectors.MultiTermVectorsAction; import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; @@ -31,6 +30,7 @@ import static org.elasticsearch.test.SecurityTestsUtils.assertAuthorizationExceptionDefaultUsers; import static org.elasticsearch.test.SecurityTestsUtils.assertThrowsAuthorizationExceptionDefaultUsers; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoSearchHits; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.hasItems; import static org.hamcrest.core.IsEqual.equalTo; import static org.hamcrest.core.IsInstanceOf.instanceOf; @@ -133,12 +133,12 @@ public void testEmptyAuthorizedIndicesSearchForWildcardDisallowNoIndices() { public void testExplicitNonAuthorizedIndex() { createIndicesWithRandomAliases("test1", "test2", "index1"); - assertThrowsAuthorizationExceptionDefaultUsers(() -> trySearch("test*", "index1").get(), SearchAction.NAME); + assertThrowsAuthorizationExceptionDefaultUsers(() -> trySearch("test*", "index1").get(), TransportSearchAction.TYPE.name()); } public void testIndexNotFound() { createIndicesWithRandomAliases("test1", "test2", "index1"); - assertThrowsAuthorizationExceptionDefaultUsers(() -> trySearch("missing").get(), SearchAction.NAME); + assertThrowsAuthorizationExceptionDefaultUsers(() -> trySearch("missing").get(), TransportSearchAction.TYPE.name()); } public void testIndexNotFoundIgnoreUnavailable() { @@ -207,64 +207,72 @@ public void testMultiSearchUnauthorizedIndex() { // index1 is not authorized, only that specific item fails createIndicesWithRandomAliases("test1", "test2", "test3", "index1"); { - MultiSearchResponse multiSearchResponse = client().prepareMultiSearch() - .add(new SearchRequest(new String[] {})) - .add(new SearchRequest("index1")) - .get(); - assertEquals(2, multiSearchResponse.getResponses().length); - 
assertFalse(multiSearchResponse.getResponses()[0].isFailure()); - SearchResponse searchResponse = multiSearchResponse.getResponses()[0].getResponse(); - assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)); - assertReturnedIndices(searchResponse, "test1", "test2", "test3"); - assertTrue(multiSearchResponse.getResponses()[1].isFailure()); - Exception exception = multiSearchResponse.getResponses()[1].getFailure(); - assertThat(exception, instanceOf(ElasticsearchSecurityException.class)); - assertAuthorizationExceptionDefaultUsers(exception, SearchAction.NAME); + assertResponse( + client().prepareMultiSearch().add(new SearchRequest(new String[] {})).add(new SearchRequest("index1")), + multiSearchResponse -> { + assertEquals(2, multiSearchResponse.getResponses().length); + assertFalse(multiSearchResponse.getResponses()[0].isFailure()); + SearchResponse searchResponse = multiSearchResponse.getResponses()[0].getResponse(); + assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)); + assertReturnedIndices(searchResponse, "test1", "test2", "test3"); + assertTrue(multiSearchResponse.getResponses()[1].isFailure()); + Exception exception = multiSearchResponse.getResponses()[1].getFailure(); + assertThat(exception, instanceOf(ElasticsearchSecurityException.class)); + assertAuthorizationExceptionDefaultUsers(exception, TransportSearchAction.TYPE.name()); + } + ); } { - MultiSearchResponse multiSearchResponse = client().prepareMultiSearch() - .add(new SearchRequest(new String[] {})) - .add(new SearchRequest("index1").indicesOptions(IndicesOptions.fromOptions(true, true, true, randomBoolean()))) - .get(); - assertEquals(2, multiSearchResponse.getResponses().length); - assertFalse(multiSearchResponse.getResponses()[0].isFailure()); - SearchResponse searchResponse = multiSearchResponse.getResponses()[0].getResponse(); - assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)); - assertReturnedIndices(searchResponse, "test1", "test2", "test3"); - assertFalse(multiSearchResponse.getResponses()[1].isFailure()); - assertNoSearchHits(multiSearchResponse.getResponses()[1].getResponse()); + assertResponse( + client().prepareMultiSearch() + .add(new SearchRequest(new String[] {})) + .add(new SearchRequest("index1").indicesOptions(IndicesOptions.fromOptions(true, true, true, randomBoolean()))), + multiSearchResponse -> { + assertEquals(2, multiSearchResponse.getResponses().length); + assertFalse(multiSearchResponse.getResponses()[0].isFailure()); + SearchResponse searchResponse = multiSearchResponse.getResponses()[0].getResponse(); + assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)); + assertReturnedIndices(searchResponse, "test1", "test2", "test3"); + assertFalse(multiSearchResponse.getResponses()[1].isFailure()); + assertNoSearchHits(multiSearchResponse.getResponses()[1].getResponse()); + } + ); } } public void testMultiSearchMissingUnauthorizedIndex() { createIndicesWithRandomAliases("test1", "test2", "test3", "index1"); { - MultiSearchResponse multiSearchResponse = client().prepareMultiSearch() - .add(new SearchRequest(new String[] {})) - .add(new SearchRequest("missing")) - .get(); - assertEquals(2, multiSearchResponse.getResponses().length); - assertFalse(multiSearchResponse.getResponses()[0].isFailure()); - SearchResponse searchResponse = multiSearchResponse.getResponses()[0].getResponse(); - assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)); - assertReturnedIndices(searchResponse, "test1", "test2", 
"test3"); - assertTrue(multiSearchResponse.getResponses()[1].isFailure()); - Exception exception = multiSearchResponse.getResponses()[1].getFailure(); - assertThat(exception, instanceOf(ElasticsearchSecurityException.class)); - assertAuthorizationExceptionDefaultUsers(exception, SearchAction.NAME); + assertResponse( + client().prepareMultiSearch().add(new SearchRequest(new String[] {})).add(new SearchRequest("missing")), + multiSearchResponse -> { + assertEquals(2, multiSearchResponse.getResponses().length); + assertFalse(multiSearchResponse.getResponses()[0].isFailure()); + SearchResponse searchResponse = multiSearchResponse.getResponses()[0].getResponse(); + assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)); + assertReturnedIndices(searchResponse, "test1", "test2", "test3"); + assertTrue(multiSearchResponse.getResponses()[1].isFailure()); + Exception exception = multiSearchResponse.getResponses()[1].getFailure(); + assertThat(exception, instanceOf(ElasticsearchSecurityException.class)); + assertAuthorizationExceptionDefaultUsers(exception, TransportSearchAction.TYPE.name()); + } + ); } { - MultiSearchResponse multiSearchResponse = client().prepareMultiSearch() - .add(new SearchRequest(new String[] {})) - .add(new SearchRequest("missing").indicesOptions(IndicesOptions.fromOptions(true, true, true, randomBoolean()))) - .get(); - assertEquals(2, multiSearchResponse.getResponses().length); - assertFalse(multiSearchResponse.getResponses()[0].isFailure()); - SearchResponse searchResponse = multiSearchResponse.getResponses()[0].getResponse(); - assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)); - assertReturnedIndices(searchResponse, "test1", "test2", "test3"); - assertFalse(multiSearchResponse.getResponses()[1].isFailure()); - assertNoSearchHits(multiSearchResponse.getResponses()[1].getResponse()); + assertResponse( + client().prepareMultiSearch() + .add(new SearchRequest(new String[] {})) + .add(new SearchRequest("missing").indicesOptions(IndicesOptions.fromOptions(true, true, true, randomBoolean()))), + multiSearchResponse -> { + assertEquals(2, multiSearchResponse.getResponses().length); + assertFalse(multiSearchResponse.getResponses()[0].isFailure()); + SearchResponse searchResponse = multiSearchResponse.getResponses()[0].getResponse(); + assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)); + assertReturnedIndices(searchResponse, "test1", "test2", "test3"); + assertFalse(multiSearchResponse.getResponses()[1].isFailure()); + assertNoSearchHits(multiSearchResponse.getResponses()[1].getResponse()); + } + ); } } @@ -273,56 +281,68 @@ public void testMultiSearchMissingAuthorizedIndex() { createIndicesWithRandomAliases("test1", "test2", "test3", "index1"); { // default indices options for search request don't ignore unavailable indices, only individual items fail. 
- MultiSearchResponse multiSearchResponse = client().prepareMultiSearch() - .add(new SearchRequest(new String[] {})) - .add(new SearchRequest("test4")) - .get(); - assertFalse(multiSearchResponse.getResponses()[0].isFailure()); - assertReturnedIndices(multiSearchResponse.getResponses()[0].getResponse(), "test1", "test2", "test3"); - assertTrue(multiSearchResponse.getResponses()[1].isFailure()); - assertThat( - multiSearchResponse.getResponses()[1].getFailure().toString(), - equalTo("[test4] org.elasticsearch.index.IndexNotFoundException: no such index [test4]") + assertResponse( + client().prepareMultiSearch().add(new SearchRequest(new String[] {})).add(new SearchRequest("test4")), + multiSearchResponse -> { + assertFalse(multiSearchResponse.getResponses()[0].isFailure()); + assertReturnedIndices(multiSearchResponse.getResponses()[0].getResponse(), "test1", "test2", "test3"); + assertTrue(multiSearchResponse.getResponses()[1].isFailure()); + assertThat( + multiSearchResponse.getResponses()[1].getFailure().toString(), + equalTo("[test4] org.elasticsearch.index.IndexNotFoundException: no such index [test4]") + ); + } ); } { // we set ignore_unavailable and allow_no_indices to true, no errors returned, second item doesn't have hits. - MultiSearchResponse multiSearchResponse = client().prepareMultiSearch() - .add(new SearchRequest(new String[] {})) - .add(new SearchRequest("test4").indicesOptions(IndicesOptions.fromOptions(true, true, true, randomBoolean()))) - .get(); - assertReturnedIndices(multiSearchResponse.getResponses()[0].getResponse(), "test1", "test2", "test3"); - assertNoSearchHits(multiSearchResponse.getResponses()[1].getResponse()); + assertResponse( + client().prepareMultiSearch() + .add(new SearchRequest(new String[] {})) + .add(new SearchRequest("test4").indicesOptions(IndicesOptions.fromOptions(true, true, true, randomBoolean()))), + multiSearchResponse -> { + assertReturnedIndices(multiSearchResponse.getResponses()[0].getResponse(), "test1", "test2", "test3"); + assertNoSearchHits(multiSearchResponse.getResponses()[1].getResponse()); + } + ); } } public void testMultiSearchWildcard() { createIndicesWithRandomAliases("test1", "test2", "test3", "index1"); { - MultiSearchResponse multiSearchResponse = client().prepareMultiSearch() - .add(new SearchRequest(new String[] {})) - .add(new SearchRequest("index*")) - .get(); - assertEquals(2, multiSearchResponse.getResponses().length); - assertFalse(multiSearchResponse.getResponses()[0].isFailure()); - SearchResponse searchResponse = multiSearchResponse.getResponses()[0].getResponse(); - assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)); - assertReturnedIndices(searchResponse, "test1", "test2", "test3"); - assertNoSearchHits(multiSearchResponse.getResponses()[1].getResponse()); + assertResponse( + client().prepareMultiSearch().add(new SearchRequest(new String[] {})).add(new SearchRequest("index*")), + multiSearchResponse -> { + assertEquals(2, multiSearchResponse.getResponses().length); + assertFalse(multiSearchResponse.getResponses()[0].isFailure()); + SearchResponse searchResponse = multiSearchResponse.getResponses()[0].getResponse(); + assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)); + assertReturnedIndices(searchResponse, "test1", "test2", "test3"); + assertNoSearchHits(multiSearchResponse.getResponses()[1].getResponse()); + } + ); } { - MultiSearchResponse multiSearchResponse = client().prepareMultiSearch() - .add(new SearchRequest(new String[] {})) - .add(new 
SearchRequest("index*").indicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, randomBoolean()))) - .get(); - assertEquals(2, multiSearchResponse.getResponses().length); - assertFalse(multiSearchResponse.getResponses()[0].isFailure()); - SearchResponse searchResponse = multiSearchResponse.getResponses()[0].getResponse(); - assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)); - assertReturnedIndices(searchResponse, "test1", "test2", "test3"); - assertTrue(multiSearchResponse.getResponses()[1].isFailure()); - Exception exception = multiSearchResponse.getResponses()[1].getFailure(); - assertThat(exception, instanceOf(IndexNotFoundException.class)); + assertResponse( + client().prepareMultiSearch() + .add(new SearchRequest(new String[] {})) + .add( + new SearchRequest("index*").indicesOptions( + IndicesOptions.fromOptions(randomBoolean(), false, true, randomBoolean()) + ) + ), + multiSearchResponse -> { + assertEquals(2, multiSearchResponse.getResponses().length); + assertFalse(multiSearchResponse.getResponses()[0].isFailure()); + SearchResponse searchResponse = multiSearchResponse.getResponses()[0].getResponse(); + assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)); + assertReturnedIndices(searchResponse, "test1", "test2", "test3"); + assertTrue(multiSearchResponse.getResponses()[1].isFailure()); + Exception exception = multiSearchResponse.getResponses()[1].getFailure(); + assertThat(exception, instanceOf(IndexNotFoundException.class)); + } + ); } } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java index 57137075c5942..c5da26deaf03d 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java @@ -42,7 +42,7 @@ public void testScrollIsPerUser() throws Exception { final int numDocs = randomIntBetween(4, 16); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < docs.length; i++) { - docs[i] = client().prepareIndex("foo").setSource("doc", i); + docs[i] = prepareIndex("foo").setSource("doc", i); } indexRandom(true, docs); @@ -74,7 +74,7 @@ public void testScrollIsPerUser() throws Exception { public void testSearchAndClearScroll() throws Exception { IndexRequestBuilder[] docs = new IndexRequestBuilder[randomIntBetween(20, 100)]; for (int i = 0; i < docs.length; i++) { - docs[i] = client().prepareIndex("idx").setSource("field", "value"); + docs[i] = prepareIndex("idx").setSource("field", "value"); } indexRandom(true, docs); SearchResponse response = prepareSearch().setQuery(matchAllQuery()) diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/WriteActionsTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/WriteActionsTests.java index c55e5b409279a..8cd85297377ce 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/WriteActionsTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/WriteActionsTests.java @@ -51,17 +51,17 @@ protected String configRoles() { public void testIndex() { createIndex("test1", "index1"); - 
client().prepareIndex("test1").setId("id").setSource("field", "value").get(); + prepareIndex("test1").setId("id").setSource("field", "value").get(); assertThrowsAuthorizationExceptionDefaultUsers( - client().prepareIndex("index1").setId("id").setSource("field", "value")::get, + prepareIndex("index1").setId("id").setSource("field", "value")::get, BulkAction.NAME + "[s]" ); - client().prepareIndex("test4").setId("id").setSource("field", "value").get(); + prepareIndex("test4").setId("id").setSource("field", "value").get(); // the missing index gets automatically created (user has permissions for that), but indexing fails due to missing authorization assertThrowsAuthorizationExceptionDefaultUsers( - client().prepareIndex("missing").setId("id").setSource("field", "value")::get, + prepareIndex("missing").setId("id").setSource("field", "value")::get, BulkAction.NAME + "[s]" ); ensureGreen(); @@ -69,7 +69,7 @@ public void testIndex() { public void testDelete() { createIndex("test1", "index1"); - client().prepareIndex("test1").setId("id").setSource("field", "value").get(); + prepareIndex("test1").setId("id").setSource("field", "value").get(); assertEquals(RestStatus.OK, client().prepareDelete("test1", "id").get().status()); assertThrowsAuthorizationExceptionDefaultUsers(client().prepareDelete("index1", "id")::get, BulkAction.NAME + "[s]"); @@ -80,7 +80,7 @@ public void testDelete() { public void testUpdate() { createIndex("test1", "index1"); - client().prepareIndex("test1").setId("id").setSource("field", "value").get(); + prepareIndex("test1").setId("id").setSource("field", "value").get(); assertEquals( RestStatus.OK, client().prepareUpdate("test1", "id").setDoc(Requests.INDEX_CONTENT_TYPE, "field2", "value2").get().status() diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreCacheTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreCacheTests.java index 7e05121e10fe0..c0605d9e9380e 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreCacheTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreCacheTests.java @@ -122,8 +122,7 @@ public void testGetPrivilegesUsesCache() { ApplicationPrivilegeDescriptor[] privileges = new GetPrivilegesRequestBuilder(client).application("app-2") .privileges("write") - .execute() - .actionGet() + .get() .privileges(); assertEquals(1, privileges.length); @@ -138,18 +137,17 @@ public void testGetPrivilegesUsesCache() { .add(new DeleteRequest(SECURITY_MAIN_ALIAS, DOC_TYPE_VALUE + "_app-2:write")) .add(new DeleteRequest(SECURITY_MAIN_ALIAS, DOC_TYPE_VALUE + "_app-2:admin")) .setRefreshPolicy(IMMEDIATE) - .execute() - .actionGet() + .get() .hasFailures() ); // We can still get the privileges because it is cached - privileges = new GetPrivilegesRequestBuilder(client).application("app-2").privileges("read").execute().actionGet().privileges(); + privileges = new GetPrivilegesRequestBuilder(client).application("app-2").privileges("read").get().privileges(); assertEquals(1, privileges.length); // We can get all app-2 privileges because cache is keyed by application - privileges = new GetPrivilegesRequestBuilder(client).application("app-2").execute().actionGet().privileges(); + privileges = new GetPrivilegesRequestBuilder(client).application("app-2").get().privileges(); 
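
A recurring change in the test files below is collapsing `.execute().actionGet()` chains to `.get()`. Both forms issue the same request and block on the same future, which is why the rewrite is behavior-preserving; a sketch of the equivalence using a builder from this diff:

    // Both statements send the same request and wait for the same response:
    ApplicationPrivilegeDescriptor[] verbose = new GetPrivilegesRequestBuilder(client).application("app-2").execute().actionGet().privileges();
    ApplicationPrivilegeDescriptor[] concise = new GetPrivilegesRequestBuilder(client).application("app-2").get().privileges();

    // because ActionRequestBuilder defines get() roughly as (simplified sketch):
    // public Response get() {
    //     return execute().actionGet();
    // }
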
assertEquals(3, privileges.length); @@ -161,14 +159,14 @@ public void testGetPrivilegesUsesCache() { assertFalse(clearPrivilegesCacheResponse.hasFailures()); // app-2 is no longer found - privileges = new GetPrivilegesRequestBuilder(client).application("app-2").privileges("read").execute().actionGet().privileges(); + privileges = new GetPrivilegesRequestBuilder(client).application("app-2").privileges("read").get().privileges(); assertEquals(0, privileges.length); } public void testPopulationOfCacheWhenLoadingPrivilegesForAllApplications() { final Client client = client(); - ApplicationPrivilegeDescriptor[] privileges = new GetPrivilegesRequestBuilder(client).execute().actionGet().privileges(); + ApplicationPrivilegeDescriptor[] privileges = new GetPrivilegesRequestBuilder(client).get().privileges(); assertEquals(6, privileges.length); @@ -176,18 +174,15 @@ public void testPopulationOfCacheWhenLoadingPrivilegesForAllApplications() { deleteApplicationPrivilege("app-2", "read"); // A direct read should also get nothing - assertEquals( - 0, - new GetPrivilegesRequestBuilder(client).application("app-2").privileges("read").execute().actionGet().privileges().length - ); + assertEquals(0, new GetPrivilegesRequestBuilder(client).application("app-2").privileges("read").get().privileges().length); // The wildcard expression expansion should be invalidated - assertEquals(5, new GetPrivilegesRequestBuilder(client).execute().actionGet().privileges().length); + assertEquals(5, new GetPrivilegesRequestBuilder(client).get().privileges().length); // Now put it back and wildcard expression expansion should be invalidated again addApplicationPrivilege("app-2", "read", "r:e:f:g", "r:t:u:v"); - assertEquals(6, new GetPrivilegesRequestBuilder(client).execute().actionGet().privileges().length); + assertEquals(6, new GetPrivilegesRequestBuilder(client).get().privileges().length); // Delete the privilege again which invalidates the wildcard expansion deleteApplicationPrivilege("app-2", "read"); @@ -199,45 +194,31 @@ .add(new DeleteRequest(SECURITY_MAIN_ALIAS, DOC_TYPE_VALUE + "_app-1:write")) .add(new DeleteRequest(SECURITY_MAIN_ALIAS, DOC_TYPE_VALUE + "_app-2:write")) .setRefreshPolicy(IMMEDIATE) - .execute() - .actionGet() + .get() .hasFailures() ); // app-2 write privilege will not be found since cache is invalidated and backing document is gone - assertEquals( - 0, - new GetPrivilegesRequestBuilder(client).application("app-2").privileges("write").execute().actionGet().privileges().length - ); + assertEquals(0, new GetPrivilegesRequestBuilder(client).application("app-2").privileges("write").get().privileges().length); // app-1 write privilege is still found since it is cached even when the backing document is gone - assertEquals( - 1, - new GetPrivilegesRequestBuilder(client).application("app-1").privileges("write").execute().actionGet().privileges().length - ); + assertEquals(1, new GetPrivilegesRequestBuilder(client).application("app-1").privileges("write").get().privileges().length); } public void testSuffixWildcard() { final Client client = client(); // Populate the cache with suffix wildcard - assertEquals(6, new GetPrivilegesRequestBuilder(client).application("app-*").execute().actionGet().privileges().length); + assertEquals(6, new GetPrivilegesRequestBuilder(client).application("app-*").get().privileges().length); // Delete a backing document assertEquals( RestStatus.OK, - client.prepareDelete(SECURITY_MAIN_ALIAS, DOC_TYPE_VALUE + 
"_app-1:read") - .setRefreshPolicy(IMMEDIATE) - .execute() - .actionGet() - .status() + client.prepareDelete(SECURITY_MAIN_ALIAS, DOC_TYPE_VALUE + "_app-1:read").setRefreshPolicy(IMMEDIATE).get().status() ); // A direct get privilege with no wildcard should still hit the cache without needing it to be in the names cache - assertEquals( - 1, - new GetPrivilegesRequestBuilder(client).application("app-1").privileges("read").execute().actionGet().privileges().length - ); + assertEquals(1, new GetPrivilegesRequestBuilder(client).application("app-1").privileges("read").get().privileges().length); } public void testHasPrivileges() { @@ -383,8 +364,7 @@ private void deleteApplicationPrivilege(String applicationName, String privilege singleton(privilegeName), new DeletePrivilegesRequestBuilder(client()).application(applicationName) .privileges(new String[] { privilegeName }) - .execute() - .actionGet() + .get() .found() ); } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreSingleNodeTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreSingleNodeTests.java index a36461b8d91f3..a3bcc19bca160 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreSingleNodeTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreSingleNodeTests.java @@ -82,7 +82,7 @@ public void testResolvePrivilegesWorkWhenExpensiveQueriesAreDisabled() throws IO // Disable expensive query new ClusterUpdateSettingsRequestBuilder(client(), ClusterUpdateSettingsAction.INSTANCE).setTransientSettings( Settings.builder().put(ALLOW_EXPENSIVE_QUERIES.getKey(), false) - ).execute().actionGet(); + ).get(); try { // Prove that expensive queries are indeed disabled @@ -96,9 +96,7 @@ public void testResolvePrivilegesWorkWhenExpensiveQueriesAreDisabled() throws IO ); // Get privileges work with wildcard application name - final GetPrivilegesResponse getPrivilegesResponse = new GetPrivilegesRequestBuilder(client()).application("yourapp*") - .execute() - .actionGet(); + final GetPrivilegesResponse getPrivilegesResponse = new GetPrivilegesRequestBuilder(client()).application("yourapp*").get(); assertThat(getPrivilegesResponse.privileges(), arrayWithSize(4)); assertThat( Arrays.stream(getPrivilegesResponse.privileges()) @@ -124,13 +122,12 @@ public void testResolvePrivilegesWorkWhenExpensiveQueriesAreDisabled() throws IO } ] } - """), XContentType.JSON).execute().actionGet(); + """), XContentType.JSON).get(); new PutUserRequestBuilder(client(), PutUserAction.INSTANCE).username("app_user") .password(TEST_PASSWORD_SECURE_STRING, getFastStoredHashAlgoForTests()) .roles("app_user_role") - .execute() - .actionGet(); + .get(); Client appUserClient; appUserClient = client().filterWithHeader( @@ -192,7 +189,7 @@ public void testResolvePrivilegesWorkWhenExpensiveQueriesAreDisabled() throws IO // Reset setting since test suite expects things in a clean slate new ClusterUpdateSettingsRequestBuilder(client(), ClusterUpdateSettingsAction.INSTANCE).setTransientSettings( Settings.builder().putNull(ALLOW_EXPENSIVE_QUERIES.getKey()) - ).execute().actionGet(); + ).get(); } } } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileCancellationIntegTests.java 
b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileCancellationIntegTests.java index 84d4bc0700571..87a5146113f72 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileCancellationIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileCancellationIntegTests.java @@ -10,7 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.client.Cancellable; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -129,7 +129,11 @@ public void onFailure(Exception exception) { final List<String> taskActions = tasks.stream().map(Task::getAction).toList(); assertThat( taskActions, - hasItems(equalTo(SuggestProfilesAction.NAME), equalTo(SearchAction.NAME), startsWith(SearchAction.NAME)) + hasItems( + equalTo(SuggestProfilesAction.NAME), + equalTo(TransportSearchAction.TYPE.name()), + startsWith(TransportSearchAction.TYPE.name()) + ) ); assertThat(isShardSearchBlocked(), is(true)); tasks.forEach(t -> { diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileDomainIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileDomainIntegTests.java index 9c271a50fe49b..22cf6d4872909 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileDomainIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileDomainIntegTests.java @@ -452,8 +452,7 @@ private void indexDocument(String uid) { List.of("role1", "role2"), Instant.now().toEpochMilli() ); - client().prepareIndex(randomFrom(INTERNAL_SECURITY_PROFILE_INDEX_8, SECURITY_PROFILE_ALIAS)) - .setId("profile_" + uid) + prepareIndex(randomFrom(INTERNAL_SECURITY_PROFILE_INDEX_8, SECURITY_PROFILE_ALIAS)).setId("profile_" + uid) .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL) .setSource(source, XContentType.JSON) .get(); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java index 165235181de41..f2268a76221e3 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java @@ -119,9 +119,9 @@ public void testProfileIndexAutoCreation() { assertThat(getProfileIndexResponse().getIndices(), not(hasItemInArray(INTERNAL_SECURITY_PROFILE_INDEX_8))); // Trigger index creation by indexing - var indexResponse = client().prepareIndex(randomFrom(INTERNAL_SECURITY_PROFILE_INDEX_8, SECURITY_PROFILE_ALIAS)) - .setSource(Map.of("user_profile", Map.of("uid", randomAlphaOfLength(22)))) - .get(); + var indexResponse = prepareIndex(randomFrom(INTERNAL_SECURITY_PROFILE_INDEX_8, SECURITY_PROFILE_ALIAS)).setSource( Map.of("user_profile", Map.of("uid", randomAlphaOfLength(22))) ).get(); assertThat(indexResponse.status().getStatus(), equalTo(201)); final 
GetIndexResponse getIndexResponse = getProfileIndexResponse(); @@ -779,8 +779,7 @@ public void testHasPrivileges() { public void testGetUsersWithProfileUid() throws IOException { new ChangePasswordRequestBuilder(client()).username(ElasticUser.NAME) .password(TEST_PASSWORD_SECURE_STRING.clone().getChars(), Hasher.BCRYPT) - .execute() - .actionGet(); + .get(); final Profile elasticUserProfile = doActivateProfile(ElasticUser.NAME, TEST_PASSWORD_SECURE_STRING); final Profile nativeRacUserProfile = doActivateProfile(RAC_USER_NAME, NATIVE_RAC_USER_PASSWORD); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/SecurityDomainIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/SecurityDomainIntegTests.java index 0892c6f88873f..66c5b9fa02ab4 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/SecurityDomainIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/SecurityDomainIntegTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.xpack.core.security.action.token.CreateTokenRequest; import org.elasticsearch.xpack.core.security.action.token.CreateTokenResponse; import org.elasticsearch.xpack.core.security.action.token.RefreshTokenAction; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionParser; import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; import org.elasticsearch.xpack.security.authc.jwt.JwtRealm; @@ -200,7 +201,7 @@ public void testTokenRefreshUnderSameUsernameInDomain() throws IOException { var refreshTokenResponse = client().filterWithHeader( Map.of( JwtRealm.HEADER_CLIENT_AUTHENTICATION, - JwtRealm.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME + " " + HEADER_SECRET_JWT_REALM_1, + JwtRealmSettings.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME + " " + HEADER_SECRET_JWT_REALM_1, JwtRealm.HEADER_END_USER_AUTHENTICATION, JwtRealm.HEADER_END_USER_AUTHENTICATION_SCHEME + " " + HEADER_JWT_REALM_1 ) @@ -211,7 +212,7 @@ public void testTokenRefreshUnderSameUsernameInDomain() throws IOException { createTokenResponse = client().filterWithHeader( Map.of( JwtRealm.HEADER_CLIENT_AUTHENTICATION, - JwtRealm.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME + " " + HEADER_SECRET_JWT_REALM_1, + JwtRealmSettings.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME + " " + HEADER_SECRET_JWT_REALM_1, JwtRealm.HEADER_END_USER_AUTHENTICATION, JwtRealm.HEADER_END_USER_AUTHENTICATION_SCHEME + " " + HEADER_JWT_REALM_1 ) @@ -292,7 +293,7 @@ public void testTokenRefreshFailsForUsernameOutsideDomain() throws IOException { () -> client().filterWithHeader( Map.of( JwtRealm.HEADER_CLIENT_AUTHENTICATION, - JwtRealm.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME + " " + HEADER_SECRET_JWT_REALM_2, + JwtRealmSettings.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME + " " + HEADER_SECRET_JWT_REALM_2, JwtRealm.HEADER_END_USER_AUTHENTICATION, JwtRealm.HEADER_END_USER_AUTHENTICATION_SCHEME + " " + HEADER_JWT_REALM_2 ) @@ -310,7 +311,7 @@ public void testTokenRefreshFailsForUsernameOutsideDomain() throws IOException { .actionGet(); } - public void testDomainCaptureForApiKey() { + public void testDomainCaptureForApiKey() throws IOException { final CreateApiKeyRequest createApiKeyRequest = new CreateApiKeyRequest(randomAlphaOfLengthBetween(3, 8), null, null); 
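
The SecurityDomainIntegTests hunks below read the "SharedSecret" scheme from JwtRealmSettings.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME, the constant's new home (the JwtRealm copy is deleted later in this diff), while the header names themselves stay on JwtRealm. A sketch of the header pair such a request carries, using only constants that appear in this diff; HEADER_JWT_REALM_1 and HEADER_SECRET_JWT_REALM_1 stand in for a serialized JWT and the realm's shared secret:

    // "Authorization: Bearer <jwt>" authenticates the end user, while
    // "ES-Client-Authentication: SharedSecret <secret>" authenticates the client.
    Map<String, String> jwtHeaders = Map.of(
        JwtRealm.HEADER_END_USER_AUTHENTICATION,
        JwtRealm.HEADER_END_USER_AUTHENTICATION_SCHEME + " " + HEADER_JWT_REALM_1,
        JwtRealm.HEADER_CLIENT_AUTHENTICATION,
        JwtRealmSettings.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME + " " + HEADER_SECRET_JWT_REALM_1
    );
    var clientWithJwt = client().filterWithHeader(jwtHeaders);
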
createApiKeyRequest.setRefreshPolicy(randomFrom(NONE, WAIT_UNTIL, IMMEDIATE)); @@ -319,9 +320,7 @@ public void testDomainCaptureForApiKey() { ).execute(CreateApiKeyAction.INSTANCE, createApiKeyRequest).actionGet(); final XContentTestUtils.JsonMapView getResponseView = XContentTestUtils.createJsonMapView( - new ByteArrayInputStream( - client().prepareGet(SECURITY_MAIN_ALIAS, createApiKeyResponse.getId()).execute().actionGet().getSourceAsBytes() - ) + client().prepareGet(SECURITY_MAIN_ALIAS, createApiKeyResponse.getId()).get().getSourceAsBytesRef().streamInput() ); // domain info is captured @@ -337,10 +336,10 @@ (createApiKeyResponse.getId() + ":" + createApiKeyResponse.getKey()).getBytes(StandardCharsets.UTF_8) ) ) - ).admin().cluster().prepareHealth().execute().actionGet(); + ).admin().cluster().prepareHealth().get(); } - public void testDomainCaptureForServiceToken() { + public void testDomainCaptureForServiceToken() throws IOException { final String tokenName = randomAlphaOfLengthBetween(3, 8); final CreateServiceAccountTokenRequest createServiceTokenRequest = new CreateServiceAccountTokenRequest( "elastic", @@ -353,12 +352,10 @@ ).execute(CreateServiceAccountTokenAction.INSTANCE, createServiceTokenRequest).actionGet(); final XContentTestUtils.JsonMapView responseView = XContentTestUtils.createJsonMapView( - new ByteArrayInputStream( - client().prepareGet(SECURITY_MAIN_ALIAS, "service_account_token-elastic/fleet-server/" + tokenName) - .execute() - .actionGet() - .getSourceAsBytes() - ) + client().prepareGet(SECURITY_MAIN_ALIAS, "service_account_token-elastic/fleet-server/" + tokenName) + .get() + .getSourceAsBytesRef() + .streamInput() ); assertThat(responseView.get("creator.realm_domain"), equalTo(MY_DOMAIN_REALM_MAP)); @@ -368,8 +365,7 @@ .admin() .cluster() .prepareHealth() - .execute() - .actionGet(); + .get(); } private void assertAccessToken(CreateTokenResponse createTokenResponse) throws IOException { @@ -377,9 +373,8 @@ .admin() .cluster() .prepareHealth() - .execute() - .actionGet(); - final SearchResponse searchResponse = prepareSearch(SecuritySystemIndices.SECURITY_TOKENS_ALIAS).execute().actionGet(); + .get(); + final SearchResponse searchResponse = prepareSearch(SecuritySystemIndices.SECURITY_TOKENS_ALIAS).get(); final String encodedAuthentication = createTokenResponse.getAuthentication().encode(); for (SearchHit searchHit : searchResponse.getHits().getHits()) { diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerIntegTests.java index 4705361e51dbd..d3e1f736c1267 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerIntegTests.java @@ -125,22 +125,22 @@ public void testSecurityIndexSettingsCannotBeChanged() throws Exception { ) ); // create a new-style template - ComposableIndexTemplate cit = new ComposableIndexTemplate( - securityIndexNames, - new Template( - Settings.builder() - .put("index.refresh_interval", "1234s") - .put("index.priority", 
"9876") - .put("index.number_of_replicas", "8") - .build(), - null, - null - ), - null, - 4L, - 5L, - null - ); + ComposableIndexTemplate cit = ComposableIndexTemplate.builder() + .indexPatterns(securityIndexNames) + .template( + new Template( + Settings.builder() + .put("index.refresh_interval", "1234s") + .put("index.priority", "9876") + .put("index.number_of_replicas", "8") + .build(), + null, + null + ) + ) + .priority(4L) + .version(5L) + .build(); assertAcked( client().execute( PutComposableIndexTemplateAction.INSTANCE, diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java index 1113b128b74ed..d5cb0f165b89d 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java @@ -145,7 +145,7 @@ public void testThatInvalidDynamicIpFilterConfigurationIsRejected() { expectThrows( IllegalArgumentException.class, settingName, - () -> clusterAdmin().prepareUpdateSettings().setPersistentSettings(settings).execute().actionGet() + () -> clusterAdmin().prepareUpdateSettings().setPersistentSettings(settings).get() ).getMessage(), allOf(containsString("invalid IP filter"), containsString(invalidValue)) ); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java index ce4d17e7ad992..e11219bf6f93d 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java @@ -276,18 +276,10 @@ private CertificateInfo(PrivateKey key, Path keyPath, X509Certificate certificat this.certPath = certPath; } - private PrivateKey getKey() { - return key; - } - private Path getKeyPath() { return keyPath; } - private X509Certificate getCertificate() { - return certificate; - } - private Path getCertPath() { return certPath; } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/ssl/SslClientAuthenticationTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/ssl/SslClientAuthenticationTests.java index 038bb9c2c0079..bc01b0693af0a 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/ssl/SslClientAuthenticationTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/ssl/SslClientAuthenticationTests.java @@ -31,6 +31,7 @@ import java.nio.file.Path; import java.security.SecureRandom; import java.security.cert.CertPathBuilderException; +import java.security.cert.CertificateException; import java.util.HashSet; import java.util.List; @@ -103,11 +104,13 @@ public void testThatHttpFailsWithoutSslClientAuth() throws IOException { restClient.performRequest(new Request("GET", "/")); fail("Expected SSLHandshakeException"); } catch (IOException e) { - Throwable t = ExceptionsHelper.unwrap(e, CertPathBuilderException.class); - assertThat(t, instanceOf(CertPathBuilderException.class)); if (inFipsJvm()) { + Throwable t = 
ExceptionsHelper.unwrap(e, CertificateException.class); + assertThat(t, instanceOf(CertificateException.class)); assertThat(t.getMessage(), containsString("Unable to find certificate chain")); } else { + Throwable t = ExceptionsHelper.unwrap(e, CertPathBuilderException.class); + assertThat(t, instanceOf(CertPathBuilderException.class)); assertThat(t.getMessage(), containsString("unable to find valid certification path to requested target")); } } diff --git a/x-pack/plugin/security/src/main/java/module-info.java b/x-pack/plugin/security/src/main/java/module-info.java index 1ab0253fb7140..316f640b65476 100644 --- a/x-pack/plugin/security/src/main/java/module-info.java +++ b/x-pack/plugin/security/src/main/java/module-info.java @@ -34,7 +34,6 @@ requires org.opensaml.saml; requires org.opensaml.saml.impl; requires org.opensaml.security.impl; - requires org.opensaml.security; requires org.opensaml.xmlsec.impl; requires org.opensaml.xmlsec; @@ -82,4 +81,6 @@ provides org.elasticsearch.reservedstate.ReservedClusterStateHandlerProvider with org.elasticsearch.xpack.security.ReservedSecurityStateHandlerProvider; + + provides org.elasticsearch.features.FeatureSpecification with org.elasticsearch.xpack.security.SecurityFeatures; } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 02de32078469e..6d7f6fcd3822b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -53,6 +53,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeMetadata; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.http.HttpPreRequest; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.netty4.Netty4HttpServerTransport; @@ -625,6 +626,7 @@ public Collection<Object> createComponents(PluginServices services) { services.client(), services.threadPool(), services.clusterService(), + services.featureService(), services.resourceWatcherService(), services.scriptService(), services.xContentRegistry(), @@ -642,6 +644,7 @@ Collection<Object> createComponents( Client client, ThreadPool threadPool, ClusterService clusterService, + FeatureService featureService, ResourceWatcherService resourceWatcherService, ScriptService scriptService, NamedXContentRegistry xContentRegistry, @@ -868,8 +871,8 @@ Collection<Object> createComponents( client, systemIndices.getProfileIndexManager(), clusterService, - realms::getDomainConfig, - threadPool + featureService, + realms::getDomainConfig ); components.add(profileService); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatures.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatures.java new file mode 100644 index 0000000000000..d3c96107f3e15 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatures.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security; + +import org.elasticsearch.Version; +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.xpack.security.support.SecuritySystemIndices; + +import java.util.Map; + +public class SecurityFeatures implements FeatureSpecification { + @Override + public Map<NodeFeature, Version> getHistoricalFeatures() { + return Map.of(SecuritySystemIndices.SECURITY_PROFILE_ORIGIN_FEATURE, SecuritySystemIndices.VERSION_SECURITY_PROFILE_ORIGIN); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityUsageTransportAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityUsageTransportAction.java index 56a102be9587f..3fa5e0e5319c7 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityUsageTransportAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityUsageTransportAction.java @@ -8,13 +8,13 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; -import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.core.Nullable; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.protocol.xpack.XPackUsageRequest; @@ -113,109 +113,75 @@ protected void masterOperation( OperatorPrivileges.OPERATOR_PRIVILEGES_ENABLED.get(settings) ); - final AtomicReference<Map<String, Object>> rolesUsageRef = new AtomicReference<>(); - final AtomicReference<Map<String, Object>> roleMappingUsageRef = new AtomicReference<>(); - final AtomicReference<Map<String, Object>> realmsUsageRef = new AtomicReference<>(); - final AtomicReference<Map<String, Object>> domainsUsageRef = new AtomicReference<>(); - final AtomicReference<Map<String, Object>> userProfileUsageRef = new AtomicReference<>(); - final AtomicReference<Map<String, Object>> remoteClusterServerUsageRef = new AtomicReference<>(); + final AtomicReference<Map<String, Object>> rolesUsageRef = new AtomicReference<>(Map.of()); + final AtomicReference<Map<String, Object>> roleMappingUsageRef = new AtomicReference<>(Map.of()); + final AtomicReference<Map<String, Object>> realmsUsageRef = new AtomicReference<>(Map.of()); + final AtomicReference<Map<String, Object>> domainsUsageRef = new AtomicReference<>(Map.of()); + final AtomicReference<Map<String, Object>> userProfileUsageRef = new AtomicReference<>(Map.of()); + final AtomicReference<Map<String, Object>> remoteClusterServerUsageRef = new AtomicReference<>(Map.of()); final boolean enabled = XPackSettings.SECURITY_ENABLED.get(settings); - final CountDown countDown = new CountDown(5); - final Runnable doCountDown = () -> { - if (countDown.countDown()) { - var usage = new SecurityFeatureSetUsage( - enabled, - realmsUsageRef.get(), - rolesUsageRef.get(), - roleMappingUsageRef.get(), - sslUsage, - auditUsage, - ipFilterUsage, - anonymousUsage, - tokenServiceUsage, - apiKeyServiceUsage, - fips140Usage, - operatorPrivilegesUsage, - domainsUsageRef.get(), - userProfileUsageRef.get(), - remoteClusterServerUsageRef.get() - ); - listener.onResponse(new XPackUsageFeatureResponse(usage)); - } - }; - - final ActionListener<Map<String, Object>> rolesStoreUsageListener = ActionListener.wrap(rolesStoreUsage -> { - rolesUsageRef.set(rolesStoreUsage); - doCountDown.run(); - }, 
listener::onFailure); - - final ActionListener<Map<String, Object>> roleMappingStoreUsageListener = ActionListener.wrap(nativeRoleMappingStoreUsage -> { - Map<String, Object> usage = singletonMap("native", nativeRoleMappingStoreUsage); - roleMappingUsageRef.set(usage); - doCountDown.run(); - }, listener::onFailure); - - final ActionListener<Map<String, Object>> realmsUsageListener = ActionListener.wrap(realmsUsage -> { - realmsUsageRef.set(realmsUsage); - doCountDown.run(); - }, listener::onFailure); - - final ActionListener<Map<String, Object>> userProfileUsageListener = ActionListener.wrap(userProfileUsage -> { - userProfileUsageRef.set(userProfileUsage); - doCountDown.run(); - }, listener::onFailure); - - final ActionListener<Map<String, Object>> remoteClusterServerUsageListener = ActionListener.wrap(remoteClusterServerUsage -> { - remoteClusterServerUsageRef.set(remoteClusterServerUsage); - doCountDown.run(); - }, listener::onFailure); - - if (rolesStore == null || enabled == false) { - rolesStoreUsageListener.onResponse(Collections.emptyMap()); - } else { - rolesStore.usageStats(rolesStoreUsageListener); - } - if (roleMappingStore == null || enabled == false) { - roleMappingStoreUsageListener.onResponse(Collections.emptyMap()); - } else { - roleMappingStore.usageStats(roleMappingStoreUsageListener); - } - if (realms == null || enabled == false) { - domainsUsageRef.set(Map.of()); - realmsUsageListener.onResponse(Collections.emptyMap()); - } else { - domainsUsageRef.set(realms.domainUsageStats()); - realms.usageStats(realmsUsageListener); - } - if (profileService == null || enabled == false) { - userProfileUsageListener.onResponse(Map.of()); - } else { - profileService.usageStats(userProfileUsageListener); - } - if (apiKeyService == null || enabled == false) { - remoteClusterServerUsageListener.onResponse(Map.of()); - } else { - remoteClusterServerUsage(remoteClusterServerUsageListener); - } - } - private void remoteClusterServerUsage(ActionListener<Map<String, Object>> listener) { - apiKeyService.crossClusterApiKeyUsageStats( - ActionListener.wrap( - usage -> listener.onResponse( - Map.of( - "available", - ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE.checkWithoutTracking(licenseState), - "enabled", - RemoteClusterPortSettings.REMOTE_CLUSTER_SERVER_ENABLED.get(settings), - "api_keys", - usage + try ( + var listeners = new RefCountingListener( + listener.map( + ignored -> new XPackUsageFeatureResponse( + new SecurityFeatureSetUsage( + enabled, + realmsUsageRef.get(), + rolesUsageRef.get(), + roleMappingUsageRef.get(), + sslUsage, + auditUsage, + ipFilterUsage, + anonymousUsage, + tokenServiceUsage, + apiKeyServiceUsage, + fips140Usage, + operatorPrivilegesUsage, + domainsUsageRef.get(), + userProfileUsageRef.get(), + remoteClusterServerUsageRef.get() + ) ) - ), - listener::onFailure + ) ) - ); + ) { + if (enabled == false) { + return; + } + if (rolesStore != null) { + rolesStore.usageStats(listeners.acquire(rolesUsageRef::set)); + } + if (roleMappingStore != null) { + roleMappingStore.usageStats( + listeners.acquire(nativeRoleMappingStoreUsage -> roleMappingUsageRef.set(Map.of("native", nativeRoleMappingStoreUsage))) + ); + } + if (realms != null) { + domainsUsageRef.set(realms.domainUsageStats()); + realms.usageStats(listeners.acquire(realmsUsageRef::set)); + } + if (profileService != null) { + profileService.usageStats(listeners.acquire(userProfileUsageRef::set)); + } + if (apiKeyService != null) { + apiKeyService.crossClusterApiKeyUsageStats( + listeners.acquire( + usage -> remoteClusterServerUsageRef.set( + Map.of( + "available", + 
ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE.checkWithoutTracking(licenseState), + "enabled", + RemoteClusterPortSettings.REMOTE_CLUSTER_SERVER_ENABLED.get(settings), + "api_keys", + usage + ) + ) + ) + ); + } + } } static Map<String, Object> sslUsage(Settings settings) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/SecurityActionMapper.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/SecurityActionMapper.java index 6432ccae9bf57..f4ab867a39669 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/SecurityActionMapper.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/SecurityActionMapper.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.security.action; import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; -import org.elasticsearch.action.search.ClearScrollAction; import org.elasticsearch.action.search.ClearScrollRequest; +import org.elasticsearch.action.search.TransportClearScrollAction; import org.elasticsearch.action.support.single.shard.SingleShardRequest; import org.elasticsearch.transport.TransportRequest; @@ -29,7 +29,7 @@ public class SecurityActionMapper { */ public static String action(String action, TransportRequest request) { switch (action) { - case ClearScrollAction.NAME -> { + case TransportClearScrollAction.NAME -> { assert request instanceof ClearScrollRequest; boolean isClearAllScrollRequest = ((ClearScrollRequest) request).scrollIds().contains("_all"); if (isClearAllScrollRequest) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantAction.java index 7b65c01f7691e..881d1340ebc3f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantAction.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.security.action; -import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; @@ -56,12 +55,6 @@ public final void doExecute(Task task, Request request, ActionListener try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { final AuthenticationToken authenticationToken = request.getGrant().getAuthenticationToken(); assert authenticationToken != null : "authentication token must not be null"; - if (authenticationToken == null) { - listener.onFailure( - new ElasticsearchSecurityException("the grant type [{}] is not supported", request.getGrant().getType()) - ); - return; - } final String runAsUsername = request.getGrant().getRunAsUsername(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/privilege/TransportPutPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/privilege/TransportPutPrivilegesAction.java index da28d640c1dae..f13e599ef741c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/privilege/TransportPutPrivilegesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/privilege/TransportPutPrivilegesAction.java @@ -7,9 +7,11 @@ package org.elasticsearch.xpack.security.action.privilege; 
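
The masterOperation rewrite above swaps the hand-rolled CountDown bookkeeping for RefCountingListener: each optional stats source acquires a child listener, and the wrapped listener fires exactly once, after the try-with-resources block closes and every acquired child has completed. A condensed sketch of the pattern; buildUsageResponse() is a hypothetical stand-in for the SecurityFeatureSetUsage assembly shown above:

    try (var listeners = new RefCountingListener(listener.map(ignored -> buildUsageResponse()))) {
        if (enabled == false) {
            return; // closing the sole reference still completes the listener
        }
        if (rolesStore != null) {
            rolesStore.usageStats(listeners.acquire(rolesUsageRef::set)); // one ref per async source
        }
        // ... further sources, each behind listeners.acquire(...)
    } // last reference released -> listener completes with buildUsageResponse() (hypothetical helper)
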
import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; @@ -19,6 +21,8 @@ import org.elasticsearch.xpack.security.authz.store.NativePrivilegeStore; import java.util.Collections; +import java.util.List; +import java.util.Map; /** * Transport action to retrieve one or more application privileges from the security index */ @@ -45,8 +49,23 @@ protected void doExecute(Task task, final PutPrivilegesRequest request, final Ac this.privilegeStore.putPrivileges( request.getPrivileges(), request.getRefreshPolicy(), - ActionListener.wrap(created -> listener.onResponse(new PutPrivilegesResponse(created)), listener::onFailure) + ActionListener.wrap(result -> listener.onResponse(buildResponse(result)), listener::onFailure) ); } } + + private static PutPrivilegesResponse buildResponse(Map<String, Map<String, DocWriteResponse.Result>> result) { + final Map<String, List<String>> createdPrivilegesByApplicationName = Maps.newHashMapWithExpectedSize(result.size()); + result.forEach((appName, privileges) -> { + List<String> createdPrivileges = privileges.entrySet() + .stream() + .filter(e -> e.getValue() == DocWriteResponse.Result.CREATED) + .map(e -> e.getKey()) + .toList(); + if (createdPrivileges.isEmpty() == false) { + createdPrivilegesByApplicationName.put(appName, createdPrivileges); + } + }); + return new PutPrivilegesResponse(createdPrivilegesByApplicationName); + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index 7a3e0474eec51..2d700e23f127c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -29,8 +29,8 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.update.UpdateRequest; @@ -170,7 +170,8 @@ public class ApiKeyService { public static final Setting<TimeValue> DELETE_INTERVAL = Setting.timeSetting( "xpack.security.authc.api_key.delete.interval", TimeValue.timeValueHours(24L), - Property.NodeScope + Property.NodeScope, + Property.Dynamic ); public static final Setting<TimeValue> DELETE_RETENTION_PERIOD = Setting.positiveTimeSetting( "xpack.security.authc.api_key.delete.retention_period", @@ -209,14 +210,11 @@ public class ApiKeyService { private final boolean enabled; private final Settings settings; private final InactiveApiKeysRemover inactiveApiKeysRemover; - private final TimeValue deleteInterval; private final Cache<String, ListenableFuture<CachedApiKeyAuthenticationResult>> apiKeyAuthCache; private final Hasher cacheHasher; private final ThreadPool threadPool; private final ApiKeyDocCache apiKeyDocCache; - private volatile long lastExpirationRunMs; - // The API key secret is a Base64 encoded v4 
UUID without padding. The UUID is 128 bits, i.e. 16 byte, // which requires 22 digits of Base64 characters for encoding without padding. // See also UUIDs.randomBase64UUIDSecureString @@ -244,7 +242,6 @@ public ApiKeyService( this.enabled = XPackSettings.API_KEY_SERVICE_ENABLED_SETTING.get(settings); this.hasher = Hasher.resolve(PASSWORD_HASHING_ALGORITHM.get(settings)); this.settings = settings; - this.deleteInterval = DELETE_INTERVAL.get(settings); this.inactiveApiKeysRemover = new InactiveApiKeysRemover(settings, client, clusterService); this.threadPool = threadPool; this.cacheHasher = Hasher.resolve(CACHE_HASH_ALGO_SETTING.get(settings)); @@ -1873,15 +1870,12 @@ boolean isExpirationInProgress() { // pkg scoped for testing long lastTimeWhenApiKeysRemoverWasTriggered() { - return lastExpirationRunMs; + return inactiveApiKeysRemover.getLastRunTimestamp(); } private void maybeStartApiKeyRemover() { if (securityIndex.isAvailable(PRIMARY_SHARDS)) { - if (client.threadPool().relativeTimeInMillis() - lastExpirationRunMs > deleteInterval.getMillis()) { - inactiveApiKeysRemover.submit(client.threadPool()); - lastExpirationRunMs = client.threadPool().relativeTimeInMillis(); - } + inactiveApiKeysRemover.maybeSubmit(client.threadPool()); } } @@ -1945,7 +1939,7 @@ public void queryApiKeys(SearchRequest searchRequest, boolean withLimitedBy, Act () -> executeAsyncWithOrigin( client, SECURITY_ORIGIN, - SearchAction.INSTANCE, + TransportSearchAction.TYPE, searchRequest, ActionListener.wrap(searchResponse -> { final long total = searchResponse.getHits().getTotalHits().value; @@ -1996,6 +1990,7 @@ private ApiKey convertSearchHitToApiKeyInfo(SearchHit hit, boolean withLimitedBy Instant.ofEpochMilli(apiKeyDoc.creationTime), apiKeyDoc.expirationTime != -1 ? Instant.ofEpochMilli(apiKeyDoc.expirationTime) : null, apiKeyDoc.invalidated, + apiKeyDoc.invalidation != -1 ? Instant.ofEpochMilli(apiKeyDoc.invalidation) : null, (String) apiKeyDoc.creator.get("principal"), (String) apiKeyDoc.creator.get("realm"), metadata, @@ -2183,6 +2178,7 @@ public static final class ApiKeyDoc { builder.declareLong(constructorArg(), new ParseField("creation_time")); builder.declareLongOrNull(constructorArg(), -1, new ParseField("expiration_time")); builder.declareBoolean(constructorArg(), new ParseField("api_key_invalidated")); + builder.declareLong(optionalConstructorArg(), new ParseField("invalidation_time")); builder.declareString(constructorArg(), new ParseField("api_key_hash")); builder.declareStringOrNull(optionalConstructorArg(), new ParseField("name")); builder.declareInt(constructorArg(), new ParseField("version")); @@ -2198,6 +2194,7 @@ public static final class ApiKeyDoc { final long creationTime; final long expirationTime; final Boolean invalidated; + final long invalidation; final String hash; @Nullable final String name; @@ -2214,6 +2211,7 @@ public ApiKeyDoc( long creationTime, long expirationTime, Boolean invalidated, + @Nullable Long invalidation, String hash, @Nullable String name, int version, @@ -2232,6 +2230,7 @@ public ApiKeyDoc( this.creationTime = creationTime; this.expirationTime = expirationTime; this.invalidated = invalidated; + this.invalidation = (invalidation == null) ? 
-1 : invalidation; this.hash = hash; this.name = name; this.version = version; @@ -2253,6 +2252,7 @@ public CachedApiKeyDoc toCachedApiKeyDoc() { creationTime, expirationTime, invalidated, + invalidation, hash, name, version, @@ -2278,6 +2278,7 @@ public static final class CachedApiKeyDoc { final long creationTime; final long expirationTime; final Boolean invalidated; + final long invalidation; final String hash; final String name; final int version; @@ -2292,6 +2293,7 @@ public CachedApiKeyDoc( long creationTime, long expirationTime, Boolean invalidated, + long invalidation, String hash, String name, int version, @@ -2304,6 +2306,7 @@ public CachedApiKeyDoc( this.creationTime = creationTime; this.expirationTime = expirationTime; this.invalidated = invalidated; + this.invalidation = invalidation; this.hash = hash; this.name = name; this.version = version; @@ -2320,6 +2323,7 @@ public ApiKeyDoc toApiKeyDoc(BytesReference roleDescriptorsBytes, BytesReference creationTime, expirationTime, invalidated, + invalidation, hash, name, version, diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InactiveApiKeysRemover.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InactiveApiKeysRemover.java index 1d34371f6186b..14a77b7dfb244 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InactiveApiKeysRemover.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InactiveApiKeysRemover.java @@ -44,6 +44,8 @@ public final class InactiveApiKeysRemover extends AbstractRunnable { private final AtomicBoolean inProgress = new AtomicBoolean(false); private final TimeValue timeout; private final AtomicLong retentionPeriodInMs; + private final AtomicLong deleteIntervalInMs; + private volatile long lastRunMs; InactiveApiKeysRemover(Settings settings, Client client, ClusterService clusterService) { this.client = client; @@ -54,6 +56,12 @@ public final class InactiveApiKeysRemover extends AbstractRunnable { ApiKeyService.DELETE_RETENTION_PERIOD, newRetentionPeriod -> this.retentionPeriodInMs.set(newRetentionPeriod.getMillis()) ); + this.deleteIntervalInMs = new AtomicLong(ApiKeyService.DELETE_INTERVAL.get(settings).getMillis()); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer( + ApiKeyService.DELETE_INTERVAL, + newDeleteInterval -> this.deleteIntervalInMs.set(newDeleteInterval.getMillis()) + ); } @Override @@ -85,12 +93,19 @@ public void doRun() { }, this::onFailure)); } - void submit(ThreadPool threadPool) { - if (inProgress.compareAndSet(false, true)) { - threadPool.executor(Names.GENERIC).submit(this); + void maybeSubmit(ThreadPool threadPool) { + if (threadPool.relativeTimeInMillis() - lastRunMs > deleteIntervalInMs.get()) { + if (inProgress.compareAndSet(false, true)) { + threadPool.executor(Names.GENERIC).submit(this); + } + lastRunMs = client.threadPool().relativeTimeInMillis(); } } + long getLastRunTimestamp() { + return lastRunMs; + } + private static void debugDbqResponse(BulkByScrollResponse response) { if (logger.isDebugEnabled()) { logger.debug( diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkSetLoader.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkSetLoader.java index 063cc85ea0187..0266fc7488e29 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkSetLoader.java +++ 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkSetLoader.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import org.elasticsearch.xpack.core.ssl.SSLService; import java.io.IOException; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkValidateUtil.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkValidateUtil.java index 89391f91a2731..cc07b7dfa8381 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkValidateUtil.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkValidateUtil.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import java.nio.charset.StandardCharsets; import java.security.PublicKey; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticator.java index e122ecf4eb1ab..9c1deff9ed891 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticator.java @@ -18,6 +18,7 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtAuthenticationToken; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import org.elasticsearch.xpack.core.ssl.SSLService; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java index eb2517f8e54e4..d8b0575c54d36 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java @@ -30,7 +30,9 @@ import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtAuthenticationToken; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import org.elasticsearch.xpack.core.security.authc.support.CachingRealm; import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.core.security.support.CacheIteratorHelper; @@ -64,7 +66,6 @@ public class JwtRealm extends Realm implements CachingRealm, Releasable { public static final String HEADER_END_USER_AUTHENTICATION = "Authorization"; public static final String HEADER_CLIENT_AUTHENTICATION = "ES-Client-Authentication"; public static final String HEADER_END_USER_AUTHENTICATION_SCHEME = "Bearer"; - public 
static final String HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME = "SharedSecret"; private final Cache jwtCache; private final CacheIteratorHelper jwtCacheHelper; @@ -193,7 +194,7 @@ public AuthenticationToken token(final ThreadContext threadContext) { final SecureString clientCredentials = JwtUtil.getHeaderValue( threadContext, JwtRealm.HEADER_CLIENT_AUTHENTICATION, - JwtRealm.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME, + JwtRealmSettings.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME, true ); return new JwtAuthenticationToken(signedJWT, JwtUtil.sha256(userCredentials), clientCredentials); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtSignatureValidator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtSignatureValidator.java index b1ee1b77998ec..e183ee7d73ac2 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtSignatureValidator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtSignatureValidator.java @@ -35,13 +35,14 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import org.elasticsearch.xpack.core.ssl.SSLService; import java.util.Arrays; import java.util.List; import java.util.stream.Stream; -import static org.elasticsearch.xpack.security.authc.jwt.JwtUtil.toStringRedactSignature; +import static org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil.toStringRedactSignature; public interface JwtSignatureValidator extends Releasable { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java index 0f34850b861b7..e637bda19d886 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java @@ -91,9 +91,9 @@ import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings; import org.elasticsearch.xpack.core.ssl.SSLService; -import org.elasticsearch.xpack.security.authc.jwt.JwtUtil; import java.io.IOException; import java.net.URI; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java index 475102e1a2152..798396c249e75 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java @@ -27,6 +27,7 @@ import static org.elasticsearch.synonyms.SynonymsManagementAPIService.SYNONYMS_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.APM_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.ASYNC_SEARCH_ORIGIN; +import static 
org.elasticsearch.xpack.core.ClientHelper.CONNECTORS_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.DEPRECATION_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.ENRICH_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.ENT_SEARCH_ORIGIN; @@ -151,6 +152,7 @@ public static void switchUserBasedOnActionOriginAndExecute( case LOGSTASH_MANAGEMENT_ORIGIN: case FLEET_ORIGIN: case ENT_SEARCH_ORIGIN: + case CONNECTORS_ORIGIN: case INFERENCE_ORIGIN: case TASKS_ORIGIN: // TODO use a more limited user for tasks securityContext.executeAsInternalUser(InternalUsers.XPACK_USER, version, consumer); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/PreAuthorizationUtils.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/PreAuthorizationUtils.java index 67a4d14177fac..aeb6bfc8de796 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/PreAuthorizationUtils.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/PreAuthorizationUtils.java @@ -10,8 +10,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchTransportService; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.authz.AuthorizationEngine.AuthorizationContext; import org.elasticsearch.xpack.core.security.authz.AuthorizationEngine.AuthorizationInfo; @@ -38,14 +38,13 @@ public final class PreAuthorizationUtils { * on a remote node as they only access a subset of resources. 
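For orientation: the CHILD_ACTIONS_PRE_AUTHORIZED_BY_PARENT table declared just below keys a parent action name to the child actions that may skip a second full authorization pass. A minimal sketch of how such a table is typically consulted; the helper name is hypothetical and not part of this change:

    // Returns true if the child action inherits the parent's authorization result.
    static boolean isPreAuthorized(String parentAction, String childAction) {
        final Set<String> children = CHILD_ACTIONS_PRE_AUTHORIZED_BY_PARENT.get(parentAction);
        return children != null && children.contains(childAction);
    }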
*/ public static final Map<String, Set<String>> CHILD_ACTIONS_PRE_AUTHORIZED_BY_PARENT = Map.of( - SearchAction.NAME, + TransportSearchAction.TYPE.name(), Set.of( SearchTransportService.FREE_CONTEXT_ACTION_NAME, SearchTransportService.DFS_ACTION_NAME, SearchTransportService.QUERY_ACTION_NAME, SearchTransportService.QUERY_ID_ACTION_NAME, SearchTransportService.FETCH_ID_ACTION_NAME, - SearchTransportService.QUERY_CAN_MATCH_NAME, SearchTransportService.QUERY_CAN_MATCH_NODE_NAME ) ); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java index 1bb638795615a..cdbb690098cbe 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java @@ -20,15 +20,16 @@ import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkShardRequest; +import org.elasticsearch.action.bulk.SimulateBulkAction; import org.elasticsearch.action.delete.DeleteAction; import org.elasticsearch.action.get.MultiGetAction; import org.elasticsearch.action.index.IndexAction; -import org.elasticsearch.action.search.ClearScrollAction; -import org.elasticsearch.action.search.ClosePointInTimeAction; -import org.elasticsearch.action.search.MultiSearchAction; -import org.elasticsearch.action.search.SearchScrollAction; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.SearchTransportService; +import org.elasticsearch.action.search.TransportClearScrollAction; +import org.elasticsearch.action.search.TransportClosePointInTimeAction; +import org.elasticsearch.action.search.TransportMultiSearchAction; +import org.elasticsearch.action.search.TransportSearchScrollAction; import org.elasticsearch.action.termvectors.MultiTermVectorsAction; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.common.Strings; @@ -251,6 +252,7 @@ static boolean checkSameUserPermissions(String action, TransportRequest request, private static boolean shouldAuthorizeIndexActionNameOnly(String action, TransportRequest request) { switch (action) { case BulkAction.NAME: + case SimulateBulkAction.NAME: case IndexAction.NAME: case DeleteAction.NAME: case INDEX_SUB_REQUEST_PRIMARY: @@ -259,7 +261,7 @@ private static boolean shouldAuthorizeIndexActionNameOnly(String action, Transpo case DELETE_SUB_REQUEST_REPLICA: case MultiGetAction.NAME: case MultiTermVectorsAction.NAME: - case MultiSearchAction.NAME: + case TransportMultiSearchAction.NAME: case "indices:data/read/mpercolate": case "indices:data/read/msearch/template": case "indices:data/read/search/template": @@ -324,7 +326,7 @@ public void authorizeIndexAction( // if the action is a search scroll action, we first authorize that the user can execute the action for some // index and if they cannot, we can fail the request early before we allow the execution of the action and in // turn the shard actions - if (SearchScrollAction.NAME.equals(action)) { + if (TransportSearchScrollAction.TYPE.name().equals(action)) { ActionRunnable.supply(listener.delegateFailureAndWrap((l, parsedScrollId) -> { if (parsedScrollId.hasLocalIndices()) { l.onResponse( @@ -356,7 +358,7 @@ public void authorizeIndexAction( // the same as the user that submitted the original request so no additional checks are 
needed here. listener.onResponse(IndexAuthorizationResult.ALLOW_NO_INDICES); } - } else if (action.equals(ClosePointInTimeAction.NAME)) { + } else if (action.equals(TransportClosePointInTimeAction.TYPE.name())) { listener.onResponse(IndexAuthorizationResult.ALLOW_NO_INDICES); } else { assert false @@ -946,12 +948,12 @@ public int hashCode() { } private static boolean isScrollRelatedAction(String action) { - return action.equals(SearchScrollAction.NAME) + return action.equals(TransportSearchScrollAction.TYPE.name()) || action.equals(SearchTransportService.FETCH_ID_SCROLL_ACTION_NAME) || action.equals(SearchTransportService.QUERY_FETCH_SCROLL_ACTION_NAME) || action.equals(SearchTransportService.QUERY_SCROLL_ACTION_NAME) || action.equals(SearchTransportService.FREE_CONTEXT_SCROLL_ACTION_NAME) - || action.equals(ClearScrollAction.NAME) + || action.equals(TransportClearScrollAction.NAME) || action.equals("indices:data/read/sql/close_cursor") || action.equals(SearchTransportService.CLEAR_SCROLL_CONTEXTS_ACTION_NAME); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java index 0e509c8af26b0..004874f5b63b9 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java @@ -11,7 +11,10 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.action.support.GroupedActionListener; @@ -57,6 +60,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -366,52 +370,79 @@ protected void cacheFetchedDescriptors( public void putPrivileges( Collection<ApplicationPrivilegeDescriptor> privileges, WriteRequest.RefreshPolicy refreshPolicy, - ActionListener<Map<String, List<String>>> listener + ActionListener<Map<String, Map<String, DocWriteResponse.Result>>> listener ) { - securityIndexManager.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { - ActionListener<DocWriteResponse> groupListener = new GroupedActionListener<>( - privileges.size(), - ActionListener.wrap((Collection<DocWriteResponse> responses) -> { - final Map<String, List<String>> createdNames = responses.stream() - .filter(r -> r.getResult() == DocWriteResponse.Result.CREATED) - .map(r -> r.getId()) - .map(NativePrivilegeStore::nameFromDocId) - .collect(TUPLES_TO_MAP); - clearCaches( - listener, - privileges.stream().map(ApplicationPrivilegeDescriptor::getApplication).collect(Collectors.toUnmodifiableSet()), - createdNames - ); - }, listener::onFailure) - ); + if (privileges.isEmpty()) { + listener.onResponse(Map.of()); + return; + } + + final BulkRequestBuilder bulkRequestBuilder = client.prepareBulk(); + bulkRequestBuilder.setRefreshPolicy(refreshPolicy); + + try { for (ApplicationPrivilegeDescriptor privilege : privileges) { - innerPutPrivilege(privilege, refreshPolicy, groupListener); + bulkRequestBuilder.add(preparePutPrivilege(privilege)); } } catch
(IOException e) { + listener.onFailure(e); + } + + securityIndexManager.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + ClientHelper.executeAsyncWithOrigin( + client.threadPool().getThreadContext(), + SECURITY_ORIGIN, + bulkRequestBuilder.request(), + ActionListener.wrap(bulkResponse -> handleBulkResponse(bulkResponse, listener), ex -> { + logger.warn(Strings.format("Failed to write application privileges to %s", securityIndexManager.aliasName()), ex); + listener.onFailure(ex); + }), + client::bulk + ); }); } - private void innerPutPrivilege( - ApplicationPrivilegeDescriptor privilege, - WriteRequest.RefreshPolicy refreshPolicy, - ActionListener<DocWriteResponse> listener - ) { + private IndexRequest preparePutPrivilege(ApplicationPrivilegeDescriptor privilege) throws IOException { try { final String name = privilege.getName(); final XContentBuilder xContentBuilder = privilege.toXContent(jsonBuilder(), true); - ClientHelper.executeAsyncWithOrigin( - client.threadPool().getThreadContext(), - SECURITY_ORIGIN, - client.prepareIndex(SECURITY_MAIN_ALIAS) - .setId(toDocId(privilege.getApplication(), name)) - .setSource(xContentBuilder) - .setRefreshPolicy(refreshPolicy) - .request(), - listener, - client::index - ); - } catch (Exception e) { - logger.warn("Failed to put privilege {} - {}", Strings.toString(privilege), e.toString()); - listener.onFailure(e); + return client.prepareIndex(SECURITY_MAIN_ALIAS) + .setId(toDocId(privilege.getApplication(), name)) + .setSource(xContentBuilder) + .request(); + } catch (IOException e) { + logger.warn("Failed to build application privilege {} - {}", Strings.toString(privilege), e.toString()); + throw e; + } + } + + private void handleBulkResponse(BulkResponse bulkResponse, ActionListener<Map<String, Map<String, DocWriteResponse.Result>>> listener) { + ElasticsearchException failure = null; + final Map<String, Map<String, DocWriteResponse.Result>> privilegeResultByAppName = new HashMap<>(); + for (var item : bulkResponse.getItems()) { + if (item.isFailed()) { + if (failure == null) { + failure = new ElasticsearchException("Failed to put application privileges", item.getFailure().getCause()); + } else { + failure.addSuppressed(item.getFailure().getCause()); + } + } else { + final Tuple<String, String> name = nameFromDocId(item.getId()); + final String appName = name.v1(); + final String privilegeName = name.v2(); + + var privileges = privilegeResultByAppName.get(appName); + if (privileges == null) { + privileges = new HashMap<>(); + privilegeResultByAppName.put(appName, privileges); + } + privileges.put(privilegeName, item.getResponse().getResult()); + } + } + if (failure != null) { + listener.onFailure(failure); + } else { + clearCaches(listener, privilegeResultByAppName.keySet(), privilegeResultByAppName); } } @@ -465,7 +496,7 @@ public void onFailure(Exception e) { logger.error("unable to clear application privileges and role cache", e); listener.onFailure( new ElasticsearchException( - "clearing the application privileges and role cache failed. 
" + "please clear the caches manually", + "clearing the application privileges and role cache failed, please clear the caches manually", e ) ); @@ -473,6 +504,9 @@ public void onFailure(Exception e) { }); } + /** + * @return A Tuple of (application-name, privilege-name) + */ private static Tuple nameFromDocId(String docId) { final String name = docId.substring(DOC_TYPE_VALUE.length() + 1); assert name != null && name.length() > 0 : "Invalid name '" + name + "'"; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java index 054583d94cbb1..d572932670f23 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java @@ -24,11 +24,11 @@ import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.MultiGetItemResponse; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.search.MultiSearchAction; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.MultiSearchResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.TransportMultiSearchAction; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.update.UpdateAction; @@ -45,6 +45,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.MultiMatchQueryBuilder; @@ -53,7 +54,6 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.tasks.TaskId; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -102,7 +102,7 @@ import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.PRIMARY_SHARDS; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.SEARCH_SHARDS; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_PROFILE_ALIAS; -import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.VERSION_SECURITY_PROFILE_ORIGIN; +import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_PROFILE_ORIGIN_FEATURE; public class ProfileService { private static final Logger logger = LogManager.getLogger(ProfileService.class); @@ -116,8 +116,8 @@ public class ProfileService { private final Client client; private final SecurityIndexManager profileIndex; private final ClusterService clusterService; + private final FeatureService featureService; private final Function domainConfigLookup; - private final ThreadPool threadPool; public ProfileService( Settings settings, @@ -125,16 +125,16 @@ public ProfileService( Client client, SecurityIndexManager profileIndex, 
ClusterService clusterService, - Function domainConfigLookup, - ThreadPool threadPool + FeatureService featureService, + Function domainConfigLookup ) { this.settings = settings; this.clock = clock; this.client = client; this.profileIndex = profileIndex; this.clusterService = clusterService; + this.featureService = featureService; this.domainConfigLookup = domainConfigLookup; - this.threadPool = threadPool; } public void getProfiles(List uids, Set dataKeys, ActionListener> listener) { @@ -271,7 +271,7 @@ public void suggestProfile(SuggestProfilesRequest request, TaskId parentTaskId, () -> executeAsyncWithOrigin( client, getActionOrigin(), - SearchAction.INSTANCE, + TransportSearchAction.TYPE, searchRequest, ActionListener.wrap(searchResponse -> { final SearchHits searchHits = searchResponse.getHits(); @@ -373,7 +373,7 @@ public void usageStats(ActionListener> listener) { () -> executeAsyncWithOrigin( client, getActionOrigin(), - MultiSearchAction.INSTANCE, + TransportMultiSearchAction.TYPE, multiSearchRequest, ActionListener.wrap(multiSearchResponse -> { final MultiSearchResponse.Item[] items = multiSearchResponse.getResponses(); @@ -553,7 +553,7 @@ private void searchVersionedDocumentsForSubjects( executeAsyncWithOrigin( client, getActionOrigin(), - MultiSearchAction.INSTANCE, + TransportMultiSearchAction.TYPE, multiSearchRequest, ActionListener.wrap( multiSearchResponse -> listener.onResponse(convertSubjectMultiSearchResponse(multiSearchResponse, subjects)), @@ -956,7 +956,7 @@ void doUpdate(UpdateRequest updateRequest, ActionListener listen private String getActionOrigin() { // profile origin and user is not available before v8.3.0 - if (clusterService.state().nodes().getMinNodeVersion().onOrAfter(VERSION_SECURITY_PROFILE_ORIGIN)) { + if (featureService.clusterHasFeature(clusterService.state(), SECURITY_PROFILE_ORIGIN_FEATURE)) { return SECURITY_PROFILE_ORIGIN; } else { return SECURITY_ORIGIN; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGrantApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGrantApiKeyAction.java index 46d2fa4605f9d..d07c5529e3ca1 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGrantApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGrantApiKeyAction.java @@ -20,9 +20,11 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.security.action.Grant; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequestBuilder; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; @@ -44,6 +46,18 @@ @ServerlessScope(Scope.INTERNAL) public final class RestGrantApiKeyAction extends ApiKeyBaseRestHandler implements RestRequestFilter { + private static final ConstructingObjectParser CLIENT_AUTHENTICATION_PARSER = + new ConstructingObjectParser<>("client_authentication", a -> new Grant.ClientAuthentication((String) a[0], (SecureString) a[1])); + static { + 
CLIENT_AUTHENTICATION_PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("scheme")); + CLIENT_AUTHENTICATION_PARSER.declareField( + ConstructingObjectParser.constructorArg(), + RestGrantApiKeyAction::getSecureString, + new ParseField("value"), + ObjectParser.ValueType.STRING + ); + } + static final ObjectParser PARSER = new ObjectParser<>("grant_api_key_request", GrantApiKeyRequest::new); static { PARSER.declareString((req, str) -> req.getGrant().setType(str), new ParseField("grant_type")); @@ -61,6 +75,11 @@ public final class RestGrantApiKeyAction extends ApiKeyBaseRestHandler implement ObjectParser.ValueType.STRING ); PARSER.declareString((req, str) -> req.getGrant().setRunAsUsername(str), new ParseField("run_as")); + PARSER.declareObject( + (req, clientAuthentication) -> req.getGrant().setClientAuthentication(clientAuthentication), + CLIENT_AUTHENTICATION_PARSER, + new ParseField("client_authentication") + ); PARSER.declareObject( (req, api) -> req.setApiKeyRequest(api), (parser, ignore) -> CreateApiKeyRequestBuilder.parse(parser), @@ -88,11 +107,15 @@ public String getName() { return "xpack_security_grant_api_key"; } + public static GrantApiKeyRequest fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + @Override protected RestChannelConsumer innerPrepareRequest(final RestRequest request, final NodeClient client) throws IOException { String refresh = request.param("refresh"); try (XContentParser parser = request.contentParser()) { - final GrantApiKeyRequest grantRequest = PARSER.parse(parser, null); + final GrantApiKeyRequest grantRequest = fromXContent(parser); if (refresh != null) { grantRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.parse(refresh)); } else { @@ -115,7 +138,7 @@ protected RestChannelConsumer innerPrepareRequest(final RestRequest request, fin } } - private static final Set FILTERED_FIELDS = Set.of("password", "access_token"); + private static final Set FILTERED_FIELDS = Set.of("password", "access_token", "client_authentication.value"); @Override public Set getFilteredFields() { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/ApiKeyBoolQueryBuilder.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/ApiKeyBoolQueryBuilder.java index fe793c0d90286..28ecd5ffe5b57 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/ApiKeyBoolQueryBuilder.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/ApiKeyBoolQueryBuilder.java @@ -37,6 +37,7 @@ public class ApiKeyBoolQueryBuilder extends BoolQueryBuilder { "doc_type", "name", "api_key_invalidated", + "invalidation_time", "creation_time", "expiration_time" ); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/ApiKeyFieldNameTranslators.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/ApiKeyFieldNameTranslators.java index 5392399dde201..4d7cc9d978cd4 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/ApiKeyFieldNameTranslators.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/ApiKeyFieldNameTranslators.java @@ -24,6 +24,7 @@ public class ApiKeyFieldNameTranslators { new ExactFieldNameTranslator(s -> "creation_time", "creation"), new ExactFieldNameTranslator(s -> "expiration_time", "expiration"), new ExactFieldNameTranslator(s -> "api_key_invalidated", 
"invalidated"), + new ExactFieldNameTranslator(s -> "invalidation_time", "invalidation"), new PrefixFieldNameTranslator(s -> "metadata_flattened" + s.substring(8), "metadata.") ); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java index 22aa7e296d6ed..6c21b1f275f24 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java @@ -15,6 +15,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.indices.ExecutorNames; import org.elasticsearch.indices.SystemIndexDescriptor; @@ -51,6 +52,7 @@ public class SecuritySystemIndices { public static final String INTERNAL_SECURITY_PROFILE_INDEX_8 = ".security-profile-8"; public static final String SECURITY_PROFILE_ALIAS = ".security-profile"; public static final Version VERSION_SECURITY_PROFILE_ORIGIN = Version.V_8_3_0; + public static final NodeFeature SECURITY_PROFILE_ORIGIN_FEATURE = new NodeFeature("security.security_profile_origin"); private static final Logger logger = LogManager.getLogger(SecuritySystemIndices.class); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java index 123814ec38c6f..53dd31fe46793 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java @@ -543,7 +543,7 @@ public static class ProfileSecuredRequestHandler imp AbstractRunnable getReceiveRunnable(T request, TransportChannel channel, Task task) { final Runnable releaseRequest = new RunOnce(request::decRef); - request.incRef(); + request.mustIncRef(); return new AbstractRunnable() { @Override public boolean isForceExecution() { diff --git a/x-pack/plugin/security/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/x-pack/plugin/security/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification new file mode 100644 index 0000000000000..f26b117ebc472 --- /dev/null +++ b/x-pack/plugin/security/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification @@ -0,0 +1,8 @@ +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0; you may not use this file except in compliance with the Elastic License +# 2.0. 
+# + +org.elasticsearch.xpack.security.SecurityFeatures diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java index 4e6ea34d3dc9e..1ed371229d074 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java @@ -362,7 +362,7 @@ protected void createIndicesWithRandomAliases(String... indices) { } for (String index : indices) { - client().prepareIndex(index).setSource("field", "value").get(); + prepareIndex(index).setSource("field", "value").get(); } refresh(indices); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/TestSecurityClient.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/TestSecurityClient.java index 13c8612487d89..4888c0f4c9721 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/TestSecurityClient.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/TestSecurityClient.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.ObjectPath; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -51,6 +52,7 @@ import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.test.rest.ESRestTestCase.entityAsMap; +import static org.elasticsearch.test.rest.ESRestTestCase.setIgnoredErrorResponseCodes; public class TestSecurityClient { @@ -395,7 +397,7 @@ public TokenInvalidation invalidateTokens(String requestBody) throws IOException final Request request = new Request(HttpDelete.METHOD_NAME, endpoint); // This API returns 404 (with the same body as a 200 response) if there's nothing to delete. 
// RestClient will throw an exception on 404, but we don't want that, we want to parse the body and return it - request.addParameter("ignore", "404"); + setIgnoredErrorResponseCodes(request, RestStatus.NOT_FOUND); request.setJsonEntity(requestBody); final Map responseBody = entityAsMap(execute(request)); final List> errors = (List>) responseBody.get("error_details"); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 74c5a17844892..3f29944631d42 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; @@ -207,6 +208,7 @@ private Collection createComponentsUtil(Settings settings) throws Except client, threadPool, clusterService, + new FeatureService(List.of(new SecurityFeatures())), mock(ResourceWatcherService.class), mock(ScriptService.class), xContentRegistry(), diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/SecurityActionMapperTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/SecurityActionMapperTests.java index 696932931b576..eca1503019dd2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/SecurityActionMapperTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/SecurityActionMapperTests.java @@ -8,8 +8,8 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; -import org.elasticsearch.action.search.ClearScrollAction; import org.elasticsearch.action.search.ClearScrollRequest; +import org.elasticsearch.action.search.TransportClearScrollAction; import org.elasticsearch.action.support.single.shard.SingleShardRequest; import org.elasticsearch.test.ESTestCase; @@ -42,7 +42,7 @@ public void testThatAllOrdinaryActionsRemainTheSame() { String randomAction = actionNameBuilder.toString(); assumeFalse( "Random action is one of the known mapped values: " + randomAction, - randomAction.equals(ClearScrollAction.NAME) + randomAction.equals(TransportClearScrollAction.NAME) || randomAction.equals(AnalyzeAction.NAME) || randomAction.equals(AnalyzeAction.NAME + "[s]") ); @@ -56,7 +56,10 @@ public void testClearScroll() { for (int i = 0; i < scrollIds; i++) { clearScrollRequest.addScrollId(randomAlphaOfLength(randomIntBetween(1, 30))); } - assertThat(SecurityActionMapper.action(ClearScrollAction.NAME, clearScrollRequest), equalTo(ClearScrollAction.NAME)); + assertThat( + SecurityActionMapper.action(TransportClearScrollAction.NAME, clearScrollRequest), + equalTo(TransportClearScrollAction.NAME) + ); } public void testClearScrollAll() { @@ -70,7 +73,7 @@ public void testClearScrollAll() { Collections.shuffle(clearScrollRequest.getScrollIds(), random()); assertThat( - SecurityActionMapper.action(ClearScrollAction.NAME, clearScrollRequest), + SecurityActionMapper.action(TransportClearScrollAction.NAME, clearScrollRequest), 
equalTo(SecurityActionMapper.CLUSTER_PERMISSION_SCROLL_CLEAR_ALL_NAME) ); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyActionTests.java index c8c996f37ebfc..5b077c615f9eb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyActionTests.java @@ -10,8 +10,10 @@ import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.rest.RestStatus; @@ -19,6 +21,7 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.Grant; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; @@ -70,7 +73,6 @@ public class TransportGrantApiKeyActionTests extends ESTestCase { private ApiKeyUserRoleDescriptorResolver resolver; private AuthenticationService authenticationService; private ThreadPool threadPool; - private TransportService transportService; private AuthorizationService authorizationService; @Before @@ -87,7 +89,7 @@ public void setupMocks() throws Exception { action = new TransportGrantApiKeyAction( transportService, - mock(ActionFilters.class), + new ActionFilters(Set.of()), threadContext, authenticationService, authorizationService, @@ -136,12 +138,70 @@ public void testGrantApiKeyWithUsernamePassword() { setupApiKeyServiceWithRoleResolution(authentication, request, response); final PlainActionFuture future = new PlainActionFuture<>(); - action.doExecute(null, request, future); + action.execute(null, request, future); assertThat(future.actionGet(), sameInstance(response)); verify(authorizationService, never()).authorize(any(), any(), any(), anyActionListener()); } + public void testClientAuthenticationForNonJWTFails() { + final GrantApiKeyRequest request = mockRequest(); + request.getGrant().setType("access_token"); + request.getGrant().setAccessToken(new SecureString("obviously a non JWT token".toCharArray())); + // only JWT tokens support client authentication + request.getGrant().setClientAuthentication(new Grant.ClientAuthentication(new SecureString("whatever".toCharArray()))); + + final PlainActionFuture future = new PlainActionFuture<>(); + action.execute(null, request, future); + + final ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, future::actionGet); + assertThat(exception, throwableWithMessage("[client_authentication] not supported with the supplied access_token type")); + + verifyNoMoreInteractions(authenticationService); + 
verifyNoMoreInteractions(authorizationService); + verifyNoMoreInteractions(apiKeyService); + verifyNoMoreInteractions(resolver); + } + + public void testClientAuthenticationWithUsernamePasswordFails() { + final GrantApiKeyRequest request = mockRequest(); + request.getGrant().setType("password"); + request.getGrant().setUsername(randomAlphaOfLengthBetween(4, 12)); + request.getGrant().setPassword(new SecureString(randomAlphaOfLengthBetween(8, 24).toCharArray())); + // username & password does not support client authentication + request.getGrant().setClientAuthentication(new Grant.ClientAuthentication(new SecureString("whatever".toCharArray()))); + + final PlainActionFuture future = new PlainActionFuture<>(); + action.execute(null, request, future); + + final ActionRequestValidationException exception = expectThrows(ActionRequestValidationException.class, future::actionGet); + assertThat(exception.getMessage(), containsString("[client_authentication] is not supported for grant_type [password]")); + + verifyNoMoreInteractions(authenticationService); + verifyNoMoreInteractions(authorizationService); + verifyNoMoreInteractions(apiKeyService); + verifyNoMoreInteractions(resolver); + } + + public void testUnsupportedClientAuthenticationScheme() { + final GrantApiKeyRequest request = mockRequest(); + request.getGrant().setType("access_token"); + request.getGrant().setAccessToken(new SecureString("some token".toCharArray())); + request.getGrant() + .setClientAuthentication(new Grant.ClientAuthentication("wrong scheme", new SecureString("whatever".toCharArray()))); + + final PlainActionFuture future = new PlainActionFuture<>(); + action.execute(null, request, future); + + final ActionRequestValidationException exception = expectThrows(ActionRequestValidationException.class, future::actionGet); + assertThat(exception.getMessage(), containsString("[client_authentication.scheme] must be set to [SharedSecret]")); + + verifyNoMoreInteractions(authenticationService); + verifyNoMoreInteractions(authorizationService); + verifyNoMoreInteractions(apiKeyService); + verifyNoMoreInteractions(resolver); + } + public void testGrantApiKeyWithAccessToken() { final String username = randomAlphaOfLengthBetween(4, 12); final Authentication authentication = buildAuthentication(username); @@ -173,7 +233,7 @@ public void testGrantApiKeyWithAccessToken() { setupApiKeyServiceWithRoleResolution(authentication, request, response); final PlainActionFuture future = new PlainActionFuture<>(); - action.doExecute(null, request, future); + action.execute(null, request, future); assertThat(future.actionGet(), sameInstance(response)); verify(authorizationService, never()).authorize(any(), any(), any(), anyActionListener()); @@ -227,7 +287,7 @@ public void testGrantApiKeyWithInvalidatedCredentials() { setupApiKeyServiceWithRoleResolution(authentication, request, response); final PlainActionFuture future = new PlainActionFuture<>(); - action.doExecute(null, request, future); + action.execute(null, request, future); final ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, future::actionGet); assertThat(exception, throwableWithMessage("authentication failed for testing")); @@ -285,7 +345,7 @@ public void testGrantWithRunAs() { .authorize(eq(authentication), eq(AuthenticateAction.NAME), any(AuthenticateRequest.class), anyActionListener()); final PlainActionFuture future = new PlainActionFuture<>(); - action.doExecute(null, request, future); + action.execute(null, request, future); 
assertThat(future.actionGet(), sameInstance(response)); verify(authorizationService).authorize( @@ -343,7 +403,7 @@ public void testGrantWithRunAsFailureDueToAuthorization() { .authorize(eq(authentication), eq(AuthenticateAction.NAME), any(AuthenticateRequest.class), anyActionListener()); final PlainActionFuture future = new PlainActionFuture<>(); - action.doExecute(null, request, future); + action.execute(null, request, future); assertThat(expectThrows(ElasticsearchSecurityException.class, future::actionGet), sameInstance(e)); verify(authorizationService).authorize( @@ -376,7 +436,7 @@ public void testGrantFailureDueToUnsupportedRunAs() { .authenticate(eq(GrantApiKeyAction.NAME), same(request), any(AuthenticationToken.class), anyActionListener()); final PlainActionFuture future = new PlainActionFuture<>(); - action.doExecute(null, request, future); + action.execute(null, request, future); final ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, future::actionGet); assertThat(e.getMessage(), containsString("the provided grant credentials do not support run-as")); @@ -402,7 +462,9 @@ private CreateApiKeyResponse mockResponse(GrantApiKeyRequest request) { private GrantApiKeyRequest mockRequest() { final String keyName = randomAlphaOfLengthBetween(6, 32); final GrantApiKeyRequest request = new GrantApiKeyRequest(); - request.setApiKeyRequest(new CreateApiKeyRequest(keyName, List.of(), null)); + CreateApiKeyRequest createApiKeyRequest = new CreateApiKeyRequest(keyName, List.of(), null); + createApiKeyRequest.setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values())); + request.setApiKeyRequest(createApiKeyRequest); return request; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/enrollment/TransportNodeEnrollmentActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/enrollment/TransportNodeEnrollmentActionTests.java index 5c02f29ab7742..26bb64eb0b073 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/enrollment/TransportNodeEnrollmentActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/enrollment/TransportNodeEnrollmentActionTests.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.security.action.enrollment; +import org.elasticsearch.Build; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; @@ -102,7 +102,7 @@ public void testDoExecute() throws Exception { DiscoveryNode n = node(i); nodeInfos.add( new NodeInfo( - Version.CURRENT, + Build.current().version(), TransportVersion.current(), IndexVersion.current(), Map.of(), diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java index a748de0c89413..28f8a77c422f7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java @@ -24,15 +24,15 @@ import org.elasticsearch.action.index.IndexAction; import 
org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.search.ClearScrollAction; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.ClearScrollResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchResponseSections; -import org.elasticsearch.action.search.SearchScrollAction; import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.action.search.TransportClearScrollAction; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchScrollAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.update.UpdateRequest; @@ -193,7 +193,7 @@ protected void } BulkResponse response = new BulkResponse(bulkItemResponses, 1); listener.onResponse((Response) response); - } else if (SearchAction.NAME.equals(action.name())) { + } else if (TransportSearchAction.TYPE.name().equals(action.name())) { assertThat(request, instanceOf(SearchRequest.class)); SearchRequest searchRequest = (SearchRequest) request; searchRequests.add(searchRequest); @@ -217,7 +217,7 @@ protected void null ); listener.onResponse((Response) response); - } else if (SearchScrollAction.NAME.equals(action.name())) { + } else if (TransportSearchScrollAction.TYPE.name().equals(action.name())) { assertThat(request, instanceOf(SearchScrollRequest.class)); final SearchHit[] hits = new SearchHit[0]; final SearchResponse response = new SearchResponse( @@ -239,7 +239,7 @@ protected void null ); listener.onResponse((Response) response); - } else if (ClearScrollAction.NAME.equals(action.name())) { + } else if (TransportClearScrollAction.NAME.equals(action.name())) { assertThat(request, instanceOf(ClearScrollRequest.class)); ClearScrollRequest scrollRequest = (ClearScrollRequest) request; assertEquals("_scrollId1", scrollRequest.getScrollIds().get(0)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailFilterTests.java index 9da5de1a5dd9b..4f4e35e1a30c7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailFilterTests.java @@ -2893,6 +2893,7 @@ private ClusterSettings mockClusterSettings() { LoggingAuditTrail.registerSettings(settingsList); settingsList.addAll(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); settingsList.add(ApiKeyService.DELETE_RETENTION_PERIOD); + settingsList.add(ApiKeyService.DELETE_INTERVAL); return new ClusterSettings(settings, new HashSet<>(settingsList)); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java index 9ec2e8be383b6..47811fe8a3e7e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java @@ -31,7 +31,6 @@ import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.TimeValue; @@ -145,11 +144,9 @@ import java.nio.file.Files; import java.nio.file.Path; import java.time.Clock; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.LinkedHashSet; import java.util.List; import java.util.Locale; @@ -327,7 +324,8 @@ public void init() throws Exception { LoggingAuditTrail.FILTER_POLICY_IGNORE_INDICES, LoggingAuditTrail.FILTER_POLICY_IGNORE_ACTIONS, Loggers.LOG_LEVEL_SETTING, - ApiKeyService.DELETE_RETENTION_PERIOD + ApiKeyService.DELETE_RETENTION_PERIOD, + ApiKeyService.DELETE_INTERVAL ) ); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); @@ -2978,13 +2976,6 @@ public void clearCredentials() {} }; } - private ClusterSettings mockClusterSettings() { - final List> settingsList = new ArrayList<>(); - LoggingAuditTrail.registerSettings(settingsList); - settingsList.addAll(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - return new ClusterSettings(settings, new HashSet<>(settingsList)); - } - private Authentication createApiKeyAuthenticationAndMaybeWithRunAs(Authentication authentication) throws Exception { authentication = createApiKeyAuthentication(apiKeyService, authentication); if (randomBoolean()) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index 40965a8735fee..fe433560652da 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -28,10 +28,10 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.update.UpdateAction; import org.elasticsearch.action.update.UpdateRequestBuilder; @@ -271,7 +271,7 @@ public void testGetApiKeys() throws Exception { when(clock.instant()).thenReturn(Instant.ofEpochMilli(now)); final Settings settings = Settings.builder().put(XPackSettings.API_KEY_SERVICE_ENABLED_SETTING.getKey(), true).build(); when(client.threadPool()).thenReturn(threadPool); - SearchRequestBuilder searchRequestBuilder = Mockito.spy(new SearchRequestBuilder(client, SearchAction.INSTANCE)); + SearchRequestBuilder searchRequestBuilder = Mockito.spy(new SearchRequestBuilder(client, TransportSearchAction.TYPE)); when(client.prepareSearch(eq(SECURITY_MAIN_ALIAS))).thenReturn(searchRequestBuilder); final ApiKeyService service = 
createApiKeyService(settings); final AtomicReference searchRequest = new AtomicReference<>(); @@ -332,7 +332,7 @@ public void testGetApiKeys() throws Exception { public void testInvalidateApiKeys() throws Exception { final Settings settings = Settings.builder().put(XPackSettings.API_KEY_SERVICE_ENABLED_SETTING.getKey(), true).build(); when(client.threadPool()).thenReturn(threadPool); - SearchRequestBuilder searchRequestBuilder = Mockito.spy(new SearchRequestBuilder(client, SearchAction.INSTANCE)); + SearchRequestBuilder searchRequestBuilder = Mockito.spy(new SearchRequestBuilder(client, TransportSearchAction.TYPE)); when(client.prepareSearch(eq(SECURITY_MAIN_ALIAS))).thenReturn(searchRequestBuilder); final ApiKeyService service = createApiKeyService(settings); final AtomicReference searchRequest = new AtomicReference<>(); @@ -406,7 +406,7 @@ public void testInvalidateApiKeysWillSetInvalidatedFlagAndRecordTimestamp() { // Mock the search request for keys to invalidate when(client.threadPool()).thenReturn(threadPool); - when(client.prepareSearch(eq(SECURITY_MAIN_ALIAS))).thenReturn(new SearchRequestBuilder(client, SearchAction.INSTANCE)); + when(client.prepareSearch(eq(SECURITY_MAIN_ALIAS))).thenReturn(new SearchRequestBuilder(client, TransportSearchAction.TYPE)); doAnswer(invocation -> { final var listener = (ActionListener) invocation.getArguments()[1]; final var searchHit = new SearchHit(docId, apiKeyId); @@ -478,8 +478,8 @@ public void testInvalidateApiKeysWillSetInvalidatedFlagAndRecordTimestamp() { return null; }).when(client).execute(eq(ClearSecurityCacheAction.INSTANCE), any(ClearSecurityCacheRequest.class), anyActionListener()); - final long invalidationTime = randomMillisUpToYear9999(); - when(clock.instant()).thenReturn(Instant.ofEpochMilli(invalidationTime)); + final long invalidation = randomMillisUpToYear9999(); + when(clock.instant()).thenReturn(Instant.ofEpochMilli(invalidation)); final ApiKeyService service = createApiKeyService(); PlainActionFuture future = new PlainActionFuture<>(); service.invalidateApiKeys(null, null, null, new String[] { apiKeyId }, future); @@ -488,12 +488,8 @@ public void testInvalidateApiKeysWillSetInvalidatedFlagAndRecordTimestamp() { assertThat(invalidateApiKeyResponse.getInvalidatedApiKeys(), equalTo(List.of(apiKeyId))); verify(updateRequestBuilder).setDoc( argThat( - (ArgumentMatcher>) argument -> Map.of( - "api_key_invalidated", - true, - "invalidation_time", - invalidationTime - ).equals(argument) + (ArgumentMatcher>) argument -> Map.of("api_key_invalidated", true, "invalidation_time", invalidation) + .equals(argument) ) ); } @@ -743,7 +739,7 @@ public void testCrossClusterApiKeyUsageStats() { final Instant now = Instant.now(); when(clock.instant()).thenReturn(now); when(client.threadPool()).thenReturn(threadPool); - SearchRequestBuilder searchRequestBuilder = Mockito.spy(new SearchRequestBuilder(client, SearchAction.INSTANCE)); + SearchRequestBuilder searchRequestBuilder = Mockito.spy(new SearchRequestBuilder(client, TransportSearchAction.TYPE)); when(client.prepareSearch(eq(SECURITY_MAIN_ALIAS))).thenReturn(searchRequestBuilder); final List searchHits = new ArrayList<>(); @@ -958,7 +954,7 @@ public void testValidateApiKey() throws Exception { Hasher hasher = getFastStoredHashAlgoForTests(); final char[] hash = hasher.hash(new SecureString(apiKey.toCharArray())); - ApiKeyDoc apiKeyDoc = buildApiKeyDoc(hash, -1, false); + ApiKeyDoc apiKeyDoc = buildApiKeyDoc(hash, -1, false, -1); ApiKeyService service = createApiKeyService(Settings.EMPTY); 
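In these tests the new fourth argument to buildApiKeyDoc is the invalidation timestamp, with -1 as the "never invalidated" sentinel. The production code in the ApiKeyService hunk above maps it back to a nullable Instant the same way expiration_time is already handled:

    // -1 means the document carries no timestamp for this field
    Instant expiration = apiKeyDoc.expirationTime != -1 ? Instant.ofEpochMilli(apiKeyDoc.expirationTime) : null;
    Instant invalidation = apiKeyDoc.invalidation != -1 ? Instant.ofEpochMilli(apiKeyDoc.invalidation) : null;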
PlainActionFuture<AuthenticationResult<User>> future = new PlainActionFuture<>(); @@ -985,7 +981,7 @@ public void testValidateApiKey() throws Exception { assertThat(result.getMetadata().get(AuthenticationField.API_KEY_CREATOR_REALM_NAME), is("realm1")); assertThat(result.getMetadata().get(API_KEY_TYPE_KEY), is(apiKeyDoc.type.value())); - apiKeyDoc = buildApiKeyDoc(hash, Clock.systemUTC().instant().plus(1L, ChronoUnit.HOURS).toEpochMilli(), false); + apiKeyDoc = buildApiKeyDoc(hash, Clock.systemUTC().instant().plus(1L, ChronoUnit.HOURS).toEpochMilli(), false, -1); future = new PlainActionFuture<>(); service.validateApiKeyCredentials( apiKeyId, @@ -1010,7 +1006,7 @@ public void testValidateApiKey() throws Exception { assertThat(result.getMetadata().get(AuthenticationField.API_KEY_CREATOR_REALM_NAME), is("realm1")); assertThat(result.getMetadata().get(API_KEY_TYPE_KEY), is(apiKeyDoc.type.value())); - apiKeyDoc = buildApiKeyDoc(hash, Clock.systemUTC().instant().minus(1L, ChronoUnit.HOURS).toEpochMilli(), false); + apiKeyDoc = buildApiKeyDoc(hash, Clock.systemUTC().instant().minus(1L, ChronoUnit.HOURS).toEpochMilli(), false, -1); future = new PlainActionFuture<>(); service.validateApiKeyCredentials( apiKeyId, @@ -1024,7 +1020,7 @@ public void testValidateApiKey() throws Exception { assertFalse(result.isAuthenticated()); // key is invalidated - apiKeyDoc = buildApiKeyDoc(hash, -1, true); + apiKeyDoc = buildApiKeyDoc(hash, -1, true, randomLongBetween(0, 3000000000L)); service.getApiKeyAuthCache().put(apiKeyId, new ListenableFuture<>()); assertNotNull(service.getApiKeyAuthCache().get(apiKeyId)); future = new PlainActionFuture<>(); @@ -1215,7 +1211,7 @@ public void testApiKeyCache() throws IOException { Hasher hasher = getFastStoredHashAlgoForTests(); final char[] hash = hasher.hash(new SecureString(apiKey.toCharArray())); - ApiKeyDoc apiKeyDoc = buildApiKeyDoc(hash, -1, false); + ApiKeyDoc apiKeyDoc = buildApiKeyDoc(hash, -1, false, -1); ApiKeyService service = createApiKeyService(Settings.EMPTY); ApiKeyCredentials creds = getApiKeyCredentials(apiKeyId, apiKey, apiKeyDoc.type); @@ -1236,7 +1232,7 @@ public void testApiKeyCache() throws IOException { assertNotNull(shouldBeSame); assertThat(shouldBeSame, sameInstance(cachedApiKeyHashResult)); - apiKeyDoc = buildApiKeyDoc(hasher.hash(new SecureString("somelongenoughrandomstring".toCharArray())), -1, false); + apiKeyDoc = buildApiKeyDoc(hasher.hash(new SecureString("somelongenoughrandomstring".toCharArray())), -1, false, -1); creds = getApiKeyCredentials(randomAlphaOfLength(12), "otherlongenoughrandomstring", apiKeyDoc.type); future = new PlainActionFuture<>(); service.validateApiKeyCredentials(creds.getId(), apiKeyDoc, creds, Clock.systemUTC(), future); @@ -1514,7 +1510,7 @@ public void testApiKeyCacheDisabled() throws IOException { final char[] hash = hasher.hash(new SecureString(apiKey.toCharArray())); final Settings settings = Settings.builder().put(ApiKeyService.CACHE_TTL_SETTING.getKey(), "0s").build(); - ApiKeyDoc apiKeyDoc = buildApiKeyDoc(hash, -1, false); + ApiKeyDoc apiKeyDoc = buildApiKeyDoc(hash, -1, false, -1); ApiKeyService service = createApiKeyService(settings); ApiKeyCredentials creds = getApiKeyCredentials(randomAlphaOfLength(12), apiKey, apiKeyDoc.type); @@ -1534,7 +1530,7 @@ public void testApiKeyDocCacheCanBeDisabledSeparately() throws IOException { final char[] hash = hasher.hash(new SecureString(apiKey.toCharArray())); final Settings settings = Settings.builder().put(ApiKeyService.DOC_CACHE_TTL_SETTING.getKey(), "0s").build(); - ApiKeyDoc apiKeyDoc =
buildApiKeyDoc(hash, -1, false); + ApiKeyDoc apiKeyDoc = buildApiKeyDoc(hash, -1, false, -1); ApiKeyService service = createApiKeyService(settings); @@ -2078,7 +2074,7 @@ public void testValidateApiKeyDocBeforeUpdate() throws IOException { final char[] hash = hasher.hash(new SecureString(apiKey.toCharArray())); final var apiKeyService = createApiKeyService(); - final var apiKeyDocWithNullName = buildApiKeyDoc(hash, -1, false, null, Version.V_8_2_0.id); + final var apiKeyDocWithNullName = buildApiKeyDoc(hash, -1, false, -1, null, Version.V_8_2_0.id); final var auth = Authentication.newRealmAuthentication( new User("test_user", "role"), new Authentication.RealmRef("realm1", "realm_type1", "node") @@ -2090,14 +2086,14 @@ public void testValidateApiKeyDocBeforeUpdate() throws IOException { ); assertThat(ex.getMessage(), containsString("cannot update legacy API key [" + apiKeyId + "] without name")); - final var apiKeyDocWithEmptyName = buildApiKeyDoc(hash, -1, false, "", Version.V_8_2_0.id); + final var apiKeyDocWithEmptyName = buildApiKeyDoc(hash, -1, false, -1, "", Version.V_8_2_0.id); ex = expectThrows( IllegalArgumentException.class, () -> apiKeyService.validateForUpdate(apiKeyId, apiKeyDocWithEmptyName.type, auth, apiKeyDocWithEmptyName) ); assertThat(ex.getMessage(), containsString("cannot update legacy API key [" + apiKeyId + "] without name")); - final ApiKeyDoc apiKeyDoc = buildApiKeyDoc(hash, -1, false, randomAlphaOfLengthBetween(3, 8), Version.CURRENT.id); + final ApiKeyDoc apiKeyDoc = buildApiKeyDoc(hash, -1, false, -1, randomAlphaOfLengthBetween(3, 8), Version.CURRENT.id); final ApiKey.Type expectedType = randomValueOtherThan(apiKeyDoc.type, () -> randomFrom(ApiKey.Type.values())); ex = expectThrows(IllegalArgumentException.class, () -> apiKeyService.validateForUpdate(apiKeyId, expectedType, auth, apiKeyDoc)); assertThat( @@ -2430,7 +2426,7 @@ public void testCreateCrossClusterApiKeyMinVersionConstraint() { final ClusterService clusterService = mock(ClusterService.class); when(clusterService.getClusterSettings()).thenReturn( - new ClusterSettings(Settings.EMPTY, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD)) + new ClusterSettings(Settings.EMPTY, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD, ApiKeyService.DELETE_INTERVAL)) ); final ClusterState clusterState = mock(ClusterState.class); when(clusterService.state()).thenReturn(clusterState); @@ -2511,6 +2507,7 @@ public void testValidateApiKeyTypeAndExpiration() throws IOException { hash, randomFrom(-1L, futureTime), false, + -1, randomAlphaOfLengthBetween(3, 8), Version.CURRENT.id ); @@ -2535,7 +2532,7 @@ public void testValidateApiKeyTypeAndExpiration() throws IOException { ); // Expired API key - final var apiKeyDoc2 = buildApiKeyDoc(hash, pastTime, false, randomAlphaOfLengthBetween(3, 8), Version.CURRENT.id); + final var apiKeyDoc2 = buildApiKeyDoc(hash, pastTime, false, -1, randomAlphaOfLengthBetween(3, 8), Version.CURRENT.id); final ApiKeyCredentials apiKeyCredentials2 = getApiKeyCredentials(apiKeyId, apiKey, apiKeyDoc2.type); final PlainActionFuture> future2 = new PlainActionFuture<>(); ApiKeyService.validateApiKeyTypeAndExpiration(apiKeyDoc2, apiKeyCredentials2, clock, future2); @@ -2549,6 +2546,7 @@ public void testValidateApiKeyTypeAndExpiration() throws IOException { hash, randomFrom(-1L, futureTime), false, + -1, randomAlphaOfLengthBetween(3, 8), Version.CURRENT.id ); @@ -2565,7 +2563,7 @@ public void testCreateOrUpdateApiKeyWithWorkflowsRestrictionForUnsupportedVersio final Authentication authentication = 
AuthenticationTestHelper.builder().build(); final ClusterService clusterService = mock(ClusterService.class); when(clusterService.getClusterSettings()).thenReturn( - new ClusterSettings(Settings.EMPTY, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD)) + new ClusterSettings(Settings.EMPTY, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD, ApiKeyService.DELETE_INTERVAL)) ); final ClusterState clusterState = mock(ClusterState.class); when(clusterService.state()).thenReturn(clusterState); @@ -2630,7 +2628,7 @@ public void testValidateOwnerUserRoleDescriptorsWithWorkflowsRestriction() { final Authentication authentication = AuthenticationTestHelper.builder().build(); final ClusterService clusterService = mock(ClusterService.class); when(clusterService.getClusterSettings()).thenReturn( - new ClusterSettings(Settings.EMPTY, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD)) + new ClusterSettings(Settings.EMPTY, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD, ApiKeyService.DELETE_INTERVAL)) ); final ClusterState clusterState = mock(ClusterState.class); when(clusterService.state()).thenReturn(clusterState); @@ -2742,7 +2740,7 @@ public static Authentication createApiKeyAuthentication( XContentType.JSON ) ); - PlainActionFuture<AuthenticationResult<User>> authenticationResultFuture = PlainActionFuture.newFuture(); + PlainActionFuture<AuthenticationResult<User>> authenticationResultFuture = new PlainActionFuture<>(); ApiKeyService.validateApiKeyTypeAndExpiration( apiKeyDoc, new ApiKeyService.ApiKeyCredentials("id", new SecureString(randomAlphaOfLength(16).toCharArray()), ApiKey.Type.REST), @@ -2807,7 +2805,10 @@ private ApiKeyService createApiKeyService(Settings baseSettings) { .build(); final ClusterSettings clusterSettings = new ClusterSettings( settings, - Sets.union(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD)) + Sets.union( + ClusterSettings.BUILT_IN_CLUSTER_SETTINGS, + Set.of(ApiKeyService.DELETE_RETENTION_PERIOD, ApiKeyService.DELETE_INTERVAL) + ) ); final ApiKeyService service = new ApiKeyService( settings, @@ -2861,15 +2862,17 @@ private void mockSourceDocument(String id, Map<String, Object> sourceMap) throws } } - private ApiKeyDoc buildApiKeyDoc(char[] hash, long expirationTime, boolean invalidated) throws IOException { - return buildApiKeyDoc(hash, expirationTime, invalidated, randomAlphaOfLength(12)); + private ApiKeyDoc buildApiKeyDoc(char[] hash, long expirationTime, boolean invalidated, long invalidation) throws IOException { + return buildApiKeyDoc(hash, expirationTime, invalidated, invalidation, randomAlphaOfLength(12)); } - private ApiKeyDoc buildApiKeyDoc(char[] hash, long expirationTime, boolean invalidated, String name) throws IOException { - return buildApiKeyDoc(hash, expirationTime, invalidated, name, 0); + private ApiKeyDoc buildApiKeyDoc(char[] hash, long expirationTime, boolean invalidated, long invalidation, String name) + throws IOException { + return buildApiKeyDoc(hash, expirationTime, invalidated, invalidation, name, 0); } - private ApiKeyDoc buildApiKeyDoc(char[] hash, long expirationTime, boolean invalidated, String name, int version) throws IOException { + private ApiKeyDoc buildApiKeyDoc(char[] hash, long expirationTime, boolean invalidated, long invalidation, String name, int version) + throws IOException { final BytesReference metadataBytes = XContentTestUtils.convertToXContent(ApiKeyTests.randomMetadata(), XContentType.JSON); return new ApiKeyDoc( "api_key", @@ -2877,6 +2880,7 @@ private ApiKeyDoc buildApiKeyDoc(char[] hash, long expirationTime, boolean inval
Clock.systemUTC().instant().toEpochMilli(), expirationTime, invalidated, + invalidation, new String(hash), name, version, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index cf343f790d85c..34e88be58de3b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -324,7 +324,10 @@ public void init() throws Exception { }).when(securityIndex).checkIndexVersionThenExecute(anyConsumer(), any(Runnable.class)); final ClusterSettings clusterSettings = new ClusterSettings( settings, - Sets.union(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD)) + Sets.union( + ClusterSettings.BUILT_IN_CLUSTER_SETTINGS, + Set.of(ApiKeyService.DELETE_RETENTION_PERIOD, ApiKeyService.DELETE_INTERVAL) + ) ); ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool, clusterSettings); final SecurityContext securityContext = new SecurityContext(settings, threadContext); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index 35335fd5e4a53..0188907462fc7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -29,10 +29,10 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.update.UpdateAction; import org.elasticsearch.action.update.UpdateRequestBuilder; @@ -174,7 +174,7 @@ public void setupClient() { final String id = (String) inv.getArguments()[1]; return new UpdateRequestBuilder(client, UpdateAction.INSTANCE).setIndex(index).setId(id); }); - when(client.prepareSearch(any(String.class))).thenReturn(new SearchRequestBuilder(client, SearchAction.INSTANCE)); + when(client.prepareSearch(any(String.class))).thenReturn(new SearchRequestBuilder(client, TransportSearchAction.TYPE)); doAnswer(invocationOnMock -> { @SuppressWarnings("unchecked") ActionListener responseActionListener = (ActionListener) invocationOnMock.getArguments()[2]; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticatorTests.java index 4d16732104237..dd1a984a0dcb5 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticatorTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.test.ESTestCase; import 
org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtAuthenticationToken; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import org.junit.Before; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtIssuer.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtIssuer.java index 789ac04c40622..3d4d9eae6acd0 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtIssuer.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtIssuer.java @@ -14,6 +14,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import org.elasticsearch.xpack.core.security.user.User; import java.io.Closeable; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmAuthenticateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmAuthenticateTests.java index f75876a755557..bf6c64242701b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmAuthenticateTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmAuthenticateTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtAuthenticationToken; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import org.elasticsearch.xpack.core.security.user.User; @@ -369,7 +370,7 @@ public void testJwtValidationFailures() throws Exception { { // Do one more direct SUCCESS scenario by checking token() and authenticate() directly before moving on to FAILURE scenarios. 
final ThreadContext requestThreadContext = createThreadContext(jwt, clientSecret); final JwtAuthenticationToken token = (JwtAuthenticationToken) jwtIssuerAndRealm.realm().token(requestThreadContext); - final PlainActionFuture<AuthenticationResult<User>> plainActionFuture = PlainActionFuture.newFuture(); + final PlainActionFuture<AuthenticationResult<User>> plainActionFuture = new PlainActionFuture<>(); jwtIssuerAndRealm.realm().authenticate(token, plainActionFuture); assertThat(plainActionFuture.get(), notNullValue()); assertThat(plainActionFuture.get().isAuthenticated(), is(true)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmGenerateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmGenerateTests.java index 2f77923c6c50f..7a0e138305b83 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmGenerateTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmGenerateTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.core.security.user.User; @@ -428,7 +429,7 @@ private void printArtifacts( + (Strings.hasText(clientSecret) ? JwtRealm.HEADER_CLIENT_AUTHENTICATION + ": " - + JwtRealm.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME + + JwtRealmSettings.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME + " " + clientSecret + "\n" diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmInspector.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmInspector.java index 7697849179acf..40a613a0907c8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmInspector.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmInspector.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import org.elasticsearch.xpack.core.security.authc.support.ClaimSetting; import java.net.URI; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmTestCase.java index 64f2444e0182d..1bc49cb628464 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmTestCase.java @@ -28,6 +28,7 @@ import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtAuthenticationToken; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import
org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings.ClientAuthenticationType; import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; @@ -104,7 +105,7 @@ protected void verifyAuthenticateFailureHelper( ) throws InterruptedException, ExecutionException { final ThreadContext tc = createThreadContext(jwt, clientSecret); final JwtAuthenticationToken token = (JwtAuthenticationToken) jwtIssuerAndRealm.realm.token(tc); - final PlainActionFuture<AuthenticationResult<User>> plainActionFuture = PlainActionFuture.newFuture(); + final PlainActionFuture<AuthenticationResult<User>> plainActionFuture = new PlainActionFuture<>(); jwtIssuerAndRealm.realm.authenticate(token, plainActionFuture); assertThat(plainActionFuture.get(), notNullValue()); assertThat(plainActionFuture.get().isAuthenticated(), is(false)); @@ -398,7 +399,7 @@ protected void doMultipleAuthcAuthzAndVerifySuccess( User authenticatedUser = null; realmLoop: for (final JwtRealm candidateJwtRealm : jwtRealmsList) { logger.debug("TRY AUTHC: expected=[" + jwtRealm.name() + "], candidate[" + candidateJwtRealm.name() + "]."); - final PlainActionFuture<AuthenticationResult<User>> authenticateFuture = PlainActionFuture.newFuture(); + final PlainActionFuture<AuthenticationResult<User>> authenticateFuture = new PlainActionFuture<>(); candidateJwtRealm.authenticate(jwtAuthenticationToken, authenticateFuture); final AuthenticationResult<User> authenticationResult = authenticateFuture.actionGet(); logger.debug("Authentication result with realm [{}]: [{}]", candidateJwtRealm.name(), authenticationResult); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtTestCase.java index cfb153d233e9d..f244544460ebf 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtTestCase.java @@ -623,7 +623,7 @@ public ThreadContext createThreadContext(final CharSequence jwt, final CharSeque if (sharedSecret != null) { requestThreadContext.putHeader( JwtRealm.HEADER_CLIENT_AUTHENTICATION, - JwtRealm.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME + " " + sharedSecret + JwtRealmSettings.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME + " " + sharedSecret ); } return requestThreadContext; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtTokenExtractionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtTokenExtractionTests.java index 7cfac9978081b..8662561aca1ae 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtTokenExtractionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtTokenExtractionTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.BearerToken; import org.elasticsearch.xpack.security.authc.AuthenticationService; import org.elasticsearch.xpack.security.authc.Authenticator; @@ -69,7 +70,7 @@ public void testRealmLetsThroughInvalidJWTs() { if (randomBoolean()) { threadContext.putHeader( JwtRealm.HEADER_CLIENT_AUTHENTICATION, - JwtRealm.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME + " " + "some shared
secret" + JwtRealmSettings.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME + " " + "some shared secret" ); } AuthenticationToken authenticationToken = realmsAuthenticator.extractCredentials(context); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtilTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtilTests.java index 6fab33b4d6adf..7d90dffd7517c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtilTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtilTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmAuthenticateFailedTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmAuthenticateFailedTests.java index da8bd2b4a61a2..12a2420ac13cc 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmAuthenticateFailedTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmAuthenticateFailedTests.java @@ -52,7 +52,7 @@ public void testAuthenticateWithNonKerberosAuthenticationToken() { randomAlphaOfLength(5), new SecureString(new char[] { 'a', 'b', 'c' }) ); - expectThrows(AssertionError.class, () -> kerberosRealm.authenticate(usernamePasswordToken, PlainActionFuture.newFuture())); + expectThrows(AssertionError.class, () -> kerberosRealm.authenticate(usernamePasswordToken, new PlainActionFuture<>())); } public void testAuthenticateDifferentFailureScenarios() throws LoginException, GSSException { @@ -83,10 +83,7 @@ public void testAuthenticateDifferentFailureScenarios() throws LoginException, G ? 
null : new KerberosAuthenticationToken(decodedTicket); if (nullKerberosAuthnToken) { - expectThrows( - AssertionError.class, - () -> kerberosRealm.authenticate(kerberosAuthenticationToken, PlainActionFuture.newFuture()) - ); + expectThrows(AssertionError.class, () -> kerberosRealm.authenticate(kerberosAuthenticationToken, new PlainActionFuture<>())); } else { final PlainActionFuture<AuthenticationResult<User>> future = new PlainActionFuture<>(); kerberosRealm.authenticate(kerberosAuthenticationToken, future); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java index dce1019dd6f3b..3fb849afd90d2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java @@ -165,7 +165,7 @@ private User authenticateAndAssertResult( final KerberosAuthenticationToken kerberosAuthenticationToken, String outToken ) { - final PlainActionFuture<AuthenticationResult<User>> future = PlainActionFuture.newFuture(); + final PlainActionFuture<AuthenticationResult<User>> future = new PlainActionFuture<>(); kerberosRealm.authenticate(kerberosAuthenticationToken, future); final AuthenticationResult<User> result = future.actionGet(); assertSuccessAuthenticationResult(expectedUser, outToken, result); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java index 19ae53caca086..60f6cc53902b9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java @@ -1417,9 +1417,14 @@ private Encrypter getEncrypter(Tuple<X509Certificate, PrivateKey> keyPair) throw final Credential keyEncryptionCredential = new BasicCredential(keyPair.v1().getPublicKey(), keyPair.v2()); KeyEncryptionParameters keyEncryptionParameters = new KeyEncryptionParameters(); keyEncryptionParameters.setEncryptionCredential(keyEncryptionCredential); - keyEncryptionParameters.setAlgorithm( - randomFrom(EncryptionConstants.ALGO_ID_KEYTRANSPORT_RSAOAEP, EncryptionConstants.ALGO_ID_KEYTRANSPORT_RSA15) - ); + if (inFipsJvm()) { + // RSA v1.5 is not allowed when running in FIPS mode + keyEncryptionParameters.setAlgorithm(EncryptionConstants.ALGO_ID_KEYTRANSPORT_RSAOAEP); + } else { + keyEncryptionParameters.setAlgorithm( + randomFrom(EncryptionConstants.ALGO_ID_KEYTRANSPORT_RSAOAEP, EncryptionConstants.ALGO_ID_KEYTRANSPORT_RSA15) + ); + } final Encrypter samlEncrypter = new Encrypter(encryptionParameters, keyEncryptionParameters); samlEncrypter.setKeyPlacement(Encrypter.KeyPlacement.INLINE); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccountsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccountsTests.java index 08cfdde03815d..fa37a2abee77f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccountsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccountsTests.java @@ -24,9 +24,8 @@ import
org.elasticsearch.action.get.GetAction; import org.elasticsearch.action.get.MultiGetAction; import org.elasticsearch.action.index.IndexAction; -import org.elasticsearch.action.search.MultiSearchAction; -import org.elasticsearch.action.search.SearchAction; -import org.elasticsearch.action.update.UpdateAction; +import org.elasticsearch.action.search.TransportMultiSearchAction; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; @@ -34,68 +33,6 @@ import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xpack.core.ilm.action.GetLifecycleAction; import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction; -import org.elasticsearch.xpack.core.ml.action.CloseJobAction; -import org.elasticsearch.xpack.core.ml.action.DeleteCalendarAction; -import org.elasticsearch.xpack.core.ml.action.DeleteCalendarEventAction; -import org.elasticsearch.xpack.core.ml.action.DeleteDatafeedAction; -import org.elasticsearch.xpack.core.ml.action.DeleteExpiredDataAction; -import org.elasticsearch.xpack.core.ml.action.DeleteFilterAction; -import org.elasticsearch.xpack.core.ml.action.DeleteForecastAction; -import org.elasticsearch.xpack.core.ml.action.DeleteJobAction; -import org.elasticsearch.xpack.core.ml.action.DeleteModelSnapshotAction; -import org.elasticsearch.xpack.core.ml.action.DeleteTrainedModelAction; -import org.elasticsearch.xpack.core.ml.action.EstimateModelMemoryAction; -import org.elasticsearch.xpack.core.ml.action.EvaluateDataFrameAction; -import org.elasticsearch.xpack.core.ml.action.ExplainDataFrameAnalyticsAction; -import org.elasticsearch.xpack.core.ml.action.FinalizeJobExecutionAction; -import org.elasticsearch.xpack.core.ml.action.FlushJobAction; -import org.elasticsearch.xpack.core.ml.action.ForecastJobAction; -import org.elasticsearch.xpack.core.ml.action.GetBucketsAction; -import org.elasticsearch.xpack.core.ml.action.GetCalendarEventsAction; -import org.elasticsearch.xpack.core.ml.action.GetCalendarsAction; -import org.elasticsearch.xpack.core.ml.action.GetCategoriesAction; -import org.elasticsearch.xpack.core.ml.action.GetDataFrameAnalyticsAction; -import org.elasticsearch.xpack.core.ml.action.GetDataFrameAnalyticsStatsAction; -import org.elasticsearch.xpack.core.ml.action.GetDatafeedsAction; -import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; -import org.elasticsearch.xpack.core.ml.action.GetFiltersAction; -import org.elasticsearch.xpack.core.ml.action.GetInfluencersAction; -import org.elasticsearch.xpack.core.ml.action.GetJobsAction; -import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; -import org.elasticsearch.xpack.core.ml.action.GetModelSnapshotsAction; -import org.elasticsearch.xpack.core.ml.action.GetOverallBucketsAction; -import org.elasticsearch.xpack.core.ml.action.GetRecordsAction; -import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; -import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsStatsAction; -import org.elasticsearch.xpack.core.ml.action.InferModelAction; -import org.elasticsearch.xpack.core.ml.action.IsolateDatafeedAction; -import org.elasticsearch.xpack.core.ml.action.KillProcessAction; -import org.elasticsearch.xpack.core.ml.action.MlInfoAction; -import org.elasticsearch.xpack.core.ml.action.OpenJobAction; -import org.elasticsearch.xpack.core.ml.action.PersistJobAction; -import 
org.elasticsearch.xpack.core.ml.action.PostCalendarEventsAction; -import org.elasticsearch.xpack.core.ml.action.PostDataAction; -import org.elasticsearch.xpack.core.ml.action.PreviewDatafeedAction; -import org.elasticsearch.xpack.core.ml.action.PutCalendarAction; -import org.elasticsearch.xpack.core.ml.action.PutDataFrameAnalyticsAction; -import org.elasticsearch.xpack.core.ml.action.PutDatafeedAction; -import org.elasticsearch.xpack.core.ml.action.PutFilterAction; -import org.elasticsearch.xpack.core.ml.action.PutJobAction; -import org.elasticsearch.xpack.core.ml.action.PutTrainedModelAction; -import org.elasticsearch.xpack.core.ml.action.RevertModelSnapshotAction; -import org.elasticsearch.xpack.core.ml.action.SetUpgradeModeAction; -import org.elasticsearch.xpack.core.ml.action.StartDataFrameAnalyticsAction; -import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; -import org.elasticsearch.xpack.core.ml.action.StopDataFrameAnalyticsAction; -import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; -import org.elasticsearch.xpack.core.ml.action.UpdateCalendarJobAction; -import org.elasticsearch.xpack.core.ml.action.UpdateDatafeedAction; -import org.elasticsearch.xpack.core.ml.action.UpdateFilterAction; -import org.elasticsearch.xpack.core.ml.action.UpdateJobAction; -import org.elasticsearch.xpack.core.ml.action.UpdateModelSnapshotAction; -import org.elasticsearch.xpack.core.ml.action.UpdateProcessAction; -import org.elasticsearch.xpack.core.ml.action.ValidateDetectorAction; -import org.elasticsearch.xpack.core.ml.action.ValidateJobConfigAction; import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkAction; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; @@ -117,7 +54,6 @@ import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.service.ElasticServiceAccounts.ElasticServiceAccount; -import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; @@ -204,8 +140,8 @@ public void testElasticFleetServerPrivileges() { assertThat(role.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(index), is(false)); assertThat(role.indices().allowedIndicesMatcher(GetAction.NAME).test(index), is(false)); assertThat(role.indices().allowedIndicesMatcher(MultiGetAction.NAME).test(index), is(false)); - assertThat(role.indices().allowedIndicesMatcher(SearchAction.NAME).test(index), is(false)); - assertThat(role.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(index), is(false)); + assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(index), is(false)); + assertThat(role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(index), is(false)); assertThat(role.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(index), is(false)); }); @@ -219,8 +155,8 @@ public void testElasticFleetServerPrivileges() { assertThat(role.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(profilingIndex), is(false)); assertThat(role.indices().allowedIndicesMatcher(GetAction.NAME).test(profilingIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(MultiGetAction.NAME).test(profilingIndex), is(true)); - assertThat(role.indices().allowedIndicesMatcher(SearchAction.NAME).test(profilingIndex), is(true)); - assertThat(role.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(profilingIndex), is(true)); + 
assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(profilingIndex), is(true)); + assertThat(role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(profilingIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(profilingIndex), is(false)); List.of("synthetics-" + randomAlphaOfLengthBetween(1, 20)).stream().map(this::mockIndexAbstraction).forEach(index -> { @@ -233,8 +169,8 @@ public void testElasticFleetServerPrivileges() { assertThat(role.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(index), is(false)); assertThat(role.indices().allowedIndicesMatcher(GetAction.NAME).test(index), is(true)); assertThat(role.indices().allowedIndicesMatcher(MultiGetAction.NAME).test(index), is(true)); - assertThat(role.indices().allowedIndicesMatcher(SearchAction.NAME).test(index), is(true)); - assertThat(role.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(index), is(true)); + assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(index), is(true)); + assertThat(role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(index), is(true)); assertThat(role.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(index), is(false)); }); @@ -256,8 +192,8 @@ public void testElasticFleetServerPrivileges() { assertThat(role.indices().allowedIndicesMatcher(BulkAction.NAME).test(dotFleetIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(GetAction.NAME).test(dotFleetIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(MultiGetAction.NAME).test(dotFleetIndex), is(true)); - assertThat(role.indices().allowedIndicesMatcher(SearchAction.NAME).test(dotFleetIndex), is(true)); - assertThat(role.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(dotFleetIndex), is(true)); + assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(dotFleetIndex), is(true)); + assertThat(role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(dotFleetIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(IndicesStatsAction.NAME).test(dotFleetIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(dotFleetIndex), is(false)); assertThat(role.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(dotFleetIndex), is(false)); @@ -271,8 +207,8 @@ public void testElasticFleetServerPrivileges() { assertThat(role.indices().allowedIndicesMatcher(BulkAction.NAME).test(dotFleetSecretsIndex), is(false)); assertThat(role.indices().allowedIndicesMatcher(GetAction.NAME).test(dotFleetSecretsIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(MultiGetAction.NAME).test(dotFleetSecretsIndex), is(true)); - assertThat(role.indices().allowedIndicesMatcher(SearchAction.NAME).test(dotFleetSecretsIndex), is(true)); - assertThat(role.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(dotFleetSecretsIndex), is(true)); + assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(dotFleetSecretsIndex), is(true)); + assertThat(role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(dotFleetSecretsIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(IndicesStatsAction.NAME).test(dotFleetSecretsIndex), is(false)); assertThat(role.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(dotFleetSecretsIndex), is(false)); 
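All of the index-privilege assertions in this file share one shape: resolve the role's matcher for a single action name, then probe it with a mocked IndexAbstraction. A compact sketch of that pattern under the renamed action types; the class and method names here are illustrative, not part of this change:

    import org.elasticsearch.action.search.TransportMultiSearchAction;
    import org.elasticsearch.action.search.TransportSearchAction;
    import org.elasticsearch.cluster.metadata.IndexAbstraction;
    import org.elasticsearch.xpack.core.security.authz.permission.Role;

    final class IndexAccessProbe {
        // True only when the role may run both search and multi-search
        // against the given index abstraction.
        static boolean canSearch(Role role, IndexAbstraction index) {
            return role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(index)
                && role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(index);
        }
    }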
assertThat(role.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(dotFleetSecretsIndex), is(false)); @@ -290,8 +226,8 @@ public void testElasticFleetServerPrivileges() { assertThat(role.indices().allowedIndicesMatcher(BulkAction.NAME).test(apmSampledTracesIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(GetAction.NAME).test(apmSampledTracesIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(MultiGetAction.NAME).test(apmSampledTracesIndex), is(true)); - assertThat(role.indices().allowedIndicesMatcher(SearchAction.NAME).test(apmSampledTracesIndex), is(true)); - assertThat(role.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(apmSampledTracesIndex), is(true)); + assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(apmSampledTracesIndex), is(true)); + assertThat(role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(apmSampledTracesIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(IndicesStatsAction.NAME).test(apmSampledTracesIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(apmSampledTracesIndex), is(false)); assertThat(role.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(apmSampledTracesIndex), is(false)); @@ -436,8 +372,8 @@ public void testElasticEnterpriseSearchServerAccount() { assertThat(role.indices().allowedIndicesMatcher(BulkAction.NAME).test(enterpriseSearchIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(GetAction.NAME).test(enterpriseSearchIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(MultiGetAction.NAME).test(enterpriseSearchIndex), is(true)); - assertThat(role.indices().allowedIndicesMatcher(SearchAction.NAME).test(enterpriseSearchIndex), is(true)); - assertThat(role.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(enterpriseSearchIndex), is(true)); + assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(enterpriseSearchIndex), is(true)); + assertThat(role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(enterpriseSearchIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(IndicesStatsAction.NAME).test(enterpriseSearchIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(enterpriseSearchIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(RefreshAction.NAME).test(enterpriseSearchIndex), is(true)); @@ -453,91 +389,4 @@ private IndexAbstraction mockIndexAbstraction(String name) { ); return mock; } - - private void assertNoAccessAllowed(Role role, Collection indices) { - for (String index : indices) { - assertNoAccessAllowed(role, index); - } - } - - private void assertNoAccessAllowed(Role role, String index) { - assertThat(role.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); - assertThat(role.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); - assertThat(role.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), is(false)); - assertThat(role.indices().allowedIndicesMatcher(SearchAction.NAME).test(mockIndexAbstraction(index)), is(false)); - assertThat(role.indices().allowedIndicesMatcher(GetAction.NAME).test(mockIndexAbstraction(index)), is(false)); - assertThat(role.indices().allowedIndicesMatcher(IndexAction.NAME).test(mockIndexAbstraction(index)), 
is(false)); - assertThat(role.indices().allowedIndicesMatcher(UpdateAction.NAME).test(mockIndexAbstraction(index)), is(false)); - assertThat(role.indices().allowedIndicesMatcher(DeleteAction.NAME).test(mockIndexAbstraction(index)), is(false)); - assertThat(role.indices().allowedIndicesMatcher(BulkAction.NAME).test(mockIndexAbstraction(index)), is(false)); - } - - private void assertRoleHasManageMl(Role role) { - final TransportRequest request = mock(TransportRequest.class); - final Authentication authentication = AuthenticationTestHelper.builder().serviceAccount().build(); - - assertThat(role.cluster().check(CloseJobAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(DeleteCalendarAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(DeleteCalendarEventAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(DeleteDatafeedAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(DeleteExpiredDataAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(DeleteFilterAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(DeleteForecastAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(DeleteJobAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(DeleteModelSnapshotAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(DeleteTrainedModelAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(EstimateModelMemoryAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(EvaluateDataFrameAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(ExplainDataFrameAnalyticsAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(FinalizeJobExecutionAction.NAME, request, authentication), is(false)); // internal use only - assertThat(role.cluster().check(FlushJobAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(ForecastJobAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(GetBucketsAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(GetCalendarEventsAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(GetCalendarsAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(GetCategoriesAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(GetDatafeedsAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(GetDatafeedsStatsAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(GetDataFrameAnalyticsAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(GetDataFrameAnalyticsStatsAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(GetFiltersAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(GetInfluencersAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(GetJobsAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(GetJobsStatsAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(GetModelSnapshotsAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(GetOverallBucketsAction.NAME, request, 
authentication), is(true)); - assertThat(role.cluster().check(GetRecordsAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(GetTrainedModelsAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(GetTrainedModelsStatsAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(InferModelAction.EXTERNAL_NAME, request, authentication), is(true)); - assertThat(role.cluster().check(InferModelAction.NAME, request, authentication), is(false)); // internal use only - assertThat(role.cluster().check(IsolateDatafeedAction.NAME, request, authentication), is(false)); // internal use only - assertThat(role.cluster().check(KillProcessAction.NAME, request, authentication), is(false)); // internal use only - assertThat(role.cluster().check(MlInfoAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(OpenJobAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(PersistJobAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(PostCalendarEventsAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(PostDataAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(PreviewDatafeedAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(PutCalendarAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(PutDatafeedAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(PutDataFrameAnalyticsAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(PutFilterAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(PutJobAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(PutTrainedModelAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(RevertModelSnapshotAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(SetUpgradeModeAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(StartDatafeedAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(StartDataFrameAnalyticsAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(StopDatafeedAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(StopDataFrameAnalyticsAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(UpdateCalendarJobAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(UpdateDatafeedAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(UpdateFilterAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(UpdateJobAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(UpdateModelSnapshotAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(UpdateProcessAction.NAME, request, authentication), is(false)); // internal use only - assertThat(role.cluster().check(ValidateDetectorAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(ValidateJobConfigAction.NAME, request, authentication), is(true)); - } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/SecondaryAuthenticatorTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/SecondaryAuthenticatorTests.java index e642abf6682aa..b3a6bed9a5a94 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/SecondaryAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/SecondaryAuthenticatorTests.java @@ -123,7 +123,9 @@ public void setupMocks() throws Exception { final ClusterService clusterService = mock(ClusterService.class); final ClusterState clusterState = ClusterState.EMPTY_STATE; when(clusterService.state()).thenReturn(clusterState); - when(clusterService.getClusterSettings()).thenReturn(new ClusterSettings(settings, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD))); + when(clusterService.getClusterSettings()).thenReturn( + new ClusterSettings(settings, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD, ApiKeyService.DELETE_INTERVAL)) + ); securityContext = new SecurityContext(settings, threadContext); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java index efc97ca30cd1a..d083c1700c302 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -8,10 +8,10 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; @@ -195,7 +195,7 @@ public void testResolveRolesDoesNotUseLastLoadCacheWhenSecurityIndexAvailable() when(mockThreadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); when(client.threadPool()).thenReturn(mockThreadPool); when(client.prepareSearch(eq(SECURITY_MAIN_ALIAS))).thenReturn( - Mockito.spy(new SearchRequestBuilder(client, SearchAction.INSTANCE)) + Mockito.spy(new SearchRequestBuilder(client, TransportSearchAction.TYPE)) ); final ExpressionRoleMapping mapping = new ExpressionRoleMapping( "mapping", @@ -239,7 +239,7 @@ public void testResolveRolesUsesLastLoadCacheWhenSecurityIndexUnavailable() thro when(mockThreadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); when(client.threadPool()).thenReturn(mockThreadPool); when(client.prepareSearch(eq(SECURITY_MAIN_ALIAS))).thenReturn( - Mockito.spy(new SearchRequestBuilder(client, SearchAction.INSTANCE)) + Mockito.spy(new SearchRequestBuilder(client, TransportSearchAction.TYPE)) ); final ExpressionRoleMapping mapping = new ExpressionRoleMapping( "mapping", @@ -303,7 +303,7 @@ public void testResolveRolesDoesNotUseLastLoadCacheWhenSecurityIndexDoesNotExist when(mockThreadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); when(client.threadPool()).thenReturn(mockThreadPool); when(client.prepareSearch(eq(SECURITY_MAIN_ALIAS))).thenReturn( - Mockito.spy(new SearchRequestBuilder(client, 
SearchAction.INSTANCE)) + Mockito.spy(new SearchRequestBuilder(client, TransportSearchAction.TYPE)) ); final ExpressionRoleMapping mapping = new ExpressionRoleMapping( "mapping", diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index edff46cef16e4..bf358f03e16a5 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -56,20 +56,20 @@ import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.ClearScrollAction; import org.elasticsearch.action.search.ClearScrollRequest; -import org.elasticsearch.action.search.ClosePointInTimeAction; import org.elasticsearch.action.search.ClosePointInTimeRequest; -import org.elasticsearch.action.search.MultiSearchAction; import org.elasticsearch.action.search.MultiSearchRequest; -import org.elasticsearch.action.search.OpenPointInTimeAction; import org.elasticsearch.action.search.OpenPointInTimeRequest; import org.elasticsearch.action.search.ParsedScrollId; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchScrollAction; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.SearchTransportService; +import org.elasticsearch.action.search.TransportClearScrollAction; +import org.elasticsearch.action.search.TransportClosePointInTimeAction; +import org.elasticsearch.action.search.TransportMultiSearchAction; +import org.elasticsearch.action.search.TransportOpenPointInTimeAction; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchScrollAction; import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; @@ -746,11 +746,11 @@ public void testUserWithNoRolesCanPerformRemoteSearch() { final Authentication authentication = createAuthentication(new User("test user")); mockEmptyMetadata(); final String requestId = AuditUtil.getOrGenerateRequestId(threadContext); - authorize(authentication, SearchAction.NAME, request); + authorize(authentication, TransportSearchAction.TYPE.name(), request); verify(auditTrail).accessGranted( eq(requestId), eq(authentication), - eq(SearchAction.NAME), + eq(TransportSearchAction.TYPE.name()), eq(request), authzInfoRoles(Role.EMPTY.names()) ); @@ -768,7 +768,7 @@ public void testUserWithNoRolesPerformsRemoteSearchWithScroll() { when(parsedScrollId.hasLocalIndices()).thenReturn(hasLocalIndices); if (hasLocalIndices) { assertThrowsAuthorizationException( - () -> authorize(authentication, SearchScrollAction.NAME, searchScrollRequest), + () -> authorize(authentication, TransportSearchScrollAction.TYPE.name(), searchScrollRequest), "indices:data/read/scroll", "test user" ); @@ -780,11 +780,11 @@ public void testUserWithNoRolesPerformsRemoteSearchWithScroll() { authzInfoRoles(Role.EMPTY.names()) ); } else { - authorize(authentication, SearchScrollAction.NAME, searchScrollRequest); + authorize(authentication, 
SearchScrollAction.NAME, searchScrollRequest); + authorize(authentication, TransportSearchScrollAction.TYPE.name(), searchScrollRequest); verify(auditTrail).accessGranted( eq(requestId), eq(authentication), - eq(SearchScrollAction.NAME), + eq(TransportSearchScrollAction.TYPE.name()), eq(searchScrollRequest), authzInfoRoles(Role.EMPTY.names()) ); } @@ -804,11 +818,15 @@ public void testUserWithNoRolesCannotPerformLocalSearch() { final Authentication authentication = createAuthentication(new User("test user")); mockEmptyMetadata(); final String requestId = AuditUtil.getOrGenerateRequestId(threadContext); - assertThrowsAuthorizationException(() -> authorize(authentication, SearchAction.NAME, request), SearchAction.NAME, "test user"); + assertThrowsAuthorizationException( + () -> authorize(authentication, TransportSearchAction.TYPE.name(), request), + TransportSearchAction.TYPE.name(), + "test user" + ); verify(auditTrail).accessDenied( eq(requestId), eq(authentication), - eq(SearchAction.NAME), + eq(TransportSearchAction.TYPE.name()), eq(request), authzInfoRoles(Role.EMPTY.names()) ); @@ -825,11 +829,15 @@ public void testUserWithNoRolesCanPerformMultiClusterSearch() { final Authentication authentication = createAuthentication(new User("test user")); mockEmptyMetadata(); final String requestId = AuditUtil.getOrGenerateRequestId(threadContext); - assertThrowsAuthorizationException(() -> authorize(authentication, SearchAction.NAME, request), SearchAction.NAME, "test user"); + assertThrowsAuthorizationException( + () -> authorize(authentication, TransportSearchAction.TYPE.name(), request), + TransportSearchAction.TYPE.name(), + "test user" + ); verify(auditTrail).accessDenied( eq(requestId), eq(authentication), - eq(SearchAction.NAME), + eq(TransportSearchAction.TYPE.name()), eq(request), authzInfoRoles(Role.EMPTY.names()) ); @@ -898,7 +906,7 @@ public void testUserWithNoRolesOpenPointInTimeWithRemoteIndices() { } if (hasLocalIndices) { assertThrowsAuthorizationException( - () -> authorize(authentication, OpenPointInTimeAction.NAME, openPointInTimeRequest), + () -> authorize(authentication, TransportOpenPointInTimeAction.TYPE.name(), openPointInTimeRequest), "indices:data/read/open_point_in_time", "test user" ); @@ -910,7 +918,7 @@ authzInfoRoles(Role.EMPTY.names()) ); } else { - authorize(authentication, OpenPointInTimeAction.NAME, openPointInTimeRequest); + authorize(authentication, TransportOpenPointInTimeAction.TYPE.name(), openPointInTimeRequest); verify(auditTrail).accessGranted( eq(requestId), eq(authentication), @@ -928,7 +936,7 @@ public void testUserWithNoRolesCanClosePointInTime() { final Authentication authentication = createAuthentication(new User("test user")); mockEmptyMetadata(); final String requestId = AuditUtil.getOrGenerateRequestId(threadContext); - authorize(authentication, ClosePointInTimeAction.NAME, closePointInTimeRequest); + authorize(authentication, TransportClosePointInTimeAction.TYPE.name(), closePointInTimeRequest); verify(auditTrail).accessGranted( eq(requestId), eq(authentication), @@ -941,7 +949,10 @@ public void testUnknownRoleCausesDenial() { Tuple<String, TransportRequest> tuple = randomFrom( - asList(new Tuple<>(SearchAction.NAME, new SearchRequest()), new Tuple<>(SqlQueryAction.NAME, new SqlQueryRequest())) + asList( + new Tuple<>(TransportSearchAction.TYPE.name(), new SearchRequest()), + new Tuple<>(SqlQueryAction.NAME, new SqlQueryRequest()) + ) ); String action = tuple.v1(); TransportRequest request = tuple.v2(); @@ -973,7 +984,10 @@
public void testUnknownRoleCausesDenial() { public void testServiceAccountDenial() { Tuple<String, TransportRequest> tuple = randomFrom( - asList(new Tuple<>(SearchAction.NAME, new SearchRequest()), new Tuple<>(SqlQueryAction.NAME, new SqlQueryRequest())) + asList( + new Tuple<>(TransportSearchAction.TYPE.name(), new SearchRequest()), + new Tuple<>(SqlQueryAction.NAME, new SqlQueryRequest()) + ) ); String action = tuple.v1(); TransportRequest request = tuple.v2(); @@ -1037,7 +1051,7 @@ public void testThatNonIndicesAndNonClusterActionIsDenied() { public void testThatRoleWithNoIndicesIsDenied() { Tuple<String, TransportRequest> tuple = randomFrom( - new Tuple<>(SearchAction.NAME, new SearchRequest()), + new Tuple<>(TransportSearchAction.TYPE.name(), new SearchRequest()), new Tuple<>(SqlQueryAction.NAME, new SqlQueryRequest()) ); String action = tuple.v1(); @@ -1104,14 +1118,14 @@ public void testSearchAgainstEmptyCluster() throws Exception { ); assertThrowsAuthorizationException( - () -> authorize(authentication, SearchAction.NAME, searchRequest), - SearchAction.NAME, + () -> authorize(authentication, TransportSearchAction.TYPE.name(), searchRequest), + TransportSearchAction.TYPE.name(), "test user" ); verify(auditTrail).accessDenied( eq(requestId), eq(authentication), - eq(SearchAction.NAME), + eq(TransportSearchAction.TYPE.name()), eq(searchRequest), authzInfoRoles(new String[] { role.getName() }) ); @@ -1135,12 +1149,17 @@ public void testSearchAgainstEmptyCluster() throws Exception { assertFalse(indexAccessControl.getDocumentPermissions().hasDocumentLevelPermissions()); }); final CountDownLatch latch = new CountDownLatch(1); - authorizationService.authorize(authentication, SearchAction.NAME, searchRequest, new LatchedActionListener<>(listener, latch)); + authorizationService.authorize( + authentication, + TransportSearchAction.TYPE.name(), + searchRequest, + new LatchedActionListener<>(listener, latch) + ); latch.await(); verify(auditTrail).accessGranted( eq(requestId), eq(authentication), - eq(SearchAction.NAME), + eq(TransportSearchAction.TYPE.name()), eq(searchRequest), authzInfoRoles(new String[] { role.getName() }) ); @@ -1176,11 +1195,11 @@ public void testSearchAgainstIndex() { null ); this.setFakeOriginatingAction = false; - authorize(authentication, SearchAction.NAME, searchRequest, true, () -> { + authorize(authentication, TransportSearchAction.TYPE.name(), searchRequest, true, () -> { verify(rolesStore).getRoles(Mockito.same(authentication), Mockito.any()); IndicesAccessControl iac = threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); // Successful search action authorization should set a parent authorization header.
- assertThat(securityContext.getParentAuthorization().action(), equalTo(SearchAction.NAME)); + assertThat(securityContext.getParentAuthorization().action(), equalTo(TransportSearchAction.TYPE.name())); // Within the action handler, execute a child action (the query phase of search) authorize(authentication, SearchTransportService.QUERY_ACTION_NAME, shardRequest, false, () -> { // This child action triggers a second interaction with the role store (which is cached) @@ -1196,7 +1215,7 @@ public void testSearchAgainstIndex() { verify(auditTrail).accessGranted( eq(requestId), eq(authentication), - eq(SearchAction.NAME), + eq(TransportSearchAction.TYPE.name()), eq(searchRequest), authzInfoRoles(new String[] { role.getName() }) ); @@ -1223,11 +1242,11 @@ public void testScrollRelatedRequestsAllowed() { final String requestId = AuditUtil.getOrGenerateRequestId(threadContext); final ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); - authorize(authentication, ClearScrollAction.NAME, clearScrollRequest); + authorize(authentication, TransportClearScrollAction.NAME, clearScrollRequest); verify(auditTrail).accessGranted( eq(requestId), eq(authentication), - eq(ClearScrollAction.NAME), + eq(TransportClearScrollAction.NAME), eq(clearScrollRequest), authzInfoRoles(new String[] { role.getName() }) ); @@ -1236,11 +1255,11 @@ public void testScrollRelatedRequestsAllowed() { when(parsedScrollId.hasLocalIndices()).thenReturn(true); final SearchScrollRequest searchScrollRequest = mock(SearchScrollRequest.class); when(searchScrollRequest.parseScrollId()).thenReturn(parsedScrollId); - authorize(authentication, SearchScrollAction.NAME, searchScrollRequest); + authorize(authentication, TransportSearchScrollAction.TYPE.name(), searchScrollRequest); verify(auditTrail).accessGranted( eq(requestId), eq(authentication), - eq(SearchScrollAction.NAME), + eq(TransportSearchScrollAction.TYPE.name()), eq(searchScrollRequest), authzInfoRoles(new String[] { role.getName() }) ); @@ -1415,14 +1434,14 @@ public void testDenialErrorMessagesForSearchAction() { ElasticsearchSecurityException securityException = expectThrows( ElasticsearchSecurityException.class, - () -> authorize(authentication, SearchAction.NAME, request) + () -> authorize(authentication, TransportSearchAction.TYPE.name(), request) ); assertThat( securityException, throwableWithMessage( containsString( "[" - + SearchAction.NAME + + TransportSearchAction.TYPE.name() + "] is unauthorized" + " for user [" + user.principal() @@ -1963,7 +1982,12 @@ public void testGrantAllRestrictedUserCannotExecuteOperationAgainstSecurityIndic requests.add( new Tuple<>(BulkAction.NAME + "[s]", new IndexRequest(randomFrom(SECURITY_MAIN_ALIAS, INTERNAL_SECURITY_MAIN_INDEX_7))) ); - requests.add(new Tuple<>(SearchAction.NAME, new SearchRequest(randomFrom(SECURITY_MAIN_ALIAS, INTERNAL_SECURITY_MAIN_INDEX_7)))); + requests.add( + new Tuple<>( + TransportSearchAction.TYPE.name(), + new SearchRequest(randomFrom(SECURITY_MAIN_ALIAS, INTERNAL_SECURITY_MAIN_INDEX_7)) + ) + ); requests.add( new Tuple<>( TermVectorsAction.NAME, @@ -2050,7 +2074,7 @@ public void testGrantAllRestrictedUserCannotExecuteOperationAgainstSecurityIndic verifyNoMoreInteractions(auditTrail); final SearchRequest searchRequest = new SearchRequest("_all"); - authorize(authentication, SearchAction.NAME, searchRequest); + authorize(authentication, TransportSearchAction.TYPE.name(), searchRequest); assertEquals(2, searchRequest.indices().length); 
assertEquals(IndicesAndAliasesResolverField.NO_INDICES_OR_ALIASES_LIST, Arrays.asList(searchRequest.indices())); } @@ -2144,7 +2168,12 @@ public void testSuperusersCanExecuteReadOperationAgainstSecurityIndex() { final String requestId = AuditUtil.getOrGenerateRequestId(threadContext); List<Tuple<String, TransportRequest>> requests = new ArrayList<>(); - requests.add(new Tuple<>(SearchAction.NAME, new SearchRequest(randomFrom(SECURITY_MAIN_ALIAS, INTERNAL_SECURITY_MAIN_INDEX_7)))); + requests.add( + new Tuple<>( + TransportSearchAction.TYPE.name(), + new SearchRequest(randomFrom(SECURITY_MAIN_ALIAS, INTERNAL_SECURITY_MAIN_INDEX_7)) + ) + ); requests.add( new Tuple<>( TermVectorsAction.NAME, @@ -2274,7 +2303,7 @@ public void testSuperusersCanExecuteReadOperationAgainstSecurityIndexWithWildcar ); final String requestId = AuditUtil.getOrGenerateRequestId(threadContext); - String action = SearchAction.NAME; + String action = TransportSearchAction.TYPE.name(); SearchRequest request = new SearchRequest("_all"); authorize(authentication, action, request); verify(auditTrail).accessGranted(eq(requestId), eq(authentication), eq(action), eq(request), authzInfoRoles(superuser.roles())); @@ -2360,7 +2389,7 @@ public void testCompositeActionsIndicesAreCheckedAtTheShardLevel() { } case 1 -> { // reindex, msearch, search template, and multi search template delegate to search - action = SearchAction.NAME; + action = TransportSearchAction.TYPE.name(); request = mockRequest; } case 2 -> { @@ -2955,7 +2984,7 @@ private BulkShardRequest createBulkShardRequest(String indexName, BiFunction<String, String, DocWriteRequest<?>> req) { private Tuple<String, TransportRequest> randomCompositeRequest() { return switch (randomIntBetween(0, 7)) { case 0 -> Tuple.tuple(MultiGetAction.NAME, new MultiGetRequest().add("index", "id")); - case 1 -> Tuple.tuple(MultiSearchAction.NAME, new MultiSearchRequest().add(new SearchRequest())); + case 1 -> Tuple.tuple(TransportMultiSearchAction.TYPE.name(), new MultiSearchRequest().add(new SearchRequest())); case 2 -> Tuple.tuple(MultiTermVectorsAction.NAME, new MultiTermVectorsRequest().add("index", "id")); case 3 -> Tuple.tuple(BulkAction.NAME, new BulkRequest().add(new DeleteRequest("index", "id"))); case 4 -> Tuple.tuple("indices:data/read/mpercolate", new MockCompositeIndicesRequest()); @@ -3461,7 +3490,10 @@ public void testActionDeniedForCrossClusterAccessAuthentication() { public void testRoleRestrictionAccessDenial() { Tuple<String, TransportRequest> tuple = randomFrom( - asList(new Tuple<>(SearchAction.NAME, new SearchRequest()), new Tuple<>(SqlQueryAction.NAME, new SqlQueryRequest())) + asList( + new Tuple<>(TransportSearchAction.TYPE.name(), new SearchRequest()), + new Tuple<>(SqlQueryAction.NAME, new SqlQueryRequest()) + ) ); String action = tuple.v1(); TransportRequest request = tuple.v2(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java index 35071628603c2..fd2c0c7c6e8d8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.security.authz; import org.elasticsearch.action.admin.indices.resolve.ResolveIndexAction; -import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.PlainActionFuture; import
org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.DataStream; @@ -110,7 +110,7 @@ public void testAuthorizedIndicesUserWithSomeRoles() { Role roles = future.actionGet(); AuthorizedIndices authorizedIndices = RBACEngine.resolveAuthorizedIndicesFromRole( roles, - getRequestInfo(SearchAction.NAME), + getRequestInfo(TransportSearchAction.TYPE.name()), metadata.getIndicesLookup(), () -> ignore -> {} ); @@ -129,7 +129,7 @@ public void testAuthorizedIndicesUserWithSomeRolesEmptyMetadata() { Role role = Role.builder(RESTRICTED_INDICES, "role").add(IndexPrivilege.ALL, "*").build(); AuthorizedIndices authorizedIndices = RBACEngine.resolveAuthorizedIndicesFromRole( role, - getRequestInfo(SearchAction.NAME), + getRequestInfo(TransportSearchAction.TYPE.name()), Metadata.EMPTY_METADATA.getIndicesLookup(), () -> ignore -> {} ); @@ -140,7 +140,7 @@ public void testSecurityIndicesAreRemovedFromRegularUser() { Role role = Role.builder(RESTRICTED_INDICES, "user_role").add(IndexPrivilege.ALL, "*").cluster(Set.of("all"), Set.of()).build(); AuthorizedIndices authorizedIndices = RBACEngine.resolveAuthorizedIndicesFromRole( role, - getRequestInfo(SearchAction.NAME), + getRequestInfo(TransportSearchAction.TYPE.name()), Metadata.EMPTY_METADATA.getIndicesLookup(), () -> ignore -> {} ); @@ -172,7 +172,7 @@ public void testSecurityIndicesAreRestrictedForDefaultRole() { AuthorizedIndices authorizedIndices = RBACEngine.resolveAuthorizedIndicesFromRole( role, - getRequestInfo(SearchAction.NAME), + getRequestInfo(TransportSearchAction.TYPE.name()), metadata.getIndicesLookup(), () -> ignore -> {} ); @@ -210,7 +210,7 @@ public void testSecurityIndicesAreNotRemovedFromUnrestrictedRole() { AuthorizedIndices authorizedIndices = RBACEngine.resolveAuthorizedIndicesFromRole( role, - getRequestInfo(SearchAction.NAME), + getRequestInfo(TransportSearchAction.TYPE.name()), metadata.getIndicesLookup(), () -> ignore -> {} ); @@ -221,7 +221,7 @@ public void testSecurityIndicesAreNotRemovedFromUnrestrictedRole() { AuthorizedIndices authorizedIndicesSuperUser = RBACEngine.resolveAuthorizedIndicesFromRole( role, - getRequestInfo(SearchAction.NAME), + getRequestInfo(TransportSearchAction.TYPE.name()), metadata.getIndicesLookup(), () -> ignore -> {} ); @@ -292,7 +292,7 @@ public void testDataStreamsAreNotIncludedInAuthorizedIndices() { Role roles = future.actionGet(); AuthorizedIndices authorizedIndices = RBACEngine.resolveAuthorizedIndicesFromRole( roles, - getRequestInfo(SearchAction.NAME), + getRequestInfo(TransportSearchAction.TYPE.name()), metadata.getIndicesLookup(), () -> ignore -> {} ); @@ -374,7 +374,7 @@ public void testDataStreamsAreIncludedInAuthorizedIndices() { ); Role roles = future.actionGet(); TransportRequest request = new ResolveIndexAction.Request(new String[] { "a*" }); - AuthorizationEngine.RequestInfo requestInfo = getRequestInfo(request, SearchAction.NAME); + AuthorizationEngine.RequestInfo requestInfo = getRequestInfo(request, TransportSearchAction.TYPE.name()); AuthorizedIndices authorizedIndices = RBACEngine.resolveAuthorizedIndicesFromRole( roles, requestInfo, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java index 81bc002b4ca7e..0709e775776f1 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java @@ -24,12 +24,12 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.get.MultiGetRequest; -import org.elasticsearch.action.search.MultiSearchAction; import org.elasticsearch.action.search.MultiSearchRequest; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchShardsAction; import org.elasticsearch.action.search.SearchShardsRequest; +import org.elasticsearch.action.search.TransportMultiSearchAction; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchShardsAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; @@ -404,8 +404,10 @@ public void testDashIndicesAreAllowedInShardLevelRequests() { // aliases with names starting with '-' or '+' can be created up to version 5.x and can be around in 6.x ShardSearchRequest request = mock(ShardSearchRequest.class); when(request.indices()).thenReturn(new String[] { "-index10", "-index20", "+index30" }); - List indices = IndicesAndAliasesResolver.resolveIndicesAndAliasesWithoutWildcards(SearchAction.NAME + "[s]", request) - .getLocal(); + List indices = IndicesAndAliasesResolver.resolveIndicesAndAliasesWithoutWildcards( + TransportSearchAction.TYPE.name() + "[s]", + request + ).getLocal(); String[] expectedIndices = new String[] { "-index10", "-index20", "+index30" }; assertThat(indices, hasSize(expectedIndices.length)); assertThat(indices, hasItems(expectedIndices)); @@ -416,7 +418,7 @@ public void testWildcardsAreNotAllowedInShardLevelRequests() { when(request.indices()).thenReturn(new String[] { "index*" }); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, - () -> IndicesAndAliasesResolver.resolveIndicesAndAliasesWithoutWildcards(SearchAction.NAME + "[s]", request) + () -> IndicesAndAliasesResolver.resolveIndicesAndAliasesWithoutWildcards(TransportSearchAction.TYPE.name() + "[s]", request) ); assertThat( exception, @@ -441,7 +443,7 @@ public void testAllIsNotAllowedInShardLevelRequests() { } IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, - () -> IndicesAndAliasesResolver.resolveIndicesAndAliasesWithoutWildcards(SearchAction.NAME + "[s]", request) + () -> IndicesAndAliasesResolver.resolveIndicesAndAliasesWithoutWildcards(TransportSearchAction.TYPE.name() + "[s]", request) ); assertThat( @@ -457,7 +459,8 @@ public void testAllIsNotAllowedInShardLevelRequests() { public void testExplicitDashIndices() { SearchRequest request = new SearchRequest("-index10", "-index20"); - List indices = resolveIndices(request, buildAuthorizedIndices(userDashIndices, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(userDashIndices, TransportSearchAction.TYPE.name())) + .getLocal(); String[] expectedIndices = new String[] { "-index10", "-index20" }; assertThat(indices, hasSize(expectedIndices.length)); assertThat(request.indices().length, equalTo(expectedIndices.length)); @@ -472,7 +475,8 @@ public void testWildcardDashIndices() { } else { request = new SearchRequest("*", "--index20"); } - List indices = resolveIndices(request, 
buildAuthorizedIndices(userDashIndices, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(userDashIndices, TransportSearchAction.TYPE.name())) + .getLocal(); String[] expectedIndices = new String[] { "-index10", "-index11", "-index21" }; assertThat(indices, hasSize(expectedIndices.length)); assertThat(request.indices().length, equalTo(expectedIndices.length)); @@ -482,7 +486,8 @@ public void testWildcardDashIndices() { public void testExplicitMixedWildcardDashIndices() { SearchRequest request = new SearchRequest("-index21", "-does_not_exist", "-index1*", "--index11"); - List indices = resolveIndices(request, buildAuthorizedIndices(userDashIndices, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(userDashIndices, TransportSearchAction.TYPE.name())) + .getLocal(); String[] expectedIndices = new String[] { "-index10", "-index21", "-does_not_exist" }; assertThat(indices, hasSize(expectedIndices.length)); assertThat(request.indices().length, equalTo(expectedIndices.length)); @@ -493,7 +498,8 @@ public void testExplicitMixedWildcardDashIndices() { public void testDashIndicesNoExpandWildcard() { SearchRequest request = new SearchRequest("-index1*", "--index11"); request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), false, false)); - List indices = resolveIndices(request, buildAuthorizedIndices(userDashIndices, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(userDashIndices, TransportSearchAction.TYPE.name())) + .getLocal(); String[] expectedIndices = new String[] { "-index1*", "--index11" }; assertThat(indices, hasSize(expectedIndices.length)); assertThat(request.indices().length, equalTo(expectedIndices.length)); @@ -504,7 +510,8 @@ public void testDashIndicesNoExpandWildcard() { public void testDashIndicesMinus() { SearchRequest request = new SearchRequest("-index10", "-index11", "--index11", "-index20"); request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), randomBoolean(), randomBoolean())); - List indices = resolveIndices(request, buildAuthorizedIndices(userDashIndices, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(userDashIndices, TransportSearchAction.TYPE.name())) + .getLocal(); String[] expectedIndices = new String[] { "-index10", "-index11", "--index11", "-index20" }; assertThat(indices, hasSize(expectedIndices.length)); assertThat(request.indices().length, equalTo(expectedIndices.length)); @@ -517,14 +524,15 @@ public void testDashIndicesPlus() { request.indicesOptions(IndicesOptions.fromOptions(true, false, randomBoolean(), randomBoolean())); expectThrows( IndexNotFoundException.class, - () -> resolveIndices(request, buildAuthorizedIndices(userDashIndices, SearchAction.NAME)) + () -> resolveIndices(request, buildAuthorizedIndices(userDashIndices, TransportSearchAction.TYPE.name())) ); } public void testDashNotExistingIndex() { SearchRequest request = new SearchRequest("-does_not_exist"); request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), randomBoolean(), randomBoolean())); - List indices = resolveIndices(request, buildAuthorizedIndices(userDashIndices, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(userDashIndices, TransportSearchAction.TYPE.name())) + .getLocal(); String[] expectedIndices = new String[] { "-does_not_exist" }; assertThat(indices, hasSize(expectedIndices.length)); 
assertThat(request.indices().length, equalTo(expectedIndices.length)); @@ -535,7 +543,7 @@ public void testDashNotExistingIndex() { public void testResolveEmptyIndicesExpandWilcardsOpenAndClosed() { SearchRequest request = new SearchRequest(); request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, true)); - List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); String[] replacedIndices = new String[] { "bar", "bar-closed", "foofoobar", "foobarfoo", "foofoo", "foofoo-closed" }; assertThat(indices, hasSize(replacedIndices.length)); assertThat(request.indices().length, equalTo(replacedIndices.length)); @@ -546,7 +554,7 @@ public void testResolveEmptyIndicesExpandWilcardsOpenAndClosed() { public void testResolveEmptyIndicesExpandWilcardsOpen() { SearchRequest request = new SearchRequest(); request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, false)); - List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); String[] replacedIndices = new String[] { "bar", "foofoobar", "foobarfoo", "foofoo" }; assertSameValues(indices, replacedIndices); assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); @@ -555,7 +563,7 @@ public void testResolveEmptyIndicesExpandWilcardsOpen() { public void testResolveAllExpandWilcardsOpenAndClosed() { SearchRequest request = new SearchRequest("_all"); request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, true)); - List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); String[] replacedIndices = new String[] { "bar", "bar-closed", "foofoobar", "foobarfoo", "foofoo", "foofoo-closed" }; assertThat(indices, hasSize(replacedIndices.length)); assertThat(request.indices().length, equalTo(replacedIndices.length)); @@ -566,7 +574,7 @@ public void testResolveAllExpandWilcardsOpenAndClosed() { public void testResolveAllExpandWilcardsOpen() { SearchRequest request = new SearchRequest("_all"); request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, false)); - List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); String[] replacedIndices = new String[] { "bar", "foofoobar", "foobarfoo", "foofoo" }; assertThat(indices, hasSize(replacedIndices.length)); assertThat(request.indices().length, equalTo(replacedIndices.length)); @@ -577,7 +585,7 @@ public void testResolveAllExpandWilcardsOpen() { public void testResolveWildcardsStrictExpand() { SearchRequest request = new SearchRequest("barbaz", "foofoo*"); request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), true, true)); - List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); String[] replacedIndices = new String[] { "barbaz", "foofoobar", "foofoo", 
"foofoo-closed" }; assertThat(indices, hasSize(replacedIndices.length)); assertThat(request.indices().length, equalTo(replacedIndices.length)); @@ -588,7 +596,7 @@ public void testResolveWildcardsStrictExpand() { public void testResolveWildcardsExpandOpenAndClosedIgnoreUnavailable() { SearchRequest request = new SearchRequest("barbaz", "foofoo*"); request.indicesOptions(IndicesOptions.fromOptions(true, randomBoolean(), true, true)); - List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); String[] replacedIndices = new String[] { "foofoobar", "foofoo", "foofoo-closed" }; assertThat(indices, hasSize(replacedIndices.length)); assertThat(request.indices().length, equalTo(replacedIndices.length)); @@ -599,7 +607,7 @@ public void testResolveWildcardsExpandOpenAndClosedIgnoreUnavailable() { public void testResolveWildcardsStrictExpandOpen() { SearchRequest request = new SearchRequest("barbaz", "foofoo*"); request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), true, false)); - List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); String[] replacedIndices = new String[] { "barbaz", "foofoobar", "foofoo" }; assertThat(indices, hasSize(replacedIndices.length)); assertThat(request.indices().length, equalTo(replacedIndices.length)); @@ -610,7 +618,7 @@ public void testResolveWildcardsStrictExpandOpen() { public void testResolveWildcardsLenientExpandOpen() { SearchRequest request = new SearchRequest("barbaz", "foofoo*"); request.indicesOptions(IndicesOptions.fromOptions(true, randomBoolean(), true, false)); - List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); String[] replacedIndices = new String[] { "foofoobar", "foofoo" }; assertThat(indices, hasSize(replacedIndices.length)); assertThat(request.indices().length, equalTo(replacedIndices.length)); @@ -621,7 +629,7 @@ public void testResolveWildcardsLenientExpandOpen() { public void testResolveWildcardsMinusExpandWilcardsOpen() { SearchRequest request = new SearchRequest("*", "-foofoo*"); request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, false)); - List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); String[] replacedIndices = new String[] { "bar", "foobarfoo" }; assertThat(indices, hasSize(replacedIndices.length)); assertThat(request.indices().length, equalTo(replacedIndices.length)); @@ -632,7 +640,7 @@ public void testResolveWildcardsMinusExpandWilcardsOpen() { public void testResolveWildcardsMinusExpandWilcardsOpenAndClosed() { SearchRequest request = new SearchRequest("*", "-foofoo*"); request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, true)); - List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); String[] replacedIndices = new String[] { 
"bar", "foobarfoo", "bar-closed" }; assertThat(indices, hasSize(replacedIndices.length)); assertThat(request.indices().length, equalTo(replacedIndices.length)); @@ -644,20 +652,20 @@ public void testResolveWildcardsNoExpand() { SearchRequest request = new SearchRequest("*", "-foofoo*"); // no wildcard expand and no ignore unavailable request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), false, false)); - ResolvedIndices indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)); + ResolvedIndices indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())); String[] replacedIndices = new String[] { "*", "-foofoo*" }; assertThat(indices.getLocal(), containsInAnyOrder(replacedIndices)); assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); // no wildcard expand but ignore unavailable request = new SearchRequest("*", "-foofoo*"); request.indicesOptions(IndicesOptions.fromOptions(true, true, false, false)); - indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)); + indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())); assertNoIndices(request, indices); SearchRequest disallowNoIndicesRequest = new SearchRequest("*", "-foofoo*"); disallowNoIndicesRequest.indicesOptions(IndicesOptions.fromOptions(true, false, false, false)); IndexNotFoundException e = expectThrows( IndexNotFoundException.class, - () -> resolveIndices(disallowNoIndicesRequest, buildAuthorizedIndices(user, SearchAction.NAME)) + () -> resolveIndices(disallowNoIndicesRequest, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())) ); assertEquals("no such index [[*, -foofoo*]]", e.getMessage()); } @@ -665,7 +673,7 @@ public void testResolveWildcardsNoExpand() { public void testResolveWildcardsExclusionsExpandWilcardsOpenStrict() { SearchRequest request = new SearchRequest("*", "-foofoo*", "barbaz", "foob*"); request.indicesOptions(IndicesOptions.fromOptions(false, true, true, false)); - List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); String[] replacedIndices = new String[] { "bar", "foobarfoo", "barbaz" }; assertSameValues(indices, replacedIndices); assertThat(request.indices(), arrayContainingInAnyOrder("bar", "foobarfoo", "barbaz", "foobarfoo")); @@ -674,7 +682,7 @@ public void testResolveWildcardsExclusionsExpandWilcardsOpenStrict() { public void testResolveWildcardsPlusAndMinusExpandWilcardsOpenIgnoreUnavailable() { SearchRequest request = new SearchRequest("*", "-foofoo*", "+barbaz", "+foob*"); request.indicesOptions(IndicesOptions.fromOptions(true, true, true, false)); - List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); String[] replacedIndices = new String[] { "bar", "foobarfoo" }; assertThat(indices, hasSize(replacedIndices.length)); assertThat(request.indices().length, equalTo(replacedIndices.length)); @@ -685,7 +693,7 @@ public void testResolveWildcardsPlusAndMinusExpandWilcardsOpenIgnoreUnavailable( public void testResolveWildcardsExclusionExpandWilcardsOpenAndClosedStrict() { SearchRequest request = new SearchRequest("*", "-foofoo*", "barbaz"); request.indicesOptions(IndicesOptions.fromOptions(false, 
randomBoolean(), true, true)); - List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); String[] replacedIndices = new String[] { "bar", "bar-closed", "barbaz", "foobarfoo" }; assertSameValues(indices, replacedIndices); assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); @@ -694,7 +702,7 @@ public void testResolveWildcardsExclusionExpandWilcardsOpenAndClosedStrict() { public void testResolveWildcardsExclusionExpandWilcardsOpenAndClosedIgnoreUnavailable() { SearchRequest request = new SearchRequest("*", "-foofoo*", "barbaz"); request.indicesOptions(IndicesOptions.fromOptions(true, randomBoolean(), true, true)); - List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); String[] replacedIndices = new String[] { "bar", "bar-closed", "foobarfoo" }; assertThat(indices, hasSize(replacedIndices.length)); assertThat(indices, hasItems(replacedIndices)); @@ -704,7 +712,7 @@ public void testResolveWildcardsExclusionExpandWilcardsOpenAndClosedIgnoreUnavai public void testResolveNonMatchingIndicesAllowNoIndices() { SearchRequest request = new SearchRequest("missing*"); request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), true, true, randomBoolean())); - assertNoIndices(request, resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME))); + assertNoIndices(request, resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name()))); } public void testResolveNonMatchingIndicesDisallowNoIndices() { @@ -712,7 +720,7 @@ public void testResolveNonMatchingIndicesDisallowNoIndices() { request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, randomBoolean())); IndexNotFoundException e = expectThrows( IndexNotFoundException.class, - () -> resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)) + () -> resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())) ); assertEquals("no such index [missing*]", e.getMessage()); } @@ -720,7 +728,7 @@ public void testResolveNonMatchingIndicesDisallowNoIndices() { public void testResolveExplicitIndicesStrict() { SearchRequest request = new SearchRequest("missing", "bar", "barbaz"); request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), randomBoolean(), randomBoolean())); - List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); String[] replacedIndices = new String[] { "missing", "bar", "barbaz" }; assertThat(indices, hasSize(replacedIndices.length)); assertThat(request.indices().length, equalTo(replacedIndices.length)); @@ -731,7 +739,7 @@ public void testResolveExplicitIndicesStrict() { public void testResolveExplicitIndicesIgnoreUnavailable() { SearchRequest request = new SearchRequest("missing", "missing-and-unauthorized", "bar", "barbaz"); request.indicesOptions(IndicesOptions.fromOptions(true, randomBoolean(), randomBoolean(), randomBoolean())); - List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(user, 
SearchAction.NAME)).getLocal(); + List<String> indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); assertThat(indices, containsInAnyOrder("bar", "missing")); assertThat(request.indices(), arrayContainingInAnyOrder("bar", "missing")); } @@ -739,7 +747,7 @@ public void testResolveNoAuthorizedIndicesAllowNoIndices() { SearchRequest request = new SearchRequest(); request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), true, true, randomBoolean())); - assertNoIndices(request, resolveIndices(request, buildAuthorizedIndices(userNoIndices, SearchAction.NAME))); + assertNoIndices(request, resolveIndices(request, buildAuthorizedIndices(userNoIndices, TransportSearchAction.TYPE.name()))); } public void testResolveNoAuthorizedIndicesDisallowNoIndices() { @@ -747,7 +755,7 @@ request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, randomBoolean())); IndexNotFoundException e = expectThrows( IndexNotFoundException.class, - () -> resolveIndices(request, buildAuthorizedIndices(userNoIndices, SearchAction.NAME)) + () -> resolveIndices(request, buildAuthorizedIndices(userNoIndices, TransportSearchAction.TYPE.name())) ); assertEquals("no such index [[]]", e.getMessage()); } @@ -755,7 +763,7 @@ public void testResolveMissingIndexStrict() { SearchRequest request = new SearchRequest("bar*", "missing"); request.indicesOptions(IndicesOptions.fromOptions(false, true, true, false)); - List<String> indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List<String> indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); String[] expectedIndices = new String[] { "bar", "missing" }; assertThat(indices, hasSize(expectedIndices.length)); assertThat(request.indices().length, equalTo(expectedIndices.length)); @@ -766,7 +774,7 @@ public void testResolveMissingIndexIgnoreUnavailable() { SearchRequest request = new SearchRequest("bar*", "missing", "missing-and-unauthorized"); request.indicesOptions(IndicesOptions.fromOptions(true, randomBoolean(), true, false)); - List<String> indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List<String> indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); assertThat(indices, containsInAnyOrder("bar", "missing")); assertThat(request.indices(), arrayContainingInAnyOrder("bar", "missing")); } @@ -774,7 +782,7 @@ public void testResolveNonMatchingIndicesAndExplicit() { SearchRequest request = new SearchRequest("missing*", "bar"); request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), true, true, randomBoolean())); - List<String> indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List<String> indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); String[] expectedIndices = new String[] { "bar" }; assertThat(indices.toArray(new String[indices.size()]), equalTo(expectedIndices)); assertThat(request.indices(), equalTo(expectedIndices)); @@ -783,7 +791,7 @@ public void testResolveNoExpandStrict() { SearchRequest request = new SearchRequest("missing*"); request.indicesOptions(IndicesOptions.fromOptions(false,
randomBoolean(), false, false)); - List<String> indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List<String> indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); String[] expectedIndices = new String[] { "missing*" }; assertThat(indices.toArray(new String[indices.size()]), equalTo(expectedIndices)); assertThat(request.indices(), equalTo(expectedIndices)); @@ -792,13 +800,13 @@ public void testResolveNoExpandIgnoreUnavailable() { SearchRequest request = new SearchRequest("missing*"); request.indicesOptions(IndicesOptions.fromOptions(true, true, false, false)); - assertNoIndices(request, resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME))); + assertNoIndices(request, resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name()))); } public void testSearchWithRemoteIndex() { SearchRequest request = new SearchRequest("remote:indexName"); request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); - final ResolvedIndices resolved = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)); + final ResolvedIndices resolved = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())); assertThat(resolved.getLocal(), emptyIterable()); assertThat(resolved.getRemote(), containsInAnyOrder("remote:indexName")); assertThat(request.indices(), arrayContaining("remote:indexName")); @@ -807,7 +815,7 @@ public void testSearchWithRemoteAndLocalIndices() { SearchRequest request = new SearchRequest("remote:indexName", "bar", "bar2"); request.indicesOptions(IndicesOptions.fromOptions(true, randomBoolean(), randomBoolean(), randomBoolean())); - final ResolvedIndices resolved = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)); + final ResolvedIndices resolved = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())); assertThat(resolved.getLocal(), containsInAnyOrder("bar")); assertThat(resolved.getRemote(), containsInAnyOrder("remote:indexName")); assertThat(request.indices(), arrayContainingInAnyOrder("remote:indexName", "bar")); @@ -816,7 +824,7 @@ public void testSearchWithRemoteAndLocalWildcards() { SearchRequest request = new SearchRequest("*:foo", "r*:bar*", "remote:baz*", "bar*", "foofoo"); request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, false)); - final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, SearchAction.NAME); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, TransportSearchAction.TYPE.name()); final ResolvedIndices resolved = resolveIndices(request, authorizedIndices); assertThat(resolved.getRemote(), containsInAnyOrder("remote:foo", "other_remote:foo", "remote:bar*", "remote:baz*")); assertThat(resolved.getLocal(), containsInAnyOrder("bar", "foofoo")); @@ -1503,7 +1511,7 @@ public void testResolveAliasesAllGetAliasesRequestNoAuthorizedIndices() { public void testRemotableRequestsAllowRemoteIndices() { IndicesOptions options = IndicesOptions.fromOptions(true, false, false, false); Tuple<TransportRequest, String> tuple = randomFrom( - new Tuple<TransportRequest, String>(new SearchRequest("remote:foo").indicesOptions(options), SearchAction.NAME), + new Tuple<TransportRequest, String>(new SearchRequest("remote:foo").indicesOptions(options),
TransportSearchAction.TYPE.name()), new Tuple<TransportRequest, String>( new FieldCapabilitiesRequest().indices("remote:foo").indicesOptions(options), FieldCapabilitiesAction.NAME @@ -1555,7 +1563,10 @@ public void testCompositeIndicesRequestIsNotSupported() { new MultiTermVectorsRequest(), new BulkRequest() ); - expectThrows(IllegalStateException.class, () -> resolveIndices(request, buildAuthorizedIndices(user, MultiSearchAction.NAME))); + expectThrows( + IllegalStateException.class, + () -> resolveIndices(request, buildAuthorizedIndices(user, TransportMultiSearchAction.TYPE.name())) + ); } public void testResolveAdminAction() { @@ -1581,7 +1592,10 @@ public void testResolveAdminAction() { public void testXPackSecurityUserHasAccessToSecurityIndex() { SearchRequest request = new SearchRequest(); { - final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(InternalUsers.XPACK_SECURITY_USER, SearchAction.NAME); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices( + InternalUsers.XPACK_SECURITY_USER, + TransportSearchAction.TYPE.name() + ); List<String> indices = resolveIndices(request, authorizedIndices).getLocal(); assertThat(indices, hasItem(SECURITY_MAIN_ALIAS)); } @@ -1599,7 +1613,7 @@ public void testXPackSecurityUserHasAccessToSecurityIndex() { public void testXPackUserDoesNotHaveAccessToSecurityIndex() { SearchRequest request = new SearchRequest(); - final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(InternalUsers.XPACK_USER, SearchAction.NAME); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(InternalUsers.XPACK_USER, TransportSearchAction.TYPE.name()); List<String> indices = resolveIndices(request, authorizedIndices).getLocal(); assertThat(indices, not(hasItem(SECURITY_MAIN_ALIAS))); } @@ -1618,7 +1632,7 @@ public void testNonXPackUserAccessingSecurityIndex() { { SearchRequest request = new SearchRequest(); - final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(allAccessUser, SearchAction.NAME); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(allAccessUser, TransportSearchAction.TYPE.name()); List<String> indices = resolveIndices(request, authorizedIndices).getLocal(); assertThat(indices, not(hasItem(SECURITY_MAIN_ALIAS))); } @@ -1635,7 +1649,7 @@ public void testNonXPackUserAccessingSecurityIndex() { public void testUnauthorizedDateMathExpressionIgnoreUnavailable() { SearchRequest request = new SearchRequest("<datetime-{now/M}>"); request.indicesOptions(IndicesOptions.fromOptions(true, true, randomBoolean(), randomBoolean())); - assertNoIndices(request, resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME))); + assertNoIndices(request, resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name()))); } public void testUnauthorizedDateMathExpressionIgnoreUnavailableDisallowNoIndices() { @@ -1643,7 +1657,7 @@ request.indicesOptions(IndicesOptions.fromOptions(true, false, randomBoolean(), randomBoolean())); IndexNotFoundException e = expectThrows( IndexNotFoundException.class, - () -> resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)) + () -> resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())) ); assertEquals("no such index [[]]", e.getMessage()); } @@ -1653,7 +1667,7 @@ public void testUnauthorizedDateMathExpressionStrict() { + DateTimeFormatter.ofPattern("uuuu.MM.dd", Locale.ROOT).format(ZonedDateTime.now(ZoneOffset.UTC).withDayOfMonth(1)); SearchRequest request = new
SearchRequest(""); request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), randomBoolean(), randomBoolean())); - List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); assertThat(indices, contains(expectedIndex)); } @@ -1677,7 +1691,7 @@ public void testResolveDateMathExpression() { final boolean expandIndicesOpen = Regex.isSimpleMatchPattern(pattern) ? true : randomBoolean(); request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), expandIndicesOpen, randomBoolean())); } - List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); assertThat(indices, hasSize(1)); assertThat(request.indices()[0], equalTo(dateTimeIndex)); } @@ -1685,7 +1699,7 @@ public void testResolveDateMathExpression() { public void testMissingDateMathExpressionIgnoreUnavailable() { SearchRequest request = new SearchRequest(""); request.indicesOptions(IndicesOptions.fromOptions(true, true, randomBoolean(), randomBoolean())); - assertNoIndices(request, resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME))); + assertNoIndices(request, resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name()))); } public void testMissingDateMathExpressionIgnoreUnavailableDisallowNoIndices() { @@ -1693,7 +1707,7 @@ public void testMissingDateMathExpressionIgnoreUnavailableDisallowNoIndices() { request.indicesOptions(IndicesOptions.fromOptions(true, false, randomBoolean(), randomBoolean())); IndexNotFoundException e = expectThrows( IndexNotFoundException.class, - () -> resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)) + () -> resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())) ); assertEquals("no such index [[]]", e.getMessage()); } @@ -1703,7 +1717,7 @@ public void testMissingDateMathExpressionStrict() { + DateTimeFormatter.ofPattern("uuuu.MM.dd", Locale.ROOT).format(ZonedDateTime.now(ZoneOffset.UTC).withDayOfMonth(1)); SearchRequest request = new SearchRequest(""); request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), randomBoolean(), randomBoolean())); - List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + List indices = resolveIndices(request, buildAuthorizedIndices(user, TransportSearchAction.TYPE.name())).getLocal(); assertThat(indices, contains(expectedIndex)); } @@ -1774,9 +1788,9 @@ public void testWhenAliasToMultipleIndicesAndUserIsAuthorizedUsingAliasReturnsIn public void testHiddenIndicesResolution() { SearchRequest searchRequest = new SearchRequest(); searchRequest.indicesOptions(IndicesOptions.fromOptions(false, false, true, true, true)); - AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, SearchAction.NAME); + AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, TransportSearchAction.TYPE.name()); ResolvedIndices resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases( - SearchAction.NAME, + TransportSearchAction.TYPE.name(), searchRequest, metadata, authorizedIndices @@ -1803,7 +1817,12 @@ public void testHiddenIndicesResolution() { // open + hidden searchRequest = new SearchRequest(); searchRequest.indicesOptions(IndicesOptions.fromOptions(false, 
false, true, false, true)); - resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(SearchAction.NAME, searchRequest, metadata, authorizedIndices); + resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases( + TransportSearchAction.TYPE.name(), + searchRequest, + metadata, + authorizedIndices + ); assertThat( resolvedIndices.getLocal(), containsInAnyOrder( @@ -1822,52 +1841,77 @@ public void testHiddenIndicesResolution() { // open + implicit hidden for . indices searchRequest = new SearchRequest(randomFrom(".h*", ".hid*")); searchRequest.indicesOptions(IndicesOptions.fromOptions(false, false, true, false, false)); - authorizedIndices = buildAuthorizedIndices(user, SearchAction.NAME); - resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(SearchAction.NAME, searchRequest, metadata, authorizedIndices); + authorizedIndices = buildAuthorizedIndices(user, TransportSearchAction.TYPE.name()); + resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases( + TransportSearchAction.TYPE.name(), + searchRequest, + metadata, + authorizedIndices + ); assertThat(resolvedIndices.getLocal(), containsInAnyOrder(".hidden-open")); assertThat(resolvedIndices.getRemote(), emptyIterable()); // closed + hidden, ignore aliases searchRequest = new SearchRequest(); searchRequest.indicesOptions(IndicesOptions.fromOptions(false, false, false, true, true, true, false, true, false)); - authorizedIndices = buildAuthorizedIndices(user, SearchAction.NAME); - resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(SearchAction.NAME, searchRequest, metadata, authorizedIndices); + authorizedIndices = buildAuthorizedIndices(user, TransportSearchAction.TYPE.name()); + resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases( + TransportSearchAction.TYPE.name(), + searchRequest, + metadata, + authorizedIndices + ); assertThat(resolvedIndices.getLocal(), containsInAnyOrder("bar-closed", "foofoo-closed", "hidden-closed", ".hidden-closed")); assertThat(resolvedIndices.getRemote(), emptyIterable()); // closed + implicit hidden for . 
@@ -1822,52 +1841,77 @@ public void testHiddenIndicesResolution() {
         // open + implicit hidden for . indices
         searchRequest = new SearchRequest(randomFrom(".h*", ".hid*"));
         searchRequest.indicesOptions(IndicesOptions.fromOptions(false, false, true, false, false));
-        authorizedIndices = buildAuthorizedIndices(user, SearchAction.NAME);
-        resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(SearchAction.NAME, searchRequest, metadata, authorizedIndices);
+        authorizedIndices = buildAuthorizedIndices(user, TransportSearchAction.TYPE.name());
+        resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(
+            TransportSearchAction.TYPE.name(),
+            searchRequest,
+            metadata,
+            authorizedIndices
+        );
         assertThat(resolvedIndices.getLocal(), containsInAnyOrder(".hidden-open"));
         assertThat(resolvedIndices.getRemote(), emptyIterable());

         // closed + hidden, ignore aliases
         searchRequest = new SearchRequest();
         searchRequest.indicesOptions(IndicesOptions.fromOptions(false, false, false, true, true, true, false, true, false));
-        authorizedIndices = buildAuthorizedIndices(user, SearchAction.NAME);
-        resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(SearchAction.NAME, searchRequest, metadata, authorizedIndices);
+        authorizedIndices = buildAuthorizedIndices(user, TransportSearchAction.TYPE.name());
+        resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(
+            TransportSearchAction.TYPE.name(),
+            searchRequest,
+            metadata,
+            authorizedIndices
+        );
         assertThat(resolvedIndices.getLocal(), containsInAnyOrder("bar-closed", "foofoo-closed", "hidden-closed", ".hidden-closed"));
         assertThat(resolvedIndices.getRemote(), emptyIterable());

         // closed + implicit hidden for . indices
         searchRequest = new SearchRequest(randomFrom(".h*", ".hid*"));
         searchRequest.indicesOptions(IndicesOptions.fromOptions(false, false, false, true, false));
-        authorizedIndices = buildAuthorizedIndices(user, SearchAction.NAME);
-        resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(SearchAction.NAME, searchRequest, metadata, authorizedIndices);
+        authorizedIndices = buildAuthorizedIndices(user, TransportSearchAction.TYPE.name());
+        resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(
+            TransportSearchAction.TYPE.name(),
+            searchRequest,
+            metadata,
+            authorizedIndices
+        );
         assertThat(resolvedIndices.getLocal(), containsInAnyOrder(".hidden-closed"));
         assertThat(resolvedIndices.getRemote(), emptyIterable());

         // allow no indices, do not expand to open or closed, expand hidden, ignore aliases
         searchRequest = new SearchRequest();
         searchRequest.indicesOptions(IndicesOptions.fromOptions(false, true, false, false, false, true, false, true, false));
-        authorizedIndices = buildAuthorizedIndices(user, SearchAction.NAME);
-        resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(SearchAction.NAME, searchRequest, metadata, authorizedIndices);
+        authorizedIndices = buildAuthorizedIndices(user, TransportSearchAction.TYPE.name());
+        resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(
+            TransportSearchAction.TYPE.name(),
+            searchRequest,
+            metadata,
+            authorizedIndices
+        );
         assertThat(resolvedIndices.getLocal(), contains("-*"));
         assertThat(resolvedIndices.getRemote(), emptyIterable());

         // date math with default indices options
         searchRequest = new SearchRequest("<date-hidden-{now/d}>");
-        authorizedIndices = buildAuthorizedIndices(user, SearchAction.NAME);
-        resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(SearchAction.NAME, searchRequest, metadata, authorizedIndices);
+        authorizedIndices = buildAuthorizedIndices(user, TransportSearchAction.TYPE.name());
+        resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(
+            TransportSearchAction.TYPE.name(),
+            searchRequest,
+            metadata,
+            authorizedIndices
+        );
         assertThat(resolvedIndices.getLocal(), contains(oneOf("date-hidden-" + todaySuffix, "date-hidden-" + tomorrowSuffix)));
         assertThat(resolvedIndices.getRemote(), emptyIterable());
     }

     public void testHiddenAliasesResolution() {
         final User user = new User("hidden-alias-tester", "hidden_alias_test");
-        final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, SearchAction.NAME);
+        final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, TransportSearchAction.TYPE.name());

         // Visible only
         SearchRequest searchRequest = new SearchRequest();
         searchRequest.indicesOptions(IndicesOptions.fromOptions(false, false, true, false, false));
         ResolvedIndices resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(
-            SearchAction.NAME,
+            TransportSearchAction.TYPE.name(),
             searchRequest,
             metadata,
             authorizedIndices
@@ -1878,7 +1922,12 @@ public void testHiddenAliasesResolution() {
         // Include hidden explicitly
         searchRequest = new SearchRequest();
         searchRequest.indicesOptions(IndicesOptions.fromOptions(false, false, true, false, true));
-        resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(SearchAction.NAME, searchRequest, metadata, authorizedIndices);
+        resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(
+            TransportSearchAction.TYPE.name(),
+            searchRequest,
+            metadata,
+            authorizedIndices
+        );
         assertThat(
             resolvedIndices.getLocal(),
             containsInAnyOrder(
@@ -1896,28 +1945,48 @@ public void testHiddenAliasesResolution() {
         // Include hidden with a wildcard
         searchRequest = new SearchRequest("alias-h*");
         searchRequest.indicesOptions(IndicesOptions.fromOptions(false, false, true, false, true));
-        resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(SearchAction.NAME, searchRequest, metadata, authorizedIndices);
+        resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(
+            TransportSearchAction.TYPE.name(),
+            searchRequest,
+            metadata,
+            authorizedIndices
+        );
         assertThat(resolvedIndices.getLocal(), containsInAnyOrder("alias-hidden", "alias-hidden-datemath-" + todaySuffix));
         assertThat(resolvedIndices.getRemote(), emptyIterable());

         // Dot prefix, implicitly including hidden
         searchRequest = new SearchRequest(".a*");
         searchRequest.indicesOptions(IndicesOptions.fromOptions(false, false, true, false, false));
-        resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(SearchAction.NAME, searchRequest, metadata, authorizedIndices);
+        resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(
+            TransportSearchAction.TYPE.name(),
+            searchRequest,
+            metadata,
+            authorizedIndices
+        );
         assertThat(resolvedIndices.getLocal(), containsInAnyOrder(".alias-hidden", ".alias-hidden-datemath-" + todaySuffix));
         assertThat(resolvedIndices.getRemote(), emptyIterable());

         // Make sure ignoring aliases works (visible only)
         searchRequest = new SearchRequest();
         searchRequest.indicesOptions(IndicesOptions.fromOptions(false, true, true, false, false, true, false, true, false));
-        resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(SearchAction.NAME, searchRequest, metadata, authorizedIndices);
+        resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(
+            TransportSearchAction.TYPE.name(),
+            searchRequest,
+            metadata,
+            authorizedIndices
+        );
         assertThat(resolvedIndices.getLocal(), contains("-*"));
         assertThat(resolvedIndices.getRemote(), emptyIterable());

         // Make sure ignoring aliases works (including hidden)
         searchRequest = new SearchRequest();
         searchRequest.indicesOptions(IndicesOptions.fromOptions(false, false, true, false, true, true, false, true, false));
-        resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(SearchAction.NAME, searchRequest, metadata, authorizedIndices);
+        resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(
+            TransportSearchAction.TYPE.name(),
+            searchRequest,
+            metadata,
+            authorizedIndices
+        );
         assertThat(resolvedIndices.getLocal(), containsInAnyOrder("hidden-open"));
         assertThat(resolvedIndices.getRemote(), emptyIterable());
     }
@@ -1930,9 +1999,9 @@ public void testDataStreamResolution() {
             SearchRequest searchRequest = new SearchRequest();
             searchRequest.indices("logs-*");
             searchRequest.indicesOptions(IndicesOptions.fromOptions(false, false, true, false, false, true, true, true, true));
-            final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, SearchAction.NAME, searchRequest);
+            final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, TransportSearchAction.TYPE.name(), searchRequest);
             ResolvedIndices resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(
-                SearchAction.NAME,
+                TransportSearchAction.TYPE.name(),
                 searchRequest,
                 metadata,
                 authorizedIndices
@@ -1945,7 +2014,7 @@ public void testDataStreamResolution() {
             searchRequest.indices("logs-*");
             searchRequest.indicesOptions(IndicesOptions.fromOptions(false, true, true, false, false, true, true, true, true));
             resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(
-                SearchAction.NAME,
+                TransportSearchAction.TYPE.name(),
                 searchRequest,
                 metadata,
                 authorizedIndices
@@ -1961,9 +2030,9 @@ public void testDataStreamResolution() {
             SearchRequest searchRequest = new SearchRequest();
             searchRequest.indices("logs-*");
             searchRequest.indicesOptions(IndicesOptions.fromOptions(false, false, true, false, false, true, true, true, true));
-            final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, SearchAction.NAME, searchRequest);
+            final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, TransportSearchAction.TYPE.name(), searchRequest);
             ResolvedIndices resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(
-                SearchAction.NAME,
+                TransportSearchAction.TYPE.name(),
                 searchRequest,
                 metadata,
                 authorizedIndices
@@ -2055,7 +2124,7 @@ public void testDataStreamsAreVisibleWhenIncludedByRequestWithWildcard() {

         // data streams and their backing indices should be in the authorized list
         List<String> expectedDataStreams = List.of("logs-foo", "logs-foobar");
-        final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, SearchAction.NAME, request);
+        final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, TransportSearchAction.TYPE.name(), request);
         for (String dsName : expectedDataStreams) {
             DataStream dataStream = metadata.dataStreams().get(dsName);
             assertThat(authorizedIndices.all().get(), hasItem(dsName));
@@ -2069,7 +2138,7 @@ public void testDataStreamsAreVisibleWhenIncludedByRequestWithWildcard() {
         // data streams without their backing indices will be in the resolved list since the backing indices do not match the requested
         // pattern
         ResolvedIndices resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(
-            SearchAction.NAME,
+            TransportSearchAction.TYPE.name(),
             request,
             metadata,
             authorizedIndices
@@ -2097,7 +2166,7 @@ public void testDataStreamsAreVisibleWhenIncludedByRequestWithoutWildcard() {
         assertThat(request, instanceOf(IndicesRequest.Replaceable.class));
         assertThat(request.includeDataStreams(), is(true));

-        final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, SearchAction.NAME, request);
+        final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, TransportSearchAction.TYPE.name(), request);
         // data streams and their backing indices should be in the authorized list
         assertThat(authorizedIndices.all().get(), hasItem(dataStreamName));
         assertThat(authorizedIndices.check(dataStreamName), is(true));
@@ -2107,7 +2176,7 @@ public void testDataStreamsAreVisibleWhenIncludedByRequestWithoutWildcard() {
         }

         ResolvedIndices resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(
-            SearchAction.NAME,
+            TransportSearchAction.TYPE.name(),
             request,
             metadata,
             authorizedIndices
@@ -2128,7 +2197,7 @@ public void testBackingIndicesAreVisibleWhenIncludedByRequestWithWildcard() {

         // data streams and their backing indices should be included in the authorized list
         List<String> expectedDataStreams = List.of("logs-foo", "logs-foobar");
-        final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, SearchAction.NAME, request);
+        final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, TransportSearchAction.TYPE.name(), request);
         for (String dsName : expectedDataStreams) {
             DataStream dataStream = metadata.dataStreams().get(dsName);
             assertThat(authorizedIndices.all().get(), hasItem(dsName));
@@ -2142,7 +2211,7 @@ public void testBackingIndicesAreVisibleWhenIncludedByRequestWithWildcard() {
         // data streams should _not_ be included in the resolved list because they do not match the pattern but their backing indices
         // should be in the resolved list because they match the pattern and are authorized via extension from their parent data stream
         ResolvedIndices resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(
-            SearchAction.NAME,
+            TransportSearchAction.TYPE.name(),
             request,
             metadata,
             authorizedIndices
@@ -2214,7 +2283,7 @@ public void testDataStreamNotAuthorizedWhenBackingIndicesAreAuthorizedViaWildcar
         // only the backing indices will be in the resolved list since the request does not support data streams
         // but the backing indices match the requested pattern
         ResolvedIndices resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(
-            SearchAction.NAME,
+            TransportSearchAction.TYPE.name(),
             request,
             metadata,
             authorizedIndices
@@ -2243,7 +2312,7 @@ public void testDataStreamNotAuthorizedWhenBackingIndicesAreAuthorizedViaNameAnd
         // only the single backing index will be in the resolved list since the request does not support data streams
         // but one of the backing indices matched the requested pattern
         ResolvedIndices resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(
-            SearchAction.NAME,
+            TransportSearchAction.TYPE.name(),
             request,
             metadata,
             authorizedIndices
@@ -2326,9 +2395,9 @@ public void testResolveSearchShardRequestAgainstDataStream() {
             randomBoolean(),
             null
         );
-        final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, SearchShardsAction.NAME, request);
+        final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, TransportSearchShardsAction.TYPE.name(), request);
         final ResolvedIndices resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(
-            SearchShardsAction.NAME,
+            TransportSearchShardsAction.TYPE.name(),
             request,
             metadata,
             authorizedIndices
@@ -2348,9 +2417,9 @@ public void testResolveSearchShardRequestAgainstDataStream() {
             randomBoolean(),
             null
         );
-        final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, SearchAction.NAME, request);
+        final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, TransportSearchAction.TYPE.name(), request);
         final ResolvedIndices resolvedIndices = defaultIndicesResolver.resolveIndicesAndAliases(
-            SearchShardsAction.NAME,
+            TransportSearchShardsAction.TYPE.name(),
             request,
             metadata,
             authorizedIndices
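Note: every SearchAction.NAME and SearchShardsAction.NAME reference in the file above migrates to the name() of the corresponding transport ActionType constant. A self-contained sketch of the relationship, using simplified stand-ins rather than the real Elasticsearch classes (the action string shown is the well-known search action name):

    // Simplified stand-in for org.elasticsearch.action.ActionType.
    final class ActionTypeSketch {
        private final String name;

        ActionTypeSketch(String name) {
            this.name = name;
        }

        // The wire-level action name that authorization code keys on.
        String name() {
            return name;
        }
    }

    final class TransportSearchActionSketch {
        // Carries the same action string the removed SearchAction.NAME constant held.
        static final ActionTypeSketch TYPE = new ActionTypeSketch("indices:data/read/search");

        public static void main(String[] args) {
            // Call sites swap SearchAction.NAME for TYPE.name(); the string itself is
            // unchanged, so authorization behaviour is unaffected.
            System.out.println(TYPE.name()); // indices:data/read/search
        }
    }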
took [") + Pattern.quote("Resolving [0] indices for action [" + TransportSearchAction.TYPE.name() + "] and user [slow-user] took [") + "\\d{3}" + Pattern.quote( "ms] which is greater than the threshold of " @@ -163,7 +163,7 @@ public void testInfo() throws Exception { Level.INFO, Pattern.quote("Took [") + "\\d{2,3}" - + Pattern.quote("ms] to resolve [0] indices for action [" + SearchAction.NAME + "] and user [slow-user]") + + Pattern.quote("ms] to resolve [0] indices for action [" + TransportSearchAction.TYPE.name() + "] and user [slow-user]") ); testLogging(thresholds, elapsedMs, expectation); @@ -182,7 +182,7 @@ private void testLogging( final AuthorizationEngine.RequestInfo requestInfo = new AuthorizationEngine.RequestInfo( authentication, new SearchRequest(), - SearchAction.NAME, + TransportSearchAction.TYPE.name(), null ); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/PreAuthorizationUtilsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/PreAuthorizationUtilsTests.java index 866626e7d01f7..ff92b7a1e7dcd 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/PreAuthorizationUtilsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/PreAuthorizationUtilsTests.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.security.authz; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.ESTestCase; @@ -39,7 +39,7 @@ public class PreAuthorizationUtilsTests extends ESTestCase { public void testMaybeSkipChildrenActionAuthorizationAddsParentAuthorizationHeader() { - String action = SearchAction.NAME; + String action = TransportSearchAction.TYPE.name(); Role role = Role.builder(RESTRICTED_INDICES, "test-role").add(IndexPrivilege.READ, "test-*").build(); @@ -64,7 +64,7 @@ public void testMaybeSkipChildrenActionAuthorizationDoesNotAddHeaderForRandomAct } public void testShouldRemoveParentAuthorizationFromThreadContext() { - final String parentAction = SearchAction.NAME; + final String parentAction = TransportSearchAction.TYPE.name(); SecurityContext securityContextWithParentAuthorization = new SecurityContext(Settings.EMPTY, new ThreadContext(Settings.EMPTY)); securityContextWithParentAuthorization.setParentAuthorization(new ParentActionAuthorization(parentAction)); @@ -113,7 +113,7 @@ public void testShouldRemoveParentAuthorizationFromThreadContext() { } public void testShouldPreAuthorizeChildByParentAction() { - final String parentAction = SearchAction.NAME; + final String parentAction = TransportSearchAction.TYPE.name(); final String childAction = randomWhitelistedChildAction(parentAction); ParentActionAuthorization parentAuthorization = new ParentActionAuthorization(parentAction); @@ -130,7 +130,7 @@ public void testShouldPreAuthorizeChildByParentAction() { } public void testShouldPreAuthorizeChildByParentActionWhenParentAndChildAreSame() { - final String parentAction = SearchAction.NAME; + final String parentAction = TransportSearchAction.TYPE.name(); final String childAction = parentAction; ParentActionAuthorization parentAuthorization = new ParentActionAuthorization(parentAction); diff --git 
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java
index 2420d7c2269a6..251b692f42827 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java
@@ -16,8 +16,8 @@
 import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
 import org.elasticsearch.action.delete.DeleteAction;
 import org.elasticsearch.action.index.IndexAction;
-import org.elasticsearch.action.search.SearchAction;
 import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.TransportSearchAction;
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.client.internal.ElasticsearchClient;
@@ -1385,7 +1385,7 @@ public void testBackingIndicesAreIncludedForAuthorizedDataStreams() {
         SearchRequest request = new SearchRequest("*");
         AuthorizedIndices authorizedIndices = RBACEngine.resolveAuthorizedIndicesFromRole(
             role,
-            getRequestInfo(request, SearchAction.NAME),
+            getRequestInfo(request, TransportSearchAction.TYPE.name()),
             lookup,
             () -> ignore -> {}
         );
@@ -1773,8 +1773,10 @@ public void testChildSearchActionAuthorizationIsSkipped() {
         final String[] indices = { "test-index" };
         final Role role = Mockito.spy(Role.builder(RESTRICTED_INDICES, "test-role").add(IndexPrivilege.READ, indices).build());

-        final String action = randomFrom(PreAuthorizationUtils.CHILD_ACTIONS_PRE_AUTHORIZED_BY_PARENT.get(SearchAction.NAME));
-        final ParentActionAuthorization parentAuthorization = new ParentActionAuthorization(SearchAction.NAME);
+        final String action = randomFrom(
+            PreAuthorizationUtils.CHILD_ACTIONS_PRE_AUTHORIZED_BY_PARENT.get(TransportSearchAction.TYPE.name())
+        );
+        final ParentActionAuthorization parentAuthorization = new ParentActionAuthorization(TransportSearchAction.TYPE.name());

         authorizeIndicesAction(indices, role, action, parentAuthorization, new ActionListener() {
             @Override
@@ -1796,7 +1798,9 @@ public void testChildSearchActionIsAuthorizedWithoutSkipping() {
         final String[] indices = { "test-index" };
         final Role role = Mockito.spy(Role.builder(RESTRICTED_INDICES, "test-role").add(IndexPrivilege.READ, indices).build());

-        final String action = randomFrom(PreAuthorizationUtils.CHILD_ACTIONS_PRE_AUTHORIZED_BY_PARENT.get(SearchAction.NAME));
+        final String action = randomFrom(
+            PreAuthorizationUtils.CHILD_ACTIONS_PRE_AUTHORIZED_BY_PARENT.get(TransportSearchAction.TYPE.name())
+        );
         final ParentActionAuthorization parentAuthorization = null;

         authorizeIndicesAction(indices, role, action, parentAuthorization, new ActionListener() {
@@ -1830,8 +1834,10 @@ public void testChildSearchActionAuthorizationIsNotSkippedWhenRoleHasDLS() {
                 .build()
         );

-        final String action = randomFrom(PreAuthorizationUtils.CHILD_ACTIONS_PRE_AUTHORIZED_BY_PARENT.get(SearchAction.NAME));
-        final ParentActionAuthorization parentAuthorization = new ParentActionAuthorization(SearchAction.NAME);
+        final String action = randomFrom(
+            PreAuthorizationUtils.CHILD_ACTIONS_PRE_AUTHORIZED_BY_PARENT.get(TransportSearchAction.TYPE.name())
+        );
+        final ParentActionAuthorization parentAuthorization = new ParentActionAuthorization(TransportSearchAction.TYPE.name());

         authorizeIndicesAction(indices, role, action, parentAuthorization, new ActionListener() {
             @Override
@@ -1852,8 +1858,8 @@ public void testRandomChildSearchActionAuthorizionIsNotSkipped() {
         final String[] indices = { "test-index" };
         final Role role = Mockito.spy(Role.builder(RESTRICTED_INDICES, "test-role").add(IndexPrivilege.READ, indices).build());

-        final String action = SearchAction.NAME + "[" + randomAlphaOfLength(3) + "]";
-        final ParentActionAuthorization parentAuthorization = new ParentActionAuthorization(SearchAction.NAME);
+        final String action = TransportSearchAction.TYPE.name() + "[" + randomAlphaOfLength(3) + "]";
+        final ParentActionAuthorization parentAuthorization = new ParentActionAuthorization(TransportSearchAction.TYPE.name());

         authorizeIndicesAction(indices, role, action, parentAuthorization, new ActionListener() {
             @Override
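Note: the last hunk above builds a random child action as parent + "[" + suffix + "]", relying on the naming convention for child transport actions. A tiny illustrative helper for that convention (not part of the production code):

    final class ChildActionNameSketch {
        // Child transport actions are conventionally named "<parent>[<suffix>]".
        static boolean isChildOf(String parentAction, String action) {
            return action.startsWith(parentAction + "[") && action.endsWith("]");
        }

        public static void main(String[] args) {
            String parent = "indices:data/read/search";
            System.out.println(isChildOf(parent, parent + "[abc]")); // true
            System.out.println(isChildOf(parent, parent));           // false: the parent itself is not a child
        }
    }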
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java
index aa89cdad6eb7a..afc1d0931547a 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java
@@ -10,7 +10,7 @@
 import org.elasticsearch.TransportVersion;
 import org.elasticsearch.action.admin.indices.mapping.put.AutoPutMappingAction;
 import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction;
-import org.elasticsearch.action.search.SearchAction;
+import org.elasticsearch.action.search.TransportSearchAction;
 import org.elasticsearch.cluster.metadata.AliasMetadata;
 import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.cluster.metadata.DataStreamTestHelper;
@@ -74,7 +74,12 @@ public void testAuthorize() {
         Role role = Role.builder(RESTRICTED_INDICES, "_role")
             .add(new FieldPermissions(fieldPermissionDef(fields, null)), query, IndexPrivilege.ALL, randomBoolean(), "_index")
             .build();
-        IndicesAccessControl permissions = role.authorize(SearchAction.NAME, Sets.newHashSet("_index"), lookup, fieldPermissionsCache);
+        IndicesAccessControl permissions = role.authorize(
+            TransportSearchAction.TYPE.name(),
+            Sets.newHashSet("_index"),
+            lookup,
+            fieldPermissionsCache
+        );
         assertThat(permissions.getIndexPermissions("_index"), notNullValue());
         assertTrue(permissions.getIndexPermissions("_index").getFieldPermissions().grantsAccessTo("_field"));
         assertTrue(permissions.getIndexPermissions("_index").getFieldPermissions().hasFieldLevelSecurity());
@@ -86,7 +91,7 @@ public void testAuthorize() {
         role = Role.builder(RESTRICTED_INDICES, "_role")
             .add(new FieldPermissions(fieldPermissionDef(fields, null)), null, IndexPrivilege.ALL, randomBoolean(), "_index")
             .build();
-        permissions = role.authorize(SearchAction.NAME, Sets.newHashSet("_index"), lookup, fieldPermissionsCache);
+        permissions = role.authorize(TransportSearchAction.TYPE.name(), Sets.newHashSet("_index"), lookup, fieldPermissionsCache);
         assertThat(permissions.getIndexPermissions("_index"), notNullValue());
         assertTrue(permissions.getIndexPermissions("_index").getFieldPermissions().grantsAccessTo("_field"));
         assertTrue(permissions.getIndexPermissions("_index").getFieldPermissions().hasFieldLevelSecurity());
@@ -97,7 +102,7 @@ public void testAuthorize() {
         role = Role.builder(RESTRICTED_INDICES, "_role")
             .add(FieldPermissions.DEFAULT, query, IndexPrivilege.ALL, randomBoolean(), "_index")
             .build();
-        permissions = role.authorize(SearchAction.NAME, Sets.newHashSet("_index"), lookup, fieldPermissionsCache);
+        permissions = role.authorize(TransportSearchAction.TYPE.name(), Sets.newHashSet("_index"), lookup, fieldPermissionsCache);
         assertThat(permissions.getIndexPermissions("_index"), notNullValue());
         assertFalse(permissions.getIndexPermissions("_index").getFieldPermissions().hasFieldLevelSecurity());
         assertThat(permissions.getIndexPermissions("_index").getDocumentPermissions().hasDocumentLevelPermissions(), is(true));
@@ -108,7 +113,7 @@ public void testAuthorize() {
         role = Role.builder(RESTRICTED_INDICES, "_role")
             .add(new FieldPermissions(fieldPermissionDef(fields, null)), query, IndexPrivilege.ALL, randomBoolean(), "_alias")
             .build();
-        permissions = role.authorize(SearchAction.NAME, Sets.newHashSet("_alias"), lookup, fieldPermissionsCache);
+        permissions = role.authorize(TransportSearchAction.TYPE.name(), Sets.newHashSet("_alias"), lookup, fieldPermissionsCache);
         assertThat(permissions.getIndexPermissions("_index"), notNullValue());
         assertTrue(permissions.getIndexPermissions("_index").getFieldPermissions().grantsAccessTo("_field"));
         assertTrue(permissions.getIndexPermissions("_index").getFieldPermissions().hasFieldLevelSecurity());
@@ -132,7 +137,7 @@ public void testAuthorize() {
         role = Role.builder(RESTRICTED_INDICES, "_role")
             .add(new FieldPermissions(fieldPermissionDef(allFields, null)), query, IndexPrivilege.ALL, randomBoolean(), "_alias")
             .build();
-        permissions = role.authorize(SearchAction.NAME, Sets.newHashSet("_alias"), lookup, fieldPermissionsCache);
+        permissions = role.authorize(TransportSearchAction.TYPE.name(), Sets.newHashSet("_alias"), lookup, fieldPermissionsCache);
         assertThat(permissions.getIndexPermissions("_index"), notNullValue());
         assertFalse(permissions.getIndexPermissions("_index").getFieldPermissions().hasFieldLevelSecurity());
         assertThat(permissions.getIndexPermissions("_index").getDocumentPermissions().hasDocumentLevelPermissions(), is(true));
@@ -158,7 +163,7 @@ public void testAuthorize() {
             .add(new FieldPermissions(fieldPermissionDef(allFields, null)), fooQuery, IndexPrivilege.ALL, randomBoolean(), "_alias")
             .add(new FieldPermissions(fieldPermissionDef(allFields, null)), query, IndexPrivilege.ALL, randomBoolean(), "_alias")
             .build();
-        permissions = role.authorize(SearchAction.NAME, Sets.newHashSet("_alias"), lookup, fieldPermissionsCache);
+        permissions = role.authorize(TransportSearchAction.TYPE.name(), Sets.newHashSet("_alias"), lookup, fieldPermissionsCache);
         Set bothQueries = Sets.union(fooQuery, query);
         assertThat(permissions.getIndexPermissions("_index"), notNullValue());
         assertFalse(permissions.getIndexPermissions("_index").getFieldPermissions().hasFieldLevelSecurity());
@@ -194,7 +199,12 @@ public void testAuthorizeMultipleGroupsMixedDls() {
             .add(new FieldPermissions(fieldPermissionDef(fields, null)), query, IndexPrivilege.ALL, randomBoolean(), "_index")
             .add(new FieldPermissions(fieldPermissionDef(null, null)), null, IndexPrivilege.ALL, randomBoolean(), "*")
             .build();
-        IndicesAccessControl permissions = role.authorize(SearchAction.NAME, Sets.newHashSet("_index"), lookup, fieldPermissionsCache);
+        IndicesAccessControl permissions = role.authorize(
+            TransportSearchAction.TYPE.name(),
+            Sets.newHashSet("_index"),
+            lookup,
+            fieldPermissionsCache
+        );
         assertThat(permissions.getIndexPermissions("_index"), notNullValue());
         assertTrue(permissions.getIndexPermissions("_index").getFieldPermissions().grantsAccessTo("_field"));
         assertFalse(permissions.getIndexPermissions("_index").getFieldPermissions().hasFieldLevelSecurity());
@@ -261,13 +271,18 @@ public void testCorePermissionAuthorize() {
                 "a1"
             )
             .build();
-        IndicesAccessControl iac = core.authorize(SearchAction.NAME, Sets.newHashSet("a1", "ba"), lookup, fieldPermissionsCache);
+        IndicesAccessControl iac = core.authorize(
+            TransportSearchAction.TYPE.name(),
+            Sets.newHashSet("a1", "ba"),
+            lookup,
+            fieldPermissionsCache
+        );
         assertTrue(iac.getIndexPermissions("a1").getFieldPermissions().grantsAccessTo("denied_field"));
         assertTrue(iac.getIndexPermissions("a1").getFieldPermissions().grantsAccessTo(randomAlphaOfLength(5)));
         // did not define anything for ba so we allow all
         assertFalse(iac.hasIndexPermissions("ba"));
-        assertTrue(core.check(SearchAction.NAME));
+        assertTrue(core.check(TransportSearchAction.TYPE.name()));
         assertTrue(core.check(PutMappingAction.NAME));
         assertTrue(core.check(AutoPutMappingAction.NAME));
         assertFalse(core.check("unknown"));
@@ -302,7 +317,7 @@ public void testCorePermissionAuthorize() {
                 "a2"
             )
             .build();
-        iac = core.authorize(SearchAction.NAME, Sets.newHashSet("a1", "a2"), lookup, fieldPermissionsCache);
+        iac = core.authorize(TransportSearchAction.TYPE.name(), Sets.newHashSet("a1", "a2"), lookup, fieldPermissionsCache);
         assertFalse(iac.getIndexPermissions("a1").getFieldPermissions().hasFieldLevelSecurity());
         assertFalse(iac.getIndexPermissions("a2").getFieldPermissions().grantsAccessTo("denied_field2"));
         assertFalse(iac.getIndexPermissions("a2").getFieldPermissions().grantsAccessTo("denied_field"));
@@ -310,7 +325,7 @@ public void testCorePermissionAuthorize() {
         assertTrue(iac.getIndexPermissions("a2").getFieldPermissions().grantsAccessTo(randomAlphaOfLength(5) + "_field2"));
         assertTrue(iac.getIndexPermissions("a2").getFieldPermissions().hasFieldLevelSecurity());

-        assertTrue(core.check(SearchAction.NAME));
+        assertTrue(core.check(TransportSearchAction.TYPE.name()));
         assertTrue(core.check(PutMappingAction.NAME));
         assertTrue(core.check(AutoPutMappingAction.NAME));
         assertFalse(core.check("unknown"));
@@ -364,7 +379,7 @@ public void testSecurityIndicesPermissions() {
             "*"
         ).build();
         IndicesAccessControl iac = indicesPermission.authorize(
-            SearchAction.NAME,
+            TransportSearchAction.TYPE.name(),
             Sets.newHashSet(internalSecurityIndex, SecuritySystemIndices.SECURITY_MAIN_ALIAS),
             lookup,
             fieldPermissionsCache
@@ -384,7 +399,7 @@ public void testSecurityIndicesPermissions() {
             "*"
         ).build();
         iac = indicesPermission.authorize(
-            SearchAction.NAME,
+            TransportSearchAction.TYPE.name(),
             Sets.newHashSet(internalSecurityIndex, SecuritySystemIndices.SECURITY_MAIN_ALIAS),
             lookup,
             fieldPermissionsCache
@@ -415,7 +430,7 @@ public void testAsyncSearchIndicesPermissions() {
             "*"
         ).build();
         IndicesAccessControl iac = indicesPermission.authorize(
-            SearchAction.NAME,
+            TransportSearchAction.TYPE.name(),
             Sets.newHashSet(asyncSearchIndex),
             lookup,
             fieldPermissionsCache
@@ -432,7 +447,12 @@ public void testAsyncSearchIndicesPermissions() {
             true,
             "*"
         ).build();
-        iac = indicesPermission.authorize(SearchAction.NAME, Sets.newHashSet(asyncSearchIndex), lookup, fieldPermissionsCache);
+        iac = indicesPermission.authorize(
+            TransportSearchAction.TYPE.name(),
+            Sets.newHashSet(asyncSearchIndex),
+            lookup,
+            fieldPermissionsCache
+        );
         assertThat(iac.isGranted(), is(true));
         assertThat(iac.hasIndexPermissions(asyncSearchIndex), is(true));
         assertThat(iac.getIndexPermissions(asyncSearchIndex), is(notNullValue()));
@@ -466,7 +486,7 @@ public void testAuthorizationForBackingIndices() {
             dataStreamName
         ).build();
         IndicesAccessControl iac = indicesPermission.authorize(
-            SearchAction.NAME,
+            TransportSearchAction.TYPE.name(),
             Sets.newHashSet(backingIndices.stream().map(im -> im.getIndex().getName()).collect(Collectors.toList())),
             lookup,
             fieldPermissionsCache
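Note: the assertions above revolve around field-level security: grantsAccessTo(...) and hasFieldLevelSecurity() on the resolved FieldPermissions. A simplified, self-contained model of those two checks; the real implementation (automata, wildcard patterns, denied-field sets) is considerably more involved, so treat this only as a sketch of the semantics the tests assert:

    import java.util.Set;

    final class FieldPermissionsSketch {
        private final Set<String> grantedFields; // null means "no field-level security"

        FieldPermissionsSketch(Set<String> grantedFields) {
            this.grantedFields = grantedFields;
        }

        boolean hasFieldLevelSecurity() {
            return grantedFields != null;
        }

        boolean grantsAccessTo(String field) {
            // Without FLS every field is visible; with FLS only listed fields are.
            return grantedFields == null || grantedFields.contains(field);
        }

        public static void main(String[] args) {
            FieldPermissionsSketch fls = new FieldPermissionsSketch(Set.of("_field"));
            System.out.println(fls.hasFieldLevelSecurity());  // true
            System.out.println(fls.grantsAccessTo("_field")); // true
            System.out.println(fls.grantsAccessTo("other"));  // false

            FieldPermissionsSketch noFls = new FieldPermissionsSketch(null);
            System.out.println(noFls.hasFieldLevelSecurity());    // false
            System.out.println(noFls.grantsAccessTo("anything")); // true
        }
    }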
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/interceptor/SearchRequestCacheDisablingInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/interceptor/SearchRequestCacheDisablingInterceptorTests.java
index 19f4faf620643..2c4a03b7df501 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/interceptor/SearchRequestCacheDisablingInterceptorTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/interceptor/SearchRequestCacheDisablingInterceptorTests.java
@@ -8,8 +8,8 @@
 package org.elasticsearch.xpack.security.authz.interceptor;

 import org.elasticsearch.Version;
-import org.elasticsearch.action.search.SearchAction;
 import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.TransportSearchAction;
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -82,7 +82,7 @@ public void testRequestCacheWillBeDisabledWhenSearchRemoteIndices() {
         RequestInfo requestInfo = new RequestInfo(
             Authentication.newAnonymousAuthentication(new AnonymousUser(Settings.EMPTY), randomAlphaOfLengthBetween(3, 8)),
             searchRequest,
-            SearchAction.NAME,
+            TransportSearchAction.TYPE.name(),
             null
         );
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java
index 5c21edd7226c4..7bf9508ab451a 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java
@@ -21,7 +21,7 @@
 import org.elasticsearch.action.delete.DeleteAction;
 import org.elasticsearch.action.get.GetAction;
 import org.elasticsearch.action.index.IndexAction;
-import org.elasticsearch.action.search.SearchAction;
+import org.elasticsearch.action.search.TransportSearchAction;
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
@@ -542,17 +542,17 @@ private void trySuccessfullyLoadSuperuserRole(CompositeRolesStore compositeRoles
         );
         assertThat(role.cluster().privileges(), containsInAnyOrder(ClusterPrivilegeResolver.ALL));

-        assertThat(role.indices().check(SearchAction.NAME), Matchers.is(true));
+        assertThat(role.indices().check(TransportSearchAction.TYPE.name()), Matchers.is(true));
         assertThat(role.indices().check(IndexAction.NAME), Matchers.is(true));

         final Predicate<String> indexActionPredicate = Automatons.predicate(
             role.indices().allowedActionsMatcher("index-" + randomAlphaOfLengthBetween(1, 12))
         );
-        assertThat(indexActionPredicate.test(SearchAction.NAME), is(true));
+        assertThat(indexActionPredicate.test(TransportSearchAction.TYPE.name()), is(true));
         assertThat(indexActionPredicate.test(IndexAction.NAME), is(true));

         final Predicate<String> securityActionPredicate = Automatons.predicate(role.indices().allowedActionsMatcher(".security"));
-        assertThat(securityActionPredicate.test(SearchAction.NAME), is(true));
+        assertThat(securityActionPredicate.test(TransportSearchAction.TYPE.name()), is(true));
         assertThat(securityActionPredicate.test(IndexAction.NAME), is(false));
     }
@@ -1848,7 +1848,7 @@ public void testApiKeyAuthUsesApiKeyService() throws Exception {
         ThreadContext threadContext = new ThreadContext(SECURITY_ENABLED_SETTINGS);
         final ClusterService clusterService = mock(ClusterService.class);
         when(clusterService.getClusterSettings()).thenReturn(
-            new ClusterSettings(SECURITY_ENABLED_SETTINGS, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD))
+            new ClusterSettings(SECURITY_ENABLED_SETTINGS, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD, ApiKeyService.DELETE_INTERVAL))
         );
         ApiKeyService apiKeyService = spy(
             new ApiKeyService(
@@ -1931,7 +1931,7 @@ public void testApiKeyAuthUsesApiKeyServiceWithScopedRole() throws Exception {
         final ClusterService clusterService = mock(ClusterService.class);
         when(clusterService.getClusterSettings()).thenReturn(
-            new ClusterSettings(SECURITY_ENABLED_SETTINGS, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD))
+            new ClusterSettings(SECURITY_ENABLED_SETTINGS, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD, ApiKeyService.DELETE_INTERVAL))
         );
         ApiKeyService apiKeyService = spy(
             new ApiKeyService(
@@ -2031,7 +2031,7 @@ public void testGetRoleForCrossClusterAccessAuthentication() throws Exception {
         ThreadContext threadContext = new ThreadContext(SECURITY_ENABLED_SETTINGS);
         final ClusterService clusterService = mock(ClusterService.class);
         when(clusterService.getClusterSettings()).thenReturn(
-            new ClusterSettings(SECURITY_ENABLED_SETTINGS, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD))
+            new ClusterSettings(SECURITY_ENABLED_SETTINGS, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD, ApiKeyService.DELETE_INTERVAL))
         );
         final ApiKeyService apiKeyService = spy(
             new ApiKeyService(
@@ -2127,7 +2127,8 @@ public void testGetRoleForCrossClusterAccessAuthentication() throws Exception {
             .build();
         final var emptyCache = new FieldPermissionsCache(Settings.EMPTY);
         assertThat(
-            role.authorize(SearchAction.NAME, Sets.newHashSet("index1"), indexMetadata.getIndicesLookup(), emptyCache).isGranted(),
+            role.authorize(TransportSearchAction.TYPE.name(), Sets.newHashSet("index1"), indexMetadata.getIndicesLookup(), emptyCache)
+                .isGranted(),
             is(false == emptyRemoteRole)
         );
         assertThat(
@@ -2135,7 +2136,8 @@ public void testGetRoleForCrossClusterAccessAuthentication() throws Exception {
             is(false)
         );
         assertThat(
-            role.authorize(SearchAction.NAME, Sets.newHashSet("index2"), indexMetadata.getIndicesLookup(), emptyCache).isGranted(),
+            role.authorize(TransportSearchAction.TYPE.name(), Sets.newHashSet("index2"), indexMetadata.getIndicesLookup(), emptyCache)
+                .isGranted(),
             is(false)
         );
     }
@@ -2184,7 +2186,9 @@ public void testGetRolesForRunAs() {
     public void testGetRoleForWorkflowWithRestriction() {
         final Settings settings = Settings.EMPTY;
         final ClusterService clusterService = mock(ClusterService.class);
-        when(clusterService.getClusterSettings()).thenReturn(new ClusterSettings(settings, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD)));
+        when(clusterService.getClusterSettings()).thenReturn(
+            new ClusterSettings(settings, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD, ApiKeyService.DELETE_INTERVAL))
+        );
         final ApiKeyService apiKeyService = new ApiKeyService(
             settings,
             Clock.systemUTC(),
@@ -2277,7 +2281,7 @@ public void testGetRoleForWorkflowWithRestriction() {
         Role role = future1.actionGet();
         assertThat(role.hasWorkflowsRestriction(), equalTo(true));
         assertThat(role, not(sameInstance(Role.EMPTY_RESTRICTED_BY_WORKFLOW)));
-        assertThat(role.checkIndicesAction(SearchAction.NAME), is(true));
+        assertThat(role.checkIndicesAction(TransportSearchAction.TYPE.name()), is(true));
     }

     // 2. an "empty-restricted" role if originating workflow does not match (or is null)
@@ -2296,7 +2300,9 @@ public void testGetRoleForWorkflowWithoutRestriction() {
         final Settings settings = Settings.EMPTY;
         final ClusterService clusterService = mock(ClusterService.class);
-        when(clusterService.getClusterSettings()).thenReturn(new ClusterSettings(settings, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD)));
+        when(clusterService.getClusterSettings()).thenReturn(
+            new ClusterSettings(settings, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD, ApiKeyService.DELETE_INTERVAL))
+        );
         final ApiKeyService apiKeyService = new ApiKeyService(
             settings,
             Clock.systemUTC(),
@@ -2389,7 +2395,7 @@ public void testGetRoleForWorkflowWithoutRestriction() {
         Role role = future1.actionGet();
         assertThat(role.hasWorkflowsRestriction(), equalTo(false));
         assertThat(role, not(sameInstance(Role.EMPTY_RESTRICTED_BY_WORKFLOW)));
-        assertThat(role.checkIndicesAction(SearchAction.NAME), is(true));
+        assertThat(role.checkIndicesAction(TransportSearchAction.TYPE.name()), is(true));
     }
 }
@@ -2586,7 +2592,7 @@ public void testCacheEntryIsReusedForIdenticalApiKeyRoles() {
     }

     public void testXPackSecurityUserCanAccessAnyIndex() {
-        for (String action : Arrays.asList(GetAction.NAME, DeleteAction.NAME, SearchAction.NAME, IndexAction.NAME)) {
+        for (String action : Arrays.asList(GetAction.NAME, DeleteAction.NAME, TransportSearchAction.TYPE.name(), IndexAction.NAME)) {
             IsResourceAuthorizedPredicate predicate = getXPackSecurityRole().indices().allowedIndicesMatcher(action);

             IndexAbstraction index = mockIndexAbstraction(randomAlphaOfLengthBetween(3, 12));
@@ -2601,7 +2607,7 @@ public void testXPackSecurityUserCanAccessAnyIndex() {
     }

     public void testSecurityProfileUserHasAccessForOnlyProfileIndex() {
-        for (String action : Arrays.asList(GetAction.NAME, DeleteAction.NAME, SearchAction.NAME, IndexAction.NAME)) {
+        for (String action : Arrays.asList(GetAction.NAME, DeleteAction.NAME, TransportSearchAction.TYPE.name(), IndexAction.NAME)) {
             IsResourceAuthorizedPredicate predicate = getSecurityProfileRole().indices().allowedIndicesMatcher(action);

             List.of(
@@ -2625,7 +2631,7 @@ public void testSecurityProfileUserHasAccessForOnlyProfileIndex() {
     }

     public void testXPackUserCanAccessNonRestrictedIndices() {
-        for (String action : Arrays.asList(GetAction.NAME, DeleteAction.NAME, SearchAction.NAME, IndexAction.NAME)) {
+        for (String action : Arrays.asList(GetAction.NAME, DeleteAction.NAME, TransportSearchAction.TYPE.name(), IndexAction.NAME)) {
             IsResourceAuthorizedPredicate predicate = getXPackUserRole().indices().allowedIndicesMatcher(action);
             IndexAbstraction index = mockIndexAbstraction(randomAlphaOfLengthBetween(3, 12));
             if (false == TestRestrictedIndices.RESTRICTED_INDICES.isRestricted(index.getName())) {
@@ -2639,7 +2645,7 @@ public void testXPackUserCanAccessNonRestrictedIndices() {
     }

     public void testXPackUserCannotAccessSecurityOrAsyncSearch() {
-        for (String action : Arrays.asList(GetAction.NAME, DeleteAction.NAME, SearchAction.NAME, IndexAction.NAME)) {
+        for (String action : Arrays.asList(GetAction.NAME, DeleteAction.NAME, TransportSearchAction.TYPE.name(), IndexAction.NAME)) {
             IsResourceAuthorizedPredicate predicate = getXPackUserRole().indices().allowedIndicesMatcher(action);
             for (String index : TestRestrictedIndices.SAMPLE_RESTRICTED_NAMES) {
                 assertThat(predicate.test(mockIndexAbstraction(index)), Matchers.is(false));
@@ -2652,7 +2658,7 @@ public void testXPackUserCannotAccessSecurityOrAsyncSearch() {
     }

     public void testAsyncSearchUserCannotAccessNonRestrictedIndices() {
-        for (String action : Arrays.asList(GetAction.NAME, DeleteAction.NAME, SearchAction.NAME, IndexAction.NAME)) {
+        for (String action : Arrays.asList(GetAction.NAME, DeleteAction.NAME, TransportSearchAction.TYPE.name(), IndexAction.NAME)) {
             IsResourceAuthorizedPredicate predicate = getAsyncSearchUserRole().indices().allowedIndicesMatcher(action);
             IndexAbstraction index = mockIndexAbstraction(randomAlphaOfLengthBetween(3, 12));
             if (false == TestRestrictedIndices.RESTRICTED_INDICES.isRestricted(index.getName())) {
@@ -2666,7 +2672,7 @@ public void testAsyncSearchUserCannotAccessNonRestrictedIndices() {
     }

     public void testAsyncSearchUserCanAccessOnlyAsyncSearchRestrictedIndices() {
-        for (String action : Arrays.asList(GetAction.NAME, DeleteAction.NAME, SearchAction.NAME, IndexAction.NAME)) {
+        for (String action : Arrays.asList(GetAction.NAME, DeleteAction.NAME, TransportSearchAction.TYPE.name(), IndexAction.NAME)) {
             final IsResourceAuthorizedPredicate predicate = getAsyncSearchUserRole().indices().allowedIndicesMatcher(action);
             for (String index : TestRestrictedIndices.SAMPLE_RESTRICTED_NAMES) {
                 assertThat(predicate.test(mockIndexAbstraction(index)), Matchers.is(false));
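Note: the repeated ClusterSettings changes above all register ApiKeyService.DELETE_INTERVAL alongside DELETE_RETENTION_PERIOD, because a mocked ClusterService must register every setting the code under test reads. A minimal sketch of the failure mode with a simplified registry (this is not the real ClusterSettings class, and the setting keys in main are assumptions used for illustration):

    import java.util.Map;
    import java.util.Set;

    final class SettingsRegistrySketch {
        private final Set<String> registered;

        SettingsRegistrySketch(Set<String> registered) {
            this.registered = registered;
        }

        String get(String key, Map<String, String> values) {
            if (registered.contains(key) == false) {
                // Mirrors why the mocks had to add DELETE_INTERVAL: the service now
                // reads it, so leaving it unregistered would fail here.
                throw new IllegalArgumentException("unregistered setting: " + key);
            }
            return values.getOrDefault(key, "");
        }

        public static void main(String[] args) {
            var registry = new SettingsRegistrySketch(Set.of("xpack.security.authc.api_key.delete.retention_period"));
            registry.get("xpack.security.authc.api_key.delete.retention_period", Map.of()); // ok
            registry.get("xpack.security.authc.api_key.delete.interval", Map.of());         // throws
        }
    }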
static java.util.Collections.singletonList; import static org.elasticsearch.common.util.set.Sets.newHashSet; import static org.elasticsearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES; +import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.arrayContaining; -import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.iterableWithSize; import static org.hamcrest.Matchers.not; import static org.mockito.ArgumentMatchers.any; @@ -169,8 +181,7 @@ public void testGetSinglePrivilegeByName() throws Exception { final PlainActionFuture> future = new PlainActionFuture<>(); store.getPrivileges(List.of("myapp"), List.of("admin"), future); assertThat(requests, iterableWithSize(1)); - assertThat(requests.get(0), instanceOf(SearchRequest.class)); - SearchRequest request = (SearchRequest) requests.get(0); + final SearchRequest request = getLastRequest(SearchRequest.class); final String query = Strings.toString(request.source().query()); assertThat(query, containsString(""" {"terms":{"application":["myapp"]""")); @@ -178,27 +189,7 @@ public void testGetSinglePrivilegeByName() throws Exception { {"term":{"type":{"value":"application-privilege\"""")); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); assertResult(sourcePrivileges, future); } @@ -207,27 +198,7 @@ public void testGetMissingPrivilege() throws InterruptedException, ExecutionExce final PlainActionFuture> future = new PlainActionFuture<>(); store.getPrivileges(List.of("myapp"), List.of("admin"), future); final SearchHit[] hits = new SearchHit[0]; - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); final Collection applicationPrivilegeDescriptors = future.get(1, TimeUnit.SECONDS); assertThat(applicationPrivilegeDescriptors, empty()); @@ -243,8 +214,7 @@ public void testGetPrivilegesByApplicationName() throws Exception { final PlainActionFuture> future = new PlainActionFuture<>(); store.getPrivileges(Arrays.asList("myapp", "yourapp"), null, future); assertThat(requests, iterableWithSize(1)); - assertThat(requests.get(0), instanceOf(SearchRequest.class)); - SearchRequest request = (SearchRequest) requests.get(0); + final SearchRequest request = getLastRequest(SearchRequest.class); assertThat(request.indices(), arrayContaining(SecuritySystemIndices.SECURITY_MAIN_ALIAS)); final String query = 
Strings.toString(request.source().query()); @@ -255,27 +225,7 @@ public void testGetPrivilegesByApplicationName() throws Exception { {"term":{"type":{"value":"application-privilege\"""")); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); assertResult(sourcePrivileges, future); } @@ -317,8 +267,7 @@ public void testGetPrivilegesByWildcardApplicationName() throws Exception { final PlainActionFuture> future = new PlainActionFuture<>(); store.getPrivileges(Arrays.asList("myapp-*", "yourapp"), null, future); assertThat(requests, iterableWithSize(1)); - assertThat(requests.get(0), instanceOf(SearchRequest.class)); - SearchRequest request = (SearchRequest) requests.get(0); + final SearchRequest request = getLastRequest(SearchRequest.class); assertThat(request.indices(), arrayContaining(SecuritySystemIndices.SECURITY_MAIN_ALIAS)); final String query = Strings.toString(request.source().query()); @@ -334,27 +283,7 @@ public void testGetPrivilegesByWildcardApplicationName() throws Exception { } final SearchHit[] hits = buildHits(allowExpensiveQueries ? sourcePrivileges.subList(1, 4) : sourcePrivileges); - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); // The first and last privilege should not be retrieved assertResult(sourcePrivileges.subList(1, 4), future); } @@ -363,8 +292,7 @@ public void testGetPrivilegesByStarApplicationName() throws Exception { final PlainActionFuture> future = new PlainActionFuture<>(); store.getPrivileges(Arrays.asList("*", "anything"), null, future); assertThat(requests, iterableWithSize(1)); - assertThat(requests.get(0), instanceOf(SearchRequest.class)); - SearchRequest request = (SearchRequest) requests.get(0); + final SearchRequest request = getLastRequest(SearchRequest.class); assertThat(request.indices(), arrayContaining(SecuritySystemIndices.SECURITY_MAIN_ALIAS)); final String query = Strings.toString(request.source().query()); @@ -372,27 +300,7 @@ public void testGetPrivilegesByStarApplicationName() throws Exception { assertThat(query, containsString("{\"term\":{\"type\":{\"value\":\"application-privilege\"")); final SearchHit[] hits = new SearchHit[0]; - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); } public void testGetAllPrivileges() throws Exception { @@ -405,8 +313,7 @@ public void testGetAllPrivileges() throws Exception { final PlainActionFuture> future = new PlainActionFuture<>(); store.getPrivileges(null, null, future); assertThat(requests, iterableWithSize(1)); - assertThat(requests.get(0), instanceOf(SearchRequest.class)); - SearchRequest request = (SearchRequest) requests.get(0); + final SearchRequest request = getLastRequest(SearchRequest.class); 
assertThat(request.indices(), arrayContaining(SecuritySystemIndices.SECURITY_MAIN_ALIAS)); final String query = Strings.toString(request.source().query()); @@ -414,27 +321,7 @@ public void testGetAllPrivileges() throws Exception { assertThat(query, not(containsString("{\"terms\""))); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); assertResult(sourcePrivileges, future); } @@ -450,27 +337,7 @@ public void testGetPrivilegesCacheByApplicationNames() throws Exception { store.getPrivileges(List.of("myapp", "yourapp"), null, future); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); assertEquals(Set.of("myapp"), store.getApplicationNamesCache().get(Set.of("myapp", "yourapp"))); assertEquals(Set.copyOf(sourcePrivileges), store.getDescriptorsCache().get("myapp")); @@ -502,27 +369,7 @@ public void testGetPrivilegesCacheWithApplicationAndPrivilegeName() throws Excep store.getPrivileges(Collections.singletonList("myapp"), singletonList("user"), future); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); // Not caching names with no wildcard assertNull(store.getApplicationNamesCache().get(singleton("myapp"))); @@ -541,27 +388,7 @@ public void testGetPrivilegesCacheWithNonExistentApplicationName() throws Except final PlainActionFuture> future = new PlainActionFuture<>(); store.getPrivileges(Collections.singletonList("no-such-app"), null, future); final SearchHit[] hits = buildHits(emptyList()); - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); assertEquals(emptySet(), store.getApplicationNamesCache().get(singleton("no-such-app"))); assertEquals(0, store.getDescriptorsCache().count()); @@ -578,27 +405,7 @@ public void testGetPrivilegesCacheWithDifferentMatchAllApplicationNames() throws final PlainActionFuture> future = new PlainActionFuture<>(); store.getPrivileges(emptyList(), null, future); final SearchHit[] hits = buildHits(emptyList()); - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); assertEquals(emptySet(), 
store.getApplicationNamesCache().get(singleton("*"))); assertEquals(1, store.getApplicationNamesCache().count()); assertResult(emptyList(), future); @@ -625,6 +432,68 @@ public void testGetPrivilegesCacheWithDifferentMatchAllApplicationNames() throws assertResult(emptyList(), future4); } + public void testCacheIsClearedByApplicationNameWhenPrivilegesAreModified() throws Exception { + final PlainActionFuture> getFuture = new PlainActionFuture<>(); + store.getPrivileges(emptyList(), null, getFuture); + final List sourcePrivileges = List.of( + new ApplicationPrivilegeDescriptor("app1", "priv1a", Set.of("action:1a"), Map.of()), + new ApplicationPrivilegeDescriptor("app1", "priv1b", Set.of("action:1b"), Map.of()), + new ApplicationPrivilegeDescriptor("app2", "priv2a", Set.of("action:2a"), Map.of()), + new ApplicationPrivilegeDescriptor("app2", "priv2b", Set.of("action:2b"), Map.of()) + ); + final SearchHit[] hits = buildHits(sourcePrivileges); + listener.get().onResponse(buildSearchResponse(hits)); + assertEquals(Set.of("app1", "app2"), store.getApplicationNamesCache().get(singleton("*"))); + assertResult(sourcePrivileges, getFuture); + + // add a new privilege to app1 + var priv1c = new ApplicationPrivilegeDescriptor("app1", "priv1c", Set.of("action:1c"), Map.of()); + PlainActionFuture>> putFuture = new PlainActionFuture<>(); + store.putPrivileges(List.of(priv1c), WriteRequest.RefreshPolicy.IMMEDIATE, putFuture); + + handleBulkRequest(1, item -> true); + + assertCacheCleared("app1"); + + Map> putResponse = putFuture.get(); + assertThat(putResponse, aMapWithSize(1)); + assertThat(putResponse, hasKey("app1")); + assertThat(putResponse.get("app1"), aMapWithSize(1)); + assertThat(putResponse.get("app1"), hasEntry("priv1c", DocWriteResponse.Result.CREATED)); + + // modify a privilege in app2 + var priv2a = new ApplicationPrivilegeDescriptor("app2", "priv2a", Set.of("action:2*"), Map.of()); + putFuture = new PlainActionFuture<>(); + store.putPrivileges(List.of(priv2a), WriteRequest.RefreshPolicy.IMMEDIATE, putFuture); + + handleBulkRequest(1, item -> false); + assertCacheCleared("app2"); + + putResponse = putFuture.get(); + assertThat(putResponse, aMapWithSize(1)); + assertThat(putResponse, hasKey("app2")); + assertThat(putResponse.get("app2"), aMapWithSize(1)); + assertThat(putResponse.get("app2"), hasEntry("priv2a", DocWriteResponse.Result.UPDATED)); + + // modify a privilege in app1, add a privilege in app2 + var priv1a = new ApplicationPrivilegeDescriptor("app1", "priv1a", Set.of("action:1*"), Map.of()); + var priv2c = new ApplicationPrivilegeDescriptor("app2", "priv2c", Set.of("action:2c"), Map.of()); + putFuture = new PlainActionFuture<>(); + store.putPrivileges(List.of(priv1a, priv2c), WriteRequest.RefreshPolicy.IMMEDIATE, putFuture); + + handleBulkRequest(2, item -> item.id().contains("app2")); + assertCacheCleared("app1", "app2"); + + putResponse = putFuture.get(); + assertThat(putResponse, aMapWithSize(2)); + assertThat(putResponse, hasKey("app1")); + assertThat(putResponse.get("app1"), aMapWithSize(1)); + assertThat(putResponse.get("app1"), hasEntry("priv1a", DocWriteResponse.Result.UPDATED)); + assertThat(putResponse, hasKey("app2")); + assertThat(putResponse.get("app2"), aMapWithSize(1)); + assertThat(putResponse.get("app2"), hasEntry("priv2c", DocWriteResponse.Result.CREATED)); + } + public void testStaleResultsWillNotBeCached() { final List sourcePrivileges = singletonList( new ApplicationPrivilegeDescriptor("myapp", "admin", newHashSet("action:admin/*", "action:login", 
"data:read/*"), emptyMap()) @@ -636,27 +505,7 @@ public void testStaleResultsWillNotBeCached() { // Before the results can be cached, invalidate the cache to simulate stale search results store.getDescriptorsAndApplicationNamesCache().invalidateAll(); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); // Nothing should be cached since the results are stale assertEquals(0, store.getApplicationNamesCache().count()); @@ -704,27 +553,7 @@ protected void cacheFetchedDescriptors( final PlainActionFuture> future = new PlainActionFuture<>(); store1.getPrivileges(null, null, future); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); // Make sure the caching is about to happen getPrivilegeCountDown.await(5, TimeUnit.SECONDS); @@ -746,17 +575,22 @@ public void testPutPrivileges() throws Exception { new ApplicationPrivilegeDescriptor("app2", "all", newHashSet("*"), emptyMap()) ); - final PlainActionFuture>> putPrivilegeFuture = new PlainActionFuture<>(); + final PlainActionFuture>> putPrivilegeFuture = new PlainActionFuture<>(); store.putPrivileges(putPrivileges, WriteRequest.RefreshPolicy.IMMEDIATE, putPrivilegeFuture); - assertThat(requests, iterableWithSize(putPrivileges.size())); - assertThat(requests, everyItem(instanceOf(IndexRequest.class))); + assertThat(requests, iterableWithSize(1)); + assertThat(requests, everyItem(instanceOf(BulkRequest.class))); - final List indexRequests = new ArrayList<>(requests.size()); - requests.stream().map(IndexRequest.class::cast).forEach(indexRequests::add); + final BulkRequest bulkRequest = (BulkRequest) requests.get(0); requests.clear(); - final ActionListener indexListener = listener.get(); + assertThat(bulkRequest.requests(), iterableWithSize(putPrivileges.size())); + assertThat(bulkRequest.requests(), everyItem(instanceOf(IndexRequest.class))); + + final List indexRequests = new ArrayList<>(putPrivileges.size()); + bulkRequest.requests().stream().map(IndexRequest.class::cast).forEach(indexRequests::add); + final String uuid = UUIDs.randomBase64UUID(random()); + final BulkItemResponse[] responses = new BulkItemResponse[putPrivileges.size()]; for (int i = 0; i < putPrivileges.size(); i++) { ApplicationPrivilegeDescriptor privilege = putPrivileges.get(i); IndexRequest request = indexRequests.get(i); @@ -765,23 +599,28 @@ public void testPutPrivileges() throws Exception { final XContentBuilder builder = privilege.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), true); assertThat(request.source(), equalTo(BytesReference.bytes(builder))); final boolean created = privilege.getName().equals("user") == false; - indexListener.onResponse( + responses[i] = BulkItemResponse.success( + i, + DocWriteRequest.OpType.INDEX, new IndexResponse(new ShardId(SecuritySystemIndices.SECURITY_MAIN_ALIAS, uuid, i), request.id(), 1, 1, 1, created) ); } + listener.get().onResponse(new 
BulkResponse(responses, randomLongBetween(1, 1_000))); + assertBusy(() -> assertFalse(requests.isEmpty()), 1, TimeUnit.SECONDS); assertThat(requests, iterableWithSize(1)); assertThat(requests.get(0), instanceOf(ClearPrivilegesCacheRequest.class)); listener.get().onResponse(null); - final Map<String, List<String>> map = putPrivilegeFuture.actionGet(); + final Map<String, Map<String, DocWriteResponse.Result>> map = putPrivilegeFuture.actionGet(); assertThat(map.entrySet(), iterableWithSize(2)); - assertThat(map.get("app1"), iterableWithSize(1)); - assertThat(map.get("app2"), iterableWithSize(1)); - assertThat(map.get("app1"), contains("admin")); - assertThat(map.get("app2"), contains("all")); + assertThat(map.get("app1"), aMapWithSize(2)); + assertThat(map.get("app2"), aMapWithSize(1)); + assertThat(map.get("app1"), hasEntry("admin", DocWriteResponse.Result.CREATED)); + assertThat(map.get("app1"), hasEntry("user", DocWriteResponse.Result.UPDATED)); + assertThat(map.get("app2"), hasEntry("all", DocWriteResponse.Result.CREATED)); } public void testRetrieveActionNamePatternsInsteadOfPrivileges() throws Exception { @@ -940,27 +779,7 @@ public void testGetPrivilegesWorkWithoutCache() throws Exception { final PlainActionFuture<Collection<ApplicationPrivilegeDescriptor>> future = new PlainActionFuture<>(); store1.getPrivileges(singletonList("myapp"), null, future); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get() - .onResponse( - new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + listener.get().onResponse(buildSearchResponse(hits)); assertResult(sourcePrivileges, future); } @@ -985,6 +804,12 @@ private SecurityIndexManager.State dummyState( ); } + private <T extends ActionRequest> T getLastRequest(Class<T> requestClass) { + final ActionRequest last = requests.get(requests.size() - 1); + assertThat(last, instanceOf(requestClass)); + return requestClass.cast(last); + } + private SearchHit[] buildHits(List<ApplicationPrivilegeDescriptor> sourcePrivileges) { final SearchHit[] hits = new SearchHit[sourcePrivileges.size()]; for (int i = 0; i < hits.length; i++) { @@ -995,6 +820,51 @@ private SearchHit[] buildHits(List sourcePrivile return hits; } + private static SearchResponse buildSearchResponse(SearchHit[] hits) { + return new SearchResponse( + new SearchResponseSections( + new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), + null, + null, + false, + false, + null, + 1 + ), + "_scrollId1", + 1, + 1, + 0, + 1, + null, + null + ); + } + + private void handleBulkRequest(int expectedCount, Predicate<DocWriteRequest<?>> isCreated) { + final BulkRequest bulkReq = getLastRequest(BulkRequest.class); + assertThat(bulkReq.requests(), hasSize(expectedCount)); + + final var uuid = UUIDs.randomBase64UUID(random()); + final var items = new BulkItemResponse[expectedCount]; + for (int i = 0; i < expectedCount; i++) { + final DocWriteRequest<?> itemReq = bulkReq.requests().get(i); + items[i] = BulkItemResponse.success( + i, + itemReq.opType(), + new IndexResponse( + new ShardId(SecuritySystemIndices.SECURITY_MAIN_ALIAS, uuid, 0), + itemReq.id(), + 1, + 1, + 1, + isCreated.test(itemReq) + ) + ); + } + listener.get().onResponse(new BulkResponse(items, randomIntBetween(1, 999))); + } + private void assertResult( List<ApplicationPrivilegeDescriptor> sourcePrivileges, PlainActionFuture<Collection<ApplicationPrivilegeDescriptor>> future @@ -1004,6 +874,13 @@ private void assertResult( assertThat(new HashSet<>(getPrivileges), equalTo(new HashSet<>(sourcePrivileges))); } + private void assertCacheCleared(String...
applicationNames) { + final ClearPrivilegesCacheRequest clearCacheReq = getLastRequest(ClearPrivilegesCacheRequest.class); + assertThat(clearCacheReq.applicationNames(), arrayContainingInAnyOrder(applicationNames)); + assertThat(clearCacheReq.clearRolesCache(), is(true)); + listener.get().onResponse(new ClearPrivilegesCacheResponse(clusterService.getClusterName(), List.of(), List.of())); + } + @SuppressWarnings("unchecked") private static Consumer anyConsumer() { return any(Consumer.class); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java index 0b4f349422e8b..2abbb6a610170 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.security.enrollment; +import org.elasticsearch.Build; import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -231,7 +232,7 @@ public Answer answerNullHttpInfo(InvocationOnMock invocationO new ClusterName("cluster_name"), List.of( new NodeInfo( - Version.CURRENT, + Build.current().version(), TransportVersion.current(), IndexVersion.current(), Map.of(), @@ -266,7 +267,7 @@ private Answer answerWithInfo(InvocationOnMock invocationOnMo new ClusterName("cluster_name"), List.of( new NodeInfo( - Version.CURRENT, + Build.current().version(), TransportVersion.current(), IndexVersion.current(), Map.of(), diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java index 35efb12b278f2..17f1268b7f5e8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java @@ -10,7 +10,6 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkRequest; @@ -23,14 +22,14 @@ import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.MultiSearchAction; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.MultiSearchRequestBuilder; import org.elasticsearch.action.search.MultiSearchResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportMultiSearchAction; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.update.UpdateAction; import 
org.elasticsearch.action.update.UpdateRequest; @@ -49,6 +48,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.Tuple; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.query.BoolQueryBuilder; @@ -64,7 +64,6 @@ import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -86,6 +85,7 @@ import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.profile.ProfileDocument.ProfileDocumentUser; import org.elasticsearch.xpack.security.support.SecurityIndexManager; +import org.elasticsearch.xpack.security.support.SecuritySystemIndices; import org.elasticsearch.xpack.security.test.SecurityMocks; import org.hamcrest.Matchers; import org.junit.After; @@ -177,7 +177,7 @@ public class ProfileServiceTests extends ESTestCase { private Client client; private SecurityIndexManager profileIndex; private ProfileService profileService; - private Version minNodeVersion; + private boolean useProfileOrigin; @Before public void prepare() { @@ -197,7 +197,7 @@ public void prepare() { this.client = mock(Client.class); when(client.threadPool()).thenReturn(threadPool); when(client.prepareSearch(SECURITY_PROFILE_ALIAS)).thenReturn( - new SearchRequestBuilder(client, SearchAction.INSTANCE).setIndices(SECURITY_PROFILE_ALIAS) + new SearchRequestBuilder(client, TransportSearchAction.TYPE).setIndices(SECURITY_PROFILE_ALIAS) ); this.profileIndex = SecurityMocks.mockSecurityIndexManager(SECURITY_PROFILE_ALIAS); final ClusterService clusterService = mock(ClusterService.class); @@ -205,16 +205,19 @@ public void prepare() { when(clusterService.state()).thenReturn(clusterState); final DiscoveryNodes discoveryNodes = mock(DiscoveryNodes.class); when(clusterState.nodes()).thenReturn(discoveryNodes); - minNodeVersion = VersionUtils.randomVersionBetween(random(), Version.V_7_17_0, Version.CURRENT); - when(discoveryNodes.getMinNodeVersion()).thenReturn(minNodeVersion); + useProfileOrigin = randomBoolean(); + FeatureService featureService = mock(FeatureService.class); + when(featureService.clusterHasFeature(any(), eq(SecuritySystemIndices.SECURITY_PROFILE_ORIGIN_FEATURE))).thenReturn( + useProfileOrigin + ); this.profileService = new ProfileService( Settings.EMPTY, Clock.systemUTC(), client, profileIndex, clusterService, - name -> new DomainConfig(name, Set.of(), false, null), - threadPool + featureService, + name -> new DomainConfig(name, Set.of(), false, null) ); } @@ -318,7 +321,7 @@ public void testGetProfileSubjectsWithMissingUids() throws Exception { doAnswer(invocation -> { assertThat( threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), - equalTo(minNodeVersion.onOrAfter(Version.V_8_3_0) ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN) + equalTo(useProfileOrigin ? 
SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN) ); final MultiGetRequest multiGetRequest = (MultiGetRequest) invocation.getArguments()[1]; List responses = new ArrayList<>(); @@ -384,7 +387,7 @@ public void testGetProfileSubjectWithFailures() throws Exception { doAnswer(invocation -> { assertThat( threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), - equalTo(minNodeVersion.onOrAfter(Version.V_8_3_0) ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN) + equalTo(useProfileOrigin ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN) ); final ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onFailure(mGetException); @@ -400,7 +403,7 @@ public void testGetProfileSubjectWithFailures() throws Exception { doAnswer(invocation -> { assertThat( threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), - equalTo(minNodeVersion.onOrAfter(Version.V_8_3_0) ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN) + equalTo(useProfileOrigin ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN) ); final MultiGetRequest multiGetRequest = (MultiGetRequest) invocation.getArguments()[1]; List responses = new ArrayList<>(); @@ -493,8 +496,8 @@ public void testLiteralUsernameWillThrowOnDuplicate() throws IOException { client, profileIndex, mock(ClusterService.class), - domainName -> new DomainConfig(domainName, Set.of(), true, "suffix"), - threadPool + mock(FeatureService.class), + domainName -> new DomainConfig(domainName, Set.of(), true, "suffix") ); final PlainActionFuture future = new PlainActionFuture<>(); service.maybeIncrementDifferentiatorAndCreateNewProfile( @@ -578,19 +581,22 @@ public void testSecurityProfileOrigin() { doAnswer(invocation -> { assertThat( threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), - equalTo(minNodeVersion.onOrAfter(Version.V_8_3_0) ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN) + equalTo(useProfileOrigin ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN) ); @SuppressWarnings("unchecked") final ActionListener listener = (ActionListener) invocation.getArguments()[2]; - listener.onResponse( - new MultiSearchResponse( - new MultiSearchResponse.Item[] { - new MultiSearchResponse.Item(SearchResponse.empty(() -> 1L, SearchResponse.Clusters.EMPTY), null) }, - 1L - ) + var resp = new MultiSearchResponse( + new MultiSearchResponse.Item[] { + new MultiSearchResponse.Item(SearchResponse.empty(() -> 1L, SearchResponse.Clusters.EMPTY), null) }, + 1L ); + try { + listener.onResponse(resp); + } finally { + resp.decRef(); + } return null; - }).when(client).execute(eq(MultiSearchAction.INSTANCE), any(MultiSearchRequest.class), anyActionListener()); + }).when(client).execute(eq(TransportMultiSearchAction.TYPE), any(MultiSearchRequest.class), anyActionListener()); when(client.prepareIndex(SECURITY_PROFILE_ALIAS)).thenReturn( new IndexRequestBuilder(client, IndexAction.INSTANCE, SECURITY_PROFILE_ALIAS) @@ -600,7 +606,7 @@ public void testSecurityProfileOrigin() { doAnswer(invocation -> { assertThat( threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), - equalTo(minNodeVersion.onOrAfter(Version.V_8_3_0) ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN) + equalTo(useProfileOrigin ? 
SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN) ); final ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onFailure(expectedException); @@ -616,7 +622,7 @@ public void testSecurityProfileOrigin() { doAnswer(invocation -> { assertThat( threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), - equalTo(minNodeVersion.onOrAfter(Version.V_8_3_0) ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN) + equalTo(useProfileOrigin ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN) ); final ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onFailure(expectedException); @@ -631,12 +637,12 @@ public void testSecurityProfileOrigin() { doAnswer(invocation -> { assertThat( threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), - equalTo(minNodeVersion.onOrAfter(Version.V_8_3_0) ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN) + equalTo(useProfileOrigin ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN) ); final ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onFailure(expectedException); return null; - }).when(client).execute(eq(SearchAction.INSTANCE), any(SearchRequest.class), anyActionListener()); + }).when(client).execute(eq(TransportSearchAction.TYPE), any(SearchRequest.class), anyActionListener()); final PlainActionFuture future3 = new PlainActionFuture<>(); profileService.suggestProfile( new SuggestProfilesRequest(Set.of(), "", 1, null), @@ -649,13 +655,21 @@ public void testSecurityProfileOrigin() { public void testActivateProfileWithDifferentUidFormats() throws IOException { final ProfileService service = spy( - new ProfileService(Settings.EMPTY, Clock.systemUTC(), client, profileIndex, mock(ClusterService.class), domainName -> { - if (domainName.startsWith("hash")) { - return new DomainConfig(domainName, Set.of(), false, null); - } else { - return new DomainConfig(domainName, Set.of(), true, "suffix"); + new ProfileService( + Settings.EMPTY, + Clock.systemUTC(), + client, + profileIndex, + mock(ClusterService.class), + mock(FeatureService.class), + domainName -> { + if (domainName.startsWith("hash")) { + return new DomainConfig(domainName, Set.of(), false, null); + } else { + return new DomainConfig(domainName, Set.of(), true, "suffix"); + } } - }, threadPool) + ) ); doAnswer(invocation -> { @@ -1047,18 +1061,21 @@ public void testUsageStats() { }).toArray(MultiSearchResponse.Item[]::new); final MultiSearchResponse multiSearchResponse = new MultiSearchResponse(items, randomNonNegativeLong()); - - doAnswer(invocation -> { - @SuppressWarnings("unchecked") - final var listener = (ActionListener) invocation.getArgument(2); - listener.onResponse(multiSearchResponse); - return null; - }).when(client).execute(eq(MultiSearchAction.INSTANCE), any(MultiSearchRequest.class), anyActionListener()); - - when(client.prepareMultiSearch()).thenReturn(new MultiSearchRequestBuilder(client, MultiSearchAction.INSTANCE)); - final PlainActionFuture> future = new PlainActionFuture<>(); - profileService.usageStats(future); - assertThat(future.actionGet(), equalTo(metrics)); + try { + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + final var listener = (ActionListener) invocation.getArgument(2); + listener.onResponse(multiSearchResponse); + return null; + }).when(client).execute(eq(TransportMultiSearchAction.TYPE), any(MultiSearchRequest.class), anyActionListener()); + + when(client.prepareMultiSearch()).thenReturn(new MultiSearchRequestBuilder(client, TransportMultiSearchAction.TYPE)); + final 
PlainActionFuture> future = new PlainActionFuture<>(); + profileService.usageStats(future); + assertThat(future.actionGet(), equalTo(metrics)); + } finally { + multiSearchResponse.decRef(); + } } public void testUsageStatsWhenNoIndex() { @@ -1078,7 +1095,7 @@ private void mockMultiGetRequest(List sampleDocumentPar doAnswer(invocation -> { assertThat( threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), - equalTo(minNodeVersion.onOrAfter(Version.V_8_3_0) ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN) + equalTo(useProfileOrigin ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN) ); final MultiGetRequest multiGetRequest = (MultiGetRequest) invocation.getArguments()[1]; @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java index e842dd8588fa9..a1f696cc5dddd 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java @@ -113,6 +113,7 @@ public void sendResponse(RestResponse restResponse) { creation, expiration, false, + null, "user-x", "realm-1", metadata, @@ -171,6 +172,7 @@ public void doE creation, expiration, false, + null, "user-x", "realm-1", metadata, @@ -220,6 +222,7 @@ public void sendResponse(RestResponse restResponse) { creation, expiration, false, + null, "user-x", "realm-1", ApiKeyTests.randomMetadata(), @@ -235,6 +238,7 @@ public void sendResponse(RestResponse restResponse) { creation, expiration, false, + null, "user-y", "realm-1", ApiKeyTests.randomMetadata(), diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGrantApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGrantApiKeyActionTests.java new file mode 100644 index 0000000000000..e6744544a34da --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGrantApiKeyActionTests.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security.rest.action.apikey; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyRequest; + +import static org.hamcrest.Matchers.is; + +public class RestGrantApiKeyActionTests extends ESTestCase { + + public void testParseXContentForGrantApiKeyRequest() throws Exception { + final String grantType = randomAlphaOfLength(8); + final String username = randomAlphaOfLength(8); + final String password = randomAlphaOfLength(8); + final String accessToken = randomAlphaOfLength(8); + final String clientAuthenticationScheme = randomAlphaOfLength(8); + final String clientAuthenticationValue = randomAlphaOfLength(8); + final String apiKeyName = randomAlphaOfLength(8); + final String apiKeyExpiration = randomTimeValue(); + final String runAs = randomAlphaOfLength(8); + try ( + XContentParser content = createParser( + XContentFactory.jsonBuilder() + .startObject() + .field("grant_type", grantType) + .field("username", username) + .field("password", password) + .field("access_token", accessToken) + .startObject("client_authentication") + .field("scheme", clientAuthenticationScheme) + .field("value", clientAuthenticationValue) + .endObject() + .startObject("api_key") + .field("name", apiKeyName) + .field("expiration", apiKeyExpiration) + .endObject() + .field("run_as", runAs) + .endObject() + ) + ) { + GrantApiKeyRequest grantApiKeyRequest = RestGrantApiKeyAction.fromXContent(content); + assertThat(grantApiKeyRequest.getGrant().getType(), is(grantType)); + assertThat(grantApiKeyRequest.getGrant().getUsername(), is(username)); + assertThat(grantApiKeyRequest.getGrant().getPassword(), is(new SecureString(password.toCharArray()))); + assertThat(grantApiKeyRequest.getGrant().getAccessToken(), is(new SecureString(accessToken.toCharArray()))); + assertThat(grantApiKeyRequest.getGrant().getClientAuthentication().scheme(), is(clientAuthenticationScheme)); + assertThat( + grantApiKeyRequest.getGrant().getClientAuthentication().value(), + is(new SecureString(clientAuthenticationValue.toCharArray())) + ); + assertThat(grantApiKeyRequest.getGrant().getRunAsUsername(), is(runAs)); + assertThat(grantApiKeyRequest.getApiKeyRequest().getName(), is(apiKeyName)); + assertThat( + grantApiKeyRequest.getApiKeyRequest().getExpiration(), + is(TimeValue.parseTimeValue(apiKeyExpiration, "api_key.expiration")) + ); + } + } + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java index 03e64f1e616fd..c22892df2ce52 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.action.admin.indices.close.CloseIndexAction; import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; import org.elasticsearch.action.admin.indices.open.OpenIndexAction; -import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.TransportSearchAction; import 
org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; @@ -268,7 +268,7 @@ public void testInboundAuthorizationException() { ServerTransportFilter filter = crossClusterAccess ? getNodeCrossClusterAccessFilter() : getNodeFilter(); TransportRequest request = mock(TransportRequest.class); Authentication authentication = AuthenticationTestHelper.builder().build(); - String action = SearchAction.NAME; + String action = TransportSearchAction.TYPE.name(); doAnswer(getAnswer(authentication)).when(authcService).authenticate(eq(action), eq(request), eq(true), anyActionListener()); doAnswer(getAnswer(authentication, true)).when(crossClusterAccessAuthcService) .authenticate(eq(action), eq(request), anyActionListener()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java index 736f07582ac49..9ff23e5e7b9d8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java @@ -196,7 +196,7 @@ public void testTcpHandshake() { .roles(emptySet()) .version(version0) .build(); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); originalTransport.openConnection(node, connectionProfile, future); try (TcpTransport.NodeChannels connection = (TcpTransport.NodeChannels) future.actionGet()) { assertEquals(TransportVersion.current(), connection.getTransportVersion()); diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/apikey/README.md b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/apikey/README.md new file mode 100644 index 0000000000000..39b62a1bdfb00 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/apikey/README.md @@ -0,0 +1,21 @@ +# Explanation of files in this directory + +`rsa-private-jwkset.json`, `rsa-public-jwkset.json` +----------------------------------------------------------------------- + +These files are created by running the tests in `JwtRealmGenerateTests`. + +Those tests generate the yaml settings, the keystore settings and the JWK Sets +for each sample realm. + +Copy the output from the test output into the applicable file (you may wish to +run it through `jq` first in order to make it more readable). + +------- + +If additional keys are needed (e.g. to add more algorithms / key sizes) we can +either extend the existing JWKSets with another set of keys (that is, modify the +existing method in `JwtRealmGenerateTests` so that it creates more keys in the +same JWKSet, then re-run and replace the files on disk) or create new files ( +that is, add another test to `JwtRealmGenerateTests` that creates the relevant +realm and/or JWKSet, then re-run and add the new files to disk). 
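To make the README above concrete, here is a minimal sketch of how a test might consume one of these generated JWK set files, assuming the Nimbus JOSE+JWT library that the JWT realm already uses; the resource path and the `test-rsa-key` kid mirror the files added below, and the helper class itself is illustrative, not part of this PR:

```java
import com.nimbusds.jose.jwk.JWKSet;
import com.nimbusds.jose.jwk.RSAKey;

import java.io.InputStream;
import java.nio.charset.StandardCharsets;

// Illustrative helper: parse a generated JWK set from the test resources and
// look up the RSA key by its "kid" (e.g. "test-rsa-key" in the files below).
public final class JwkSetTestSupport {
    public static RSAKey loadRsaKey(String resourcePath, String keyId) throws Exception {
        try (InputStream in = JwkSetTestSupport.class.getResourceAsStream(resourcePath)) {
            final String json = new String(in.readAllBytes(), StandardCharsets.UTF_8);
            final JWKSet jwkSet = JWKSet.parse(json); // works for both the private and the public key set
            return (RSAKey) jwkSet.getKeyByKeyId(keyId);
        }
    }
}
```

Reformatting the files with `jq`, as the README suggests, only changes whitespace; `JWKSet.parse` is indifferent to JSON formatting.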
diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/apikey/rsa-private-jwkset.json b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/apikey/rsa-private-jwkset.json new file mode 100644 index 0000000000000..e11e83ea95812 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/apikey/rsa-private-jwkset.json @@ -0,0 +1,23 @@ +{ + "keys": [ + { + "p": "2HaFCsfMmm56qeSlBEnQqSLpUM2S7pyqeGnGdrR9unoBrK_jvJHiW-pI0SWN70iTykPCCiXKTP_NWB4tPV-dB-jtYfwGaQKlKHBdi_5ZgZjcFt3nL-rVSdBIRYjIDqg-zWDyXeT-A98fUKJs9RJlktGjCI6EKVH9pubS-NPG1ts", + "kty": "RSA", + "q": "vF0nR0kH3TKwHZheU0U4ewbnG0rdgN9Lcx5dm2uYMKO77ifC8zWlufm-wTB-SfiA5mJIYGd-_kEdU0scOorJ2RZzxzWQ06CXD1gnWUwhSH-3yqALy5ip8qgtEJO2dLXHX-qfmdhQLvuXyPYg6dD7hF8SZ3A8Dixvk95T8HmmmfM", + "d": "A6XYotHcFGNPfQh5o6B1NDv9wMVb4wdnJd-xdMSgZcmtuiZgXIfXG3L1HROCbSGpfdyP7rjfTHjdcfjJroRpmeTVE-d5pQGmOS8px2UiLfCpjpaZXcNJlNcJeuJFTpcNKOfu5miusbuBcNulu355UpjUJ8uU75qBffjVwQSqKcTQzHn7dvRaChCNwtxPzsFM71HkPk3gfwMJPSeaJeswBSxo7OnCaOJQLiDuaD5EJEgeG7sNd4pwWeq_BC4wNRa8FVv9o18PjVIIn_lFG6eSVKLere4i8bV5qhYJS7bC-Z5mCNIJVanX5-iHyEPiB8L89_kXmLBY5wUoVG0h-HTJRQ", + "e": "AQAB", + "use": "sig", + "kid": "test-rsa-key", + "key_ops": [ + "verify", + "sign" + ], + "qi": "gfz4sx8cdErhh1NxVrHyHFm1APae_2qVits-HLEeIrDsXtqU0KKI68JWflrVD1PYMG7wm2rQkNVL66hKgLjF7GciFboYjDbYp0ezKwxHEHMaK6g9Ts7lm1Ukcu-ujSNgM6H5-LyeChchiwIegUxL_PfiuTxCFlvZND3t9g9TpuM", + "dp": "E5OMbrApIeJR96F1BxFB7Ln6jdM5sZi7lg5C46NpJ383PY3es8Qhy5xBn6Cc2IIg048TMmW-iP4tbQW1o7JM-lUnetAXKFIT67dVzn5fS-guJ2dELEI5reZHUvqO1TyECYD2CmXWTzVTmLBH2FYkl4WcD_8Lls0SepCvjc9hUTc", + "dq": "QPV7GzlTTfQyCjLGrL7daIgL4TyjQNNAyNV7AKNNW9DLeakasRcaLRW0tBkOJGJfyZOxVBW9FN_NxjDL7mB4lbYJfXS6mlDyZ2dGQqRfggoRjv48sxzV1woqaGIYdQ1PUYOvQLX5iQpY4QQIe7oHUzIaPbPV8ile3Ua5-d9qFgM", + "n": "n0XN-JSI02G0rJfoI9Upj_rhmdudJTre9b1evE34kPunSICvJFy4E3Q-Fkc0z2hQa3UigA1Og1qMramkH70p1nWBk0gRIy8cn9_CWuPJQ9pkf5mpFGhEPi_PNVcvY9WpAr6lJ1OWq0Tu1g8b_qu3L5wvLdV8iQeFSIpxON2USNQj-HmoA37ZQcIzWTWZO6dcjTeIRSUEW6V2OVF9UkkS6qXqNGDQoqUXKPO1VElY5mQQTY0X71aUI_B0_gtIYO0iKjNzyYCKTeNZuX4WZbamqAwBApujT7hvZByLHFVbWipGZ2Hl82avUM_yrMr6oLI6UUtzXmvk1pzfn7WPsSjU4Q" + } + ] +} + + diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/apikey/rsa-public-jwkset.json b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/apikey/rsa-public-jwkset.json new file mode 100644 index 0000000000000..5e6a88f403690 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/apikey/rsa-public-jwkset.json @@ -0,0 +1,16 @@ +{ + "keys": [ + { + "kty": "RSA", + "e": "AQAB", + "use": "sig", + "kid": "test-rsa-key", + "key_ops": [ + "verify", + "sign" + ], + "n": "n0XN-JSI02G0rJfoI9Upj_rhmdudJTre9b1evE34kPunSICvJFy4E3Q-Fkc0z2hQa3UigA1Og1qMramkH70p1nWBk0gRIy8cn9_CWuPJQ9pkf5mpFGhEPi_PNVcvY9WpAr6lJ1OWq0Tu1g8b_qu3L5wvLdV8iQeFSIpxON2USNQj-HmoA37ZQcIzWTWZO6dcjTeIRSUEW6V2OVF9UkkS6qXqNGDQoqUXKPO1VElY5mQQTY0X71aUI_B0_gtIYO0iKjNzyYCKTeNZuX4WZbamqAwBApujT7hvZByLHFVbWipGZ2Hl82avUM_yrMr6oLI6UUtzXmvk1pzfn7WPsSjU4Q" + } + ] +} + diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/apikey/serialized-signed-RS256-jwt.txt b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/apikey/serialized-signed-RS256-jwt.txt new file mode 100644 index 0000000000000..b247f40dd1667 --- /dev/null +++ 
b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/apikey/serialized-signed-RS256-jwt.txt @@ -0,0 +1 @@ +eyJraWQiOiJ0ZXN0LXJzYS1rZXkiLCJhbGciOiJSUzI1NiJ9.eyJpc3MiOiJodHRwczpcL1wvaXNzdWVyLmV4YW1wbGUuY29tXC8iLCJhdWQiOiJodHRwczpcL1wvYXVkaWVuY2UuZXhhbXBsZS5jb21cLyIsInN1YiI6InVzZXIxIiwiZXhwIjo0MDcwOTA4ODAwLCJpYXQiOjk0NjY4NDgwMH0.IcJZXIEyd98T198_K4YOBE_4yJDbnNYugituAf_-M7nNI_rGAwD7uecK85xMco8mr0TSlyQWpbazHeOP4dh9jln27_Llf-D4xZeykESrlhM3zkMwUbDML2reM96NoTN42c_Cj5V9pZCEmcbk1BumnkmDD-RCTx4b_cB8CjiR4ODxXFpVnoJB-PdGFt7rImjkO0yacuUF09XOR-uUxH09WkqtmqoCnp-geSqNZbVzb2Kt1bTq66B0Wfiz6sG_cpM-NdhJ-JUZMO_oCJ9mfyje9fH5F1x8LA063qLVABRvQSEWP3t4wIRAnqS3Hj0sDqjfNBdcBgSCBY0_G8NmHw4toA \ No newline at end of file diff --git a/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownDelayedAllocationIT.java b/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownDelayedAllocationIT.java index 649e97e660e5a..8b272215928d1 100644 --- a/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownDelayedAllocationIT.java +++ b/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownDelayedAllocationIT.java @@ -213,7 +213,7 @@ private void indexRandomData() throws Exception { int numDocs = scaledRandomIntBetween(100, 1000); IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test").setSource("field", "value"); + builders[i] = prepareIndex("test").setSource("field", "value"); } indexRandom(true, builders); } diff --git a/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownShardsIT.java b/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownShardsIT.java index 4e2b1bd6c5a58..684100b45a743 100644 --- a/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownShardsIT.java +++ b/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownShardsIT.java @@ -425,7 +425,7 @@ private void indexRandomData(String index) throws Exception { int numDocs = scaledRandomIntBetween(100, 1000); IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex(index).setSource("field", "value"); + builders[i] = prepareIndex(index).setSource("field", "value"); } indexRandom(true, builders); } diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java index 29c7f1b98f4bf..3832bbf488045 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java @@ -187,7 +187,8 @@ static ShutdownShardMigrationStatus shardMigrationStatus( return new ShutdownShardMigrationStatus( SingleNodeShutdownMetadata.Status.COMPLETE, 0, - "no shard relocation is necessary for a node restart" + "no shard relocation is necessary for a node restart", + null ); } @@ -196,7 +197,8 @@ static ShutdownShardMigrationStatus shardMigrationStatus( return new ShutdownShardMigrationStatus( 
SingleNodeShutdownMetadata.Status.NOT_STARTED, 0, - "node is not currently part of the cluster" + "node is not currently part of the cluster", + null ); } @@ -242,7 +244,7 @@ static ShutdownShardMigrationStatus shardMigrationStatus( // The node is in `DiscoveryNodes`, but not `RoutingNodes` - so there are no shards assigned to it. We're done. if (currentState.getRoutingNodes().node(nodeId) == null) { // We don't know about that node - return new ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status.COMPLETE, 0); + return new ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status.COMPLETE, 0, 0, 0); } // Check if there are any shards currently on this node, and if there are any relocating shards @@ -256,12 +258,14 @@ static ShutdownShardMigrationStatus shardMigrationStatus( SingleNodeShutdownMetadata.Status shardStatus = totalRemainingShards == 0 ? SingleNodeShutdownMetadata.Status.COMPLETE : SingleNodeShutdownMetadata.Status.IN_PROGRESS; - return new ShutdownShardMigrationStatus(shardStatus, totalRemainingShards); + return new ShutdownShardMigrationStatus(shardStatus, startedShards, relocatingShards, initializingShards); } else if (initializingShards > 0 && relocatingShards == 0 && startedShards == 0) { // If there's only initializing shards left, return now with a note that only initializing shards are left return new ShutdownShardMigrationStatus( SingleNodeShutdownMetadata.Status.IN_PROGRESS, - totalRemainingShards, + startedShards, + relocatingShards, + initializingShards, "all remaining shards are currently INITIALIZING and must finish before they can be moved off this node" ); } @@ -314,7 +318,8 @@ static ShutdownShardMigrationStatus shardMigrationStatus( 0, "[" + shardsToIgnoreForFinalStatus.get() - + "] shards cannot be moved away from this node but have at least one copy on another node in the cluster" + + "] shards cannot be moved away from this node but have at least one copy on another node in the cluster", + null ); } else if (unmovableShard.isPresent()) { // We found a shard that can't be moved, so shard relocation is stalled. Blame the unmovable shard. 
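The hunks above and below replace the single remaining-shard count in `ShutdownShardMigrationStatus` with separate STARTED / RELOCATING / INITIALIZING counts. A hedged sketch of how the three inputs relate to the old total; variable names follow the surrounding method, and `numberOfShardsWithState` is the existing `RoutingNode` helper:

```java
// Sketch only: the per-state counts that the new constructor overloads accept.
// The previously reported "totalRemainingShards" is simply their sum, so the
// change adds detail to the status API without altering the overall count.
RoutingNode routingNode = currentState.getRoutingNodes().node(nodeId);
int startedShards = routingNode.numberOfShardsWithState(ShardRoutingState.STARTED);
int relocatingShards = routingNode.numberOfShardsWithState(ShardRoutingState.RELOCATING);
int initializingShards = routingNode.numberOfShardsWithState(ShardRoutingState.INITIALIZING);
int totalRemainingShards = startedShards + relocatingShards + initializingShards; // the old single argument
```

Reporting the counts separately lets callers of the shutdown-status API distinguish shards that still need to move from shards already relocating or still initializing.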
@@ -334,7 +339,12 @@ static ShutdownShardMigrationStatus shardMigrationStatus( decision ); } else { - return new ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status.IN_PROGRESS, totalRemainingShards); + return new ShutdownShardMigrationStatus( + SingleNodeShutdownMetadata.Status.IN_PROGRESS, + startedShards, + relocatingShards, + initializingShards + ); } } diff --git a/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/GetShutdownStatusResponseTests.java b/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/GetShutdownStatusResponseTests.java index 5c375152bf6c8..de579deafb44b 100644 --- a/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/GetShutdownStatusResponseTests.java +++ b/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/GetShutdownStatusResponseTests.java @@ -68,7 +68,7 @@ public static SingleNodeShutdownMetadata randomNodeShutdownMetadata() { public static SingleNodeShutdownStatus randomNodeShutdownStatus() { return new SingleNodeShutdownStatus( randomNodeShutdownMetadata(), - new ShutdownShardMigrationStatus(randomStatus(), randomNonNegativeLong()), + new ShutdownShardMigrationStatus(randomStatus(), randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()), new ShutdownPersistentTasksStatus(), new ShutdownPluginsStatus(randomBoolean()) ); diff --git a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMFileSettingsIT.java b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMFileSettingsIT.java index 4f1b887a7a02c..725bf412b3198 100644 --- a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMFileSettingsIT.java +++ b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMFileSettingsIT.java @@ -150,10 +150,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { } private void assertMasterNode(Client client, String node) { - assertThat( - client.admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), - equalTo(node) - ); + assertThat(client.admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), equalTo(node)); } private void writeJSONFile(String node, String json) throws Exception { diff --git a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java index 9d962ccc838bc..89cc45fb6e5a5 100644 --- a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java +++ b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java @@ -424,8 +424,7 @@ private void testUnsuccessfulSnapshotRetention(boolean partialSuccess) throws Ex try { GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(REPO) .setSnapshots(successfulSnapshotName.get()) - .execute() - .actionGet(); + .get(); snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0); } catch (SnapshotMissingException sme) { throw new AssertionError(sme); diff --git a/x-pack/plugin/snapshot-based-recoveries/qa/s3/build.gradle b/x-pack/plugin/snapshot-based-recoveries/qa/s3/build.gradle index dfac9046ec323..786af800b441a 100644 --- a/x-pack/plugin/snapshot-based-recoveries/qa/s3/build.gradle +++ 
b/x-pack/plugin/snapshot-based-recoveries/qa/s3/build.gradle @@ -6,16 +6,16 @@ * Side Public License, v 1. */ -import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE import org.elasticsearch.gradle.internal.info.BuildParams -apply plugin: 'elasticsearch.legacy-java-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.rest-resources' final Project fixture = project(':test:fixtures:s3-fixture') dependencies { javaRestTestImplementation(testArtifact(project(xpackModule('snapshot-based-recoveries')))) + javaRestTestImplementation project(':test:fixtures:s3-fixture') } restResources { @@ -41,41 +41,14 @@ if (!s3AccessKey && !s3SecretKey && !s3Bucket && !s3BasePath) { throw new IllegalArgumentException("not all options specified to run against external S3 service are present") } -if (useFixture) { - apply plugin: 'elasticsearch.test.fixtures' - testFixtures.useFixture(fixture.path, 's3-snapshot-based-recoveries') -} - -tasks.withType(Test).configureEach { +tasks.named("javaRestTest").configure { + usesDefaultDistribution() + systemProperty("tests.use.fixture", Boolean.toString(useFixture)) systemProperty 'test.s3.bucket', s3Bucket + systemProperty("s3AccessKey", s3AccessKey) + systemProperty("s3SecretKey", s3SecretKey) nonInputProperties.systemProperty 'test.s3.base_path', - s3BasePath ? s3BasePath + "_snapshot_based_recoveries_tests" + BuildParams.testSeed : 'base_path' -} - -testClusters.matching { it.name == "javaRestTest" }.configureEach { - testDistribution = 'DEFAULT' - numberOfNodes = 3 - - setting 'xpack.license.self_generated.type', 'trial' - - keystore 's3.client.snapshot_based_recoveries.access_key', s3AccessKey - keystore 's3.client.snapshot_based_recoveries.secret_key', s3SecretKey - - if (useFixture) { - def fixtureAddress = { fixtureName -> - assert useFixture: 'closure should not be used without a fixture' - int ephemeralPort = fixture.postProcessFixture.ext."test.fixtures.${fixtureName}.tcp.80" - assert ephemeralPort > 0 - '127.0.0.1:' + ephemeralPort - } - setting 's3.client.snapshot_based_recoveries.protocol', 'http' - setting 's3.client.snapshot_based_recoveries.endpoint', { "${-> fixtureAddress('s3-snapshot-based-recoveries')}" }, IGNORE_VALUE - - } else { - println "Using an external service to test " + project.name - } - - setting 'xpack.security.enabled', 'false' + s3BasePath ? 
s3BasePath + "_snapshot_based_recoveries_tests" + BuildParams.testSeed : 'base_path_integration_tests' } tasks.register("s3ThirdPartyTest") { diff --git a/x-pack/plugin/snapshot-based-recoveries/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/S3SnapshotBasedRecoveryIT.java b/x-pack/plugin/snapshot-based-recoveries/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/S3SnapshotBasedRecoveryIT.java index a585846b2b1f7..a19b39eec1619 100644 --- a/x-pack/plugin/snapshot-based-recoveries/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/S3SnapshotBasedRecoveryIT.java +++ b/x-pack/plugin/snapshot-based-recoveries/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/S3SnapshotBasedRecoveryIT.java @@ -7,13 +7,43 @@ package org.elasticsearch.xpack.snapshotbasedrecoveries.recovery; +import fixture.s3.S3HttpFixture; + import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.not; public class S3SnapshotBasedRecoveryIT extends AbstractSnapshotBasedRecoveryRestTestCase { + static final boolean USE_FIXTURE = Boolean.parseBoolean(System.getProperty("tests.use.fixture", "true")); + + public static final S3HttpFixture s3Fixture = new S3HttpFixture(USE_FIXTURE); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .nodes(3) + .keystore("s3.client.snapshot_based_recoveries.access_key", System.getProperty("s3AccessKey")) + .keystore("s3.client.snapshot_based_recoveries.secret_key", System.getProperty("s3SecretKey")) + .setting("xpack.license.self_generated.type", "trial") + .setting("s3.client.snapshot_based_recoveries.protocol", () -> "http", (n) -> USE_FIXTURE) + .setting("s3.client.snapshot_based_recoveries.endpoint", s3Fixture::getAddress, (n) -> USE_FIXTURE) + .setting("xpack.security.enabled", "false") + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(cluster); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + @Override protected String repositoryType() { return "s3"; diff --git a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java index 678612e2a1ed6..4670f0fd0b9b1 100644 --- a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java +++ b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java @@ -1574,9 +1574,7 @@ private void indexDocs(String indexName, int docIdOffset, int docCount) throws E IndexRequestBuilder[] builders = new IndexRequestBuilder[docCount]; for (int i = 0; i < builders.length; i++) { int docId = i + docIdOffset; - builders[i] = client().prepareIndex(indexName) - .setId(Integer.toString(docId)) - 
.setSource("field", docId, "field2", "Some text " + docId); + builders[i] = prepareIndex(indexName).setId(Integer.toString(docId)).setSource("field", docId, "field2", "Some text " + docId); } indexRandom(true, builders); diff --git a/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java b/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java index 45c7eb1b997b8..851d5f8f02b2a 100644 --- a/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java +++ b/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java @@ -462,7 +462,7 @@ private ShardRecoveryPlan computeShardRecoveryPlan( ) throws Exception { SnapshotsRecoveryPlannerService recoveryPlannerService = new SnapshotsRecoveryPlannerService(shardSnapshotsService, () -> true); - PlainActionFuture planFuture = PlainActionFuture.newFuture(); + PlainActionFuture planFuture = new PlainActionFuture<>(); recoveryPlannerService.computeRecoveryPlan( shardId, shardIdentifier, diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/s3/build.gradle b/x-pack/plugin/snapshot-repo-test-kit/qa/s3/build.gradle index 36b13ef8b12a7..21cf952f05bf1 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/s3/build.gradle +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/s3/build.gradle @@ -4,16 +4,15 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. */ -import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE + import org.elasticsearch.gradle.internal.info.BuildParams -apply plugin: 'elasticsearch.legacy-java-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.rest-resources' -final Project fixture = project(':test:fixtures:s3-fixture') - dependencies { javaRestTestImplementation testArtifact(project(xpackModule('snapshot-repo-test-kit'))) + javaRestTestImplementation project(':test:fixtures:s3-fixture') } restResources { @@ -39,36 +38,14 @@ if (!s3AccessKey && !s3SecretKey && !s3Bucket && !s3BasePath) { throw new IllegalArgumentException("not all options specified to run against external S3 service are present") } -if (useFixture) { - apply plugin: 'elasticsearch.test.fixtures' - testFixtures.useFixture(fixture.path, 's3-fixture-repository-test-kit') -} - tasks.named("javaRestTest").configure { + usesDefaultDistribution() + systemProperty("tests.use.fixture", Boolean.toString(useFixture)) systemProperty 'test.s3.bucket', s3Bucket - nonInputProperties.systemProperty 'test.s3.base_path', s3BasePath ? 
s3BasePath + "_repo_test_kit_tests" + BuildParams.testSeed : 'base_path' -} - -testClusters.matching { it.name == "javaRestTest" }.configureEach { - testDistribution = 'DEFAULT' - - keystore 's3.client.repo_test_kit.access_key', s3AccessKey - keystore 's3.client.repo_test_kit.secret_key', s3SecretKey - - if (useFixture) { - def fixtureAddress = { fixtureName -> - assert useFixture: 'closure should not be used without a fixture' - int ephemeralPort = fixture.postProcessFixture.ext."test.fixtures.${fixtureName}.tcp.80" - assert ephemeralPort > 0 - '127.0.0.1:' + ephemeralPort - } - setting 's3.client.repo_test_kit.protocol', 'http' - setting 's3.client.repo_test_kit.endpoint', { "${-> fixtureAddress('s3-fixture-repository-test-kit')}" }, IGNORE_VALUE - - } else { - println "Using an external service to test " + project.name - } - setting 'xpack.security.enabled', 'false' + systemProperty("s3AccessKey", s3AccessKey) + systemProperty("s3SecretKey", s3SecretKey) + nonInputProperties.systemProperty 'test.s3.base_path', + s3BasePath ? s3BasePath + "_repo_test_kit_tests" + BuildParams.testSeed : 'base_path_integration_tests' } tasks.register("s3ThirdPartyTest") { diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/S3SnapshotRepoTestKitIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/S3SnapshotRepoTestKitIT.java index a9a034eb9efd2..3b154cf4953be 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/S3SnapshotRepoTestKitIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/S3SnapshotRepoTestKitIT.java @@ -6,13 +6,46 @@ */ package org.elasticsearch.repositories.blobstore.testkit; +import fixture.s3.S3HttpFixture; + import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.not; public class S3SnapshotRepoTestKitIT extends AbstractSnapshotRepoTestKitRestTestCase { + static final boolean USE_FIXTURE = Boolean.parseBoolean(System.getProperty("tests.use.fixture", "true")); + + public static final S3HttpFixture s3Fixture = new S3HttpFixture(USE_FIXTURE); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .keystore("s3.client.repo_test_kit.access_key", System.getProperty("s3AccessKey")) + .keystore("s3.client.repo_test_kit.secret_key", System.getProperty("s3SecretKey")) + .setting("s3.client.repo_test_kit.protocol", () -> "http", (n) -> USE_FIXTURE) + .setting("s3.client.repo_test_kit.endpoint", s3Fixture::getAddress, (n) -> USE_FIXTURE) + .setting("xpack.security.enabled", "false") + // Additional tracing related to investigation into https://github.com/elastic/elasticsearch/issues/102294 + .setting("logger.org.elasticsearch.repositories.s3", "TRACE") + .setting("logger.org.elasticsearch.repositories.blobstore.testkit", "TRACE") + .setting("logger.com.amazonaws.request", "DEBUG") + .setting("logger.org.apache.http.wire", "DEBUG") + .build(); + + @ClassRule + public static TestRule ruleChain = 
RuleChain.outerRule(s3Fixture).around(cluster); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + @Override protected String repositoryType() { return "s3"; @@ -28,4 +61,9 @@ protected Settings repositorySettings() { return Settings.builder().put("client", "repo_test_kit").put("bucket", bucket).put("base_path", basePath).build(); } + + @Override + public void testRepositoryAnalysis() throws Exception { + super.testRepositoryAnalysis(); + } } diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java index a86adaef2c1b1..cad66019a3bbb 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java @@ -422,6 +422,7 @@ private boolean setFirstFailure(Exception e) { } private void fail(Exception e) { + logger.trace(() -> Strings.format("repository analysis in [%s] failed", blobPath), e); if (setFirstFailure(e) == false) { if (innerFailures.tryAcquire()) { final Throwable cause = ExceptionsHelper.unwrapCause(e); @@ -732,6 +733,7 @@ public void run() { if (currentValue <= request.getRegisterOperationCount() || otherAnalysisComplete.get() == false) { // complete at least request.getRegisterOperationCount() steps, but we may as well keep running for longer too + logger.trace("[{}] incrementing uncontended register [{}] from [{}]", blobPath, registerName, currentValue); transportService.sendChildRequest( nodes.get(currentValue < nodes.size() ? 
currentValue : random.nextInt(nodes.size())), UncontendedRegisterAnalyzeAction.NAME, @@ -745,13 +747,14 @@ public void run() { ) ); } else { + logger.trace("[{}] resetting uncontended register [{}] from [{}]", blobPath, registerName, currentValue); transportService.getThreadPool() .executor(ThreadPool.Names.SNAPSHOT) .execute( ActionRunnable.wrap( ActionListener.releaseAfter( ActionListener.wrap( - r -> logger.trace("uncontended register analysis succeeded"), + r -> logger.trace("[{}] uncontended register [{}] analysis succeeded", blobPath, registerName), AsyncAction.this::fail ), requestRefs.acquire() diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/SpatialDiskUsageIT.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/SpatialDiskUsageIT.java index 07b1c48a540be..43c72642c84a4 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/SpatialDiskUsageIT.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/SpatialDiskUsageIT.java @@ -86,7 +86,7 @@ private void doTestSpatialField(String type) throws Exception { .field("coordinates", new double[] { GeoTestUtil.nextLatitude(), GeoTestUtil.nextLongitude() }) .endObject() .endObject(); - client().prepareIndex(index).setId("id-" + i).setSource(doc).get(); + prepareIndex(index).setId("id-" + i).setSource(doc).get(); } AnalyzeIndexDiskUsageResponse resp = client().execute( AnalyzeIndexDiskUsageAction.INSTANCE, diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoShapeScriptDocValuesIT.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoShapeScriptDocValuesIT.java index 57e654fc0901c..aa7860aac1a40 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoShapeScriptDocValuesIT.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoShapeScriptDocValuesIT.java @@ -249,8 +249,7 @@ private void doTestGeometry(Geometry geometry, GeoShapeValues.GeoShapeValue expe private void doTestGeometry(Geometry geometry, GeoShapeValues.GeoShapeValue expectedLabelPosition, boolean fallbackToCentroid) throws IOException { - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject().field("name", "TestPosition").field("location", WellKnownText.toWKT(geometry)).endObject() ) @@ -309,8 +308,7 @@ private void doTestLabelPosition(Map fields, GeoShapeValu } public void testNullShape() throws Exception { - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("name", "TestPosition").nullField("location").endObject()) .get(); diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoShapeWithDocValuesIT.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoShapeWithDocValuesIT.java index e354feb60c95f..b4a3a07502abf 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoShapeWithDocValuesIT.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoShapeWithDocValuesIT.java @@ -110,8 +110,7 @@ public void testPercolatorGeoQueries() throws Exception { indicesAdmin().prepareCreate("test").setMapping("id", 
"type=keyword", "field1", "type=geo_shape", "query", "type=percolator") ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("query", geoDistanceQuery("field1").point(52.18, 4.38).distance(50, DistanceUnit.KILOMETERS)) @@ -120,8 +119,7 @@ public void testPercolatorGeoQueries() throws Exception { ) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("query", geoBoundingBoxQuery("field1").setCorners(52.3, 4.4, 52.1, 4.6)) @@ -130,8 +128,7 @@ public void testPercolatorGeoQueries() throws Exception { ) .get(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .field( @@ -175,7 +172,7 @@ public void testStorePolygonDateLine() throws Exception { "shape": "POLYGON((179 0, -179 0, -179 2, 179 2, 179 0))" }"""; - indexRandom(true, client().prepareIndex("test").setId("0").setSource(source, XContentType.JSON)); + indexRandom(true, prepareIndex("test").setId("0").setSource(source, XContentType.JSON)); assertNoFailuresAndResponse(client().prepareSearch("test").setFetchSource(false).addStoredField("shape"), response -> { assertThat(response.getHits().getTotalHits().value, equalTo(1L)); diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/LegacyGeoShapeWithDocValuesIT.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/LegacyGeoShapeWithDocValuesIT.java index 562f2fd681d97..3cf70b1d477b6 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/LegacyGeoShapeWithDocValuesIT.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/LegacyGeoShapeWithDocValuesIT.java @@ -87,7 +87,7 @@ public void testLegacyCircle() throws Exception { ); ensureGreen(); - indexRandom(true, client().prepareIndex("test").setId("0").setSource("shape", (ToXContent) (builder, params) -> { + indexRandom(true, prepareIndex("test").setId("0").setSource("shape", (ToXContent) (builder, params) -> { builder.startObject() .field("type", "circle") .startArray("coordinates") diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverShapeTests.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverShapeTests.java index 0abf475e59048..5e4b778d6c093 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverShapeTests.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverShapeTests.java @@ -91,8 +91,8 @@ public void setUp() throws Exception { .endObject(); try { - client().prepareIndex(INDEX).setSource(geoJson).setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex(IGNORE_MALFORMED_INDEX).setRefreshPolicy(IMMEDIATE).setSource(geoJson).get(); + prepareIndex(INDEX).setSource(geoJson).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(IGNORE_MALFORMED_INDEX).setRefreshPolicy(IMMEDIATE).setSource(geoJson).get(); } catch (Exception e) { // sometimes GeoTestUtil will create invalid geometry; catch and continue: if (queryGeometry == geometry) { @@ -107,8 +107,7 @@ public void setUp() throws Exception { public void testIndexedShapeReferenceSourceDisabled() throws Exception { Rectangle rectangle = new 
Rectangle(-45, 45, 45, -45); - client().prepareIndex(IGNORE_MALFORMED_INDEX) - .setId("Big_Rectangle") + prepareIndex(IGNORE_MALFORMED_INDEX).setId("Big_Rectangle") .setSource(jsonBuilder().startObject().field(FIELD, WellKnownText.toWKT(rectangle)).endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -131,11 +130,10 @@ public void testShapeFetchingPath() throws Exception { String location = """ "location" : {"type":"polygon", "coordinates":[[[-10,-10],[10,-10],[10,10],[-10,10],[-10,-10]]]}"""; - client().prepareIndex(indexName).setId("1").setSource(Strings.format(""" + prepareIndex(indexName).setId("1").setSource(Strings.format(""" { %s, "1" : { %s, "2" : { %s, "3" : { %s } }} } """, location, location, location, location), XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex(searchIndex) - .setId("1") + prepareIndex(searchIndex).setId("1") .setSource( jsonBuilder().startObject() .startObject("location") @@ -218,7 +216,7 @@ public void testIndexShapeRouting() { } }""", args); - client().prepareIndex(INDEX).setId("0").setSource(source, XContentType.JSON).setRouting("ABC").get(); + prepareIndex(INDEX).setId("0").setSource(source, XContentType.JSON).setRouting("ABC").get(); indicesAdmin().prepareRefresh(INDEX).get(); assertHitCount( @@ -229,13 +227,8 @@ public void testIndexShapeRouting() { public void testNullShape() { // index a null shape - client().prepareIndex(INDEX) - .setId("aNullshape") - .setSource("{\"" + FIELD + "\": null}", XContentType.JSON) - .setRefreshPolicy(IMMEDIATE) - .get(); - client().prepareIndex(IGNORE_MALFORMED_INDEX) - .setId("aNullshape") + prepareIndex(INDEX).setId("aNullshape").setSource("{\"" + FIELD + "\": null}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(IGNORE_MALFORMED_INDEX).setId("aNullshape") .setSource("{\"" + FIELD + "\": null}", XContentType.JSON) .setRefreshPolicy(IMMEDIATE) .get(); @@ -259,11 +252,11 @@ public void testFieldAlias() { public void testContainsShapeQuery() { - indicesAdmin().prepareCreate("test_contains").setMapping("location", "type=shape").execute().actionGet(); + indicesAdmin().prepareCreate("test_contains").setMapping("location", "type=shape").get(); String doc = """ {"location" : {"type":"envelope", "coordinates":[ [-100.0, 100.0], [100.0, -100.0]]}}"""; - client().prepareIndex("test_contains").setId("1").setSource(doc, XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test_contains").setId("1").setSource(doc, XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); // index the mbr of the collection Rectangle rectangle = new Rectangle(-50, 50, 50, -50); diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryTestCase.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryTestCase.java index 1160af2a98071..38d0a30b593b6 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryTestCase.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryTestCase.java @@ -56,8 +56,7 @@ public void testNullShape() throws Exception { indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); ensureGreen(); - client().prepareIndex(defaultIndexName) - .setId("aNullshape") + prepareIndex(defaultIndexName).setId("aNullshape") .setSource("{\"geo\": null}", XContentType.JSON) .setRefreshPolicy(IMMEDIATE) .get(); @@ -70,14 +69,12 @@ public void 
testIndexPointsFilterRectangle() throws Exception { indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); ensureGreen(); - client().prepareIndex(defaultIndexName) - .setId("1") + prepareIndex(defaultIndexName).setId("1") .setSource(jsonBuilder().startObject().field("name", "Document 1").field(defaultFieldName, "POINT(-30 -30)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); - client().prepareIndex(defaultIndexName) - .setId("2") + prepareIndex(defaultIndexName).setId("2") .setSource(jsonBuilder().startObject().field("name", "Document 2").field(defaultFieldName, "POINT(-45 -50)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -111,14 +108,12 @@ public void testIndexPointsCircle() throws Exception { indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); ensureGreen(); - client().prepareIndex(defaultIndexName) - .setId("1") + prepareIndex(defaultIndexName).setId("1") .setSource(jsonBuilder().startObject().field("name", "Document 1").field(defaultFieldName, "POINT(-30 -30)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); - client().prepareIndex(defaultIndexName) - .setId("2") + prepareIndex(defaultIndexName).setId("2") .setSource(jsonBuilder().startObject().field("name", "Document 2").field(defaultFieldName, "POINT(-45 -50)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -141,14 +136,12 @@ public void testIndexPointsPolygon() throws Exception { indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); ensureGreen(); - client().prepareIndex(defaultIndexName) - .setId("1") + prepareIndex(defaultIndexName).setId("1") .setSource(jsonBuilder().startObject().field(defaultFieldName, "POINT(-30 -30)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); - client().prepareIndex(defaultIndexName) - .setId("2") + prepareIndex(defaultIndexName).setId("2") .setSource(jsonBuilder().startObject().field(defaultFieldName, "POINT(-45 -50)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -171,20 +164,17 @@ public void testIndexPointsMultiPolygon() throws Exception { indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); ensureGreen(); - client().prepareIndex(defaultIndexName) - .setId("1") + prepareIndex(defaultIndexName).setId("1") .setSource(jsonBuilder().startObject().field("name", "Document 1").field(defaultFieldName, "POINT(-30 -30)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); - client().prepareIndex(defaultIndexName) - .setId("2") + prepareIndex(defaultIndexName).setId("2") .setSource(jsonBuilder().startObject().field("name", "Document 2").field(defaultFieldName, "POINT(-40 -40)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); - client().prepareIndex(defaultIndexName) - .setId("3") + prepareIndex(defaultIndexName).setId("3") .setSource(jsonBuilder().startObject().field("name", "Document 3").field(defaultFieldName, "POINT(-50 -50)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -215,14 +205,12 @@ public void testIndexPointsRectangle() throws Exception { indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); ensureGreen(); - client().prepareIndex(defaultIndexName) - .setId("1") + prepareIndex(defaultIndexName).setId("1") .setSource(jsonBuilder().startObject().field("name", "Document 1").field(defaultFieldName, "POINT(-30 -30)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); - client().prepareIndex(defaultIndexName) - .setId("2") + prepareIndex(defaultIndexName).setId("2") .setSource(jsonBuilder().startObject().field("name", "Document 2").field(defaultFieldName, "POINT(-45 
-50)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -245,14 +233,12 @@ public void testIndexPointsIndexedRectangle() throws Exception { indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); ensureGreen(); - client().prepareIndex(defaultIndexName) - .setId("point1") + prepareIndex(defaultIndexName).setId("point1") .setSource(jsonBuilder().startObject().field(defaultFieldName, "POINT(-30 -30)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); - client().prepareIndex(defaultIndexName) - .setId("point2") + prepareIndex(defaultIndexName).setId("point2") .setSource(jsonBuilder().startObject().field(defaultFieldName, "POINT(-45 -50)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -272,14 +258,12 @@ public void testIndexPointsIndexedRectangle() throws Exception { indicesAdmin().prepareCreate(indexedShapeIndex).setMapping(queryShapesMapping).get(); ensureGreen(); - client().prepareIndex(indexedShapeIndex) - .setId("shape1") + prepareIndex(indexedShapeIndex).setId("shape1") .setSource(jsonBuilder().startObject().field(indexedShapePath, "BBOX(-50, -40, -45, -55)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); - client().prepareIndex(indexedShapeIndex) - .setId("shape2") + prepareIndex(indexedShapeIndex).setId("shape2") .setSource(jsonBuilder().startObject().field(indexedShapePath, "BBOX(-60, -50, -50, -60)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -310,7 +294,7 @@ public void testIndexPointsIndexedRectangle() throws Exception { } public void testDistanceQuery() throws Exception { - indicesAdmin().prepareCreate("test_distance").setMapping("location", "type=shape").execute().actionGet(); + indicesAdmin().prepareCreate("test_distance").setMapping("location", "type=shape").get(); ensureGreen(); Circle circle = new Circle(1, 0, 10); diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/SpatialQueryStringIT.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/SpatialQueryStringIT.java index 04f260fa1f1c6..d68ff54e089ad 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/SpatialQueryStringIT.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/SpatialQueryStringIT.java @@ -58,9 +58,7 @@ public void setup() { public void testBasicAllQuery() throws Exception { List<IndexRequestBuilder> reqs = new ArrayList<>(); - reqs.add( - client().prepareIndex("test").setId("1").setSource("geo_shape", "POINT(0 0)", "shape", "POINT(0 0)", "point", "POINT(0 0)") - ); + reqs.add(prepareIndex("test").setId("1").setSource("geo_shape", "POINT(0 0)", "shape", "POINT(0 0)", "point", "POINT(0 0)")); // nothing matches indexRandom(true, false, reqs); assertHitCount(prepareSearch("test").setQuery(queryStringQuery("foo")), 0L); diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialPlugin.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialPlugin.java index 7f171230e7628..7c2bc504eb768 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialPlugin.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialPlugin.java @@ -46,6 +46,8 @@ import org.elasticsearch.xpack.spatial.action.SpatialStatsTransportAction; import org.elasticsearch.xpack.spatial.action.SpatialUsageTransportAction; import org.elasticsearch.xpack.spatial.common.CartesianBoundingBox; +import
org.elasticsearch.xpack.spatial.index.fielddata.CartesianShapeValues; +import org.elasticsearch.xpack.spatial.index.fielddata.GeoShapeValues; import org.elasticsearch.xpack.spatial.index.mapper.GeoShapeScriptFieldType; import org.elasticsearch.xpack.spatial.index.mapper.GeoShapeWithDocValuesFieldMapper; import org.elasticsearch.xpack.spatial.index.mapper.PointFieldMapper; @@ -201,7 +203,11 @@ public Map getProcessors(Processor.Parameters paramet @Override public List getGenericNamedWriteables() { - return List.of(new GenericNamedWriteableSpec("CartesianBoundingBox", CartesianBoundingBox::new)); + return List.of( + new GenericNamedWriteableSpec("CartesianBoundingBox", CartesianBoundingBox::new), + new GenericNamedWriteableSpec("GeoShapeValue", GeoShapeValues.GeoShapeValue::new), + new GenericNamedWriteableSpec("CartesianShapeValue", CartesianShapeValues.CartesianShapeValue::new) + ); } private static void registerGeoShapeBoundsAggregator(ValuesSourceRegistry.Builder builder) { diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/CartesianShapeValues.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/CartesianShapeValues.java index 2751dca2ef891..1bd7296b2da39 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/CartesianShapeValues.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/CartesianShapeValues.java @@ -10,6 +10,7 @@ import org.apache.lucene.geo.Component2D; import org.apache.lucene.geo.XYGeometry; import org.apache.lucene.geo.XYPoint; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.geometry.utils.GeometryValidator; import org.elasticsearch.geometry.utils.StandardValidator; import org.elasticsearch.search.aggregations.support.ValuesSourceType; @@ -66,6 +67,12 @@ public CartesianShapeValue() { super(CoordinateEncoder.CARTESIAN, CartesianPoint::new); } + @SuppressWarnings("this-escape") + public CartesianShapeValue(StreamInput in) throws IOException { + this(); + this.reset(in); + } + @Override protected Component2D centroidAsComponent2D() throws IOException { return XYGeometry.create(new XYPoint((float) getX(), (float) getY())); @@ -80,5 +87,10 @@ protected Component2D centroidAsComponent2D() throws IOException { public GeoRelation relate(XYGeometry geometry) throws IOException { return relate(XYGeometry.create(geometry)); } + + @Override + public String getWriteableName() { + return "CartesianShapeValue"; + } } } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeoShapeValues.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeoShapeValues.java index b0bacf7d295f8..fb32e9e1c4e4f 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeoShapeValues.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeoShapeValues.java @@ -12,6 +12,7 @@ import org.apache.lucene.geo.Point; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.Orientation; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.geometry.utils.GeographyValidator; import org.elasticsearch.geometry.utils.GeometryValidator; import org.elasticsearch.index.mapper.GeoShapeIndexer; @@ -69,6 +70,12 @@ public GeoShapeValue() { this.tile2DVisitor = new Tile2DVisitor(); } + @SuppressWarnings("this-escape") 
+ public GeoShapeValue(StreamInput in) throws IOException { + this(); + reset(in); + } + @Override protected Component2D centroidAsComponent2D() throws IOException { return LatLonGeometry.create(new Point(getY(), getX())); @@ -93,5 +100,10 @@ public GeoRelation relate(int minX, int maxX, int minY, int maxY) throws IOExcep public GeoRelation relate(LatLonGeometry geometry) throws IOException { return relate(LatLonGeometry.create(geometry)); } + + @Override + public String getWriteableName() { + return "GeoShapeValue"; + } } } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueReader.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueReader.java index 51ce9124eac5b..16b655a1ad034 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueReader.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/GeometryDocValueReader.java @@ -40,6 +40,7 @@ public class GeometryDocValueReader { private final Extent extent; private int treeOffset; private int docValueOffset; + private BytesRef bytesRef; public GeometryDocValueReader() { this.extent = new Extent(); @@ -50,6 +51,7 @@ public GeometryDocValueReader() { * reset the geometry. */ public void reset(BytesRef bytesRef) throws IOException { + this.bytesRef = bytesRef; // Needed only for supporting Writable, maintaining original offset, not adjusted on from input this.input.reset(bytesRef.bytes, bytesRef.offset, bytesRef.length); docValueOffset = bytesRef.offset; treeOffset = 0; @@ -109,4 +111,7 @@ public void visit(TriangleTreeVisitor visitor) throws IOException { } } + public BytesRef getBytesRef() { + return bytesRef; + } } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/ShapeValues.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/ShapeValues.java index 1036030546bcf..2dcb2ff99848c 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/ShapeValues.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/ShapeValues.java @@ -10,7 +10,14 @@ import org.apache.lucene.document.ShapeField; import org.apache.lucene.geo.Component2D; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.SpatialPoint; +import org.elasticsearch.common.io.stream.GenericNamedWriteable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.utils.GeometryValidator; import org.elasticsearch.geometry.utils.WellKnownText; @@ -90,7 +97,7 @@ public T missing(String missing) { * thin wrapper around a {@link GeometryDocValueReader} which encodes / decodes values using * the provided decoder (could be geo or cartesian) */ - protected abstract static class ShapeValue implements ToXContentFragment { + protected abstract static class ShapeValue implements ToXContentFragment, GenericNamedWriteable { private final GeometryDocValueReader reader; private final BoundingBox boundingBox; private final CoordinateEncoder encoder; @@ -113,6 +120,11 @@ public void reset(BytesRef 
bytesRef) throws IOException { this.boundingBox.reset(reader.getExtent(), encoder); } + protected void reset(StreamInput in) throws IOException { + BytesReference bytes = in.readBytesReference(); + reset(bytes.toBytesRef()); + } + public BoundingBox boundingBox() { return boundingBox; } @@ -187,6 +199,29 @@ public double getX() throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { throw new IllegalArgumentException("cannot write xcontent for geo_shape doc value"); } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.SHAPE_VALUE_SERIALIZATION_ADDED; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBytesReference(new BytesArray(reader.getBytesRef())); + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof ShapeValue other) { + return reader.getBytesRef().equals(other.reader.getBytesRef()); + } + return false; + } + + @Override + public int hashCode() { + return reader.getBytesRef().hashCode(); + } } public static class BoundingBox { diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/SpatialPluginTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/SpatialPluginTests.java index 8129b26c28241..38e79fa1dffb5 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/SpatialPluginTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/SpatialPluginTests.java @@ -141,7 +141,11 @@ public void testGenericNamedWriteables() { .filter(e -> e.categoryClass.equals(GenericNamedWriteable.class)) .map(e -> e.name) .collect(Collectors.toSet()); - assertThat("Expect both Geo and Cartesian BoundingBox", names, equalTo(Set.of("GeoBoundingBox", "CartesianBoundingBox"))); + assertThat( + "Expect both Geo and Cartesian BoundingBox and ShapeValue", + names, + equalTo(Set.of("GeoBoundingBox", "CartesianBoundingBox", "GeoShapeValue", "CartesianShapeValue")) + ); } private SpatialPlugin getPluginWithOperationMode(License.OperationMode operationMode) { @@ -171,7 +175,7 @@ private void checkLicenseNotRequired( ValuesSourceRegistry registry = registryBuilder.build(); T aggregator = registry.getAggregator( registryKey, - new ValuesSourceConfig(sourceType, null, true, null, null, null, null, null, null) + new ValuesSourceConfig(sourceType, null, true, null, null, null, null, null) ); NullPointerException exception = expectThrows(NullPointerException.class, () -> builder.accept(aggregator)); assertThat( @@ -199,7 +203,7 @@ private void checkLicenseRequired( ValuesSourceRegistry registry = registryBuilder.build(); T aggregator = registry.getAggregator( registryKey, - new ValuesSourceConfig(sourceType, null, true, null, null, null, null, null, null) + new ValuesSourceConfig(sourceType, null, true, null, null, null, null, null) ); if (License.OperationMode.TRIAL != operationMode && License.OperationMode.compare(operationMode, License.OperationMode.GOLD) < 0) { @@ -225,7 +229,7 @@ private void checkLicenseRequired( private static class TestValuesSourceConfig extends ValuesSourceConfig { private TestValuesSourceConfig(ValuesSourceType sourceType) { - super(sourceType, null, true, null, null, null, null, null, null); + super(sourceType, null, true, null, null, null, null, null); } } } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/CartesianShapeValuesGenericWriteableTests.java 
b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/CartesianShapeValuesGenericWriteableTests.java new file mode 100644 index 0000000000000..37997bd291996 --- /dev/null +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/CartesianShapeValuesGenericWriteableTests.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.spatial.index.fielddata; + +import org.elasticsearch.geometry.Rectangle; +import org.elasticsearch.xpack.spatial.util.GeoTestUtils; + +import java.io.IOException; + +public class CartesianShapeValuesGenericWriteableTests extends ShapeValuesGenericWriteableTests { + + @Override + protected String shapeValueName() { + return "CartesianShapeValue"; + } + + @Override + protected GenericWriteableWrapper createTestInstance() { + try { + double minX = randomDoubleBetween(-Float.MAX_VALUE, 0, false); + double minY = randomDoubleBetween(-Float.MAX_VALUE, 0, false); + double maxX = randomDoubleBetween(minX + 10, Float.MAX_VALUE, false); + double maxY = randomDoubleBetween(minY + 10, Float.MAX_VALUE, false); + Rectangle rectangle = new Rectangle(minX, maxX, maxY, minY); + CartesianShapeValues.CartesianShapeValue shapeValue = GeoTestUtils.cartesianShapeValue(rectangle); + return new GenericWriteableWrapper(shapeValue); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + protected GenericWriteableWrapper mutateInstance(GenericWriteableWrapper instance) throws IOException { + ShapeValues.ShapeValue shapeValue = instance.shapeValue(); + ShapeValues.BoundingBox bbox = shapeValue.boundingBox(); + double height = bbox.maxY() - bbox.minY(); + double width = bbox.maxX() - bbox.minX(); + double xs = width * 0.001; + double ys = height * 0.001; + Rectangle rectangle = new Rectangle(bbox.minX() + xs, bbox.maxX() - xs, bbox.maxY() - ys, bbox.minY() + ys); + return new GenericWriteableWrapper(GeoTestUtils.cartesianShapeValue(rectangle)); + } +} diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/GeoShapeValuesGenericWriteableTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/GeoShapeValuesGenericWriteableTests.java new file mode 100644 index 0000000000000..edd357738299c --- /dev/null +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/GeoShapeValuesGenericWriteableTests.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
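The GenericWriteableTests files added here pin down the wire behaviour that the SpatialPlugin registration above enables: a ShapeValue can travel as a generic value, with the registry entry named "GeoShapeValue" or "CartesianShapeValue" selecting the reader on the receiving side. A minimal round-trip sketch, assuming a NamedWriteableRegistry named `registry` holding the two entries shown in these tests and an existing `shapeValue` instance (both variable names are placeholders, not part of the change):

    BytesStreamOutput out = new BytesStreamOutput();
    out.writeGenericValue(shapeValue); // routed through the GenericNamedWriteable machinery
    try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry)) {
        Object copy = in.readGenericValue(); // registry resolves the writeable name back to a reader
        assert shapeValue.equals(copy);      // ShapeValue.equals compares the underlying BytesRef
    }
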
+ */ + +package org.elasticsearch.xpack.spatial.index.fielddata; + +import org.elasticsearch.common.geo.GeoBoundingBox; +import org.elasticsearch.geometry.Rectangle; +import org.elasticsearch.xpack.spatial.util.GeoTestUtils; + +import java.io.IOException; + +public class GeoShapeValuesGenericWriteableTests extends ShapeValuesGenericWriteableTests { + + @Override + protected String shapeValueName() { + return "GeoShapeValue"; + } + + @Override + protected GenericWriteableWrapper createTestInstance() { + try { + GeoBoundingBox bbox = GeoTestUtils.randomBBox(); + Rectangle rectangle = new Rectangle(bbox.left(), bbox.right(), bbox.top(), bbox.bottom()); + GeoShapeValues.GeoShapeValue shapeValue = GeoTestUtils.geoShapeValue(rectangle); + return new GenericWriteableWrapper(shapeValue); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + protected GenericWriteableWrapper mutateInstance(GenericWriteableWrapper instance) throws IOException { + ShapeValues.ShapeValue shapeValue = instance.shapeValue(); + ShapeValues.BoundingBox bbox = shapeValue.boundingBox(); + double height = bbox.maxY() - bbox.minY(); + double width = bbox.maxX() - bbox.minX(); + double xs = width * 0.001; + double ys = height * 0.001; + Rectangle rectangle = new Rectangle(bbox.minX() + xs, bbox.maxX() - xs, bbox.maxY() - ys, bbox.minY() + ys); + return new GenericWriteableWrapper(GeoTestUtils.geoShapeValue(rectangle)); + } +} diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/ShapeValuesGenericWriteableTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/ShapeValuesGenericWriteableTests.java new file mode 100644 index 0000000000000..cb123ad724dc0 --- /dev/null +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/ShapeValuesGenericWriteableTests.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
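Both concrete test classes inherit testSerializationFailsWithOlderVersion from the abstract base below, which pins the output stream to a TransportVersion older than SHAPE_VALUE_SERIALIZATION_ADDED and expects writing the value to fail. An illustrative sketch of the guard being exercised, assuming the check sits where generic values are written (the real implementation inside StreamOutput may differ, and the test only asserts the message prefix):

    // Hypothetical shape of the version guard; the test asserts only that the
    // message contains "[<name>] requires minimal transport version".
    if (out.getTransportVersion().before(value.getMinimalSupportedVersion())) {
        throw new IllegalArgumentException(
            "[" + value.getWriteableName() + "] requires minimal transport version ["
                + value.getMinimalSupportedVersion() + "] but stream uses ["
                + out.getTransportVersion() + "]"
        );
    }
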
+ */ + +package org.elasticsearch.xpack.spatial.index.fielddata; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.GenericNamedWriteable; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireTestCase; + +import java.io.IOException; +import java.util.List; + +import static org.hamcrest.Matchers.containsString; + +public abstract class ShapeValuesGenericWriteableTests extends AbstractWireTestCase< + ShapeValuesGenericWriteableTests.GenericWriteableWrapper> { + + /** + * Wrapper around a GeoShapeValue to verify that it round-trips via {@code writeGenericValue} and {@code readGenericValue} + */ + public record GenericWriteableWrapper(ShapeValues.ShapeValue shapeValue) implements Writeable { + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeGenericValue(shapeValue); + } + + public static GenericWriteableWrapper readFrom(StreamInput in) throws IOException { + return new GenericWriteableWrapper((ShapeValues.ShapeValue) in.readGenericValue()); + } + } + + private static final NamedWriteableRegistry NAMED_WRITEABLE_REGISTRY = new NamedWriteableRegistry( + List.of( + new NamedWriteableRegistry.Entry( + GenericNamedWriteable.class, + GeoShapeValues.GeoShapeValue.class.getSimpleName(), + GeoShapeValues.GeoShapeValue::new + ), + new NamedWriteableRegistry.Entry( + GenericNamedWriteable.class, + CartesianShapeValues.CartesianShapeValue.class.getSimpleName(), + CartesianShapeValues.CartesianShapeValue::new + ) + ) + ); + + @Override + protected NamedWriteableRegistry writableRegistry() { + return NAMED_WRITEABLE_REGISTRY; + } + + @Override + protected GenericWriteableWrapper copyInstance(GenericWriteableWrapper instance, TransportVersion version) throws IOException { + return copyInstance(instance, writableRegistry(), StreamOutput::writeWriteable, GenericWriteableWrapper::readFrom, version); + } + + protected abstract String shapeValueName(); + + public void testSerializationFailsWithOlderVersion() { + TransportVersion older = TransportVersions.KNN_AS_QUERY_ADDED; + assert older.before(TransportVersions.SHAPE_VALUE_SERIALIZATION_ADDED); + final var testInstance = createTestInstance().shapeValue(); + try (var output = new BytesStreamOutput()) { + output.setTransportVersion(older); + assertThat( + expectThrows(Throwable.class, () -> output.writeGenericValue(testInstance)).getMessage(), + containsString("[" + shapeValueName() + "] requires minimal transport version") + ); + } + } +} diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/LegacyGeoShapeWithDocValuesQueryTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/LegacyGeoShapeWithDocValuesQueryTests.java index f560c8591ac56..2bf77726870cf 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/LegacyGeoShapeWithDocValuesQueryTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/LegacyGeoShapeWithDocValuesQueryTests.java @@ -116,16 +116,14 @@ public void testPointsOnlyExplicit() throws Exception { // MULTIPOINT MultiPoint multiPoint = GeometryTestUtils.randomMultiPoint(false); - 
client().prepareIndex("geo_points_only") - .setId("1") + prepareIndex("geo_points_only").setId("1") .setSource(GeoJson.toXContent(multiPoint, jsonBuilder().startObject().field(defaultFieldName), null).endObject()) .setRefreshPolicy(IMMEDIATE) .get(); // POINT Point point = GeometryTestUtils.randomPoint(false); - client().prepareIndex("geo_points_only") - .setId("2") + prepareIndex("geo_points_only").setId("2") .setSource(GeoJson.toXContent(point, jsonBuilder().startObject().field(defaultFieldName), null).endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -169,8 +167,7 @@ public void testPointsOnly() throws Exception { Geometry geometry = GeometryTestUtils.randomGeometry(false); try { - client().prepareIndex("geo_points_only") - .setId("1") + prepareIndex("geo_points_only").setId("1") .setSource(GeoJson.toXContent(geometry, jsonBuilder().startObject().field(defaultFieldName), null).endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -216,8 +213,7 @@ public void testFieldAlias() throws IOException { ensureGreen(); MultiPoint multiPoint = GeometryTestUtils.randomMultiPoint(false); - client().prepareIndex(defaultIndexName) - .setId("1") + prepareIndex(defaultIndexName).setId("1") .setSource(GeoJson.toXContent(multiPoint, jsonBuilder().startObject().field(defaultFieldName), null).endObject()) .setRefreshPolicy(IMMEDIATE) .get(); diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java index 2d6bbaf8ccd97..b79bc11c36a2b 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java @@ -547,9 +547,6 @@ private static double decodeLatitude(long encoded) { return GeoEncodingUtils.decodeLatitude((int) (encoded & 0xffffffffL)); } - private void reset(int index) { - super.reset(index, this.x(), this.y()); - } } /** Allow test to use own objects for internal use in geometry simplifier, so we can track the sort-fields together with the points */ diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoGridTilerTestCase.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoGridTilerTestCase.java index 253eb85450446..ef9a2c747f225 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoGridTilerTestCase.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoGridTilerTestCase.java @@ -27,6 +27,7 @@ import org.elasticsearch.geometry.Rectangle; import org.elasticsearch.index.mapper.GeoShapeIndexer; import org.elasticsearch.indices.breaker.BreakerSettings; +import org.elasticsearch.indices.breaker.CircuitBreakerMetrics; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.search.aggregations.support.ValuesSourceType; @@ -248,6 +249,7 @@ public void testGridCircuitBreaker() throws IOException { } CircuitBreakerService service = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, Settings.EMPTY, Collections.singletonList(new BreakerSettings("limited", maxNumBytes - 1, 1.0)), 
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/util/GeoTestUtils.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/util/GeoTestUtils.java index 52d4e6ebb10cc..4e20e872ac446 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/util/GeoTestUtils.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/util/GeoTestUtils.java @@ -28,6 +28,7 @@ import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.spatial.index.fielddata.CartesianShapeValues; import org.elasticsearch.xpack.spatial.index.fielddata.CentroidCalculator; import org.elasticsearch.xpack.spatial.index.fielddata.CoordinateEncoder; import org.elasticsearch.xpack.spatial.index.fielddata.GeoShapeValues; @@ -70,6 +71,12 @@ public static GeoShapeValues.GeoShapeValue geoShapeValue(Geometry geometry) thro return value; } + public static CartesianShapeValues.CartesianShapeValue cartesianShapeValue(Geometry geometry) throws IOException { + CartesianShapeValues.CartesianShapeValue value = new CartesianShapeValues.CartesianShapeValue(); + value.reset(binaryCartesianShapeDocValuesField("test", geometry).binaryValue()); + return value; + } + public static GeoBoundingBox randomBBox() { Rectangle rectangle = GeometryTestUtils.randomRectangle(); return new GeoBoundingBox( diff --git a/x-pack/plugin/sql/build.gradle b/x-pack/plugin/sql/build.gradle index 0ece466dcdfad..85d778f9ec87f 100644 --- a/x-pack/plugin/sql/build.gradle +++ b/x-pack/plugin/sql/build.gradle @@ -37,7 +37,6 @@ dependencies { compileOnly project(path: xpackModule('ql')) testImplementation project(':test:framework') testImplementation(testArtifact(project(xpackModule('core')))) - testImplementation(testArtifact(project(xpackModule('security')))) testImplementation(testArtifact(project(xpackModule('ql')))) testImplementation project(path: ':modules:reindex') testImplementation project(path: ':modules:parent-join') diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java index 42126c2ed67c6..4a46bf6a5f4e1 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java @@ -728,26 +728,6 @@ public ResultSet getProcedureColumns(String catalog, String schemaPattern, Strin ); } - // return the cluster name as the catalog (database) - // helps with the various UIs - private String defaultCatalog() throws SQLException { - return con.client.serverInfo().cluster; - } - - private boolean isDefaultCatalog(String catalog) throws SQLException { - // null means catalog info is irrelevant - // % means return all catalogs - // EMPTY means return those without a catalog - return catalog == null || catalog.equals(EMPTY) || catalog.equals(WILDCARD) || catalog.equals(defaultCatalog()); - } - - private boolean isDefaultSchema(String schema) { - // null means schema info is irrelevant - // % means return all schemas` - // EMPTY means return those without a schema - return schema == null || schema.equals(EMPTY) || schema.equals(WILDCARD); - } - @Override public ResultSet getTables(String catalog, String 
schemaPattern, String tableNamePattern, String[] types) throws SQLException { String statement = "SYS TABLES CATALOG LIKE ? ESCAPE '\\' LIKE ? ESCAPE '\\' "; @@ -1373,10 +1353,6 @@ private static ResultSet emptySet(JdbcConfiguration cfg, String tableName, Objec return new JdbcResultSet(cfg, null, new InMemoryCursor(columnInfo(tableName, cols), null)); } - private static ResultSet emptySet(JdbcConfiguration cfg, List columns) { - return memorySet(cfg, columns, null); - } - private static ResultSet memorySet(JdbcConfiguration cfg, List columns, Object[][] data) { return new JdbcResultSet(cfg, null, new InMemoryCursor(columns, data)); } diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcPreparedStatement.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcPreparedStatement.java index 17a15fc026703..809ced6f8d5bf 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcPreparedStatement.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcPreparedStatement.java @@ -42,7 +42,6 @@ import java.util.Arrays; import java.util.Calendar; import java.util.List; -import java.util.Locale; import static java.time.ZoneOffset.UTC; import static org.elasticsearch.xpack.sql.jdbc.TypeUtils.scaleOrLength; @@ -464,10 +463,6 @@ private void checkKnownUnsupportedTypes(Object x) throws SQLFeatureNotSupportedE } } - private Calendar getDefaultCalendar() { - return Calendar.getInstance(cfg.timeZone(), Locale.ROOT); - } - @Override public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { throw new SQLFeatureNotSupportedException("AsciiStream not supported"); diff --git a/x-pack/plugin/sql/qa/jdbc/build.gradle b/x-pack/plugin/sql/qa/jdbc/build.gradle index 65d074538361e..42bf524dac17e 100644 --- a/x-pack/plugin/sql/qa/jdbc/build.gradle +++ b/x-pack/plugin/sql/qa/jdbc/build.gradle @@ -74,7 +74,7 @@ subprojects { // Compatibility testing for JDBC driver started with version 7.9.0 BuildParams.bwcVersions.allIndexCompatible.findAll({ it.onOrAfter(Version.fromString("7.9.0")) && it != VersionProperties.elasticsearchVersion }).each { bwcVersion -> def baseName = "v${bwcVersion}" - def cluster = testClusters.maybeCreate(baseName) + def cluster = testClusters.register(baseName) UnreleasedVersionInfo unreleasedVersion = BuildParams.bwcVersions.unreleasedInfo(bwcVersion) Configuration driverConfiguration = configurations.create("jdbcDriver${baseName}") { @@ -101,7 +101,7 @@ subprojects { classpath = sourceSets.javaRestTest.runtimeClasspath + driverConfiguration testClassesDirs = sourceSets.javaRestTest.output.classesDirs systemProperty 'jdbc.driver.version', bwcVersionString - nonInputProperties.systemProperty('tests.rest.cluster', "${-> cluster.allHttpSocketURI.join(",")}") + nonInputProperties.systemProperty('tests.rest.cluster', "${-> cluster.get().allHttpSocketURI.join(",")}") nonInputProperties.systemProperty('tests.clustername', baseName) } } diff --git a/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java b/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java index 16bd33ca31d74..43d6d04bfac2c 100644 --- a/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java +++ b/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java @@ -38,8 +38,8 @@ public void 
checkSearchContent() throws IOException { /** * Read an address for Elasticsearch suitable for the JDBC driver from the system properties. */ - public static String elasticsearchAddress() { - String cluster = System.getProperty("tests.rest.cluster"); + public String elasticsearchAddress() { + String cluster = getTestRestCluster(); // JDBC only supports a single node at a time so we just give it one. return cluster.split(",")[0]; /* This doesn't include "jdbc:es://" because we want the example in diff --git a/x-pack/plugin/sql/qa/server/build.gradle b/x-pack/plugin/sql/qa/server/build.gradle index c2c9731b8d363..cee10d81c9573 100644 --- a/x-pack/plugin/sql/qa/server/build.gradle +++ b/x-pack/plugin/sql/qa/server/build.gradle @@ -38,73 +38,80 @@ subprojects { apply plugin: 'elasticsearch.java' } - if (project.name != 'security') { // The security project just configures its subprojects - apply plugin: 'elasticsearch.legacy-java-rest-test' - - testClusters.matching { it.name == "javaRestTest" }.configureEach { - testDistribution = 'DEFAULT' - setting 'xpack.ml.enabled', 'false' - setting 'xpack.watcher.enabled', 'false' - } - - - dependencies { - configurations.javaRestTestRuntimeClasspath { - resolutionStrategy.force "org.slf4j:slf4j-api:1.7.25" - } - configurations.javaRestTestRuntimeOnly { - // This is also required to make resolveAllDependencies work - resolutionStrategy.force "org.slf4j:slf4j-api:1.7.25" - } - - /* Since we're a standalone rest test we actually get transitive - * dependencies but we don't really want them because they cause - * all kinds of trouble with the jar hell checks. So we suppress - * them explicitly for non-es projects. */ - javaRestTestImplementation(project(':x-pack:plugin:sql:qa:server')) { - transitive = false - } - javaRestTestImplementation project(":test:framework") - javaRestTestImplementation project(xpackModule('ql:test-fixtures')) - // JDBC testing dependencies - javaRestTestRuntimeOnly "net.sourceforge.csvjdbc:csvjdbc:${csvjdbcVersion}" - javaRestTestRuntimeOnly "com.h2database:h2:${h2Version}" - - // H2GIS testing dependencies - javaRestTestRuntimeOnly("org.orbisgis:h2gis:${h2gisVersion}") - javaRestTestRuntimeOnly("org.orbisgis:h2gis-api:${h2gisVersion}") - javaRestTestRuntimeOnly("org.orbisgis:h2gis-utilities:${h2gisVersion}") - javaRestTestRuntimeOnly("org.orbisgis:cts:1.5.2") - - - javaRestTestRuntimeOnly project(path: xpackModule('sql:jdbc')) - javaRestTestRuntimeOnly project(':x-pack:plugin:sql:sql-client') - - // CLI testing dependencies - javaRestTestRuntimeOnly project(path: xpackModule('sql:sql-cli')) - javaRestTestRuntimeOnly(project(':x-pack:plugin:sql:sql-action')) { - transitive = false + if (project.parent.name == 'security') + { + apply plugin: 'elasticsearch.legacy-java-rest-test' + + testClusters.matching { it.name == "javaRestTest" }.configureEach { + testDistribution = 'DEFAULT' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.watcher.enabled', 'false' + } + } else { + apply plugin: 'elasticsearch.internal-java-rest-test' + tasks.named('javaRestTest') { + usesDefaultDistribution() + } } - javaRestTestRuntimeOnly("org.jline:jline-terminal-jna:${jlineVersion}") { - exclude group: "net.java.dev.jna" + dependencies { + configurations.javaRestTestRuntimeClasspath { + resolutionStrategy.force "org.slf4j:slf4j-api:1.7.25" + } + configurations.javaRestTestRuntimeOnly { + // This is also required to make resolveAllDependencies work + resolutionStrategy.force "org.slf4j:slf4j-api:1.7.25" + } + + /* Since we're a standalone rest test 
we actually get transitive + * dependencies but we don't really want them because they cause + * all kinds of trouble with the jar hell checks. So we suppress + * them explicitly for non-es projects. */ + javaRestTestImplementation(project(':x-pack:plugin:sql:qa:server')) { + transitive = false + } + javaRestTestImplementation project(":test:framework") + javaRestTestImplementation project(xpackModule('ql:test-fixtures')) + + // JDBC testing dependencies + javaRestTestRuntimeOnly "net.sourceforge.csvjdbc:csvjdbc:${csvjdbcVersion}" + javaRestTestRuntimeOnly "com.h2database:h2:${h2Version}" + + // H2GIS testing dependencies + javaRestTestRuntimeOnly("org.orbisgis:h2gis:${h2gisVersion}") + javaRestTestRuntimeOnly("org.orbisgis:h2gis-api:${h2gisVersion}") + javaRestTestRuntimeOnly("org.orbisgis:h2gis-utilities:${h2gisVersion}") + javaRestTestRuntimeOnly("org.orbisgis:cts:1.5.2") + + + javaRestTestRuntimeOnly project(path: xpackModule('sql:jdbc')) + javaRestTestRuntimeOnly project(':x-pack:plugin:sql:sql-client') + + // CLI testing dependencies + javaRestTestRuntimeOnly project(path: xpackModule('sql:sql-cli')) + javaRestTestRuntimeOnly(project(':x-pack:plugin:sql:sql-action')) { + transitive = false + } + + javaRestTestRuntimeOnly("org.jline:jline-terminal-jna:${jlineVersion}") { + exclude group: "net.java.dev.jna" + } + javaRestTestRuntimeOnly "org.jline:jline-terminal:${jlineVersion}" + javaRestTestRuntimeOnly "org.jline:jline-reader:${jlineVersion}" + javaRestTestRuntimeOnly "org.jline:jline-style:${jlineVersion}" + + javaRestTestRuntimeOnly "net.java.dev.jna:jna:${versions.jna}" + + // spatial dependency + javaRestTestRuntimeOnly project(path: xpackModule('spatial')) + javaRestTestRuntimeOnly project(path: ':modules:legacy-geo') + + javaRestTestRuntimeOnly project(path: ':modules:rest-root') + + javaRestTestRuntimeOnly "org.slf4j:slf4j-api:1.7.25" } - javaRestTestRuntimeOnly "org.jline:jline-terminal:${jlineVersion}" - javaRestTestRuntimeOnly "org.jline:jline-reader:${jlineVersion}" - javaRestTestRuntimeOnly "org.jline:jline-style:${jlineVersion}" - - javaRestTestRuntimeOnly "net.java.dev.jna:jna:${versions.jna}" - - // spatial dependency - javaRestTestRuntimeOnly project(path: xpackModule('spatial')) - javaRestTestRuntimeOnly project(path: ':modules:legacy-geo') - - javaRestTestRuntimeOnly project(path: ':modules:rest-root') - - javaRestTestRuntimeOnly "org.slf4j:slf4j-api:1.7.25" - } } } diff --git a/x-pack/plugin/sql/qa/server/multi-cluster-with-security/build.gradle b/x-pack/plugin/sql/qa/server/multi-cluster-with-security/build.gradle index b42ae29e257f0..04f25f7175451 100644 --- a/x-pack/plugin/sql/qa/server/multi-cluster-with-security/build.gradle +++ b/x-pack/plugin/sql/qa/server/multi-cluster-with-security/build.gradle @@ -1,56 +1,6 @@ -import org.elasticsearch.gradle.testclusters.DefaultTestClustersTask -import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE - -apply plugin: 'elasticsearch.legacy-java-rest-test' - dependencies { javaRestTestImplementation project(path: xpackModule('ql:test-fixtures')) + clusterPlugins project(':x-pack:qa:freeze-plugin') } -def remoteClusterReg = testClusters.register('remote-cluster') { - testDistribution = 'DEFAULT' - setting 'node.roles', '[data,ingest,master]' - setting 'xpack.ml.enabled', 'false' - setting 'xpack.watcher.enabled', 'false' - setting 'xpack.security.enabled', 'true' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.security.autoconfiguration.enabled', 'false' - - user username: 
"test_user", password: "x-pack-test-password" - plugin ':x-pack:qa:freeze-plugin' -} - -def javaRestTestClusterReg = testClusters.register('javaRestTest') { - testDistribution = 'DEFAULT' - setting 'xpack.ml.enabled', 'false' - setting 'xpack.watcher.enabled', 'false' - setting 'cluster.remote.my_remote_cluster.seeds', { - remoteClusterReg.get().getAllTransportPortURI().collect { "\"$it\"" }.toString() - }, IGNORE_VALUE - setting 'cluster.remote.connections_per_cluster', "1" - setting 'xpack.security.enabled', 'true' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.security.autoconfiguration.enabled', 'false' - - user username: "test_user", password: "x-pack-test-password" - plugin ':x-pack:qa:freeze-plugin' -} - -tasks.register("startRemoteCluster", DefaultTestClustersTask.class) { - useCluster remoteClusterReg - doLast { - "Starting remote cluster before integ tests and integ test cluster is started" - } -} - -tasks.named("javaRestTest").configure { - dependsOn 'startRemoteCluster' - useCluster remoteClusterReg - doFirst { - nonInputProperties.systemProperty 'tests.rest.cluster.remote.host', remoteClusterReg.map(c->c.getAllHttpSocketURI().get(0)) - // credentials for both local and remote clusters - nonInputProperties.systemProperty 'tests.rest.cluster.multi.user', "test_user" - nonInputProperties.systemProperty 'tests.rest.cluster.multi.password', "x-pack-test-password" - } -} tasks.named("check").configure {dependsOn("javaRestTest") } // run these tests as part of the "check" task diff --git a/x-pack/plugin/sql/qa/server/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_cluster_with_security/JdbcCatalogIT.java b/x-pack/plugin/sql/qa/server/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_cluster_with_security/JdbcCatalogIT.java index 8807eb679cc27..edd4f6c375e75 100644 --- a/x-pack/plugin/sql/qa/server/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_cluster_with_security/JdbcCatalogIT.java +++ b/x-pack/plugin/sql/qa/server/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_cluster_with_security/JdbcCatalogIT.java @@ -8,44 +8,82 @@ package org.elasticsearch.xpack.sql.qa.multi_cluster_with_security; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xpack.sql.qa.jdbc.JdbcIntegrationTestCase; -import org.junit.AfterClass; -import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; +import org.junit.runner.Description; +import org.junit.runners.model.Statement; -import java.io.IOException; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.List; +import java.util.Properties; import static org.elasticsearch.transport.RemoteClusterAware.buildRemoteIndexName; +import static org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.SqlTestClusterWithRemote.LOCAL_CLUSTER_NAME; +import static org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.SqlTestClusterWithRemote.PASSWORD; +import static org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.SqlTestClusterWithRemote.REMOTE_CLUSTER_ALIAS; +import static org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.SqlTestClusterWithRemote.USER_NAME; public class JdbcCatalogIT extends 
JdbcIntegrationTestCase { + public static SqlTestClusterWithRemote clusterAndRemote = new SqlTestClusterWithRemote(); + public static TestRule setupIndex = new TestRule() { + @Override + public Statement apply(Statement base, Description description) { + return new Statement() { + @Override + public void evaluate() throws Throwable { + try { + index(INDEX_NAME, body -> body.field("zero", 0), clusterAndRemote.getRemoteClient()); + base.evaluate(); + } finally { + clusterAndRemote.getRemoteClient().performRequest(new Request("DELETE", "/" + INDEX_NAME)); + } + } + }; + } + }; - // gradle defines - public static final String LOCAL_CLUSTER_NAME = "javaRestTest"; - public static final String REMOTE_CLUSTER_NAME = "my_remote_cluster"; + @ClassRule + public static RuleChain testSetup = RuleChain.outerRule(clusterAndRemote).around(setupIndex); - private static final String INDEX_NAME = "test"; + @Override + protected String getTestRestCluster() { + return clusterAndRemote.getCluster().getHttpAddresses(); + } - @BeforeClass - static void setupIndex() throws IOException { - index(INDEX_NAME, body -> body.field("zero", 0)); + @Override + protected Settings restClientSettings() { + return clusterAndRemote.clusterAuthSettings(); } - @AfterClass - static void cleanupIndex() throws IOException { - provisioningClient().performRequest(new Request("DELETE", "/" + INDEX_NAME)); + @Override + protected RestClient provisioningClient() { + return clusterAndRemote.getRemoteClient(); } + @Override + protected Properties connectionProperties() { + Properties connectionProperties = super.connectionProperties(); + connectionProperties.put("user", USER_NAME); + connectionProperties.put("password", PASSWORD); + return connectionProperties; + } + + private static final String INDEX_NAME = "test"; + public void testJdbcSetCatalog() throws Exception { try (Connection es = esJdbc()) { PreparedStatement ps = es.prepareStatement("SELECT count(*) FROM " + INDEX_NAME); SQLException ex = expectThrows(SQLException.class, ps::executeQuery); assertTrue(ex.getMessage().contains("Unknown index [" + INDEX_NAME + "]")); - String catalog = REMOTE_CLUSTER_NAME.substring(0, randomIntBetween(0, REMOTE_CLUSTER_NAME.length())) + "*"; + String catalog = REMOTE_CLUSTER_ALIAS.substring(0, randomIntBetween(0, REMOTE_CLUSTER_ALIAS.length())) + "*"; es.setCatalog(catalog); assertEquals(catalog, es.getCatalog()); @@ -62,7 +100,7 @@ public void testJdbcSetCatalog() throws Exception { public void testQueryCatalogPrecedence() throws Exception { try (Connection es = esJdbc()) { - PreparedStatement ps = es.prepareStatement("SELECT count(*) FROM " + buildRemoteIndexName(REMOTE_CLUSTER_NAME, INDEX_NAME)); + PreparedStatement ps = es.prepareStatement("SELECT count(*) FROM " + buildRemoteIndexName(REMOTE_CLUSTER_ALIAS, INDEX_NAME)); es.setCatalog(LOCAL_CLUSTER_NAME); ResultSet rs = ps.executeQuery(); assertTrue(rs.next()); @@ -73,7 +111,7 @@ public void testQueryCatalogPrecedence() throws Exception { public void testQueryWithQualifierAndSetCatalog() throws Exception { try (Connection es = esJdbc()) { PreparedStatement ps = es.prepareStatement("SELECT " + INDEX_NAME + ".zero FROM " + INDEX_NAME); - es.setCatalog(REMOTE_CLUSTER_NAME); + es.setCatalog(REMOTE_CLUSTER_ALIAS); ResultSet rs = ps.executeQuery(); assertTrue(rs.next()); assertEquals(0, rs.getInt(1)); @@ -84,7 +122,7 @@ public void testQueryWithQualifierAndSetCatalog() throws Exception { public void testQueryWithQualifiedFieldAndIndex() throws Exception { try (Connection es = esJdbc()) { 
PreparedStatement ps = es.prepareStatement( - "SELECT " + INDEX_NAME + ".zero FROM " + buildRemoteIndexName(REMOTE_CLUSTER_NAME, INDEX_NAME) + "SELECT " + INDEX_NAME + ".zero FROM " + buildRemoteIndexName(REMOTE_CLUSTER_ALIAS, INDEX_NAME) ); es.setCatalog(LOCAL_CLUSTER_NAME); // set, but should be ignored ResultSet rs = ps.executeQuery(); @@ -105,7 +143,7 @@ public void testCatalogDependentCommands() throws Exception { ResultSet rs = ps.executeQuery(); assertFalse(rs.next()); - es.setCatalog(REMOTE_CLUSTER_NAME); + es.setCatalog(REMOTE_CLUSTER_ALIAS); rs = ps.executeQuery(); assertTrue(rs.next()); assertFalse(rs.next()); diff --git a/x-pack/plugin/sql/qa/server/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_cluster_with_security/JdbcCsvSpecIT.java b/x-pack/plugin/sql/qa/server/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_cluster_with_security/JdbcCsvSpecIT.java index 5a6e1956d39d1..6552cd0df2355 100644 --- a/x-pack/plugin/sql/qa/server/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_cluster_with_security/JdbcCsvSpecIT.java +++ b/x-pack/plugin/sql/qa/server/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_cluster_with_security/JdbcCsvSpecIT.java @@ -8,24 +8,55 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xpack.ql.SpecReader; import org.elasticsearch.xpack.sql.qa.jdbc.CsvSpecTestCase; +import org.junit.ClassRule; import java.sql.Connection; import java.sql.SQLException; import java.util.ArrayList; import java.util.List; import java.util.Locale; +import java.util.Properties; import java.util.regex.Pattern; import static org.elasticsearch.transport.RemoteClusterAware.buildRemoteIndexName; import static org.elasticsearch.xpack.ql.CsvSpecReader.CsvTestCase; import static org.elasticsearch.xpack.ql.CsvSpecReader.specParser; import static org.elasticsearch.xpack.ql.TestUtils.classpathResources; +import static org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.SqlTestClusterWithRemote.PASSWORD; +import static org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.SqlTestClusterWithRemote.REMOTE_CLUSTER_ALIAS; +import static org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.SqlTestClusterWithRemote.USER_NAME; public class JdbcCsvSpecIT extends CsvSpecTestCase { + @ClassRule + public static SqlTestClusterWithRemote clusterAndRemote = new SqlTestClusterWithRemote(); + + @Override + protected String getTestRestCluster() { + return clusterAndRemote.getCluster().getHttpAddresses(); + } + + @Override + protected Settings restClientSettings() { + return clusterAndRemote.clusterAuthSettings(); + } + + @Override + protected RestClient provisioningClient() { + return clusterAndRemote.getRemoteClient(); + } + + @Override + protected Properties connectionProperties() { + Properties connectionProperties = super.connectionProperties(); + connectionProperties.put("user", USER_NAME); + connectionProperties.put("password", PASSWORD); + return connectionProperties; + } - public static final String REMOTE_CLUSTER_NAME = "my_remote_cluster"; // gradle defined public static final String EXTRACT_FN_NAME = "EXTRACT"; private static final Pattern DESCRIBE_OR_SHOW = Pattern.compile("(?i)\\s*(DESCRIBE|SHOW).*"); @@ -58,7 +89,7 @@ private static CsvTestCase qualifyFromClause(CsvTestCase testCase) { 
             j = j >= 0 ? i + j : query.length();
             sb.append(
                 query.substring(i, j)
-                    .replaceAll("(?i)(FROM)(\\s+)(\\w+|\"[^\"]+\")", "$1$2" + buildRemoteIndexName(REMOTE_CLUSTER_NAME, "$3"))
+                    .replaceAll("(?i)(FROM)(\\s+)(\\w+|\"[^\"]+\")", "$1$2" + buildRemoteIndexName(REMOTE_CLUSTER_ALIAS, "$3"))
             );
             boolean inString = false, escaping = false;
             char stringDelim = 0, crrChar;
@@ -104,7 +135,7 @@ public Connection esJdbc() throws SQLException {
         // Only set the default catalog if the query index isn't yet qualified with the catalog, which can happen if query has been written
         // qualified from the start (for the documentation) or edited in qualifyFromClause() above.
         if (isFromQualified(csvTestCase().query) == false) {
-            connection.setCatalog(REMOTE_CLUSTER_NAME);
+            connection.setCatalog(REMOTE_CLUSTER_ALIAS);
         }
         return connection;
     }
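// Editor's note: a sketch of the rewrite qualifyFromClause() performs, assuming
// buildRemoteIndexName(cluster, index) joins its arguments as "cluster:index"
// (example queries here are hypothetical, not from the patch):
//
//     SELECT a FROM test        ->  SELECT a FROM my_remote_cluster:test
//     SELECT a FROM "te*st"     ->  SELECT a FROM my_remote_cluster:"te*st"
//
// Group $3 of the regex captures the (possibly quoted) index name, so quoting is
// preserved inside the qualified name.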
diff --git a/x-pack/plugin/sql/qa/server/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_cluster_with_security/JdbcMetadataIT.java b/x-pack/plugin/sql/qa/server/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_cluster_with_security/JdbcMetadataIT.java
index ce18532dc12a2..8317b8975382c 100644
--- a/x-pack/plugin/sql/qa/server/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_cluster_with_security/JdbcMetadataIT.java
+++ b/x-pack/plugin/sql/qa/server/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_cluster_with_security/JdbcMetadataIT.java
@@ -7,17 +7,47 @@

 package org.elasticsearch.xpack.sql.qa.multi_cluster_with_security;

+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.xpack.sql.qa.jdbc.JdbcIntegrationTestCase;
+import org.junit.ClassRule;

 import java.sql.Connection;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.util.Properties;
+
+import static org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.SqlTestClusterWithRemote.LOCAL_CLUSTER_NAME;
+import static org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.SqlTestClusterWithRemote.PASSWORD;
+import static org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.SqlTestClusterWithRemote.REMOTE_CLUSTER_ALIAS;
+import static org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.SqlTestClusterWithRemote.USER_NAME;

 public class JdbcMetadataIT extends JdbcIntegrationTestCase {
+    @ClassRule
+    public static SqlTestClusterWithRemote clusterAndRemote = new SqlTestClusterWithRemote();
+
+    @Override
+    protected String getTestRestCluster() {
+        return clusterAndRemote.getCluster().getHttpAddresses();
+    }
+
+    @Override
+    protected Settings restClientSettings() {
+        return clusterAndRemote.clusterAuthSettings();
+    }
+
+    @Override
+    protected RestClient provisioningClient() {
+        return clusterAndRemote.getRemoteClient();
+    }

-    // gradle defines
-    public static final String LOCAL_CLUSTER_NAME = "javaRestTest";
-    public static final String REMOTE_CLUSTER_NAME = "my_remote_cluster";
+    @Override
+    protected Properties connectionProperties() {
+        Properties connectionProperties = super.connectionProperties();
+        connectionProperties.put("user", USER_NAME);
+        connectionProperties.put("password", PASSWORD);
+        return connectionProperties;
+    }

     public void testJdbcGetClusters() throws SQLException {
         try (Connection es = esJdbc()) {
@@ -26,7 +56,7 @@ public void testJdbcGetClusters() throws SQLException {
             assertTrue(rs.next());
             assertEquals(LOCAL_CLUSTER_NAME, rs.getString(1));
             assertTrue(rs.next());
-            assertEquals(REMOTE_CLUSTER_NAME, rs.getString(1));
+            assertEquals(REMOTE_CLUSTER_ALIAS, rs.getString(1));
             assertFalse(rs.next());
         }
     }
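// Editor's note: Elasticsearch SQL maps the JDBC "catalog" to a cluster, so the test
// above expects DatabaseMetaData.getCatalogs() to list the local cluster name first and
// the remote-cluster alias second -- "javaRestTest", then "my_remote_cluster".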
diff --git a/x-pack/plugin/sql/qa/server/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_cluster_with_security/RestSqlIT.java b/x-pack/plugin/sql/qa/server/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_cluster_with_security/RestSqlIT.java
index b56cde303446e..c8bb5608db1df 100644
--- a/x-pack/plugin/sql/qa/server/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_cluster_with_security/RestSqlIT.java
+++ b/x-pack/plugin/sql/qa/server/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_cluster_with_security/RestSqlIT.java
@@ -6,20 +6,39 @@
  */
 package org.elasticsearch.xpack.sql.qa.multi_cluster_with_security;

+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase;
+import org.junit.ClassRule;

 import static org.elasticsearch.transport.RemoteClusterAware.buildRemoteIndexName;
+import static org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.SqlTestClusterWithRemote.REMOTE_CLUSTER_ALIAS;

 public class RestSqlIT extends RestSqlTestCase {
+    @ClassRule
+    public static SqlTestClusterWithRemote clusterAndRemote = new SqlTestClusterWithRemote();

-    public static final String REMOTE_CLUSTER_NAME = "my_remote_cluster"; // gradle defined
+    @Override
+    protected String getTestRestCluster() {
+        return clusterAndRemote.getCluster().getHttpAddresses();
+    }
+
+    @Override
+    protected Settings restClientSettings() {
+        return clusterAndRemote.clusterAuthSettings();
+    }
+
+    @Override
+    protected RestClient provisioningClient() {
+        return clusterAndRemote.getRemoteClient();
+    }

     @Override
     protected String indexPattern(String pattern) {
         if (randomBoolean()) {
-            return buildRemoteIndexName(REMOTE_CLUSTER_NAME, pattern);
+            return buildRemoteIndexName(REMOTE_CLUSTER_ALIAS, pattern);
         } else {
-            String cluster = REMOTE_CLUSTER_NAME.substring(0, randomIntBetween(0, REMOTE_CLUSTER_NAME.length())) + "*";
+            String cluster = REMOTE_CLUSTER_ALIAS.substring(0, randomIntBetween(0, REMOTE_CLUSTER_ALIAS.length())) + "*";
             if (pattern.startsWith("\\\"") && pattern.endsWith("\\\"") && pattern.length() > 4) {
                 pattern = pattern.substring(2, pattern.length() - 2);
             }
diff --git a/x-pack/plugin/sql/qa/server/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_cluster_with_security/SqlTestClusterWithRemote.java b/x-pack/plugin/sql/qa/server/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_cluster_with_security/SqlTestClusterWithRemote.java
new file mode 100644
index 0000000000000..a6e5baabd98f3
--- /dev/null
+++ b/x-pack/plugin/sql/qa/server/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_cluster_with_security/SqlTestClusterWithRemote.java
@@ -0,0 +1,131 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.sql.qa.multi_cluster_with_security;
+
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.common.settings.SecureString;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.core.IOUtils;
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.test.cluster.local.distribution.DistributionType;
+import org.junit.rules.TestRule;
+import org.junit.runner.Description;
+import org.junit.runners.model.Statement;
+
+import java.io.IOException;
+
+import static org.elasticsearch.test.rest.ESRestTestCase.basicAuthHeaderValue;
+import static org.elasticsearch.xpack.sql.qa.rest.RemoteClusterAwareSqlRestTestCase.clientBuilder;
+
+public class SqlTestClusterWithRemote implements TestRule {
+    public static final String LOCAL_CLUSTER_NAME = "javaRestTest";
+    public static final String REMOTE_CLUSTER_NAME = "remote-cluster";
+    public static final String REMOTE_CLUSTER_ALIAS = "my_remote_cluster";
+    public static final String USER_NAME = "test_user";
+    public static final String PASSWORD = "x-pack-test-password";
+
+    private static ElasticsearchCluster clusterSettings(String remoteAddress) {
+        return ElasticsearchCluster.local()
+            .distribution(DistributionType.DEFAULT)
+            .name(LOCAL_CLUSTER_NAME)
+            .setting("xpack.ml.enabled", "false")
+            .setting("xpack.watcher.enabled", "false")
+            .setting("cluster.remote." + REMOTE_CLUSTER_ALIAS + ".seeds", remoteAddress)
+            .setting("cluster.remote.connections_per_cluster", "1")
+            .setting("xpack.security.enabled", "true")
+            .setting("xpack.license.self_generated.type", "trial")
+            .setting("xpack.security.autoconfiguration.enabled", "false")
+            .user(USER_NAME, PASSWORD)
+            .plugin(":x-pack:qa:freeze-plugin")
+            .build();
+    }
+
+    private static ElasticsearchCluster remoteClusterSettings() {
+        return ElasticsearchCluster.local()
+            .distribution(DistributionType.DEFAULT)
+            .name(REMOTE_CLUSTER_NAME)
+            .setting("node.roles", "[data,ingest,master]")
+            .setting("xpack.ml.enabled", "false")
+            .setting("xpack.watcher.enabled", "false")
+            .setting("xpack.security.enabled", "true")
+            .setting("xpack.license.self_generated.type", "trial")
+            .setting("xpack.security.autoconfiguration.enabled", "false")
+            .user(USER_NAME, PASSWORD)
+            .plugin(":x-pack:qa:freeze-plugin")
+            .build();
+    }
+
+    /**
+     * Auth settings for both the cluster and the remote.
+     */
+    private static Settings clientAuthSettings() {
+        final String value = basicAuthHeaderValue(USER_NAME, new SecureString(PASSWORD.toCharArray()));
+        return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", value).build();
+    }
+
+    private ElasticsearchCluster cluster;
+    private final ElasticsearchCluster remote = remoteClusterSettings();
+    private RestClient remoteClient;
+
+    public Statement apply(Statement base, Description description) {
+        return remote.apply(startRemoteClient(startCluster(base)), null);
+    }
+
+    public ElasticsearchCluster getCluster() {
+        return cluster;
+    }
+
+    public Settings clusterAuthSettings() {
+        return clientAuthSettings();
+    }
+
+    public RestClient getRemoteClient() {
+        return remoteClient;
+    }
+
+    private Statement startCluster(Statement base) {
+        return new Statement() {
+            @Override
+            public void evaluate() throws Throwable {
+                // Remote address will look like [::1]:12345 - elasticsearch.yml does not like the square brackets.
+                String remoteAddress = remote.getTransportEndpoint(0).replaceAll("\\[|\\]", "");
+                cluster = clusterSettings(remoteAddress);
+                cluster.apply(base, null).evaluate();
+            }
+        };
+    }
+
+    private Statement startRemoteClient(Statement base) {
+        return new Statement() {
+            @Override
+            public void evaluate() throws Throwable {
+                try {
+                    remoteClient = initRemoteClient();
+                    base.evaluate();
+                } finally {
+                    IOUtils.close(remoteClient);
+                }
+            }
+        };
+    }
+
+    private RestClient initRemoteClient() throws IOException {
+        String crossClusterHost = remote.getHttpAddress(0);
+        int portSeparator = crossClusterHost.lastIndexOf(':');
+        if (portSeparator < 0) {
+            throw new IllegalArgumentException("Illegal cluster url [" + crossClusterHost + "]");
+        }
+        String host = crossClusterHost.substring(0, portSeparator);
+        int port = Integer.parseInt(crossClusterHost.substring(portSeparator + 1));
+        HttpHost[] remoteHttpHosts = new HttpHost[] { new HttpHost(host, port) };
+
+        return clientBuilder(clientAuthSettings(), remoteHttpHosts);
+    }
+}
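// Editor's note: the statement nesting in apply() above is what orders startup. A
// minimal sketch of the equivalent JUnit 4 RuleChain, evaluated outside-in (the rule
// names here are hypothetical) -- the remote must be running before the local cluster
// can be seeded with its transport address:
//
//     RuleChain.outerRule(remote)        // start the remote cluster first
//         .around(remoteClientRule)      // then open a REST client against it
//         .around(localClusterRule);     // then start the local cluster, whose
//                                        // cluster.remote.<alias>.seeds points at the remote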
diff --git a/x-pack/plugin/sql/qa/server/multi-node/build.gradle b/x-pack/plugin/sql/qa/server/multi-node/build.gradle
index 4ded053302803..e7a558ba68dd9 100644
--- a/x-pack/plugin/sql/qa/server/multi-node/build.gradle
+++ b/x-pack/plugin/sql/qa/server/multi-node/build.gradle
@@ -6,9 +6,6 @@ description = 'Run a subset of SQL tests against multiple nodes'
  * feel should need to be tested against more than one node.
  */

-testClusters.matching { it.name == "javaRestTest" }.configureEach {
-  numberOfNodes = 2
-  setting 'xpack.security.enabled', 'false'
-  setting 'xpack.license.self_generated.type', 'trial'
-  plugin ':x-pack:qa:freeze-plugin'
+dependencies {
+  clusterPlugins project(':x-pack:qa:freeze-plugin')
 }
diff --git a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/CliLenientIT.java b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/CliLenientIT.java
index fc4a04570ff67..6a920dcc00b7c 100644
--- a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/CliLenientIT.java
+++ b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/CliLenientIT.java
@@ -6,6 +6,16 @@
  */
 package org.elasticsearch.xpack.sql.qa.multi_node;

+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.cli.LenientTestCase;
+import org.junit.ClassRule;

-public class CliLenientIT extends LenientTestCase {}
+public class CliLenientIT extends LenientTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster();
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+}
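// Editor's note: the pattern repeated across these test classes -- a @ClassRule cluster
// plus a getTestRestCluster() override -- replaces the Gradle-managed "javaRestTest"
// cluster. Conventionally, ESRestTestCase.getTestRestCluster() reads the
// tests.rest.cluster system property; overriding it points the suite at the
// JUnit-managed cluster instead:
//
//     @ClassRule
//     public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster();
//
//     @Override
//     protected String getTestRestCluster() {
//         return cluster.getHttpAddresses();   // e.g. "127.0.0.1:9200,127.0.0.1:9201"
//     }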
diff --git a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/CliSelectIT.java b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/CliSelectIT.java
index 6e8162ef11b67..c1ec6ffd25251 100644
--- a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/CliSelectIT.java
+++ b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/CliSelectIT.java
@@ -6,6 +6,16 @@
  */
 package org.elasticsearch.xpack.sql.qa.multi_node;

+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.cli.SelectTestCase;
+import org.junit.ClassRule;

-public class CliSelectIT extends SelectTestCase {}
+public class CliSelectIT extends SelectTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster();
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+}
diff --git a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/CliShowIT.java b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/CliShowIT.java
index db1e506f74301..86d8d89e591ed 100644
--- a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/CliShowIT.java
+++ b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/CliShowIT.java
@@ -6,6 +6,16 @@
  */
 package org.elasticsearch.xpack.sql.qa.multi_node;

+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.cli.ShowTestCase;
+import org.junit.ClassRule;

-public class CliShowIT extends ShowTestCase {}
+public class CliShowIT extends ShowTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster();
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+}
diff --git a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/CustomDateFormatIT.java b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/CustomDateFormatIT.java
index 81b3fd59c6bed..5b8b52e5312c8 100644
--- a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/CustomDateFormatIT.java
+++ b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/CustomDateFormatIT.java
@@ -7,8 +7,16 @@

 package org.elasticsearch.xpack.sql.qa.multi_node;

+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.CustomDateFormatTestCase;
+import org.junit.ClassRule;

 public class CustomDateFormatIT extends CustomDateFormatTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster();

+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
 }
diff --git a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/GeoJdbcCsvSpecIT.java b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/GeoJdbcCsvSpecIT.java
index bca7c41b539c8..e21e5cb64a7ab 100644
--- a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/GeoJdbcCsvSpecIT.java
+++ b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/GeoJdbcCsvSpecIT.java
@@ -7,11 +7,21 @@

 package org.elasticsearch.xpack.sql.qa.multi_node;

+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.geo.GeoCsvSpecTestCase;
+import org.junit.ClassRule;

 import static org.elasticsearch.xpack.ql.CsvSpecReader.CsvTestCase;

 public class GeoJdbcCsvSpecIT extends GeoCsvSpecTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster();
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+
     public GeoJdbcCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) {
         super(fileName, groupName, testName, lineNumber, testCase);
     }
diff --git a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/GeoJdbcSqlSpecIT.java b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/GeoJdbcSqlSpecIT.java
index 65b433afcd102..68f6701892ec6 100644
--- a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/GeoJdbcSqlSpecIT.java
+++ b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/GeoJdbcSqlSpecIT.java
@@ -7,9 +7,19 @@

 package org.elasticsearch.xpack.sql.qa.multi_node;

+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.geo.GeoSqlSpecTestCase;
+import org.junit.ClassRule;

 public class GeoJdbcSqlSpecIT extends GeoSqlSpecTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster();
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+
     public GeoJdbcSqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, String query) {
         super(fileName, groupName, testName, lineNumber, query);
     }
diff --git a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/JdbcDatabaseMetaDataIT.java b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/JdbcDatabaseMetaDataIT.java
index 2477a04f95c8a..0de80872a0fa0 100644
--- a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/JdbcDatabaseMetaDataIT.java
+++ b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/JdbcDatabaseMetaDataIT.java
@@ -6,6 +6,16 @@
  */
 package org.elasticsearch.xpack.sql.qa.multi_node;

+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.jdbc.DatabaseMetaDataTestCase;
+import org.junit.ClassRule;

-public class JdbcDatabaseMetaDataIT extends DatabaseMetaDataTestCase {}
+public class JdbcDatabaseMetaDataIT extends DatabaseMetaDataTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster();
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+}
diff --git a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/JdbcShowTablesIT.java b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/JdbcShowTablesIT.java
index ded5bb81663de..3c8356b9e88f3 100644
--- a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/JdbcShowTablesIT.java
+++ b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/JdbcShowTablesIT.java
@@ -6,6 +6,16 @@
  */
 package org.elasticsearch.xpack.sql.qa.multi_node;

+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.jdbc.ShowTablesTestCase;
+import org.junit.ClassRule;

-public class JdbcShowTablesIT extends ShowTablesTestCase {}
+public class JdbcShowTablesIT extends ShowTablesTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster();
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+}
diff --git a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/RestSqlIT.java b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/RestSqlIT.java
index 98a8441f8cdab..ae909789f9c66 100644
--- a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/RestSqlIT.java
+++ b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/RestSqlIT.java
@@ -6,10 +6,20 @@
  */
 package org.elasticsearch.xpack.sql.qa.multi_node;

+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase;
+import org.junit.ClassRule;

 /**
  * Integration test for the rest sql action. The one that speaks json directly to a
  * user rather than to the JDBC driver or CLI.
  */
-public class RestSqlIT extends RestSqlTestCase {}
+public class RestSqlIT extends RestSqlTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster();
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+}
diff --git a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/RestSqlMultinodeIT.java b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/RestSqlMultinodeIT.java
index 37e19fe428b4a..a51a2f0d34342 100644
--- a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/RestSqlMultinodeIT.java
+++ b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/RestSqlMultinodeIT.java
@@ -12,10 +12,12 @@

 import org.elasticsearch.client.RestClient;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.test.NotEqualMessageBuilder;
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.json.JsonXContent;
 import org.elasticsearch.xpack.sql.qa.rest.BaseRestSqlTestCase;
+import org.junit.ClassRule;

 import java.io.IOException;
 import java.nio.charset.UnsupportedCharsetException;
@@ -34,6 +36,14 @@
  * Tests specific to multiple nodes.
  */
 public class RestSqlMultinodeIT extends ESRestTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster();
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+
     /**
      * Tests count of index run across multiple nodes.
     */
diff --git a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/SqlProtocolIT.java b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/SqlProtocolIT.java
index 7ea96c39f3b44..cd99bb3744864 100644
--- a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/SqlProtocolIT.java
+++ b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/SqlProtocolIT.java
@@ -7,6 +7,16 @@

 package org.elasticsearch.xpack.sql.qa.multi_node;

+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.SqlProtocolTestCase;
+import org.junit.ClassRule;

-public class SqlProtocolIT extends SqlProtocolTestCase {}
+public class SqlProtocolIT extends SqlProtocolTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster();
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+}
diff --git a/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/SqlTestCluster.java b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/SqlTestCluster.java
new file mode 100644
index 0000000000000..9859be524ce6a
--- /dev/null
+++ b/x-pack/plugin/sql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/multi_node/SqlTestCluster.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.sql.qa.multi_node;
+
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.test.cluster.local.distribution.DistributionType;
+
+public class SqlTestCluster {
+    public static ElasticsearchCluster getCluster() {
+        return ElasticsearchCluster.local()
+            .distribution(DistributionType.DEFAULT)
+            .nodes(2)
+            .name("javaRestTest")
+            .setting("xpack.ml.enabled", "false")
+            .setting("xpack.watcher.enabled", "false")
+            .setting("xpack.security.enabled", "false")
+            .setting("xpack.license.self_generated.type", "trial")
+            .plugin(":x-pack:qa:freeze-plugin")
+            .build();
+    }
+}
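// Editor's note: .nodes(2) preserves the two-node topology that the removed Gradle
// testClusters block configured with numberOfNodes = 2, and .plugin(":x-pack:qa:freeze-plugin")
// pairs with the clusterPlugins dependency that this module's build.gradle now declares
// (how the plugin jar is resolved is an assumption inferred from the paired changes above).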
diff --git a/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliSecurityIT.java b/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliSecurityIT.java
index 4fb5143860380..d0eb8a4b6eade 100644
--- a/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliSecurityIT.java
+++ b/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliSecurityIT.java
@@ -21,12 +21,10 @@
 import java.util.List;
 import java.util.Map;

-import static org.elasticsearch.xpack.sql.qa.cli.CliIntegrationTestCase.elasticsearchAddress;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.startsWith;

 public class CliSecurityIT extends SqlSecurityTestCase {
-
     @Override
     public void testDescribeWorksAsFullAccess() {}
@@ -64,7 +62,7 @@ static SecurityConfig adminSecurityConfig() {
     /**
      * Perform security test actions using the CLI.
     */
-    private static class CliActions implements Actions {
+    private class CliActions implements Actions {
         @Override
         public String minimalPermissionsForAllActions() {
             return "cli_or_drivers_minimal";
@@ -227,7 +225,14 @@ protected void assertConnectionTest() throws IOException {
         }
     }

+    private final Actions actions;
+
+    @Override
+    Actions actions() {
+        return actions;
+    }
+
     public CliSecurityIT() {
-        super(new CliActions());
+        actions = new CliActions();
     }
 }
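// Editor's note: CliActions is now an inner (non-static) class so it can call instance
// methods of the enclosing test. That is why the constructor-injected Actions field was
// replaced with an abstract actions() accessor: 'new CliActions()' needs an enclosing
// instance, which does not exist yet while the superclass constructor runs. A sketch of
// the code this refactor avoids (it would not compile):
//
//     public CliSecurityIT() {
//         super(new CliActions());   // error: cannot reference 'this' before the
//     }                              // supertype constructor has been called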
diff --git a/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java b/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java
index 1d88bf4f59100..0e0c2bc8d78b4 100644
--- a/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java
+++ b/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java
@@ -26,7 +26,6 @@
 import java.util.Properties;

 import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcAssert.assertResultSets;
-import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcIntegrationTestCase.elasticsearchAddress;
 import static org.elasticsearch.xpack.sql.qa.security.RestSqlIT.SSL_ENABLED;
 import static org.hamcrest.Matchers.containsString;

@@ -41,7 +40,7 @@ static Properties adminProperties() {
         return properties;
     }

-    static Connection es(Properties properties) throws SQLException {
+    Connection es(Properties properties) throws SQLException {
         Properties props = new Properties();
         props.put("timezone", randomZone().getId());
         props.putAll(properties);
@@ -82,7 +81,7 @@ private static void addSslPropertiesIfNeeded(Properties properties) {
         properties.put("ssl.truststore.pass", "keypass");
     }

-    static void expectActionMatchesAdmin(
+    void expectActionMatchesAdmin(
         CheckedFunction<Connection, ResultSet, SQLException> adminAction,
         String user,
         CheckedFunction<Connection, ResultSet, SQLException> userAction
@@ -92,15 +91,15 @@ static void expectActionMatchesAdmin(
         }
     }

-    static void expectForbidden(String user, CheckedConsumer<Connection, SQLException> action) throws Exception {
+    void expectForbidden(String user, CheckedConsumer<Connection, SQLException> action) throws Exception {
         expectError(user, action, "is unauthorized for user [" + user + "]");
     }

-    static void expectUnknownIndex(String user, CheckedConsumer<Connection, SQLException> action) throws Exception {
+    void expectUnknownIndex(String user, CheckedConsumer<Connection, SQLException> action) throws Exception {
         expectError(user, action, "Unknown index");
     }

-    static void expectError(String user, CheckedConsumer<Connection, SQLException> action, String errorMessage) throws Exception {
+    void expectError(String user, CheckedConsumer<Connection, SQLException> action, String errorMessage) throws Exception {
         SQLException e;
         try (Connection connection = es(userProperties(user))) {
             e = expectThrows(SQLException.class, () -> action.accept(connection));
@@ -108,8 +107,7 @@ static void expectError(String user, CheckedConsumer ...
         assertThat(e.getMessage(), containsString(errorMessage));
     }

-    static void expectActionThrowsUnknownColumn(String user, CheckedConsumer<Connection, SQLException> action, String column)
-        throws Exception {
+    void expectActionThrowsUnknownColumn(String user, CheckedConsumer<Connection, SQLException> action, String column) throws Exception {
         SQLException e;
         try (Connection connection = es(userProperties(user))) {
             e = expectThrows(SQLException.class, () -> action.accept(connection));
@@ -117,7 +115,7 @@ static void expectActionThrowsUnknownColumn(String user, CheckedConsumer ... tables, String user) throws Exception
         @Override
         public void expectForbidden(String user, String sql) throws Exception {
-            JdbcSecurityIT.expectForbidden(user, con -> con.createStatement().executeQuery(sql));
+            JdbcSecurityIT.this.expectForbidden(user, con -> con.createStatement().executeQuery(sql));
         }

         @Override
         public void expectUnknownIndex(String user, String sql) throws Exception {
-            JdbcSecurityIT.expectUnknownIndex(user, con -> con.createStatement().executeQuery(sql));
+            JdbcSecurityIT.this.expectUnknownIndex(user, con -> con.createStatement().executeQuery(sql));
         }

         @Override
@@ -245,8 +243,15 @@ private void expectUnauthorized(String action, String user, ThrowingRunnable r) {
         }
     }

+    private final Actions actions;
+
+    @Override
+    Actions actions() {
+        return actions;
+    }
+
     public JdbcSecurityIT() {
-        super(new JdbcActions());
+        actions = new JdbcActions();
     }

     // Metadata methods only available to JDBC
diff --git a/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java b/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java
index 3b23daf9dde54..7195fa00ce350 100644
--- a/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java
+++ b/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java
@@ -271,8 +271,15 @@ private static Map toMap(Response response, String mode) throws
         }
     }

+    private final Actions actions;
+
+    @Override
+    Actions actions() {
+        return actions;
+    }
+
     public RestSqlSecurityIT() {
-        super(new RestActions());
+        actions = new RestActions();
     }

     @Override
diff --git a/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java b/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java
index 7fd65a19b090e..0ab942fcff39f 100644
--- a/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java
+++ b/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java
@@ -56,6 +61,11 @@
 import static org.hamcrest.Matchers.is;

 public abstract class SqlSecurityTestCase extends ESRestTestCase {
+    public String elasticsearchAddress() {
+        // CLI only supports a single node at a time so we just give it one.
+        return getTestRestCluster().split(",")[0];
+    }
+
     /**
      * Actions taken by this test.
      *
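// Editor's note: getTestRestCluster() returns a comma-separated list of host:port pairs,
// one per node, so the split above simply picks the first node. Illustration with a
// hypothetical two-node address list:
//
//     "127.0.0.1:9200,127.0.0.1:9201".split(",")[0]  ->  "127.0.0.1:9200"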

@@ -131,7 +136,7 @@ private static Path lookupRolledOverAuditLog() {
     /**
      * The actions taken by this test.
     */
-    private final Actions actions;
+    abstract Actions actions();

     /**
      * How much of the audit log was written before the test started.
@@ -143,10 +148,6 @@ private static Path lookupRolledOverAuditLog() {
     */
     private static boolean auditFileRolledOver = false;

-    public SqlSecurityTestCase(Actions actions) {
-        this.actions = actions;
-    }
-
     /**
      * All tests run as an administrative user but use
      * es-security-runas-user to become a less privileged user when needed.
@@ -237,23 +238,23 @@ protected String getProtocol() {
     }

     public void testQueryWorksAsAdmin() throws Exception {
-        actions.queryWorksAsAdmin();
+        actions().queryWorksAsAdmin();
         createAuditLogAsserter().expectSqlCompositeActionFieldCaps("test_admin", "test").assertLogs();
     }

     public void testQueryWithFullAccess() throws Exception {
-        createUser("full_access", actions.minimalPermissionsForAllActions());
+        createUser("full_access", actions().minimalPermissionsForAllActions());

-        actions.expectMatchesAdmin("SELECT * FROM test ORDER BY a", "full_access", "SELECT * FROM test ORDER BY a");
+        actions().expectMatchesAdmin("SELECT * FROM test ORDER BY a", "full_access", "SELECT * FROM test ORDER BY a");
         createAuditLogAsserter().expectSqlCompositeActionFieldCaps("test_admin", "test")
             .expectSqlCompositeActionFieldCaps("full_access", "test")
             .assertLogs();
     }

     public void testScrollWithFullAccess() throws Exception {
-        createUser("full_access", actions.minimalPermissionsForAllActions());
+        createUser("full_access", actions().minimalPermissionsForAllActions());

-        actions.expectScrollMatchesAdmin("SELECT * FROM test ORDER BY a", "full_access", "SELECT * FROM test ORDER BY a");
+        actions().expectScrollMatchesAdmin("SELECT * FROM test ORDER BY a", "full_access", "SELECT * FROM test ORDER BY a");
         createAuditLogAsserter().expectSqlCompositeActionFieldCaps("test_admin", "test")
             /* Scrolling doesn't have to access the index again, at least not through sql.
              * If we asserted query and scroll logs then we would see the scroll.
             */
@@ -268,14 +269,14 @@ public void testScrollWithFullAccess() throws Exception {

     public void testQueryNoAccess() throws Exception {
         createUser("no_access", "read_nothing");

-        actions.expectForbidden("no_access", "SELECT * FROM test");
+        actions().expectForbidden("no_access", "SELECT * FROM test");
         createAuditLogAsserter().expect(false, SQL_ACTION_NAME, "no_access", empty()).assertLogs();
     }

     public void testQueryWrongAccess() throws Exception {
         createUser("wrong_access", "read_something_else");

-        actions.expectUnknownIndex("wrong_access", "SELECT * FROM test");
+        actions().expectUnknownIndex("wrong_access", "SELECT * FROM test");
         createAuditLogAsserter()
             // This user has permission to run sql queries so they are given preliminary authorization
             .expect(true, SQL_ACTION_NAME, "wrong_access", empty())
@@ -287,7 +288,7 @@ public void testQueryWrongAccess() throws Exception {
     public void testQuerySingleFieldGranted() throws Exception {
         createUser("only_a", "read_test_a");

-        actions.expectMatchesAdmin("SELECT a FROM test ORDER BY a", "only_a", "SELECT * FROM test ORDER BY a");
+        actions().expectMatchesAdmin("SELECT a FROM test ORDER BY a", "only_a", "SELECT * FROM test ORDER BY a");
         createAuditLogAsserter().expectSqlCompositeActionFieldCaps("test_admin", "test")
             .expectSqlCompositeActionFieldCaps("only_a", "test")
             .assertLogs();
@@ -296,7 +297,7 @@ public void testQuerySingleFieldGranted() throws Exception {
     public void testScrollWithSingleFieldGranted() throws Exception {
         createUser("only_a", "read_test_a");

-        actions.expectScrollMatchesAdmin("SELECT a FROM test ORDER BY a", "only_a", "SELECT * FROM test ORDER BY a");
+        actions().expectScrollMatchesAdmin("SELECT a FROM test ORDER BY a", "only_a", "SELECT * FROM test ORDER BY a");
         createAuditLogAsserter().expectSqlCompositeActionFieldCaps("test_admin", "test")
             /* Scrolling doesn't have to access the index again, at least not through sql.
              * If we asserted query and scroll logs then we would see the scroll. */
@@ -311,7 +312,7 @@ public void testScrollWithSingleFieldGranted() throws Exception {
     public void testQueryStringSingleFieldGrantedWrongRequested() throws Exception {
         createUser("only_a", "read_test_a");

-        actions.expectUnknownColumn("only_a", "SELECT c FROM test", "c");
+        actions().expectUnknownColumn("only_a", "SELECT c FROM test", "c");
         /* The user has permission to query the index but one of the
          * columns that they explicitly mention is hidden from them
          * by field level access control. This *looks* like a successful
@@ -324,7 +325,7 @@ public void testQueryStringSingleFieldGrantedWrongRequested() throws Exception {
     public void testQuerySingleFieldExcepted() throws Exception {
         createUser("not_c", "read_test_a_and_b");

-        actions.expectMatchesAdmin("SELECT a, b FROM test ORDER BY a", "not_c", "SELECT * FROM test ORDER BY a");
+        actions().expectMatchesAdmin("SELECT a, b FROM test ORDER BY a", "not_c", "SELECT * FROM test ORDER BY a");
         createAuditLogAsserter().expectSqlCompositeActionFieldCaps("test_admin", "test")
             .expectSqlCompositeActionFieldCaps("not_c", "test")
             .assertLogs();
@@ -333,7 +334,7 @@ public void testQuerySingleFieldExcepted() throws Exception {
     public void testScrollWithSingleFieldExcepted() throws Exception {
         createUser("not_c", "read_test_a_and_b");

-        actions.expectScrollMatchesAdmin("SELECT a, b FROM test ORDER BY a", "not_c", "SELECT * FROM test ORDER BY a");
+        actions().expectScrollMatchesAdmin("SELECT a, b FROM test ORDER BY a", "not_c", "SELECT * FROM test ORDER BY a");
         createAuditLogAsserter().expectSqlCompositeActionFieldCaps("test_admin", "test")
             /* Scrolling doesn't have to access the index again, at least not through sql.
              * If we asserted query and scroll logs then we would see the scroll. */
@@ -348,7 +349,7 @@ public void testScrollWithSingleFieldExcepted() throws Exception {
     public void testQuerySingleFieldExceptionedWrongRequested() throws Exception {
         createUser("not_c", "read_test_a_and_b");

-        actions.expectUnknownColumn("not_c", "SELECT c FROM test", "c");
+        actions().expectUnknownColumn("not_c", "SELECT c FROM test", "c");
         /* The user has permission to query the index but one of the
          * columns that they explicitly mention is hidden from them
          * by field level access control. This *looks* like a successful
@@ -361,21 +362,21 @@ public void testQuerySingleFieldExceptionedWrongRequested() throws Exception {
     public void testQueryDocumentExcluded() throws Exception {
         createUser("no_3s", "read_test_without_c_3");

-        actions.expectMatchesAdmin("SELECT * FROM test WHERE c != 3 ORDER BY a", "no_3s", "SELECT * FROM test ORDER BY a");
+        actions().expectMatchesAdmin("SELECT * FROM test WHERE c != 3 ORDER BY a", "no_3s", "SELECT * FROM test ORDER BY a");
         createAuditLogAsserter().expectSqlCompositeActionFieldCaps("test_admin", "test")
             .expectSqlCompositeActionFieldCaps("no_3s", "test")
             .assertLogs();
     }

     public void testShowTablesWorksAsAdmin() throws Exception {
-        actions.expectShowTables(Arrays.asList("bort", "test"), null);
+        actions().expectShowTables(Arrays.asList("bort", "test"), null);
         createAuditLogAsserter().expectSqlCompositeActionGetIndex("test_admin", "bort", "test").assertLogs();
     }

     public void testShowTablesWorksAsFullAccess() throws Exception {
-        createUser("full_access", actions.minimalPermissionsForAllActions());
+        createUser("full_access", actions().minimalPermissionsForAllActions());

-        actions.expectMatchesAdmin("SHOW TABLES LIKE '%t'", "full_access", "SHOW TABLES");
+        actions().expectMatchesAdmin("SHOW TABLES LIKE '%t'", "full_access", "SHOW TABLES");
         createAuditLogAsserter().expectSqlCompositeActionGetIndex("test_admin", "bort", "test")
             .expectSqlCompositeActionGetIndex("full_access", "bort", "test")
             .assertLogs();
@@ -384,14 +385,14 @@ public void testShowTablesWorksAsFullAccess() throws Exception {
     public void testShowTablesWithNoAccess() throws Exception {
         createUser("no_access", "read_nothing");

-        actions.expectForbidden("no_access", "SHOW TABLES");
+        actions().expectForbidden("no_access", "SHOW TABLES");
         createAuditLogAsserter().expect(false, SQL_ACTION_NAME, "no_access", empty()).assertLogs();
     }

     public void testShowTablesWithLimitedAccess() throws Exception {
         createUser("read_bort", "read_bort");

-        actions.expectMatchesAdmin("SHOW TABLES LIKE 'bort'", "read_bort", "SHOW TABLES");
+        actions().expectMatchesAdmin("SHOW TABLES LIKE 'bort'", "read_bort", "SHOW TABLES");
         createAuditLogAsserter().expectSqlCompositeActionGetIndex("test_admin", "bort")
             .expectSqlCompositeActionGetIndex("read_bort", "bort")
             .assertLogs();
@@ -400,7 +401,7 @@ public void testShowTablesWithLimitedAccess() throws Exception {
     public void testShowTablesWithLimitedAccessUnaccessableIndex() throws Exception {
         createUser("read_bort", "read_bort");

-        actions.expectMatchesAdmin("SHOW TABLES LIKE 'not-created'", "read_bort", "SHOW TABLES LIKE 'test'");
+        actions().expectMatchesAdmin("SHOW TABLES LIKE 'not-created'", "read_bort", "SHOW TABLES LIKE 'test'");
         createAuditLogAsserter().expect(true, SQL_ACTION_NAME, "test_admin", empty())
             .expect(true, GetIndexAction.NAME, "test_admin", contains("not-created"))
             .expect(true, SQL_ACTION_NAME, "read_bort", empty())
@@ -413,14 +414,14 @@ public void testDescribeWorksAsAdmin() throws Exception {
         expected.put("a", asList("BIGINT", "long"));
         expected.put("b", asList("BIGINT", "long"));
         expected.put("c", asList("BIGINT", "long"));
-        actions.expectDescribe(expected, null);
+        actions().expectDescribe(expected, null);
         createAuditLogAsserter().expectSqlCompositeActionFieldCaps("test_admin", "test").assertLogs();
     }

     public void testDescribeWorksAsFullAccess() throws Exception {
-        createUser("full_access", actions.minimalPermissionsForAllActions());
+        createUser("full_access", actions().minimalPermissionsForAllActions());

-        actions.expectMatchesAdmin("DESCRIBE test", "full_access", "DESCRIBE test");
+        actions().expectMatchesAdmin("DESCRIBE test", "full_access", "DESCRIBE test");
         createAuditLogAsserter().expectSqlCompositeActionFieldCaps("test_admin", "test")
             .expectSqlCompositeActionFieldCaps("full_access", "test")
             .assertLogs();
@@ -429,14 +430,14 @@ public void testDescribeWorksAsFullAccess() throws Exception {
     public void testDescribeWithNoAccess() throws Exception {
         createUser("no_access", "read_nothing");

-        actions.expectForbidden("no_access", "DESCRIBE test");
+        actions().expectForbidden("no_access", "DESCRIBE test");
         createAuditLogAsserter().expect(false, SQL_ACTION_NAME, "no_access", empty()).assertLogs();
     }

     public void testDescribeWithWrongAccess() throws Exception {
         createUser("wrong_access", "read_something_else");

-        actions.expectDescribe(Collections.emptyMap(), "wrong_access");
+        actions().expectDescribe(Collections.emptyMap(), "wrong_access");
         createAuditLogAsserter()
             // This user has permission to run sql queries so they are given preliminary authorization
             .expect(true, SQL_ACTION_NAME, "wrong_access", empty())
@@ -448,7 +449,7 @@ public void testDescribeWithWrongAccess() throws Exception {
     public void testDescribeSingleFieldGranted() throws Exception {
         createUser("only_a", "read_test_a");

-        actions.expectDescribe(singletonMap("a", asList("BIGINT", "long")), "only_a");
+        actions().expectDescribe(singletonMap("a", asList("BIGINT", "long")), "only_a");
         createAuditLogAsserter().expectSqlCompositeActionFieldCaps("only_a", "test").assertLogs();
     }

@@ -458,14 +459,14 @@ public void testDescribeSingleFieldExcepted() throws Exception {
         Map<String, List<String>> expected = new TreeMap<>();
         expected.put("a", asList("BIGINT", "long"));
         expected.put("b", asList("BIGINT", "long"));
-        actions.expectDescribe(expected, "not_c");
+        actions().expectDescribe(expected, "not_c");
         createAuditLogAsserter().expectSqlCompositeActionFieldCaps("not_c", "test").assertLogs();
     }

     public void testDescribeDocumentExcluded() throws Exception {
         createUser("no_3s", "read_test_without_c_3");

-        actions.expectMatchesAdmin("DESCRIBE test", "no_3s", "DESCRIBE test");
+        actions().expectMatchesAdmin("DESCRIBE test", "no_3s", "DESCRIBE test");
         createAuditLogAsserter().expectSqlCompositeActionFieldCaps("test_admin", "test")
             .expectSqlCompositeActionFieldCaps("no_3s", "test")
             .assertLogs();
@@ -473,15 +474,15 @@ public void testDescribeDocumentExcluded() throws Exception {
     public void testNoMonitorMain() throws Exception {
         createUser("no_monitor_main", "no_monitor_main");
-        actions.checkNoMonitorMain("no_monitor_main");
+        actions().checkNoMonitorMain("no_monitor_main");
     }

     public void testNoGetIndex() throws Exception {
         createUser("no_get_index", "no_get_index");

-        actions.expectForbidden("no_get_index", "SELECT * FROM test");
-        actions.expectForbidden("no_get_index", "SHOW TABLES LIKE 'test'");
-        actions.expectForbidden("no_get_index", "DESCRIBE test");
+        actions().expectForbidden("no_get_index", "SELECT * FROM test");
+        actions().expectForbidden("no_get_index", "SHOW TABLES LIKE 'test'");
+        actions().expectForbidden("no_get_index", "DESCRIBE test");
     }

     protected static void createUser(String name, String role) throws IOException {
diff --git a/x-pack/plugin/sql/qa/server/single-node/build.gradle b/x-pack/plugin/sql/qa/server/single-node/build.gradle
index c58dca254db03..e4376edc683d1 100644
--- a/x-pack/plugin/sql/qa/server/single-node/build.gradle
+++ b/x-pack/plugin/sql/qa/server/single-node/build.gradle
@@ -1,7 +1,6 @@
-testClusters.matching { it.name == "javaRestTest" }.configureEach {
-  testDistribution = 'DEFAULT'
-  setting 'xpack.security.enabled', 'false'
-  setting 'xpack.license.self_generated.type', 'trial'
-  plugin ':x-pack:qa:freeze-plugin'
-}
+// Necessary to use tests in Serverless
+apply plugin: 'elasticsearch.internal-test-artifact'

+dependencies {
+  clusterPlugins project(':x-pack:qa:freeze-plugin')
+}
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliErrorsIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliErrorsIT.java
index 2256890f33a1b..3a92dd675203f 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliErrorsIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliErrorsIT.java
@@ -6,6 +6,16 @@
  */
 package org.elasticsearch.xpack.sql.qa.single_node;

+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.cli.ErrorsTestCase;
+import org.junit.ClassRule;

-public class CliErrorsIT extends ErrorsTestCase {}
+public class CliErrorsIT extends ErrorsTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+}
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliExplainIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliExplainIT.java
index 7f95afc32181a..ac4bffdb951d5 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliExplainIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliExplainIT.java
@@ -6,7 +6,9 @@
  */
 package org.elasticsearch.xpack.sql.qa.single_node;

+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.cli.CliIntegrationTestCase;
+import org.junit.ClassRule;

 import java.io.IOException;

@@ -14,6 +16,14 @@
 import static org.hamcrest.Matchers.startsWith;

 public class CliExplainIT extends CliIntegrationTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+
     public void testExplainBasic() throws IOException {
         index("test", body -> body.field("test_field", "test_value"));

diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliFetchSizeIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliFetchSizeIT.java
index f7a6854b02fce..83daeccab0b0e 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliFetchSizeIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliFetchSizeIT.java
@@ -6,6 +6,16 @@
  */
 package org.elasticsearch.xpack.sql.qa.single_node;

+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.cli.FetchSizeTestCase;
+import org.junit.ClassRule;

-public class CliFetchSizeIT extends FetchSizeTestCase {}
+public class CliFetchSizeIT extends FetchSizeTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+}
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliLenientIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliLenientIT.java
index afcfca0a01ed2..ea7f793dd56ee 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliLenientIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliLenientIT.java
@@ -6,6 +6,16 @@
  */
 package org.elasticsearch.xpack.sql.qa.single_node;

+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.cli.LenientTestCase;
+import org.junit.ClassRule;

-public class CliLenientIT extends LenientTestCase {}
+public class CliLenientIT extends LenientTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+}
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliPartialResultsIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliPartialResultsIT.java
index 82e89da3cefb6..0d6f3fd530d22 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliPartialResultsIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliPartialResultsIT.java
@@ -6,8 +6,16 @@
  */
 package org.elasticsearch.xpack.sql.qa.single_node;

+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.cli.PartialResultsTestCase;
+import org.junit.ClassRule;

 public class CliPartialResultsIT extends PartialResultsTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);

+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
 }
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliSelectIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliSelectIT.java
index d45d82512fe55..bbc0c16393cb7 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliSelectIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliSelectIT.java
@@ -6,6 +6,16 @@
  */
 package org.elasticsearch.xpack.sql.qa.single_node;

+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.cli.SelectTestCase;
+import org.junit.ClassRule;

-public class CliSelectIT extends SelectTestCase {}
+public class CliSelectIT extends SelectTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+}
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliShowIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliShowIT.java
index 982dd744a6934..0c663be1e8706 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliShowIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CliShowIT.java
@@ -6,6 +6,16 @@
  */
 package org.elasticsearch.xpack.sql.qa.single_node;

+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.cli.ShowTestCase;
+import org.junit.ClassRule;

-public class CliShowIT extends ShowTestCase {}
+public class CliShowIT extends ShowTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+}
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/ConsistentFunctionArgHandlingIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/ConsistentFunctionArgHandlingIT.java
index a3f966e712b29..3db713b5ed4db 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/ConsistentFunctionArgHandlingIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/ConsistentFunctionArgHandlingIT.java
@@ -14,7 +16,9 @@
 import org.elasticsearch.core.PathUtils;
 import org.elasticsearch.core.Strings;
 import org.elasticsearch.core.Tuple;
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.jdbc.JdbcIntegrationTestCase;
+import org.junit.ClassRule;

 import java.io.IOException;
 import java.nio.file.Files;
@@ -49,6 +51,13 @@
  * new Fn("ASCII", "foobar").ignore()
  */
 public class ConsistentFunctionArgHandlingIT extends JdbcIntegrationTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }

     private static final List<Fn> FUNCTION_CALLS_TO_TEST = asList(
         new Fn("ASCII", "foobar"),
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CustomDateFormatIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CustomDateFormatIT.java
index c0d0127f17f77..fb312a75bcc9c 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CustomDateFormatIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/CustomDateFormatIT.java
@@ -7,8 +7,16 @@

 package org.elasticsearch.xpack.sql.qa.single_node;

+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.CustomDateFormatTestCase;
+import org.junit.ClassRule;

 public class CustomDateFormatIT extends CustomDateFormatTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);

+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
 }
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/FieldExtractorIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/FieldExtractorIT.java
index 2fa2457a5c608..daaa7e81154b4 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/FieldExtractorIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/FieldExtractorIT.java
@@ -7,8 +7,16 @@

 package org.elasticsearch.xpack.sql.qa.single_node;

+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.FieldExtractorTestCase;
+import org.junit.ClassRule;

 public class FieldExtractorIT extends FieldExtractorTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);

+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
 }
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcCsvSpecIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcCsvSpecIT.java
index bb0d16cc5ec9a..3763169977873 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcCsvSpecIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcCsvSpecIT.java
@@ -9,7 +9,11 @@

 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.geo.GeoCsvSpecTestCase;
+import org.elasticsearch.xpack.sql.qa.jdbc.DataLoader;
+import org.junit.ClassRule;

 import java.util.ArrayList;
 import java.util.List;

@@ -18,6 +22,18 @@
 
 public class GeoJdbcCsvSpecIT extends GeoCsvSpecTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);
+
+    @Override
+    protected void loadDataset(RestClient client) throws Exception {
+        DataLoader.loadDatasetIntoEs(client, false);
+    }
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
 
     @ParametersFactory(argumentFormatting = PARAM_FORMATTING)
     public static List<Object[]> readScriptSpec() throws Exception {
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcSqlSpecIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcSqlSpecIT.java
index 7eb7a7be5febd..a2a8cc87f62bc 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcSqlSpecIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcSqlSpecIT.java
@@ -7,9 +7,26 @@
 
 package org.elasticsearch.xpack.sql.qa.single_node;
 
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.geo.GeoSqlSpecTestCase;
+import org.elasticsearch.xpack.sql.qa.jdbc.DataLoader;
+import org.junit.ClassRule;
 
 public class GeoJdbcSqlSpecIT extends GeoSqlSpecTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);
+
+    @Override
+    protected void loadDataset(RestClient client) throws Exception {
+        DataLoader.loadDatasetIntoEs(client, false);
+    }
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+
     public GeoJdbcSqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, String query) {
         super(fileName, groupName, testName, lineNumber, query);
     }
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java
index 4346aad97e4cd..8f661fa037e25 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java
@@ -8,7 +8,11 @@
 
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.jdbc.CsvSpecTestCase;
+import org.elasticsearch.xpack.sql.qa.jdbc.DataLoader;
+import org.junit.ClassRule;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -17,6 +21,18 @@ import static org.elasticsearch.xpack.ql.CsvSpecReader.specParser;
 
 public class JdbcCsvSpecIT extends CsvSpecTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);
+
+    @Override
+    protected void loadDataset(RestClient client) throws Exception {
+        DataLoader.loadDatasetIntoEs(client, false);
+    }
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
 
     @ParametersFactory(argumentFormatting = PARAM_FORMATTING)
     public static List<Object[]> readScriptSpec() throws Exception {
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDatabaseMetaDataIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDatabaseMetaDataIT.java
index da1ec865922a2..1a7337255fc78 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDatabaseMetaDataIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDatabaseMetaDataIT.java
@@ -6,6 +6,22 @@
  */
 package org.elasticsearch.xpack.sql.qa.single_node;
 
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.jdbc.DatabaseMetaDataTestCase;
+import org.junit.ClassRule;
 
-public class JdbcDatabaseMetaDataIT extends DatabaseMetaDataTestCase {}
+public class JdbcDatabaseMetaDataIT extends DatabaseMetaDataTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+
+    @Override
+    protected String clusterName() {
+        // `GET /` on Serverless returns a response with `cluster_name: `, so we need to use a fixed string.
+        return SqlTestCluster.CLUSTER_NAME;
+    }
+}
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDocCsvSpecIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDocCsvSpecIT.java
index 73742a553f7a2..fb8a96ca4ea7c 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDocCsvSpecIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDocCsvSpecIT.java
@@ -10,10 +10,12 @@ import org.apache.logging.log4j.Logger;
 
 import org.elasticsearch.client.RestClient;
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.jdbc.DataLoader;
 import org.elasticsearch.xpack.sql.qa.jdbc.JdbcAssert;
 import org.elasticsearch.xpack.sql.qa.jdbc.SpecBaseIntegrationTestCase;
 import org.elasticsearch.xpack.sql.qa.jdbc.SqlSpecTestCase;
+import org.junit.ClassRule;
 
 import java.sql.Connection;
 import java.sql.ResultSet;
@@ -39,6 +41,13 @@
  * at this stage and, to not keep things stalling, started with this approach.
  */
 public class JdbcDocCsvSpecIT extends SpecBaseIntegrationTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
 
     private final CsvTestCase testCase;
 
@@ -49,7 +58,7 @@ protected String indexName() {
 
     @Override
     protected void loadDataset(RestClient client) throws Exception {
-        DataLoader.loadDocsDatasetIntoEs(client);
+        DataLoader.loadDocsDatasetIntoEs(client, false);
     }
 
     @ParametersFactory(shuffle = false, argumentFormatting = SqlSpecTestCase.PARAM_FORMATTING)
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDocFrozenCsvSpecIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDocFrozenCsvSpecIT.java
new file mode 100644
index 0000000000000..2276db4cff105
--- /dev/null
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDocFrozenCsvSpecIT.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.sql.qa.single_node;
+
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.xpack.sql.qa.jdbc.DataLoader;
+import org.elasticsearch.xpack.sql.qa.jdbc.JdbcAssert;
+import org.elasticsearch.xpack.sql.qa.jdbc.SpecBaseIntegrationTestCase;
+import org.elasticsearch.xpack.sql.qa.jdbc.SqlSpecTestCase;
+import org.junit.ClassRule;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.List;
+
+import static org.elasticsearch.xpack.ql.CsvSpecReader.CsvTestCase;
+import static org.elasticsearch.xpack.ql.CsvSpecReader.specParser;
+import static org.elasticsearch.xpack.ql.SpecReader.Parser;
+import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.csvConnection;
+import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.executeCsvQuery;
+
+public class JdbcDocFrozenCsvSpecIT extends SpecBaseIntegrationTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(true);
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+
+    private final CsvTestCase testCase;
+
+    @Override
+    protected String indexName() {
+        return "library";
+    }
+
+    @Override
+    protected void loadDataset(RestClient client) throws Exception {
+        DataLoader.loadDocsDatasetIntoEs(client, true);
+    }
+
+    @ParametersFactory(shuffle = false, argumentFormatting = SqlSpecTestCase.PARAM_FORMATTING)
+    public static List<Object[]> readScriptSpec() throws Exception {
+        Parser parser = specParser();
+        return readScriptSpec("/docs/docs-frozen.csv-spec", parser);
+    }
+
+    public JdbcDocFrozenCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) {
+        super(fileName, groupName, testName, lineNumber);
+        this.testCase = testCase;
+    }
+
+    @Override
+    protected void assertResults(ResultSet expected, ResultSet elastic) throws SQLException {
+        Logger log = logEsResultSet() ? logger : null;
+
+        JdbcAssert.assertResultSets(expected, elastic, log, true, true);
+    }
+
+    @Override
+    protected final void doTest() throws Throwable {
+        try (Connection csv = csvConnection(testCase); Connection es = esJdbc()) {
+
+            // pass the testName as table for debugging purposes (in case the underlying reader is missing)
+            ResultSet expected = executeCsvQuery(csv, testName);
+            ResultSet elasticResults = executeJdbcQuery(es, testCase.query);
+            assertResults(expected, elasticResults);
+        }
+    }
+}
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcFrozenCsvSpecIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcFrozenCsvSpecIT.java
index d912eb5a6261e..11146bfb9aa28 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcFrozenCsvSpecIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcFrozenCsvSpecIT.java
@@ -8,7 +8,9 @@
 
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.jdbc.CsvSpecTestCase;
+import org.junit.ClassRule;
 
 import java.util.List;
 import java.util.Properties;
@@ -18,6 +20,13 @@ import static org.elasticsearch.xpack.ql.CsvSpecReader.specParser;
 
 public class JdbcFrozenCsvSpecIT extends CsvSpecTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(true);
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
 
     @ParametersFactory(argumentFormatting = PARAM_FORMATTING)
     public static List<Object[]> readScriptSpec() throws Exception {
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcShardFailureIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcShardFailureIT.java
index 492971cf2a13c..eace1b5ad1ced 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcShardFailureIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcShardFailureIT.java
@@ -8,7 +8,9 @@
 
 import org.elasticsearch.client.Request;
 import org.elasticsearch.core.Strings;
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.jdbc.JdbcIntegrationTestCase;
+import org.junit.ClassRule;
 
 import java.io.IOException;
 import java.sql.Connection;
@@ -22,7 +24,27 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 
 public class JdbcShardFailureIT extends JdbcIntegrationTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);
+
+    private String nodeAddresses;
+
+    /**
+     * Caches the node addresses when called for the first time.
+     * Once cluster is in red health, calling this will time out if it was not called before.
+     */
+    @Override
+    protected String getTestRestCluster() {
+        if (nodeAddresses == null) {
+            nodeAddresses = cluster.getHttpAddresses();
+        }
+        return nodeAddresses;
+    }
+
     private void createTestIndex() throws IOException {
+        // This method will put the cluster into a red state intentionally, so cache the node addresses first.
+        getTestRestCluster();
+
         Request createTest1 = new Request("PUT", "/test1");
         String body1 = """
             {"aliases":{"test":{}}, "mappings": {"properties": {"test_field":{"type":"integer"}}}}""";
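Note: the caching in getTestRestCluster() above encodes an ordering constraint that is easy to miss: cluster.getHttpAddresses() blocks on cluster health, so it must run before the suite deliberately breaks that health. A minimal illustration of the two orderings (method names taken from the test above, scenario illustrative only, not part of the patch):

    public void orderingSketch() throws IOException {
        getTestRestCluster();   // 1) resolve and cache the node addresses while health is still green
        createTestIndex();      // 2) the cluster may now go red; the cached addresses keep working
        // Reversing the two calls would make the first getHttpAddresses() lookup
        // block on cluster health and eventually time out.
    }
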
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcShowTablesIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcShowTablesIT.java
index d9677bd832226..e555448d3284d 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcShowTablesIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcShowTablesIT.java
@@ -6,6 +6,16 @@
  */
 package org.elasticsearch.xpack.sql.qa.single_node;
 
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.jdbc.ShowTablesTestCase;
+import org.junit.ClassRule;
 
-public class JdbcShowTablesIT extends ShowTablesTestCase {}
+public class JdbcShowTablesIT extends ShowTablesTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+}
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcSqlSpecIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcSqlSpecIT.java
index 15b69f0158ef3..1c9d029063b12 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcSqlSpecIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcSqlSpecIT.java
@@ -6,9 +6,26 @@
  */
 package org.elasticsearch.xpack.sql.qa.single_node;
 
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.xpack.sql.qa.jdbc.DataLoader;
 import org.elasticsearch.xpack.sql.qa.jdbc.SqlSpecTestCase;
+import org.junit.ClassRule;
 
 public class JdbcSqlSpecIT extends SqlSpecTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);
+
+    @Override
+    protected void loadDataset(RestClient client) throws Exception {
+        DataLoader.loadDatasetIntoEs(client, false);
+    }
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+
     public JdbcSqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, String query) {
         super(fileName, groupName, testName, lineNumber, query);
     }
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/RestSqlDeprecationIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/RestSqlDeprecationIT.java
index 88af42929a741..df5d43f079de3 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/RestSqlDeprecationIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/RestSqlDeprecationIT.java
@@ -10,13 +10,22 @@ import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
 import org.elasticsearch.client.Request;
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.rest.BaseRestSqlTestCase;
+import org.junit.ClassRule;
 
 import java.io.IOException;
 
 import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.SQL_QUERY_REST_ENDPOINT;
 
 public class RestSqlDeprecationIT extends BaseRestSqlTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
 
     public void testIndexIncludeParameterIsDeprecated() throws IOException {
         testDeprecationWarning(
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/RestSqlIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/RestSqlIT.java
index c0a1a79e4c9a7..3cc9844e6664e 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/RestSqlIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/RestSqlIT.java
@@ -10,7 +10,9 @@
 
 import org.apache.http.entity.StringEntity;
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.RequestOptions;
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase;
+import org.junit.ClassRule;
 
 import java.io.IOException;
 
@@ -21,6 +23,13 @@
  * user rather than to the JDBC driver or CLI.
  */
 public class RestSqlIT extends RestSqlTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
 
     public void testErrorMessageForTranslatingQueryWithWhereEvaluatingToFalse() throws IOException {
         index("{\"foo\":1}");
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/RestSqlPaginationIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/RestSqlPaginationIT.java
index da26f550cafe4..088f5af1e0aef 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/RestSqlPaginationIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/RestSqlPaginationIT.java
@@ -7,6 +7,16 @@
 
 package org.elasticsearch.xpack.sql.qa.single_node;
 
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.rest.RestSqlPaginationTestCase;
+import org.junit.ClassRule;
 
-public class RestSqlPaginationIT extends RestSqlPaginationTestCase {}
+public class RestSqlPaginationIT extends RestSqlPaginationTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+}
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/RestSqlUsageIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/RestSqlUsageIT.java
index e1f9ff782146d..f50865979bc1b 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/RestSqlUsageIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/RestSqlUsageIT.java
@@ -7,6 +7,16 @@
 
 package org.elasticsearch.xpack.sql.qa.single_node;
 
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.rest.RestSqlUsageTestCase;
+import org.junit.ClassRule;
 
-public class RestSqlUsageIT extends RestSqlUsageTestCase {}
+public class RestSqlUsageIT extends RestSqlUsageTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+}
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/SqlProtocolIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/SqlProtocolIT.java
index c3d08d34542bd..5acf570b0a5da 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/SqlProtocolIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/SqlProtocolIT.java
@@ -7,6 +7,16 @@
 
 package org.elasticsearch.xpack.sql.qa.single_node;
 
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.SqlProtocolTestCase;
+import org.junit.ClassRule;
 
-public class SqlProtocolIT extends SqlProtocolTestCase {}
+public class SqlProtocolIT extends SqlProtocolTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+}
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/SqlTestCluster.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/SqlTestCluster.java
new file mode 100644
index 0000000000000..fd06aa0d7d055
--- /dev/null
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/SqlTestCluster.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.sql.qa.single_node;
+
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.test.cluster.local.distribution.DistributionType;
+
+public class SqlTestCluster {
+    public static String CLUSTER_NAME = "javaRestTest";
+
+    public static ElasticsearchCluster getCluster(boolean enableFreezing) {
+        var settings = ElasticsearchCluster.local()
+            .distribution(DistributionType.DEFAULT)
+            .name(CLUSTER_NAME)
+            .setting("xpack.ml.enabled", "false")
+            .setting("xpack.watcher.enabled", "false")
+            .setting("xpack.security.enabled", "false")
+            .setting("xpack.license.self_generated.type", "trial");
+
+        if (enableFreezing) {
+            settings = settings.plugin(":x-pack:qa:freeze-plugin");
+        }
+
+        return settings.build();
+    }
+}
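A minimal sketch of how a suite consumes this factory, mirroring the pattern repeated across the IT classes above (the class name here is illustrative; the real instances are the tests in this change):

    import org.elasticsearch.test.cluster.ElasticsearchCluster;
    import org.junit.ClassRule;

    public class ExampleSqlIT extends SqlProtocolTestCase {
        // JUnit starts one cluster before the first test of the class and tears it down after the last.
        @ClassRule
        public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);

        // Route the base class's REST client to the rule-managed cluster instead of an
        // address injected by gradle through system properties.
        @Override
        protected String getTestRestCluster() {
            return cluster.getHttpAddresses();
        }
    }
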
diff --git a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/SysColumnsIT.java b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/SysColumnsIT.java
index 5c5dcd2afbe72..1fce9bfa18b49 100644
--- a/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/SysColumnsIT.java
+++ b/x-pack/plugin/sql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/single_node/SysColumnsIT.java
@@ -7,8 +7,16 @@
 
 package org.elasticsearch.xpack.sql.qa.single_node;
 
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.xpack.sql.qa.jdbc.SysColumnsTestCase;
+import org.junit.ClassRule;
 
 public class SysColumnsIT extends SysColumnsTestCase {
+    @ClassRule
+    public static final ElasticsearchCluster cluster = SqlTestCluster.getCluster(false);
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
 }
diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/CliIntegrationTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/CliIntegrationTestCase.java
index eb253e16cd848..d60de617825bc 100644
--- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/CliIntegrationTestCase.java
+++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/CliIntegrationTestCase.java
@@ -18,16 +18,18 @@
 
 import java.io.IOException;
 
+import static org.elasticsearch.common.Strings.hasText;
 import static org.elasticsearch.xpack.ql.TestUtils.assertNoSearchContexts;
+import static org.elasticsearch.xpack.sql.qa.rest.RemoteClusterAwareSqlRestTestCase.AUTH_PASS;
+import static org.elasticsearch.xpack.sql.qa.rest.RemoteClusterAwareSqlRestTestCase.AUTH_USER;
 
 public abstract class CliIntegrationTestCase extends ESRestTestCase {
     /**
      * Read an address for Elasticsearch suitable for the CLI from the system properties.
      */
-    public static String elasticsearchAddress() {
-        String cluster = System.getProperty("tests.rest.cluster");
+    public String elasticsearchAddress() {
         // CLI only supports a single node at a time so we just give it one.
-        return cluster.split(",")[0];
+        return getTestRestCluster().split(",")[0];
     }
 
     private EmbeddedCli cli;
 
@@ -37,7 +39,7 @@ public static String elasticsearchAddress() {
      */
     @Before
     public void startCli() throws IOException {
-        cli = new EmbeddedCli(CliIntegrationTestCase.elasticsearchAddress(), true, securityConfig());
+        cli = new EmbeddedCli(elasticsearchAddress(), true, securityConfig());
     }
 
     @After
@@ -54,6 +56,10 @@ public void orderlyShutdown() throws Exception {
      * Override to add security configuration to the cli.
      */
     protected SecurityConfig securityConfig() {
+        if (hasText(AUTH_USER) && hasText(AUTH_PASS)) {
+            return new SecurityConfig(false, AUTH_USER, AUTH_PASS, null, null);
+        }
+
         return null;
     }
diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoCsvSpecTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoCsvSpecTestCase.java
index 9cc22f351f077..bc3fb6998a6ee 100644
--- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoCsvSpecTestCase.java
+++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoCsvSpecTestCase.java
@@ -72,7 +72,7 @@ protected final void doTest() throws Throwable {
 
     // make sure ES uses UTC (otherwise JDBC driver picks up the JVM timezone per spec/convention)
     @Override
     protected Properties connectionProperties() {
-        Properties connectionProperties = new Properties();
+        Properties connectionProperties = super.connectionProperties();
         connectionProperties.setProperty(JdbcConfiguration.TIME_ZONE, "UTC");
         return connectionProperties;
     }
diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java
index cb0a8692ba34e..07bf55919b44a 100644
--- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java
+++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java
@@ -32,14 +32,14 @@ public class DataLoader {
 
     public static void main(String[] args) throws Exception {
         try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
-            loadEmpDatasetIntoEs(client);
-            loadDocsDatasetIntoEs(client);
+            loadEmpDatasetIntoEs(client, true);
+            loadDocsDatasetIntoEs(client, true);
             LogManager.getLogger(DataLoader.class).info("Data loaded");
         }
     }
 
-    protected static void loadDatasetIntoEs(RestClient client) throws Exception {
-        loadEmpDatasetIntoEs(client);
+    public static void loadDatasetIntoEs(RestClient client, boolean includeFrozenIndices) throws Exception {
+        loadEmpDatasetIntoEs(client, includeFrozenIndices);
     }
 
     public static void createEmptyIndex(RestClient client, String index) throws Exception {
@@ -62,7 +62,7 @@ public static void createEmptyIndex(RestClient client, String index) throws Exce
         client.performRequest(request);
     }
 
-    protected static void loadEmpDatasetIntoEs(RestClient client) throws Exception {
+    private static void loadEmpDatasetIntoEs(RestClient client, boolean includeFrozenIndices) throws Exception {
         loadEmpDatasetIntoEs(client, "test_emp", "employees");
         loadEmpDatasetWithExtraIntoEs(client, "test_emp_copy", "employees");
         loadAppsDatasetIntoEs(client, "apps", "apps");
@@ -71,9 +71,10 @@ protected static void loadEmpDatasetIntoEs(RestClient client) throws Exception {
         loadLogUnsignedLongIntoEs(client, "logs_unsigned_long", "logs_unsigned_long");
         makeAlias(client, "test_alias", "test_emp", "test_emp_copy");
"test_emp_copy"); makeAlias(client, "test_alias_emp", "test_emp", "test_emp_copy"); - // frozen index - loadEmpDatasetIntoEs(client, "frozen_emp", "employees"); - freeze(client, "frozen_emp"); + if (includeFrozenIndices) { + loadEmpDatasetIntoEs(client, "frozen_emp", "employees"); + freeze(client, "frozen_emp"); + } loadNoColsDatasetIntoEs(client, "empty_mapping"); } @@ -90,13 +91,14 @@ private static void loadNoColsDatasetIntoEs(RestClient client, String index) thr client.performRequest(request); } - public static void loadDocsDatasetIntoEs(RestClient client) throws Exception { + public static void loadDocsDatasetIntoEs(RestClient client, boolean includeFrozenIndices) throws Exception { loadEmpDatasetIntoEs(client, "emp", "employees"); loadLibDatasetIntoEs(client, "library"); makeAlias(client, "employees", "emp"); - // frozen index - loadLibDatasetIntoEs(client, "archive"); - freeze(client, "archive"); + if (includeFrozenIndices) { + loadLibDatasetIntoEs(client, "archive"); + freeze(client, "archive"); + } } public static void createString(String name, XContentBuilder builder) throws Exception { diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DatabaseMetaDataTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DatabaseMetaDataTestCase.java index e2fd7659fc7e7..0b2effe6e3e87 100644 --- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DatabaseMetaDataTestCase.java +++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DatabaseMetaDataTestCase.java @@ -129,7 +129,7 @@ public void testGetDataStreamViewByName() throws IOException, SQLException { private void expectDataStreamTable(String dataStreamName, String tableNamePattern, String[] types) throws SQLException, IOException { try { - createDataStream(dataStreamName); + createDataStream(dataStreamName, provisioningClient()); try (Connection es = esJdbc(); ResultSet rs = es.getMetaData().getTables("%", "%", tableNamePattern, types)) { assertTrue(rs.next()); assertEquals(dataStreamName, rs.getString(3)); @@ -137,7 +137,7 @@ private void expectDataStreamTable(String dataStreamName, String tableNamePatter assertFalse(rs.next()); } } finally { - deleteDataStream(dataStreamName); + deleteDataStream(dataStreamName, provisioningClient()); } } diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java index 34383404544a5..e5a77c0630575 100644 --- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java +++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java @@ -8,6 +8,7 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RestClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.CheckedConsumer; @@ -38,8 +39,8 @@ public void checkSearchContent() throws Exception { /** * Read an address for Elasticsearch suitable for the JDBC driver from the system properties. 
      */
-    public static String elasticsearchAddress() {
-        String cluster = System.getProperty("tests.rest.cluster");
+    public String elasticsearchAddress() {
+        String cluster = getTestRestCluster();
         // JDBC only supports a single node at a time so we just give it one.
         return cluster.split(",")[0];
         /* This doesn't include "jdbc:es://" because we want the example in
@@ -72,21 +73,35 @@ protected Connection createConnection(Properties connectionProperties) throws SQ
         return connection;
     }
 
-    public static void index(String index, CheckedConsumer<XContentBuilder, IOException> body) throws IOException {
-        index(index, "1", body);
+    public static void index(String index, CheckedConsumer<XContentBuilder, IOException> body, RestClient provisioningClient)
+        throws IOException {
+        index(index, "1", body, provisioningClient);
     }
 
-    public static void index(String index, String documentId, CheckedConsumer<XContentBuilder, IOException> body) throws IOException {
+    public void index(String index, CheckedConsumer<XContentBuilder, IOException> body) throws IOException {
+        index(index, body, provisioningClient());
+    }
+
+    public static void index(
+        String index,
+        String documentId,
+        CheckedConsumer<XContentBuilder, IOException> body,
+        RestClient provisioningClient
+    ) throws IOException {
         Request request = new Request("PUT", "/" + index + "/_doc/" + documentId);
         request.addParameter("refresh", "true");
         XContentBuilder builder = JsonXContent.contentBuilder().startObject();
         body.accept(builder);
         builder.endObject();
         request.setJsonEntity(Strings.toString(builder));
-        provisioningClient().performRequest(request);
+        provisioningClient.performRequest(request);
+    }
+
+    public void index(String index, String documentId, CheckedConsumer<XContentBuilder, IOException> body) throws IOException {
+        index(index, documentId, body, provisioningClient());
     }
 
-    public static void delete(String index, String documentId) throws IOException {
+    public void delete(String index, String documentId) throws IOException {
         Request request = new Request("DELETE", "/" + index + "/_doc/" + documentId);
         request.addParameter("refresh", "true");
         provisioningClient().performRequest(request);
@@ -116,7 +131,7 @@ protected Properties connectionProperties() {
         return connectionProperties;
     }
 
-    protected static void createIndexWithSettingsAndMappings(String index) throws IOException {
+    protected void createIndexWithSettingsAndMappings(String index) throws IOException {
         Request request = new Request("PUT", "/" + index);
         XContentBuilder createIndex = JsonXContent.contentBuilder().startObject();
         createIndex.startObject("settings");
@@ -135,7 +150,7 @@ protected static void createIndexWithSettingsAndMappings(String index) throws IO
         provisioningClient().performRequest(request);
     }
 
-    protected static void updateMapping(String index, CheckedConsumer<XContentBuilder, IOException> body) throws IOException {
+    protected void updateMapping(String index, CheckedConsumer<XContentBuilder, IOException> body) throws IOException {
         Request request = new Request("PUT", "/" + index + "/_mapping");
         XContentBuilder updateMapping = JsonXContent.contentBuilder().startObject();
         updateMapping.startObject("properties");
diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java
index 3b97938838840..b45930a9f0d06 100644
--- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java
+++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java
@@ -60,7 +60,7 @@ protected String indexName() {
     }
 
     protected void loadDataset(RestClient client) throws Exception {
-        DataLoader.loadDatasetIntoEs(client);
+        DataLoader.loadDatasetIntoEs(client, true);
     }
 
     @Override
diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SysColumnsTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SysColumnsTestCase.java
index 8239f9d2fc148..e2e3b1fe45af8 100644
--- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SysColumnsTestCase.java
+++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SysColumnsTestCase.java
@@ -419,7 +419,7 @@ public void testMultiIndicesMultiAlias() throws Exception {
         );
     }
 
-    private static void createIndexWithMapping(String indexName, CheckedConsumer<XContentBuilder, IOException> mapping) throws Exception {
+    private void createIndexWithMapping(String indexName, CheckedConsumer<XContentBuilder, IOException> mapping) throws Exception {
         createIndexWithSettingsAndMappings(indexName);
         updateMapping(indexName, mapping);
     }
diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/BaseRestSqlTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/BaseRestSqlTestCase.java
index d6fd9b23860fc..bd43d3d651e52 100644
--- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/BaseRestSqlTestCase.java
+++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/BaseRestSqlTestCase.java
@@ -13,6 +13,7 @@
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.RequestOptions;
 import org.elasticsearch.client.Response;
+import org.elasticsearch.client.RestClient;
 import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.core.Tuple;
@@ -221,24 +222,32 @@ protected void deleteTestIndex() throws IOException {
         deleteIndexWithProvisioningClient(TEST_INDEX);
     }
 
-    protected static void deleteIndexWithProvisioningClient(String name) throws IOException {
+    protected void deleteIndexWithProvisioningClient(String name) throws IOException {
         deleteIndex(provisioningClient(), name);
     }
 
-    public static void createDataStream(String dataStreamName) throws IOException {
+    public static void createDataStream(String dataStreamName, RestClient provisioningClient) throws IOException {
         Request request = new Request("PUT", "/_index_template/" + DATA_STREAM_TEMPLATE + "-" + dataStreamName);
         request.setJsonEntity("{\"index_patterns\": [\"" + dataStreamName + "*\"], \"data_stream\": {}}");
-        assertOK(provisioningClient().performRequest(request));
+        assertOK(provisioningClient.performRequest(request));
         request = new Request("PUT", "/_data_stream/" + dataStreamName);
-        assertOK(provisioningClient().performRequest(request));
+        assertOK(provisioningClient.performRequest(request));
     }
 
-    public static void deleteDataStream(String dataStreamName) throws IOException {
+    public void createDataStream(String dataStreamName) throws IOException {
+        createDataStream(dataStreamName, provisioningClient());
+    }
+
+    public static void deleteDataStream(String dataStreamName, RestClient provisioningClient) throws IOException {
         Request request = new Request("DELETE", "_data_stream/" + dataStreamName);
-        provisioningClient().performRequest(request);
+        provisioningClient.performRequest(request);
         request = new Request("DELETE", "/_index_template/" + DATA_STREAM_TEMPLATE + "-" + dataStreamName);
-        provisioningClient().performRequest(request);
+        provisioningClient.performRequest(request);
+    }
+
+    public void deleteDataStream(String dataStreamName) throws IOException {
+        deleteDataStream(dataStreamName, provisioningClient());
     }
 
     public static RequestObjectBuilder query(String query) {
diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RemoteClusterAwareSqlRestTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RemoteClusterAwareSqlRestTestCase.java
index 1dfbe6ef34cce..c81fe83a96f66 100644
--- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RemoteClusterAwareSqlRestTestCase.java
+++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RemoteClusterAwareSqlRestTestCase.java
@@ -30,7 +30,7 @@ public abstract class RemoteClusterAwareSqlRestTestCase extends ESRestTestCase {
     // client used for loading data on a remote cluster only.
     private static RestClient remoteClient;
 
-    // gradle defines
+    // gradle defines when using legacy-java-rest-test
     public static final String AUTH_USER = System.getProperty("tests.rest.cluster.multi.user");
     public static final String AUTH_PASS = System.getProperty("tests.rest.cluster.multi.password");
 
@@ -59,7 +59,7 @@ public static void closeRemoteClients() throws IOException {
         }
     }
 
-    protected static RestClient clientBuilder(Settings settings, HttpHost[] hosts) throws IOException {
+    public static RestClient clientBuilder(Settings settings, HttpHost[] hosts) throws IOException {
         RestClientBuilder builder = RestClient.builder(hosts);
         doConfigureClient(builder, settings);
 
@@ -77,12 +77,22 @@ protected static TimeValue timeout() {
         return TimeValue.timeValueSeconds(CLIENT_TIMEOUT);
     }
 
-    // returned client is used to load the test data, either in the local cluster (for rest/javaRestTests) or a remote one (for
-    // multi-cluster). note: the client()/adminClient() will always connect to the local cluster.
-    protected static RestClient provisioningClient() {
+    /**
+     * Use this when using the {@code legacy-java-rest-test} plugin.
+     * @return a client to the remote cluster if it exists, otherwise a client to the local cluster
+     */
+    public static RestClient defaultProvisioningClient() {
         return remoteClient == null ? client() : remoteClient;
     }
 
+    /**
+     * Override if the test data must be provisioned on a remote cluster while not using the {@code legacy-java-rest-test} plugin.
+     * @return client to use for loading test data
+     */
+    protected RestClient provisioningClient() {
+        return defaultProvisioningClient();
+    }
+
     @Override
     protected Settings restClientSettings() {
         return secureRemoteClientSettings();
     }
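A sketch of the new provisioningClient() extension point in use, assuming the remote address arrives via a system property (the subclass and property name are hypothetical; defaultProvisioningClient(), clientBuilder(...) and secureRemoteClientSettings() are the members shown above):

    import java.io.IOException;
    import java.io.UncheckedIOException;

    import org.apache.http.HttpHost;
    import org.elasticsearch.client.RestClient;

    public class RemoteProvisionedSqlIT extends RemoteClusterAwareSqlRestTestCase {
        @Override
        protected RestClient provisioningClient() {
            // Hypothetical source of the remote address; not part of this change.
            String address = System.getProperty("tests.provisioning.address");
            if (address == null) {
                return defaultProvisioningClient(); // fall back to the legacy remote-or-local behavior
            }
            try {
                return clientBuilder(secureRemoteClientSettings(), new HttpHost[] { HttpHost.create(address) });
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
        }
    }
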
diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java
index 81cc54db19669..fb92ac096fc36 100644
--- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java
+++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java
@@ -1434,7 +1434,7 @@ private void executeQueryWithNextPage(String format, String expectedHeader, Stri
         assertEquals(0, getNumberOfSearchContexts(provisioningClient(), "test"));
     }
 
-    private static void bulkLoadTestData(int count) throws IOException {
+    private void bulkLoadTestData(int count) throws IOException {
         Request request = new Request("POST", "/test/_bulk");
         request.addParameter("refresh", "true");
         StringBuilder bulk = new StringBuilder();
@@ -1801,7 +1801,7 @@ public void testDataStreamInShowTablesFiltered() throws IOException {
         expectDataStreamInShowTables(dataStreamName, "SHOW TABLES \\\"" + dataStreamName + "*\\\"");
     }
 
-    private static void expectDataStreamInShowTables(String dataStreamName, String sql) throws IOException {
+    private void expectDataStreamInShowTables(String dataStreamName, String sql) throws IOException {
         try {
             createDataStream(dataStreamName);
diff --git a/x-pack/plugin/sql/qa/server/src/main/resources/docs/docs-frozen.csv-spec b/x-pack/plugin/sql/qa/server/src/main/resources/docs/docs-frozen.csv-spec
new file mode 100644
index 0000000000000..9eeabcab25be4
--- /dev/null
+++ b/x-pack/plugin/sql/qa/server/src/main/resources/docs/docs-frozen.csv-spec
@@ -0,0 +1,45 @@
+// To mute tests follow example in file: example.csv-spec
+
+//
+// CSV spec used by the docs
+//
+
+///////////////////////////////
+//
+// Show Tables
+//
+///////////////////////////////
+
+//
+// include FROZEN
+//
+showTablesIncludeFrozen
+// tag::showTablesIncludeFrozen
+SHOW TABLES INCLUDE FROZEN;
+
+ catalog       |     name      |   type   |     kind
+---------------+---------------+----------+---------------
+javaRestTest   |archive        |TABLE     |FROZEN INDEX
+javaRestTest   |emp            |TABLE     |INDEX
+javaRestTest   |employees      |VIEW      |ALIAS
+javaRestTest   |library        |TABLE     |INDEX
+
+// end::showTablesIncludeFrozen
+;
+
+///////////////////////////////
+//
+// Select
+//
+///////////////////////////////
+
+fromTableIncludeFrozen
+// tag::fromTableIncludeFrozen
+SELECT * FROM FROZEN archive LIMIT 1;
+
+     author      |        name        |  page_count   |    release_date
+-----------------+--------------------+---------------+--------------------
+James S.A. Corey |Leviathan Wakes     |561            |2011-06-02T00:00:00Z
+
+// end::fromTableIncludeFrozen
+;
diff --git a/x-pack/plugin/sql/qa/server/src/main/resources/docs/docs.csv-spec b/x-pack/plugin/sql/qa/server/src/main/resources/docs/docs.csv-spec
index ab2ff8463f798..8e3853cf187db 100644
--- a/x-pack/plugin/sql/qa/server/src/main/resources/docs/docs.csv-spec
+++ b/x-pack/plugin/sql/qa/server/src/main/resources/docs/docs.csv-spec
@@ -177,24 +177,6 @@ javaRestTest   |employees      |VIEW      |ALIAS
 ;
 
-//
-// include FROZEN
-//
-showTablesIncludeFrozen
-// tag::showTablesIncludeFrozen
-SHOW TABLES INCLUDE FROZEN;
-
- catalog       |     name      |   type   |     kind
----------------+---------------+----------+---------------
-javaRestTest   |archive        |TABLE     |FROZEN INDEX
-javaRestTest   |emp            |TABLE     |INDEX
-javaRestTest   |employees      |VIEW      |ALIAS
-javaRestTest   |library        |TABLE     |INDEX
-
-// end::showTablesIncludeFrozen
-;
-
-
 ///////////////////////////////
 //
 // Show Functions
@@ -518,17 +500,6 @@ SELECT * FROM "emp" LIMIT 1;
 
 // end::fromTableQuoted
 ;
 
-fromTableIncludeFrozen
-// tag::fromTableIncludeFrozen
-SELECT * FROM FROZEN archive LIMIT 1;
-
-     author      |        name        |  page_count   |    release_date
------------------+--------------------+---------------+--------------------
-James S.A. Corey |Leviathan Wakes     |561            |2011-06-02T00:00:00Z
-
-// end::fromTableIncludeFrozen
-;
-
 fromTableQuoted
 // tag::fromTablePatternQuoted
 SELECT emp_no FROM "e*p" LIMIT 1;
diff --git a/x-pack/plugin/sql/qa/server/src/main/resources/single-node-only/command-sys-geo.csv-spec b/x-pack/plugin/sql/qa/server/src/main/resources/single-node-only/command-sys-geo.csv-spec
index a2cce61dc80a8..08097ea7933f9 100644
--- a/x-pack/plugin/sql/qa/server/src/main/resources/single-node-only/command-sys-geo.csv-spec
+++ b/x-pack/plugin/sql/qa/server/src/main/resources/single-node-only/command-sys-geo.csv-spec
@@ -7,7 +7,7 @@ geoSysColumns
 SYS COLUMNS TABLE LIKE 'geo';
 
- TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i|BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i| NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s
+TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i|BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i| NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s
 javaRestTest|null |geo |city |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |1 |YES |null |null |null |null |NO |NO
 javaRestTest|null |geo |geoshape |114 |GEO_SHAPE |2147483647 |2147483647 |null |null |1 |null |null |114 |0 |null |2 |YES |null |null |null |null |NO |NO
 javaRestTest|null |geo |location |114 |GEO_POINT |58 |16 |null |null |1 |null |null |114 |0 |null |3 |YES |null |null |null |null |NO |NO
diff --git a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AsyncSqlSearchActionIT.java b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AsyncSqlSearchActionIT.java
index 7bffa67fe2a52..4863f973f163f 100644
--- a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AsyncSqlSearchActionIT.java
+++ b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AsyncSqlSearchActionIT.java
@@ -91,15 +91,14 @@ private void prepareIndex() throws Exception {
         for (int i = 0; i < numDocs; i++) {
             int fieldValue = randomIntBetween(0, 10);
             builders.add(
-                client().prepareIndex("test")
-                    .setSource(
-                        jsonBuilder().startObject()
-                            .field("val", fieldValue)
-                            .field("event_type", "my_event")
-                            .field("@timestamp", "2020-04-09T12:35:48Z")
-                            .field("i", i)
-                            .endObject()
-                    )
+                prepareIndex("test").setSource(
+                    jsonBuilder().startObject()
+                        .field("val", fieldValue)
+                        .field("event_type", "my_event")
+                        .field("@timestamp", "2020-04-09T12:35:48Z")
+                        .field("i", i)
+                        .endObject()
+                )
             );
         }
         indexRandom(true, builders);
diff --git a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/RestSqlCancellationIT.java b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/RestSqlCancellationIT.java
index 48ee5b05ffe0e..a72612c0d6691 100644
--- a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/RestSqlCancellationIT.java
+++ b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/RestSqlCancellationIT.java
@@ -84,14 +84,13 @@ public void testRestCancellation() throws Exception {
         for (int i = 0; i < numDocs; i++) {
             int fieldValue = randomIntBetween(0, 10);
             builders.add(
-                client().prepareIndex("test")
-                    .setSource(
-                        jsonBuilder().startObject()
-                            .field("val", fieldValue)
-                            .field("event_type", "my_event")
-                            .field("@timestamp", "2020-04-09T12:35:48Z")
-                            .endObject()
-                    )
+                prepareIndex("test").setSource(
+                    jsonBuilder().startObject()
+                        .field("val", fieldValue)
+                        .field("event_type", "my_event")
+                        .field("@timestamp", "2020-04-09T12:35:48Z")
+                        .endObject()
+                )
             );
         }
 
diff --git a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlCancellationIT.java b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlCancellationIT.java
index aba659de53874..f23d9de1c79e4 100644
--- a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlCancellationIT.java
+++ b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlCancellationIT.java
@@ -50,14 +50,13 @@ public void testCancellation() throws Exception {
         for (int i = 0; i < numDocs; i++) {
             int fieldValue = randomIntBetween(0, 10);
             builders.add(
-                client().prepareIndex("test")
-                    .setSource(
-                        jsonBuilder().startObject()
-                            .field("val", fieldValue)
-                            .field("event_type", "my_event")
-                            .field("@timestamp", "2020-04-09T12:35:48Z")
-                            .endObject()
-                    )
+                prepareIndex("test").setSource(
+                    jsonBuilder().startObject()
+                        .field("val", fieldValue)
+                        .field("event_type", "my_event")
+                        .field("@timestamp", "2020-04-09T12:35:48Z")
+                        .endObject()
+                )
             );
         }
 
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java
index 2debdccc7c999..79b3116bfa807 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java
@@ -31,7 +31,6 @@ import org.elasticsearch.xpack.ql.expression.function.FunctionResolutionStrategy;
 import org.elasticsearch.xpack.ql.expression.function.Functions;
 import org.elasticsearch.xpack.ql.expression.function.UnresolvedFunction;
-import org.elasticsearch.xpack.ql.expression.predicate.operator.arithmetic.ArithmeticOperation;
 import org.elasticsearch.xpack.ql.index.IndexResolution;
 import org.elasticsearch.xpack.ql.plan.TableIdentifier;
 import org.elasticsearch.xpack.ql.plan.logical.Aggregate;
@@ -44,7 +43,6 @@ import org.elasticsearch.xpack.ql.plan.logical.UnresolvedRelation;
 import org.elasticsearch.xpack.ql.rule.ParameterizedRuleExecutor;
 import org.elasticsearch.xpack.ql.rule.RuleExecutor;
-import org.elasticsearch.xpack.ql.type.DataType;
 import org.elasticsearch.xpack.ql.type.DataTypes;
 import org.elasticsearch.xpack.ql.util.CollectionUtils;
 import org.elasticsearch.xpack.ql.util.Holder;
@@ -56,7 +54,6 @@ import org.elasticsearch.xpack.sql.plan.logical.Pivot;
 import org.elasticsearch.xpack.sql.plan.logical.SubQueryAlias;
 import org.elasticsearch.xpack.sql.plan.logical.With;
-import org.elasticsearch.xpack.sql.type.SqlDataTypeConverter;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -97,7 +94,6 @@ public final class Analyzer extends ParameterizedRuleExecutor<LogicalPlan, Anal
         new Batch<>(
             "Finish Analysis",
-            new ImplicitCasting(),
@@ -1096,51 +1092,6 @@ protected LogicalPlan rule(OrderBy ob) {
         }
     }
 
-    private static class ImplicitCasting extends AnalyzerRule<LogicalPlan> {
-
-        @Override
-        protected boolean skipResolved() {
-            return false;
-        }
-
-        @Override
-        protected LogicalPlan rule(LogicalPlan plan) {
-            return plan.transformExpressionsDown(ImplicitCasting::implicitCast);
-        }
-
-        private static Expression implicitCast(Expression e) {
-            if (e.childrenResolved() == false) {
-                return e;
-            }
-
-            Expression left = null, right = null;
-
-            // BinaryOperations are ignored as they are pushed down to ES
-            // and casting (and thus Aliasing when folding) gets in the way
-
-            if (e instanceof ArithmeticOperation f) {
-                left = f.left();
-                right = f.right();
-            }
-
-            if (left != null) {
-                DataType l = left.dataType();
-                DataType r = right.dataType();
-                if (l != r) {
-                    DataType common = SqlDataTypeConverter.commonType(l, r);
-                    if (common == null) {
-                        return e;
-                    }
-                    left = l == common ? left : new Cast(left.source(), left, common);
-                    right = r == common ? right : new Cast(right.source(), right, common);
-                    return e.replaceChildrenSameSize(Arrays.asList(left, right));
-                }
-            }
-
-            return e;
-        }
-    }
-
     public static class ReplaceSubQueryAliases extends AnalyzerRule<UnaryPlan> {
 
         @Override
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java
index 65492f29d9a0f..a0da67f3006a3 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java
@@ -12,13 +12,13 @@ import org.apache.lucene.util.PriorityQueue;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.DelegatingActionListener;
-import org.elasticsearch.action.search.ClosePointInTimeAction;
 import org.elasticsearch.action.search.ClosePointInTimeRequest;
-import org.elasticsearch.action.search.OpenPointInTimeAction;
 import org.elasticsearch.action.search.OpenPointInTimeRequest;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.action.search.TransportClosePointInTimeAction;
+import org.elasticsearch.action.search.TransportOpenPointInTimeAction;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.client.internal.ParentTaskAssigningClient;
 import org.elasticsearch.common.Strings;
@@ -157,7 +157,7 @@ private void searchWithPointInTime(SearchRequest search, ActionListener<SearchR
         client.execute(
-            OpenPointInTimeAction.INSTANCE,
+            TransportOpenPointInTimeAction.TYPE,
             openPitRequest,
             listener.delegateFailureAndWrap((l, openPointInTimeResponse) -> {
                 String pitId = openPointInTimeResponse.getPointInTimeId();
@@ -188,7 +188,7 @@ public static void closePointInTime(Client client, String pointInTimeId, ActionL
         client = client instanceof ParentTaskAssigningClient wrapperClient ? wrapperClient.unwrap() : client;
         client.execute(
-            ClosePointInTimeAction.INSTANCE,
+            TransportClosePointInTimeAction.TYPE,
             new ClosePointInTimeRequest(pointInTimeId),
             listener.delegateFailureAndWrap((l, clearPointInTimeResponse) -> l.onResponse(clearPointInTimeResponse.isSucceeded()))
         );
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/CancellationTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/CancellationTests.java
index 9eb6b7a6f978d..1fbe85d873957 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/CancellationTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/CancellationTests.java
@@ -9,15 +9,15 @@
 
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.fieldcaps.FieldCapabilities;
 import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
-import org.elasticsearch.action.search.ClosePointInTimeAction;
 import org.elasticsearch.action.search.ClosePointInTimeRequest;
 import org.elasticsearch.action.search.ClosePointInTimeResponse;
-import org.elasticsearch.action.search.OpenPointInTimeAction;
 import org.elasticsearch.action.search.OpenPointInTimeResponse;
-import org.elasticsearch.action.search.SearchAction;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.SearchRequestBuilder;
 import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.TransportClosePointInTimeAction;
+import org.elasticsearch.action.search.TransportOpenPointInTimeAction;
+import org.elasticsearch.action.search.TransportSearchAction;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -192,11 +192,11 @@ public void testCancellationDuringSearch(String query) throws InterruptedExcepti
             ActionListener<OpenPointInTimeResponse> listener = (ActionListener<OpenPointInTimeResponse>) invocation.getArguments()[2];
             listener.onResponse(new OpenPointInTimeResponse(pitId));
             return null;
-        }).when(client).execute(eq(OpenPointInTimeAction.INSTANCE), any(), any());
+        }).when(client).execute(eq(TransportOpenPointInTimeAction.TYPE), any(), any());
 
         // Emulation of search cancellation
         ArgumentCaptor<SearchRequest> searchRequestCaptor = ArgumentCaptor.forClass(SearchRequest.class);
-        when(client.prepareSearch(any())).thenReturn(new SearchRequestBuilder(client, SearchAction.INSTANCE).setIndices(indices));
+        when(client.prepareSearch(any())).thenReturn(new SearchRequestBuilder(client, TransportSearchAction.TYPE).setIndices(indices));
         doAnswer((Answer<Void>) invocation -> {
             @SuppressWarnings("unchecked")
             SearchRequest request = (SearchRequest) invocation.getArguments()[1];
@@ -209,7 +209,7 @@ public void testCancellationDuringSearch(String query) throws InterruptedExcepti
             ActionListener<SearchResponse> listener = (ActionListener<SearchResponse>) invocation.getArguments()[2];
             listener.onFailure(new TaskCancelledException("cancelled"));
             return null;
-        }).when(client).execute(eq(SearchAction.INSTANCE), searchRequestCaptor.capture(), any());
+        }).when(client).execute(eq(TransportSearchAction.TYPE), searchRequestCaptor.capture(), any());
 
         // Emulation of close pit
         doAnswer(invocation -> {
@@ -220,7 +220,7 @@ public void testCancellationDuringSearch(String query) throws InterruptedExcepti
             ActionListener<ClosePointInTimeResponse> listener = (ActionListener<ClosePointInTimeResponse>) invocation.getArguments()[2];
             listener.onResponse(new ClosePointInTimeResponse(true, 1));
             return null;
-        }).when(client).execute(eq(ClosePointInTimeAction.INSTANCE), any(), any());
+        }).when(client).execute(eq(TransportClosePointInTimeAction.TYPE), any(), any());
}).when(client).execute(eq(TransportClosePointInTimeAction.TYPE), any(), any()); IndexResolver indexResolver = indexResolver(client); PlanExecutor planExecutor = new PlanExecutor(client, indexResolver, new NamedWriteableRegistry(Collections.emptyList())); @@ -242,9 +242,9 @@ public void onFailure(Exception e) { assertTrue(countDownLatch.await(5, TimeUnit.SECONDS)); // Final verification to ensure no more interaction verify(client).fieldCaps(any(), any()); - verify(client, times(1)).execute(eq(OpenPointInTimeAction.INSTANCE), any(), any()); - verify(client, times(1)).execute(eq(SearchAction.INSTANCE), any(), any()); - verify(client, times(1)).execute(eq(ClosePointInTimeAction.INSTANCE), any(), any()); + verify(client, times(1)).execute(eq(TransportOpenPointInTimeAction.TYPE), any(), any()); + verify(client, times(1)).execute(eq(TransportSearchAction.TYPE), any(), any()); + verify(client, times(1)).execute(eq(TransportClosePointInTimeAction.TYPE), any(), any()); verify(client, times(1)).settings(); verify(client, times(1)).threadPool(); verifyNoMoreInteractions(client); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java index 16ddd8e058894..72de6c99191cc 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java @@ -413,8 +413,8 @@ public void testMergeObjectIncompatibleTypes() throws Exception { "*", response, (fieldName, types) -> null, - IndexResolver.PRESERVE_PROPERTIES - + IndexResolver.PRESERVE_PROPERTIES, + null ); assertTrue(resolution.isValid()); @@ -442,8 +442,8 @@ public void testMergeObjectUnsupportedTypes() throws Exception { "*", response, (fieldName, types) -> null, - IndexResolver.PRESERVE_PROPERTIES - + IndexResolver.PRESERVE_PROPERTIES, + null ); assertTrue(resolution.isValid()); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/CursorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/CursorTests.java index 726b40616e2d4..8bda8f632a2a4 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/CursorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/CursorTests.java @@ -25,7 +25,6 @@ import java.time.ZoneId; import java.util.List; -import static org.elasticsearch.action.support.PlainActionFuture.newFuture; import static org.elasticsearch.xpack.sql.execution.search.SearchHitCursorTests.randomSearchHitCursor; import static org.elasticsearch.xpack.sql.session.Cursors.attachFormatter; import static org.elasticsearch.xpack.sql.session.Cursors.decodeFromStringWithZone; @@ -38,7 +37,7 @@ public class CursorTests extends ESTestCase { public void testEmptyCursorClearCursor() { Client clientMock = mock(Client.class); Cursor cursor = Cursor.EMPTY; - PlainActionFuture<Boolean> future = newFuture(); + PlainActionFuture<Boolean> future = new PlainActionFuture<>(); cursor.clear(clientMock, future); assertFalse(future.actionGet()); verifyNoMoreInteractions(clientMock); diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/12_grant.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/12_grant.yml index cf158ad73a13e..9b2c0de0cfed1 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/12_grant.yml +++
b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/12_grant.yml @@ -192,6 +192,7 @@ teardown: - transform_and_set: { login_creds: "#base64EncodeCredentials(id,api_key)" } - match: { encoded: $login_creds } + # verify the granted API Key - do: headers: Authorization: ApiKey ${login_creds} @@ -210,6 +211,41 @@ teardown: - match: { _nodes.failed: 0 } +--- +"Test grant api key with non-JWT token and client authentication fails": + - do: + security.get_token: + body: + grant_type: "password" + username: "api_key_grant_target_user" + password: "x-pack-test-password-2" + + - match: { type: "Bearer" } + - is_true: access_token + - set: { access_token: token } + + - do: + headers: + Authorization: "Basic YXBpX2tleV9ncmFudGVyOngtcGFjay10ZXN0LXBhc3N3b3Jk" # api_key_granter + catch: bad_request + security.grant_api_key: + body: > + { + "api_key": { + "name": "wrong-api-key" + }, + "grant_type": "access_token", + "access_token": "$token", + "client_authentication": { + "scheme": "SharedSecret", + "value": "whatever" + } + } + + - match: { "error.type": "security_exception" } + - match: + "error.reason": "[client_authentication] not supported with the supplied access_token type" + --- "Test grant api key forbidden": - do: diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/counted_keyword/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/counted_keyword/10_basic.yml new file mode 100644 index 0000000000000..d0f7c7636582f --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/counted_keyword/10_basic.yml @@ -0,0 +1,51 @@ +setup: + + - skip: + version: " - 8.11.99" + reason: "counted_keyword was added in 8.12" + + - do: + indices.create: + index: test-events + body: + mappings: + properties: + events: + type: counted_keyword + + + - do: + index: + index: test-events + id: "1" + body: { "events": [ "a", "a", "b", "c" ] } + + - do: + index: + index: test-events + id: "2" + body: { "events": [ "a", "b", "b", "b", "c" ] } + + - do: + indices.refresh: { } + +--- +"Counted Terms agg": + + - do: + search: + index: test-events + body: + size: 0 + aggs: + event_terms: + counted_terms: + field: events + + - match: { aggregations.event_terms.buckets.0.key: "b" } + - match: { aggregations.event_terms.buckets.0.doc_count: 4 } + - match: { aggregations.event_terms.buckets.1.key: "a" } + - match: { aggregations.event_terms.buckets.1.doc_count: 3 } + - match: { aggregations.event_terms.buckets.2.key: "c" } + - match: { aggregations.event_terms.buckets.2.doc_count: 2 } + - length: { aggregations.event_terms.buckets: 3 } diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/100_bug_fix.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml similarity index 94% rename from x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/100_bug_fix.yml rename to x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml index d5f5bee46f50a..1876d1a6d3881 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/100_bug_fix.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml @@ -1,6 +1,8 @@ --- -"Bug fix https://github.com/elastic/elasticsearch/issues/99472": +"Coalesce and to_ip functions": - skip: + version: " - 8.11.99" + reason: "fixes in 8.12 or later" features: warnings - do: bulk: @@ -54,7 +56,10 @@ - match: { values.1: [ 20, 
null, "255.255.255.255", "255.255.255.255"] } --- -"Bug fix https://github.com/elastic/elasticsearch/issues/101489": +"unsupported and invalid mapped fields": + - skip: + version: " - 8.11.99" + reason: "fixes in 8.12 or later" - do: indices.create: index: index1 diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/10_basic.yml similarity index 99% rename from x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml rename to x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/10_basic.yml index a3b2de27bcb5b..e15372bc3088e 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/10_basic.yml @@ -1,6 +1,8 @@ --- setup: - skip: + version: " - 8.10.99" + reason: "ESQL is available in 8.11+" features: allowed_warnings_regex - do: indices.create: diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/20_aggs.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/20_aggs.yml similarity index 81% rename from x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/20_aggs.yml rename to x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/20_aggs.yml index 1087bd5ce06eb..4019b3a303345 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/20_aggs.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/20_aggs.yml @@ -1,6 +1,8 @@ --- setup: - skip: + version: " - 8.10.99" + reason: "ESQL is available in 8.11+" features: warnings - do: indices.create: @@ -22,91 +24,93 @@ setup: type: long color: type: keyword + text: + type: text - do: bulk: index: "test" refresh: true body: - { "index": { } } - - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275187, "color": "red" } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275187, "color": "red", "text": "rr red" } - { "index": { } } - - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275188, "color": "blue" } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275188, "color": "blue", "text": "bb blue" } - { "index": { } } - - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275189, "color": "green" } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275189, "color": "green", "text": "gg green" } - { "index": { } } - - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275190, "color": "red" } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275190, "color": "red", "text": "rr red" } - { "index": { } } - - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275191, "color": "red" } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275191, "color": "red", "text": "rr red" } - { "index": { } } - - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275192, "color": "blue" } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275192, "color": "blue", "text": "bb blue" } - { "index": { } } - - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275193, "color": "green" } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, 
"time": 1674835275193, "color": "green", "text": "gg green" } - { "index": { } } - - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275194, "color": "red" } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275194, "color": "red", "text": "rr red" } - { "index": { } } - - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275195, "color": "red" } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275195, "color": "red", "text": "rr red" } - { "index": { } } - - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275196, "color": "blue" } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275196, "color": "blue", "text": "bb blue" } - { "index": { } } - - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275197, "color": "green" } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275197, "color": "green", "text": "gg green" } - { "index": { } } - - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275198, "color": "red" } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275198, "color": "red", "text": "rr red" } - { "index": { } } - - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275199, "color": "red" } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275199, "color": "red", "text": "rr red" } - { "index": { } } - - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275200, "color": "blue" } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275200, "color": "blue", "text": "bb blue" } - { "index": { } } - - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275201, "color": "green" } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275201, "color": "green", "text": "gg green" } - { "index": { } } - - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275202, "color": "red" } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275202, "color": "red", "text": "rr red" } - { "index": { } } - - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275203, "color": "red" } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275203, "color": "red", "text": "rr red" } - { "index": { } } - - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275204, "color": "blue" } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275204, "color": "blue", "text": "bb blue" } - { "index": { } } - - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275205, "color": "green" } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275205, "color": "green", "text": "gg green" } - { "index": { } } - - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275206, "color": "red" } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275206, "color": "red", "text": "rr red" } - { "index": { } } - - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275207, "color": "red" } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275207, "color": "red", "text": "rr red" } - { "index": { } } - - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275208, "color": "blue" } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275208, 
"color": "blue", "text": "bb blue" } - { "index": { } } - - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275209, "color": "green" } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275209, "color": "green", "text": "gg green" } - { "index": { } } - - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275210, "color": "red" } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275210, "color": "red", "text": "rr red" } - { "index": { } } - - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275211, "color": "red" } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275211, "color": "red", "text": "rr red" } - { "index": { } } - - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275212, "color": "blue" } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275212, "color": "blue", "text": "bb blue" } - { "index": { } } - - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275213, "color": "green" } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275213, "color": "green", "text": "gg green" } - { "index": { } } - - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275214, "color": "red" } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275214, "color": "red", "text": "rr red" } - { "index": { } } - - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275215, "color": "red" } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275215, "color": "red", "text": "rr red" } - { "index": { } } - - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275216, "color": "blue" } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275216, "color": "blue", "text": "bb blue" } - { "index": { } } - - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275217, "color": "green" } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275217, "color": "green", "text": "gg green" } - { "index": { } } - - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275218, "color": "red" } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275218, "color": "red", "text": "rr red" } - { "index": { } } - - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275219, "color": "red" } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275219, "color": "red", "text": "rr red" } - { "index": { } } - - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275220, "color": "blue" } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275220, "color": "blue", "text": "bb blue" } - { "index": { } } - - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275221, "color": "green" } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275221, "color": "green", "text": "gg green" } - { "index": { } } - - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275222, "color": "red" } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275222, "color": "red", "text": "rr red" } - { "index": { } } - - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275223, "color": "red" } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275223, "color": "red", "text": 
"rr red" } - { "index": { } } - - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275224, "color": "blue" } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275224, "color": "blue", "text": "bb blue" } - { "index": { } } - - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275225, "color": "green" } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275225, "color": "green", "text": "gg green" } - { "index": { } } - - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275226, "color": "red" } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275226, "color": "red", "text": "rr red" } --- "Test From": @@ -127,8 +131,10 @@ setup: - match: {columns.3.type: "long"} - match: {columns.4.name: "data_d"} - match: {columns.4.type: "double"} - - match: {columns.5.name: "time"} - - match: {columns.5.type: "long"} + - match: {columns.5.name: "text"} + - match: {columns.5.type: "text"} + - match: {columns.6.name: "time"} + - match: {columns.6.type: "long"} - length: {values: 40} --- @@ -429,11 +435,11 @@ setup: body: query: 'from test | eval nullsum = count_d + null | sort nullsum | limit 1' - - length: {columns: 7} + - length: {columns: 8} - length: {values: 1} - - match: {columns.6.name: "nullsum"} - - match: {columns.6.type: "double"} - - match: {values.0.6: null} + - match: {columns.7.name: "nullsum"} + - match: {columns.7.type: "double"} + - match: {values.0.7: null} --- "Test Eval Row With Null": @@ -501,3 +507,19 @@ setup: - match: {values.0.2: null} - match: {values.0.3: null} +--- +grouping on text: + - do: + warnings: + - "No limit defined, adding default limit of [500]" + esql.query: + body: + query: 'FROM test | STATS med=median(count) BY text | SORT med' + columnar: true + + - match: {columns.0.name: "med"} + - match: {columns.0.type: "double"} + - match: {columns.1.name: "text"} + - match: {columns.1.type: "text"} + - match: {values.0: [42.0, 43.0, 44.0]} + - match: {values.1: ["bb blue", "rr red", "gg green"]} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/30_types.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml similarity index 80% rename from x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/30_types.yml rename to x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml index bf159455d00ca..531f30d42ece0 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/30_types.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml @@ -1,6 +1,8 @@ --- setup: - skip: + version: " - 8.11.99" + reason: "more field loading added in 8.12+" features: warnings --- @@ -677,3 +679,189 @@ unsigned_long: - match: { columns.0.type: unsigned_long } - length: { values: 1 } - match: { values.0.0: [ 0, 1, 9223372036854775808, 18446744073709551615 ] } + +--- +_source: + - skip: + version: " - 8.11.99" + reason: "_source is available in 8.12+" + + - do: + bulk: + index: test + refresh: true + body: + - { "index" : { "_index" : "test", "_id" : "id-1" } } + - { "wow": 1, "such": "_source", "you'd": "never", "expect": ["amazing", "source"] } + + - do: + esql.query: + body: + query: 'FROM test [METADATA _source] | KEEP _source | LIMIT 1' + - match: { columns.0.name: _source } + - match: { columns.0.type: _source } + - length: { values: 1 } + - match: + values.0.0: + wow: 
1 + such: _source + "you'd": never + expect: [amazing, source] + +--- +_source keep all: + - skip: + version: " - 8.11.99" + reason: "_source is available in 8.12+" + + - do: + indices.create: + index: test + body: + mappings: + dynamic: false + + - do: + bulk: + index: test + refresh: true + body: + - { "index" : { "_index" : "test", "_id" : "id-1" } } + - { "wow": 1, "such": "_source", "you'd": "never", "expect": ["amazing", "source"] } + + - do: + esql.query: + body: + query: 'FROM test [METADATA _source] | LIMIT 1' + - match: { columns.0.name: _source } + - match: { columns.0.type: _source } + - length: { values: 1 } + - match: + values.0.0: + wow: 1 + such: _source + "you'd": never + expect: [amazing, source] + +--- +_source disabled: + - skip: + version: " - 8.11.99" + reason: "_source is available in 8.12+" + + - do: + indices.create: + index: test + body: + mappings: + _source: + enabled: false + + - do: + bulk: + index: test + refresh: true + body: + - { "index" : { "_index" : "test", "_id" : "id-1" } } + - { "wow": 1, "such": "_source", "you'd": "never", "expect": ["amazing", "source"] } + + - do: + esql.query: + body: + query: 'FROM test [METADATA _source] | KEEP _source | LIMIT 1' + - match: { columns.0.name: _source } + - match: { columns.0.type: _source } + - length: { values: 1 } + - match: { values.0.0: null } + +--- +text: + - do: + indices.create: + index: test + body: + mappings: + properties: + card: + type: text + + - do: + bulk: + index: test + refresh: true + body: + - { "index": { } } + - { "card": "jack of diamonds" } + + - do: + esql.query: + body: + query: 'FROM test | LIMIT 1' + - match: {columns.0.name: card} + - match: {columns.0.type: text} + - length: {values: 1} + - match: {values.0.0: jack of diamonds} + +--- +synthetic _source text stored: + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: + card: + type: text + store: true + + - do: + bulk: + index: test + refresh: true + body: + - { "index": { } } + - { "card": "jack of diamonds" } + + - do: + esql.query: + body: + query: 'FROM test | LIMIT 1' + - match: {columns.0.name: card} + - match: {columns.0.type: text} + - length: {values: 1} + - match: {values.0.0: jack of diamonds} + +--- +synthetic _source text with parent keyword: + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: + card: + type: keyword + fields: + text: + type: text + + - do: + bulk: + index: test + refresh: true + body: + - { "index": { } } + - { "card": "jack of diamonds" } + + - do: + esql.query: + body: + query: 'FROM test | KEEP card.text | LIMIT 1' + - match: {columns.0.name: card.text} + - match: {columns.0.type: text} + - length: {values: 1} + - match: {values.0.0: jack of diamonds} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_tsdb.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_tsdb.yml similarity index 89% rename from x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_tsdb.yml rename to x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_tsdb.yml index 6a90fc5a7b8f8..69bd944430f04 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_tsdb.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_tsdb.yml @@ -1,5 +1,7 @@ setup: - skip: + version: " - 8.10.99" + reason: "ESQL is available in 8.11+" features: 
allowed_warnings_regex - do: indices.create: @@ -224,3 +226,38 @@ from index pattern explicit counter use: esql.query: body: query: 'FROM test* | keep *.tx' + + +--- +_source: + - skip: + version: " - 8.11.99" + reason: "_source is available in 8.12+" + + - do: + bulk: + index: test + refresh: true + body: + - { "index" : { "_index" : "test", "_id" : "id-1" } } + - { "wow": 1, "such": "_source", "you'd": "never", "expect": ["amazing", "source"] } + + - do: + esql.query: + body: + query: 'FROM test [METADATA _source] | WHERE @timestamp == "2021-04-28T18:50:23.142Z" | KEEP _source | LIMIT 1' + - match: { columns.0.name: _source } + - match: { columns.0.type: _source } + - length: { values: 1 } + - match: + values.0.0: + "@timestamp": "2021-04-28T18:50:23.142Z" + metricset: pod + k8s: + pod: + ip: 10.10.55.3 + name: dog + network: + rx: 530600088 + tx: 1434577921 + uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_unsupported_types.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_unsupported_types.yml similarity index 99% rename from x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_unsupported_types.yml rename to x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_unsupported_types.yml index c06456f7f127d..be5b43433983e 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_unsupported_types.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_unsupported_types.yml @@ -1,5 +1,7 @@ setup: - skip: + version: " - 8.10.99" + reason: "ESQL is available in 8.11+" features: allowed_warnings_regex - do: indices.create: diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/45_non_tsdb_counter.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/45_non_tsdb_counter.yml similarity index 98% rename from x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/45_non_tsdb_counter.yml rename to x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/45_non_tsdb_counter.yml index beb7200f01230..13a88d0c2f79f 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/45_non_tsdb_counter.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/45_non_tsdb_counter.yml @@ -1,5 +1,7 @@ setup: - skip: + version: " - 8.10.99" + reason: "ESQL is available in 8.11+" features: allowed_warnings_regex - do: indices.create: diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/50_index_patterns.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/50_index_patterns.yml similarity index 95% rename from x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/50_index_patterns.yml rename to x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/50_index_patterns.yml index 5fceeee2f6e57..38023b7791709 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/50_index_patterns.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/50_index_patterns.yml @@ -1,5 +1,7 @@ setup: - skip: + version: " - 8.10.99" + reason: "ESQL is available in 8.11+" features: allowed_warnings_regex --- @@ -28,9 +30,9 @@ disjoint_mappings: index: test1 refresh: true body: - - { 
"index": {} } - - { "message1": "foo1"} - - { "index": {} } + - { "index": { } } + - { "message1": "foo1" } + - { "index": { } } - { "message1": "foo2" } - do: @@ -38,9 +40,9 @@ disjoint_mappings: index: test2 refresh: true body: - - { "index": {} } + - { "index": { } } - { "message2": 1 } - - { "index": {} } + - { "index": { } } - { "message2": 2 } - do: @@ -315,9 +317,9 @@ same_name_different_type: index: test1 refresh: true body: - - { "index": {} } - - { "message": "foo1"} - - { "index": {} } + - { "index": { } } + - { "message": "foo1" } + - { "index": { } } - { "message": "foo2" } - do: @@ -325,9 +327,9 @@ same_name_different_type: index: test2 refresh: true body: - - { "index": {} } + - { "index": { } } - { "message": 1 } - - { "index": {} } + - { "index": { } } - { "message": 2 } - do: @@ -367,9 +369,9 @@ same_name_different_type_same_family: index: test1 refresh: true body: - - { "index": {} } - - { "message": "foo1"} - - { "index": {} } + - { "index": { } } + - { "message": "foo1" } + - { "index": { } } - { "message": "foo2" } - do: @@ -377,9 +379,9 @@ same_name_different_type_same_family: index: test2 refresh: true body: - - { "index": {} } + - { "index": { } } - { "message": "foo3" } - - { "index": {} } + - { "index": { } } - { "message": "foo4" } - do: diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/60_enrich.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_enrich.yml similarity index 95% rename from x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/60_enrich.yml rename to x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_enrich.yml index 84d8682508733..1673453824584 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/60_enrich.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_enrich.yml @@ -1,6 +1,8 @@ --- setup: - skip: + version: " - 8.10.99" + reason: "ESQL is available in 8.11+" features: allowed_warnings_regex - do: indices.create: @@ -127,3 +129,8 @@ setup: - match: { values.1: [ "Bob", "nyc", "USA" ] } - match: { values.2: [ "Denise", "sgn", null ] } - match: { values.3: [ "Mario", "rom", "Italy" ] } + + - do: + enrich.delete_policy: + name: cities_policy + - is_true: acknowledged diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/60_usage.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml similarity index 96% rename from x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/60_usage.yml rename to x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml index d7998651540d8..ad46a3c2d9c3e 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/60_usage.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml @@ -1,5 +1,9 @@ --- setup: + - skip: + version: " - 8.10.99" + reason: "ESQL is available in 8.11+" + - do: indices.create: index: test diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/61_enrich_ip.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/61_enrich_ip.yml similarity index 94% rename from x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/61_enrich_ip.yml rename to 
x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/61_enrich_ip.yml index bd89af2fd3f79..0d49f169fc4b2 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/61_enrich_ip.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/61_enrich_ip.yml @@ -1,6 +1,8 @@ --- setup: - skip: + version: " - 8.10.99" + reason: "ESQL is available in 8.11+" features: allowed_warnings_regex - do: indices.create: @@ -95,3 +97,8 @@ setup: - match: { values.1: [ [ "10.100.0.21", "10.101.0.107" ], [ "Production", "QA" ], [ "OPS","Engineering" ], "sending messages" ] } - match: { values.2: [ "10.101.0.107" , "QA", "Engineering", "network disconnected" ] } - match: { values.3: [ "13.101.0.114" , null, null, "authentication failed" ] } + + - do: + enrich.delete_policy: + name: networks-policy + - is_true: acknowledged diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/62_extra_enrich.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/62_extra_enrich.yml new file mode 100644 index 0000000000000..5f1112197f383 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/62_extra_enrich.yml @@ -0,0 +1,70 @@ +--- +"Enrich fields includes match field": + - skip: + version: " - 8.11.99" + reason: "enrich match field was mistakenly excluded in 8.11" + - do: + indices.create: + index: departments + body: + mappings: + properties: + name: + type: keyword + employees: + type: integer + + - do: + bulk: + index: departments + refresh: true + body: + - { "index": { } } + - { "name": "engineering", "employees": 1024 } + - { "index": { } } + - { "name": "marketing", "employees": 56 } + - do: + cluster.health: + wait_for_no_initializing_shards: true + wait_for_events: languid + + - do: + enrich.put_policy: + name: departments-policy + body: + match: + indices: [ "departments" ] + match_field: "name" + enrich_fields: [ "name", "employees" ] + + - do: + enrich.execute_policy: + name: departments-policy + - do: + esql.query: + body: + query: 'ROW name="engineering" | ENRICH departments-policy | LIMIT 10 | KEEP name, employees' + + - match: { columns.0.name: "name" } + - match: { columns.0.type: "keyword" } + - match: { columns.1.name: "employees" } + - match: { columns.1.type: "integer" } + + - length: { values: 1 } + - match: { values.0.0: "engineering" } + - match: { values.0.1: 1024 } + + - do: + esql.query: + body: + query: 'ROW name="sales" | ENRICH departments-policy ON name WITH department=name | WHERE name==department | KEEP name, department | LIMIT 10' + + - match: { columns.0.name: "name" } + - match: { columns.0.type: "keyword" } + - match: { columns.1.name: "department" } + - match: { columns.1.type: "keyword" } + - length: { values: 0 } + + - do: + enrich.delete_policy: + name: departments-policy diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/70_locale.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/70_locale.yml similarity index 96% rename from x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/70_locale.yml rename to x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/70_locale.yml index a77e0569668de..bcae5e7cf24a2 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/70_locale.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/70_locale.yml @@ -1,6 +1,8 @@ --- setup: - skip: + 
version: " - 8.10.99" + reason: "ESQL is available in 8.11+" features: allowed_warnings_regex - do: indices.create: diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/80_text.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml similarity index 99% rename from x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/80_text.yml rename to x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml index d6d20fa0a0aee..cef7f88506de8 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/80_text.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml @@ -1,6 +1,8 @@ --- setup: - skip: + version: " - 8.10.99" + reason: "ESQL is available in 8.11+" features: allowed_warnings_regex - do: indices.create: diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/90_non_indexed.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/90_non_indexed.yml similarity index 97% rename from x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/90_non_indexed.yml rename to x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/90_non_indexed.yml index 9138a9454c571..c6124e7f75e96 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/90_non_indexed.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/90_non_indexed.yml @@ -1,5 +1,7 @@ setup: - skip: + version: " - 8.11.99" + reason: "extracting non-indexed fields available in 8.12+" features: allowed_warnings - do: indices.create: diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml index 03cbe665a0d5d..1fa675ff4284f 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml @@ -695,3 +695,61 @@ setup: } } } + +--- +"Test put model config with prefix strings": + - do: + ml.put_trained_model: + model_id: model_with_prefixes + body: > + { + "model_type": "pytorch", + "inference_config": { + "text_embedding": { } + }, + "prefix_strings": { + "search": "this is a query", + "ingest": "this is a passage" + } + } + - match: { prefix_strings.search: "this is a query" } + - match: { prefix_strings.ingest: "this is a passage" } + + - do: + ml.get_trained_models: + model_id: model_with_prefixes + - match: { trained_model_configs.0.prefix_strings.search: "this is a query" } + - match: { trained_model_configs.0.prefix_strings.ingest: "this is a passage" } + + + - do: + ml.put_trained_model: + model_id: model_with_search_prefix + body: > + { + "model_type": "pytorch", + "inference_config": { + "text_embedding": { } + }, + "prefix_strings": { + "search": "this is a query" + } + } + - match: { prefix_strings.search: "this is a query" } + - is_false: prefix_strings.ingest + + - do: + ml.put_trained_model: + model_id: model_with_ingest_prefix + body: > + { + "model_type": "pytorch", + "inference_config": { + "text_embedding": { } + }, + "prefix_strings": { + "ingest": "this is a passage" + } + } + - is_false: prefix_strings.search + - match: { prefix_strings.ingest: "this is a passage" } diff --git 
a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/inference_rescore.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/learn_to_rank_rescorer.yml similarity index 95% rename from x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/inference_rescore.yml rename to x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/learn_to_rank_rescorer.yml index 824999b3b3008..a0ae4b7c44316 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/inference_rescore.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/learn_to_rank_rescorer.yml @@ -146,7 +146,7 @@ setup: { "rescore": { "window_size": 10, - "inference": { "model_id": "ltr-model" } + "learn_to_rank": { "model_id": "ltr-model" } } } - match: { hits.hits.0._score: 17.0 } @@ -162,7 +162,7 @@ setup: "query": {"term": {"product": "Laptop"}}, "rescore": { "window_size": 10, - "inference": { "model_id": "ltr-model" } + "learn_to_rank": { "model_id": "ltr-model" } } } - match: { hits.hits.0._score: 6.0 } @@ -182,7 +182,7 @@ setup: { "rescore": { "window_size": 2, - "inference": { "model_id": "ltr-model" } + "learn_to_rank": { "model_id": "ltr-model" } } } - match: { hits.hits.0._score: 17.0 } @@ -209,7 +209,7 @@ setup: }, { "window_size": 3, - "inference": { "model_id": "ltr-model" } + "learn_to_rank": { "model_id": "ltr-model" } }, { "window_size": 2, @@ -232,7 +232,7 @@ setup: { "rescore": { "window_size": 10, - "inference": { "model_id": "ltr-missing" } + "learn_to_rank": { "model_id": "ltr-missing" } } } --- @@ -245,7 +245,7 @@ setup: "query": {"term": {"product": "Speaker"}}, "rescore": { "window_size": 10, - "inference": { "model_id": "ltr-model" } + "learn_to_rank": { "model_id": "ltr-model" } } } - length: { hits.hits: 0 } diff --git a/x-pack/plugin/stack/src/javaRestTest/java/org/elasticsearch/xpack/stack/EcsDynamicTemplatesIT.java b/x-pack/plugin/stack/src/javaRestTest/java/org/elasticsearch/xpack/stack/EcsDynamicTemplatesIT.java index ea3286e96160c..25cea3b3f6e0a 100644 --- a/x-pack/plugin/stack/src/javaRestTest/java/org/elasticsearch/xpack/stack/EcsDynamicTemplatesIT.java +++ b/x-pack/plugin/stack/src/javaRestTest/java/org/elasticsearch/xpack/stack/EcsDynamicTemplatesIT.java @@ -32,7 +32,6 @@ import java.net.URL; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -77,7 +76,7 @@ private static void prepareEcsDynamicTemplates() throws IOException { "/" + ECS_DYNAMIC_TEMPLATES_FILE, Integer.toString(1), StackTemplateRegistry.TEMPLATE_VERSION_VARIABLE, - Collections.emptyMap() + StackTemplateRegistry.ADDITIONAL_TEMPLATE_VARIABLES ); Map ecsDynamicTemplatesRaw; try ( diff --git a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistry.java b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistry.java index 9fb33db74964a..4ac7d404c49ed 100644 --- a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistry.java +++ b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistry.java @@ -16,6 +16,7 @@ import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.threadpool.ThreadPool; import 
org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParserConfiguration; @@ -35,6 +36,7 @@ import java.util.Map; import static org.elasticsearch.xpack.stack.StackTemplateRegistry.STACK_TEMPLATES_ENABLED; +import static org.elasticsearch.xpack.stack.StackTemplateRegistry.STACK_TEMPLATES_FEATURE; @Deprecated(since = "8.12.0", forRemoval = true) public class LegacyStackTemplateRegistry extends IndexTemplateRegistry { @@ -45,13 +47,16 @@ public class LegacyStackTemplateRegistry extends IndexTemplateRegistry { // The stack template registry version. This number must be incremented when we make changes // to built-in templates. - public static final int REGISTRY_VERSION = 3; + public static final int REGISTRY_VERSION = 4; public static final String TEMPLATE_VERSION_VARIABLE = "xpack.stack.template.version"; private final ClusterService clusterService; + private final FeatureService featureService; private volatile boolean stackTemplateEnabled; + private static final Map ADDITIONAL_TEMPLATE_VARIABLES = Map.of("xpack.stack.template.deprecated", "true"); + // General mappings conventions for any data that ends up in a data stream public static final String DATA_STREAMS_MAPPINGS_COMPONENT_TEMPLATE_NAME = "data-streams-mappings"; @@ -94,10 +99,12 @@ public LegacyStackTemplateRegistry( ClusterService clusterService, ThreadPool threadPool, Client client, - NamedXContentRegistry xContentRegistry + NamedXContentRegistry xContentRegistry, + FeatureService featureService ) { super(nodeSettings, clusterService, threadPool, client, xContentRegistry); this.clusterService = clusterService; + this.featureService = featureService; this.stackTemplateEnabled = STACK_TEMPLATES_ENABLED.get(nodeSettings); } @@ -121,14 +128,14 @@ private void updateEnabledSetting(boolean newValue) { } private static final List LIFECYCLE_POLICY_CONFIGS = List.of( - new LifecyclePolicyConfig(LOGS_ILM_POLICY_NAME, "/logs@lifecycle.json"), - new LifecyclePolicyConfig(METRICS_ILM_POLICY_NAME, "/metrics@lifecycle.json"), - new LifecyclePolicyConfig(SYNTHETICS_ILM_POLICY_NAME, "/synthetics@lifecycle.json"), - new LifecyclePolicyConfig(ILM_7_DAYS_POLICY_NAME, "/7-days@lifecycle.json"), - new LifecyclePolicyConfig(ILM_30_DAYS_POLICY_NAME, "/30-days@lifecycle.json"), - new LifecyclePolicyConfig(ILM_90_DAYS_POLICY_NAME, "/90-days@lifecycle.json"), - new LifecyclePolicyConfig(ILM_180_DAYS_POLICY_NAME, "/180-days@lifecycle.json"), - new LifecyclePolicyConfig(ILM_365_DAYS_POLICY_NAME, "/365-days@lifecycle.json") + new LifecyclePolicyConfig(LOGS_ILM_POLICY_NAME, "/logs@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(METRICS_ILM_POLICY_NAME, "/metrics@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(SYNTHETICS_ILM_POLICY_NAME, "/synthetics@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_7_DAYS_POLICY_NAME, "/7-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_30_DAYS_POLICY_NAME, "/30-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_90_DAYS_POLICY_NAME, "/90-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_180_DAYS_POLICY_NAME, "/180-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_365_DAYS_POLICY_NAME, "/365-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES) ); @Override @@ -154,55 +161,64 @@ protected List getLifecyclePolicies() { 
DATA_STREAMS_MAPPINGS_COMPONENT_TEMPLATE_NAME, "/data-streams@mappings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( LOGS_MAPPINGS_COMPONENT_TEMPLATE_NAME, "/logs@mappings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( ECS_DYNAMIC_MAPPINGS_COMPONENT_TEMPLATE_NAME, "/ecs@mappings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( LOGS_SETTINGS_COMPONENT_TEMPLATE_NAME, "/logs@settings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( METRICS_MAPPINGS_COMPONENT_TEMPLATE_NAME, "/metrics@mappings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( METRICS_SETTINGS_COMPONENT_TEMPLATE_NAME, "/metrics@settings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( METRICS_TSDB_SETTINGS_COMPONENT_TEMPLATE_NAME, "/metrics@tsdb-settings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( SYNTHETICS_MAPPINGS_COMPONENT_TEMPLATE_NAME, "/synthetics@mappings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( SYNTHETICS_SETTINGS_COMPONENT_TEMPLATE_NAME, "/synthetics@settings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ) )) { try { @@ -232,8 +248,22 @@ protected Map getComposableTemplateConfigs() { } private static final List INGEST_PIPELINE_CONFIGS = List.of( - new JsonIngestPipelineConfig("logs@json-message", "/logs@json-pipeline.json", REGISTRY_VERSION, TEMPLATE_VERSION_VARIABLE), - new JsonIngestPipelineConfig("logs-default-pipeline", "/logs@default-pipeline.json", REGISTRY_VERSION, TEMPLATE_VERSION_VARIABLE) + new JsonIngestPipelineConfig( + "logs@json-message", + "/logs@json-pipeline.json", + REGISTRY_VERSION, + TEMPLATE_VERSION_VARIABLE, + List.of(), + ADDITIONAL_TEMPLATE_VARIABLES + ), + new JsonIngestPipelineConfig( + "logs-default-pipeline", + "/logs@default-pipeline.json", + REGISTRY_VERSION, + TEMPLATE_VERSION_VARIABLE, + List.of(), + ADDITIONAL_TEMPLATE_VARIABLES + ) ); @Override @@ -262,7 +292,6 @@ protected boolean isClusterReady(ClusterChangedEvent event) { // Ensure current version of the components are installed only once all nodes are updated to 8.9.0. 
// This is necessary to prevent an error caused by the usage of the ignore_missing_pipeline property // in the pipeline processor, which has been introduced only in 8.9.0 - Version minNodeVersion = event.state().nodes().getMinNodeVersion(); - return minNodeVersion.onOrAfter(MIN_NODE_VERSION); + return featureService.clusterHasFeature(event.state(), STACK_TEMPLATES_FEATURE); } } diff --git a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackPlugin.java b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackPlugin.java index 1fac8a28aa5da..2577cf28f4213 100644 --- a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackPlugin.java +++ b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackPlugin.java @@ -34,7 +34,8 @@ public Collection<?> createComponents(PluginServices services) { services.clusterService(), services.threadPool(), services.client(), - services.xContentRegistry() + services.xContentRegistry(), + services.featureService() ); legacyStackTemplateRegistry.initialize(); StackTemplateRegistry stackTemplateRegistry = new StackTemplateRegistry( @@ -42,7 +43,8 @@ public Collection<?> createComponents(PluginServices services) { services.clusterService(), services.threadPool(), services.client(), - services.xContentRegistry() + services.xContentRegistry(), + services.featureService() ); stackTemplateRegistry.initialize(); return List.of(legacyStackTemplateRegistry, stackTemplateRegistry); diff --git a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java index 3471d312d9df8..8dc8238b8230b 100644 --- a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java +++ b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java @@ -9,7 +9,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.metadata.ComponentTemplate; @@ -17,6 +16,8 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParserConfiguration; @@ -38,11 +39,11 @@ public class StackTemplateRegistry extends IndexTemplateRegistry { private static final Logger logger = LogManager.getLogger(StackTemplateRegistry.class); // Current version of the registry requires all nodes to be at least 8.9.0. - public static final Version MIN_NODE_VERSION = Version.V_8_9_0; + public static final NodeFeature STACK_TEMPLATES_FEATURE = new NodeFeature("stack.templates_supported"); // The stack template registry version. This number must be incremented when we make changes // to built-in templates.
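The MIN_NODE_VERSION replacement above is the core of this change: instead of comparing the cluster's minimum node version against a hard-coded Version constant, both template registries now ask the FeatureService whether every node in the cluster exposes the stack.templates_supported feature. As a reading aid, here is a minimal Java sketch of the pattern, assembled from the names this patch introduces (the service-loader registration and plugin wiring appear further down in the diff):

    import org.elasticsearch.Version;
    import org.elasticsearch.features.FeatureSpecification;
    import org.elasticsearch.features.NodeFeature;

    import java.util.Map;

    public class StackTemplatesFeatures implements FeatureSpecification {
        // Declaring the feature as "historical" maps it to a release version, so the
        // FeatureService treats every node at or above 8.9.0 as having it and mixed
        // 8.x clusters upgrade cleanly without nodes publishing anything new.
        @Override
        public Map<NodeFeature, Version> getHistoricalFeatures() {
            return Map.of(StackTemplateRegistry.STACK_TEMPLATES_FEATURE, Version.V_8_9_0);
        }
    }

    // Inside the registry, readiness then becomes a feature check instead of a
    // version comparison:
    //     return featureService.clusterHasFeature(event.state(), STACK_TEMPLATES_FEATURE);

This keeps the readiness check meaningful even where a node's version number is no longer a reliable proxy for its capabilities.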
- public static final int REGISTRY_VERSION = 3; + public static final int REGISTRY_VERSION = 4; public static final String TEMPLATE_VERSION_VARIABLE = "xpack.stack.template.version"; public static final Setting STACK_TEMPLATES_ENABLED = Setting.boolSetting( @@ -53,8 +54,11 @@ public class StackTemplateRegistry extends IndexTemplateRegistry { ); private final ClusterService clusterService; + private final FeatureService featureService; private volatile boolean stackTemplateEnabled; + public static final Map ADDITIONAL_TEMPLATE_VARIABLES = Map.of("xpack.stack.template.deprecated", "false"); + // General mappings conventions for any data that ends up in a data stream public static final String DATA_STREAMS_MAPPINGS_COMPONENT_TEMPLATE_NAME = "data-streams@mappings"; @@ -105,10 +109,12 @@ public StackTemplateRegistry( ClusterService clusterService, ThreadPool threadPool, Client client, - NamedXContentRegistry xContentRegistry + NamedXContentRegistry xContentRegistry, + FeatureService featureService ) { super(nodeSettings, clusterService, threadPool, client, xContentRegistry); this.clusterService = clusterService; + this.featureService = featureService; this.stackTemplateEnabled = STACK_TEMPLATES_ENABLED.get(nodeSettings); } @@ -132,14 +138,14 @@ private void updateEnabledSetting(boolean newValue) { } private static final List LIFECYCLE_POLICY_CONFIGS = List.of( - new LifecyclePolicyConfig(LOGS_ILM_POLICY_NAME, "/logs@lifecycle.json"), - new LifecyclePolicyConfig(METRICS_ILM_POLICY_NAME, "/metrics@lifecycle.json"), - new LifecyclePolicyConfig(SYNTHETICS_ILM_POLICY_NAME, "/synthetics@lifecycle.json"), - new LifecyclePolicyConfig(ILM_7_DAYS_POLICY_NAME, "/7-days@lifecycle.json"), - new LifecyclePolicyConfig(ILM_30_DAYS_POLICY_NAME, "/30-days@lifecycle.json"), - new LifecyclePolicyConfig(ILM_90_DAYS_POLICY_NAME, "/90-days@lifecycle.json"), - new LifecyclePolicyConfig(ILM_180_DAYS_POLICY_NAME, "/180-days@lifecycle.json"), - new LifecyclePolicyConfig(ILM_365_DAYS_POLICY_NAME, "/365-days@lifecycle.json") + new LifecyclePolicyConfig(LOGS_ILM_POLICY_NAME, "/logs@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(METRICS_ILM_POLICY_NAME, "/metrics@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(SYNTHETICS_ILM_POLICY_NAME, "/synthetics@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_7_DAYS_POLICY_NAME, "/7-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_30_DAYS_POLICY_NAME, "/30-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_90_DAYS_POLICY_NAME, "/90-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_180_DAYS_POLICY_NAME, "/180-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_365_DAYS_POLICY_NAME, "/365-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES) ); @Override @@ -161,55 +167,64 @@ protected List getLifecyclePolicies() { DATA_STREAMS_MAPPINGS_COMPONENT_TEMPLATE_NAME, "/data-streams@mappings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( LOGS_MAPPINGS_COMPONENT_TEMPLATE_NAME, "/logs@mappings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( ECS_DYNAMIC_MAPPINGS_COMPONENT_TEMPLATE_NAME, "/ecs@mappings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + 
ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( LOGS_SETTINGS_COMPONENT_TEMPLATE_NAME, "/logs@settings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( METRICS_MAPPINGS_COMPONENT_TEMPLATE_NAME, "/metrics@mappings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( METRICS_SETTINGS_COMPONENT_TEMPLATE_NAME, "/metrics@settings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( METRICS_TSDB_SETTINGS_COMPONENT_TEMPLATE_NAME, "/metrics@tsdb-settings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( SYNTHETICS_MAPPINGS_COMPONENT_TEMPLATE_NAME, "/synthetics@mappings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ), new IndexTemplateConfig( SYNTHETICS_SETTINGS_COMPONENT_TEMPLATE_NAME, "/synthetics@settings.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ) )) { try { @@ -230,14 +245,33 @@ protected Map getComponentTemplateConfigs() { } private static final Map COMPOSABLE_INDEX_TEMPLATE_CONFIGS = parseComposableTemplates( - new IndexTemplateConfig(LOGS_INDEX_TEMPLATE_NAME, "/logs@template.json", REGISTRY_VERSION, TEMPLATE_VERSION_VARIABLE), - new IndexTemplateConfig(METRICS_INDEX_TEMPLATE_NAME, "/metrics@template.json", REGISTRY_VERSION, TEMPLATE_VERSION_VARIABLE), - new IndexTemplateConfig(SYNTHETICS_INDEX_TEMPLATE_NAME, "/synthetics@template.json", REGISTRY_VERSION, TEMPLATE_VERSION_VARIABLE), + new IndexTemplateConfig( + LOGS_INDEX_TEMPLATE_NAME, + "/logs@template.json", + REGISTRY_VERSION, + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES + ), + new IndexTemplateConfig( + METRICS_INDEX_TEMPLATE_NAME, + "/metrics@template.json", + REGISTRY_VERSION, + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES + ), + new IndexTemplateConfig( + SYNTHETICS_INDEX_TEMPLATE_NAME, + "/synthetics@template.json", + REGISTRY_VERSION, + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES + ), new IndexTemplateConfig( KIBANA_REPORTING_INDEX_TEMPLATE_NAME, "/kibana-reporting@template.json", REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES ) ); @@ -251,8 +285,22 @@ protected Map getComposableTemplateConfigs() { } private static final List INGEST_PIPELINE_CONFIGS = List.of( - new JsonIngestPipelineConfig("logs@json-pipeline", "/logs@json-pipeline.json", REGISTRY_VERSION, TEMPLATE_VERSION_VARIABLE), - new JsonIngestPipelineConfig("logs@default-pipeline", "/logs@default-pipeline.json", REGISTRY_VERSION, TEMPLATE_VERSION_VARIABLE) + new JsonIngestPipelineConfig( + "logs@json-pipeline", + "/logs@json-pipeline.json", + REGISTRY_VERSION, + TEMPLATE_VERSION_VARIABLE, + List.of(), + ADDITIONAL_TEMPLATE_VARIABLES + ), + new JsonIngestPipelineConfig( + "logs@default-pipeline", + "/logs@default-pipeline.json", + REGISTRY_VERSION, + TEMPLATE_VERSION_VARIABLE, + List.of(), + ADDITIONAL_TEMPLATE_VARIABLES + ) ); @Override @@ -281,7 +329,6 @@ protected boolean isClusterReady(ClusterChangedEvent event) { // Ensure current version of the components are installed only once all nodes are updated to 8.9.0. 
// This is necessary to prevent an error caused by the usage of the ignore_missing_pipeline property // in the pipeline processor, which was only introduced in 8.9.0 - Version minNodeVersion = event.state().nodes().getMinNodeVersion(); - return minNodeVersion.onOrAfter(MIN_NODE_VERSION); + return featureService.clusterHasFeature(event.state(), STACK_TEMPLATES_FEATURE); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlFeatures.java b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplatesFeatures.java similarity index 65% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlFeatures.java rename to x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplatesFeatures.java index 29aa189b2acd4..7b05231fcfd15 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlFeatures.java +++ b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplatesFeatures.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.ml; +package org.elasticsearch.xpack.stack; import org.elasticsearch.Version; import org.elasticsearch.features.FeatureSpecification; @@ -13,12 +13,9 @@ import java.util.Map; -/** - * This class specifies source code features exposed by the Shutdown plugin. - */ -public class MlFeatures implements FeatureSpecification { +public class StackTemplatesFeatures implements FeatureSpecification { @Override public Map getHistoricalFeatures() { - return Map.of(MachineLearning.STATE_RESET_FALLBACK_ON_DISABLED, Version.V_8_7_0); + return Map.of(StackTemplateRegistry.STACK_TEMPLATES_FEATURE, Version.V_8_9_0); } } diff --git a/x-pack/plugin/stack/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/x-pack/plugin/stack/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification new file mode 100644 index 0000000000000..30a1498a54725 --- /dev/null +++ b/x-pack/plugin/stack/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification @@ -0,0 +1,8 @@ +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0; you may not use this file except in compliance with the Elastic License +# 2.0. +# + +org.elasticsearch.xpack.stack.StackTemplatesFeatures diff --git a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistryTests.java b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistryTests.java new file mode 100644 index 0000000000000..39f58e638aa68 --- /dev/null +++ b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistryTests.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.stack; + +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.ComponentTemplate; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.ingest.PipelineConfiguration; +import org.elasticsearch.test.ClusterServiceUtils; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; +import org.junit.After; +import org.junit.Before; + +import java.util.List; + +public class LegacyStackTemplateRegistryTests extends ESTestCase { + private LegacyStackTemplateRegistry registry; + private ThreadPool threadPool; + + @Before + public void createRegistryAndClient() { + threadPool = new TestThreadPool(this.getClass().getName()); + Client client = new NoOpClient(threadPool); + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + var featureService = new FeatureService(List.of(new StackTemplatesFeatures())); + registry = new LegacyStackTemplateRegistry( + Settings.EMPTY, + clusterService, + threadPool, + client, + NamedXContentRegistry.EMPTY, + featureService + ); + } + + @After + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testThatTemplatesAreDeprecated() { + for (ComposableIndexTemplate it : registry.getComposableTemplateConfigs().values()) { + assertTrue(it.isDeprecated()); + } + for (LifecyclePolicy ilm : registry.getLifecyclePolicies()) { + assertTrue(ilm.isDeprecated()); + } + for (ComponentTemplate ct : registry.getComponentTemplateConfigs().values()) { + assertTrue(ct.deprecated()); + } + registry.getIngestPipelines() + .stream() + .map(ipc -> new PipelineConfiguration(ipc.getId(), ipc.loadConfig(), XContentType.JSON)) + .map(PipelineConfiguration::getConfigAsMap) + .forEach(p -> assertTrue((Boolean) p.get("deprecated"))); + } + +} diff --git a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackRegistryWithNonRequiredTemplates.java b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackRegistryWithNonRequiredTemplates.java index 7f674e24658dd..c1c855867599a 100644 --- a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackRegistryWithNonRequiredTemplates.java +++ b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackRegistryWithNonRequiredTemplates.java @@ -11,6 +11,7 @@ import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.template.IndexTemplateConfig; @@ -23,9 +24,10 @@ class StackRegistryWithNonRequiredTemplates extends StackTemplateRegistry { ClusterService clusterService, ThreadPool threadPool, Client client, - NamedXContentRegistry xContentRegistry + NamedXContentRegistry xContentRegistry, + FeatureService featureService ) { - 
super(nodeSettings, clusterService, threadPool, client, xContentRegistry); + super(nodeSettings, clusterService, threadPool, client, xContentRegistry, featureService); } @Override diff --git a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java index 8e0cbc3f82f35..b6fd2e8dd1a53 100644 --- a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java +++ b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.ingest.IngestMetadata; import org.elasticsearch.ingest.PipelineConfiguration; import org.elasticsearch.test.ClusterServiceUtils; @@ -78,13 +79,22 @@ public class StackTemplateRegistryTests extends ESTestCase { private ClusterService clusterService; private ThreadPool threadPool; private VerifyingClient client; + private FeatureService featureService; @Before public void createRegistryAndClient() { threadPool = new TestThreadPool(this.getClass().getName()); client = new VerifyingClient(threadPool); clusterService = ClusterServiceUtils.createClusterService(threadPool); - registry = new StackTemplateRegistry(Settings.EMPTY, clusterService, threadPool, client, NamedXContentRegistry.EMPTY); + featureService = new FeatureService(List.of(new StackTemplatesFeatures())); + registry = new StackTemplateRegistry( + Settings.EMPTY, + clusterService, + threadPool, + client, + NamedXContentRegistry.EMPTY, + featureService + ); } @After @@ -101,7 +111,8 @@ public void testDisabledDoesNotAddIndexTemplates() { clusterService, threadPool, client, - NamedXContentRegistry.EMPTY + NamedXContentRegistry.EMPTY, + featureService ); assertThat(disabledRegistry.getComposableTemplateConfigs(), anEmptyMap()); } @@ -113,7 +124,8 @@ public void testDisabledStillAddsComponentTemplatesAndIlmPolicies() { clusterService, threadPool, client, - NamedXContentRegistry.EMPTY + NamedXContentRegistry.EMPTY, + featureService ); assertThat(disabledRegistry.getComponentTemplateConfigs(), not(anEmptyMap())); assertThat( @@ -357,7 +369,8 @@ public void testMissingNonRequiredTemplates() throws Exception { clusterService, threadPool, client, - NamedXContentRegistry.EMPTY + NamedXContentRegistry.EMPTY, + featureService ); DiscoveryNode node = DiscoveryNodeUtils.create("node"); @@ -507,6 +520,23 @@ public void testThatNothingIsInstalledWhenAllNodesAreNotUpdated() { registry.clusterChanged(event); } + public void testThatTemplatesAreNotDeprecated() { + for (ComposableIndexTemplate it : registry.getComposableTemplateConfigs().values()) { + assertFalse(it.isDeprecated()); + } + for (LifecyclePolicy ilm : registry.getLifecyclePolicies()) { + assertFalse(ilm.isDeprecated()); + } + for (ComponentTemplate ct : registry.getComponentTemplateConfigs().values()) { + assertFalse(ct.deprecated()); + } + registry.getIngestPipelines() + .stream() + .map(ipc -> new PipelineConfiguration(ipc.getId(), ipc.loadConfig(), XContentType.JSON)) + .map(PipelineConfiguration::getConfigAsMap) + .forEach(p -> assertFalse((Boolean) p.get("deprecated"))); + } + // ------------- /** diff --git 
a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformPivotRestIT.java b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformPivotRestIT.java index ffafa133989a6..925e6d5381770 100644 --- a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformPivotRestIT.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformPivotRestIT.java @@ -1123,8 +1123,24 @@ public void testContinuousDateHistogramPivot() throws Exception { assertEquals(11, totalStars, 0); } - @SuppressWarnings("unchecked") public void testPreviewTransform() throws Exception { + testPreviewTransform(""); + } + + public void testPreviewTransformWithQuery() throws Exception { + testPreviewTransform(""" + , + "query": { + "range": { + "timestamp": { + "gte": 123456789 + } + } + }"""); + } + + @SuppressWarnings("unchecked") + private void testPreviewTransform(String queryJson) throws Exception { setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME); final Request createPreviewRequest = createRequestWithAuth( "POST", @@ -1136,6 +1152,7 @@ public void testPreviewTransform() throws Exception { { "source": { "index": "%s" + %s }, "pivot": { "group_by": { @@ -1159,7 +1176,7 @@ public void testPreviewTransform() throws Exception { } } } - }""", REVIEWS_INDEX_NAME); + }""", REVIEWS_INDEX_NAME, queryJson); createPreviewRequest.setJsonEntity(config); diff --git a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java index e6388bb6fea5d..c616c1c238171 100644 --- a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.transform.TransformField; @@ -618,7 +619,7 @@ protected static void deleteTransform(String transformId) throws IOException { protected static void deleteTransform(String transformId, boolean ignoreNotFound, boolean deleteDestIndex) throws IOException { Request request = new Request("DELETE", getTransformEndpoint() + transformId); if (ignoreNotFound) { - request.addParameter("ignore", "404"); + setIgnoredErrorResponseCodes(request, RestStatus.NOT_FOUND); } if (deleteDestIndex) { request.addParameter(TransformField.DELETE_DEST_INDEX.getPreferredName(), Boolean.TRUE.toString()); diff --git a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java new file mode 100644 index 0000000000000..d05acc7a7b368 --- /dev/null +++ 
b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java @@ -0,0 +1,415 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.transform.checkpoint; + +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.EngineConfig; +import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.index.engine.InternalEngine; +import org.elasticsearch.index.engine.InternalEngineFactory; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.shard.IndexLongFieldRange; +import org.elasticsearch.index.shard.ShardLongFieldRange; +import org.elasticsearch.node.NodeRoleSettings; +import org.elasticsearch.plugins.EnginePlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.aggregations.BaseAggregationBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.AbstractMultiClustersTestCase; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.transform.MockDeprecatedAggregationBuilder; +import org.elasticsearch.xpack.core.transform.MockDeprecatedQueryBuilder; +import org.elasticsearch.xpack.core.transform.TransformNamedXContentProvider; +import org.elasticsearch.xpack.core.transform.action.GetCheckpointAction; +import org.elasticsearch.xpack.core.transform.action.GetTransformStatsAction; +import org.elasticsearch.xpack.core.transform.action.PutTransformAction; +import org.elasticsearch.xpack.core.transform.action.StartTransformAction; +import org.elasticsearch.xpack.core.transform.transforms.DestConfig; +import org.elasticsearch.xpack.core.transform.transforms.QueryConfig; +import org.elasticsearch.xpack.core.transform.transforms.SourceConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformStats; +import org.elasticsearch.xpack.core.transform.transforms.latest.LatestConfig; +import 
org.elasticsearch.xpack.transform.LocalStateTransform; +import org.junit.Before; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.xpack.core.ClientHelper.TRANSFORM_ORIGIN; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class TransformCCSCanMatchIT extends AbstractMultiClustersTestCase { + + private static final String REMOTE_CLUSTER = "cluster_a"; + private static final TimeValue TIMEOUT = TimeValue.timeValueMinutes(1); + + private NamedXContentRegistry namedXContentRegistry; + private long timestamp; + private int oldLocalNumShards; + private int localOldDocs; + private int oldRemoteNumShards; + private int remoteOldDocs; + private int newLocalNumShards; + private int localNewDocs; + private int newRemoteNumShards; + private int remoteNewDocs; + + @Before + public void setUpNamedXContentRegistryAndIndices() throws Exception { + SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList()); + + List namedXContents = searchModule.getNamedXContents(); + namedXContents.add( + new NamedXContentRegistry.Entry( + QueryBuilder.class, + new ParseField(MockDeprecatedQueryBuilder.NAME), + (p, c) -> MockDeprecatedQueryBuilder.fromXContent(p) + ) + ); + namedXContents.add( + new NamedXContentRegistry.Entry( + BaseAggregationBuilder.class, + new ParseField(MockDeprecatedAggregationBuilder.NAME), + (p, c) -> MockDeprecatedAggregationBuilder.fromXContent(p) + ) + ); + + namedXContents.addAll(new TransformNamedXContentProvider().getNamedXContentParsers()); + + namedXContentRegistry = new NamedXContentRegistry(namedXContents); + + timestamp = randomLongBetween(10_000_000, 50_000_000); + + oldLocalNumShards = randomIntBetween(1, 5); + localOldDocs = createIndexAndIndexDocs(LOCAL_CLUSTER, "local_old_index", oldLocalNumShards, timestamp - 10_000, true); + oldRemoteNumShards = randomIntBetween(1, 5); + remoteOldDocs = createIndexAndIndexDocs(REMOTE_CLUSTER, "remote_old_index", oldRemoteNumShards, timestamp - 10_000, true); + + newLocalNumShards = randomIntBetween(1, 5); + localNewDocs = createIndexAndIndexDocs(LOCAL_CLUSTER, "local_new_index", newLocalNumShards, timestamp, randomBoolean()); + newRemoteNumShards = randomIntBetween(1, 5); + remoteNewDocs = createIndexAndIndexDocs(REMOTE_CLUSTER, "remote_new_index", newRemoteNumShards, timestamp, randomBoolean()); + } + + private int createIndexAndIndexDocs(String cluster, String index, int numberOfShards, long timestamp, boolean exposeTimestamp) + throws Exception { + Client client = client(cluster); + ElasticsearchAssertions.assertAcked( + client.admin() + .indices() + .prepareCreate(index) + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ) + .setMapping("@timestamp", "type=date", "position", "type=long") + ); + int numDocs = between(100, 500); + for (int i = 0; i < numDocs; i++) { + client.prepareIndex(index).setSource("position", i, "@timestamp", timestamp + i).get(); + } + if (exposeTimestamp) { + client.admin().indices().prepareClose(index).get(); + client.admin() + 
.indices() + .prepareUpdateSettings(index) + .setSettings(Settings.builder().put(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey(), true).build()) + .get(); + client.admin().indices().prepareOpen(index).get(); + assertBusy(() -> { + IndexLongFieldRange timestampRange = cluster(cluster).clusterService().state().metadata().index(index).getTimestampRange(); + assertTrue(Strings.toString(timestampRange), timestampRange.containsAllShardRanges()); + }); + } else { + client.admin().indices().prepareRefresh(index).get(); + } + return numDocs; + } + + public void testSearchAction_MatchAllQuery() { + testSearchAction(QueryBuilders.matchAllQuery(), true, localOldDocs + localNewDocs + remoteOldDocs + remoteNewDocs, 0); + testSearchAction(QueryBuilders.matchAllQuery(), false, localOldDocs + localNewDocs + remoteOldDocs + remoteNewDocs, 0); + } + + public void testSearchAction_RangeQuery() { + testSearchAction( + QueryBuilders.rangeQuery("@timestamp").from(timestamp), // This query only matches new documents + true, + localNewDocs + remoteNewDocs, + oldLocalNumShards + oldRemoteNumShards + ); + testSearchAction( + QueryBuilders.rangeQuery("@timestamp").from(timestamp), // This query only matches new documents + false, + localNewDocs + remoteNewDocs, + oldLocalNumShards + oldRemoteNumShards + ); + } + + public void testSearchAction_RangeQueryThatMatchesNoShards() { + testSearchAction( + QueryBuilders.rangeQuery("@timestamp").from(100_000_000), // This query matches no documents + true, + 0, + // All but 2 shards are skipped. TBH I don't know why these 2 shards are not skipped + oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards - 2 + ); + testSearchAction( + QueryBuilders.rangeQuery("@timestamp").from(100_000_000), // This query matches no documents + false, + 0, + // All but 1 shard is skipped.
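// (Likely, though not verified here: the can-match phase keeps at least one
// shard unskipped per coordinating search so that an otherwise empty response
// still carries valid hits metadata; with ccsMinimizeRoundtrips=true each
// cluster coordinates its own search, which would account for the 2 unskipped
// shards above versus the 1 below.)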
TBH I don't know why this 1 shard is not skipped + oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards - 1 + ); + } + + private void testSearchAction(QueryBuilder query, boolean ccsMinimizeRoundtrips, long expectedHitCount, int expectedSkippedShards) { + SearchSourceBuilder source = new SearchSourceBuilder().query(query); + SearchRequest request = new SearchRequest("local_*", "*:remote_*"); + request.source(source).setCcsMinimizeRoundtrips(ccsMinimizeRoundtrips); + SearchResponse response = client().search(request).actionGet(); + ElasticsearchAssertions.assertHitCount(response, expectedHitCount); + int expectedTotalShards = oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards; + assertThat("Response was: " + response, response.getTotalShards(), is(equalTo(expectedTotalShards))); + assertThat("Response was: " + response, response.getSuccessfulShards(), is(equalTo(expectedTotalShards))); + assertThat("Response was: " + response, response.getFailedShards(), is(equalTo(0))); + assertThat("Response was: " + response, response.getSkippedShards(), is(equalTo(expectedSkippedShards))); + } + + public void testGetCheckpointAction_MatchAllQuery() throws InterruptedException { + testGetCheckpointAction( + client(), + null, + new String[] { "local_*" }, + QueryBuilders.matchAllQuery(), + Set.of("local_old_index", "local_new_index") + ); + testGetCheckpointAction( + client().getRemoteClusterClient(REMOTE_CLUSTER, EsExecutors.DIRECT_EXECUTOR_SERVICE), + REMOTE_CLUSTER, + new String[] { "remote_*" }, + QueryBuilders.matchAllQuery(), + Set.of("remote_old_index", "remote_new_index") + ); + } + + public void testGetCheckpointAction_RangeQuery() throws InterruptedException { + testGetCheckpointAction( + client(), + null, + new String[] { "local_*" }, + QueryBuilders.rangeQuery("@timestamp").from(timestamp), + Set.of("local_new_index") + ); + testGetCheckpointAction( + client().getRemoteClusterClient(REMOTE_CLUSTER, EsExecutors.DIRECT_EXECUTOR_SERVICE), + REMOTE_CLUSTER, + new String[] { "remote_*" }, + QueryBuilders.rangeQuery("@timestamp").from(timestamp), + Set.of("remote_new_index") + ); + } + + public void testGetCheckpointAction_RangeQueryThatMatchesNoShards() throws InterruptedException { + testGetCheckpointAction( + client(), + null, + new String[] { "local_*" }, + QueryBuilders.rangeQuery("@timestamp").from(100_000_000), + Set.of() + ); + testGetCheckpointAction( + client().getRemoteClusterClient(REMOTE_CLUSTER, EsExecutors.DIRECT_EXECUTOR_SERVICE), + REMOTE_CLUSTER, + new String[] { "remote_*" }, + QueryBuilders.rangeQuery("@timestamp").from(100_000_000), + Set.of() + ); + } + + private void testGetCheckpointAction(Client client, String cluster, String[] indices, QueryBuilder query, Set expectedIndices) + throws InterruptedException { + final GetCheckpointAction.Request request = new GetCheckpointAction.Request( + indices, + IndicesOptions.LENIENT_EXPAND_OPEN, + query, + cluster, + TIMEOUT + ); + + CountDownLatch latch = new CountDownLatch(1); + SetOnce finalResponse = new SetOnce<>(); + SetOnce finalException = new SetOnce<>(); + ClientHelper.executeAsyncWithOrigin( + client, + TRANSFORM_ORIGIN, + GetCheckpointAction.INSTANCE, + request, + ActionListener.wrap(response -> { + finalResponse.set(response); + latch.countDown(); + }, e -> { + finalException.set(e); + latch.countDown(); + }) + ); + latch.await(10, TimeUnit.SECONDS); + + assertThat(finalException.get(), is(nullValue())); + assertThat("Response was: " + finalResponse.get(), 
finalResponse.get().getCheckpoints().keySet(), is(equalTo(expectedIndices))); + } + + public void testTransformLifecycle_MatchAllQuery() throws Exception { + testTransformLifecycle(QueryBuilders.matchAllQuery(), localOldDocs + localNewDocs + remoteOldDocs + remoteNewDocs); + } + + public void testTransformLifecycle_RangeQuery() throws Exception { + testTransformLifecycle(QueryBuilders.rangeQuery("@timestamp").from(timestamp), localNewDocs + remoteNewDocs); + } + + public void testTransformLifecycle_RangeQueryThatMatchesNoShards() throws Exception { + testTransformLifecycle(QueryBuilders.rangeQuery("@timestamp").from(100_000_000), 0); + } + + private void testTransformLifecycle(QueryBuilder query, long expectedHitCount) throws Exception { + String transformId = "test-transform-lifecycle"; + { + QueryConfig queryConfig; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, query.toString())) { + queryConfig = QueryConfig.fromXContent(parser, true); + assertNotNull(queryConfig.getQuery()); + } + TransformConfig transformConfig = TransformConfig.builder() + .setId(transformId) + .setSource(new SourceConfig(new String[] { "local_*", "*:remote_*" }, queryConfig, Map.of())) + .setDest(new DestConfig(transformId + "-dest", null, null)) + .setLatestConfig(new LatestConfig(List.of("position"), "@timestamp")) + .build(); + PutTransformAction.Request request = new PutTransformAction.Request(transformConfig, false, TIMEOUT); + AcknowledgedResponse response = client().execute(PutTransformAction.INSTANCE, request).actionGet(); + assertTrue(response.isAcknowledged()); + } + { + StartTransformAction.Request request = new StartTransformAction.Request(transformId, null, TIMEOUT); + StartTransformAction.Response response = client().execute(StartTransformAction.INSTANCE, request).actionGet(); + assertTrue(response.isAcknowledged()); + } + assertBusy(() -> { + GetTransformStatsAction.Request request = new GetTransformStatsAction.Request(transformId, TIMEOUT); + GetTransformStatsAction.Response response = client().execute(GetTransformStatsAction.INSTANCE, request).actionGet(); + assertThat("Stats were: " + response.getTransformsStats(), response.getTransformsStats(), hasSize(1)); + assertThat(response.getTransformsStats().get(0).getState(), is(equalTo(TransformStats.State.STOPPED))); + assertThat(response.getTransformsStats().get(0).getIndexerStats().getNumDocuments(), is(equalTo(expectedHitCount))); + assertThat(response.getTransformsStats().get(0).getIndexerStats().getNumDeletedDocuments(), is(equalTo(0L))); + assertThat(response.getTransformsStats().get(0).getIndexerStats().getSearchFailures(), is(equalTo(0L))); + assertThat(response.getTransformsStats().get(0).getIndexerStats().getIndexFailures(), is(equalTo(0L))); + }); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return namedXContentRegistry; + } + + @Override + protected Collection remoteClusterAlias() { + return List.of(REMOTE_CLUSTER); + } + + @Override + protected Collection> nodePlugins(String clusterAlias) { + return CollectionUtils.appendToCopy( + CollectionUtils.appendToCopy(super.nodePlugins(clusterAlias), LocalStateTransform.class), + ExposingTimestampEnginePlugin.class + ); + } + + @Override + protected Settings nodeSettings() { + return Settings.builder() + .put(NodeRoleSettings.NODE_ROLES_SETTING.getKey(), "master, data, ingest, transform, remote_cluster_client") + .put(XPackSettings.SECURITY_ENABLED.getKey(), false) + .build(); + } + + private static class EngineWithExposingTimestamp extends 
InternalEngine { + EngineWithExposingTimestamp(EngineConfig engineConfig) { + super(engineConfig); + assert IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.get(config().getIndexSettings().getSettings()) : "require read-only index"; + } + + @Override + public ShardLongFieldRange getRawFieldRange(String field) { + try (Searcher searcher = acquireSearcher("test")) { + final DirectoryReader directoryReader = searcher.getDirectoryReader(); + + final byte[] minPackedValue = PointValues.getMinPackedValue(directoryReader, field); + final byte[] maxPackedValue = PointValues.getMaxPackedValue(directoryReader, field); + if (minPackedValue == null || maxPackedValue == null) { + assert minPackedValue == null && maxPackedValue == null + : Arrays.toString(minPackedValue) + "-" + Arrays.toString(maxPackedValue); + return ShardLongFieldRange.EMPTY; + } + + return ShardLongFieldRange.of(LongPoint.decodeDimension(minPackedValue, 0), LongPoint.decodeDimension(maxPackedValue, 0)); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + } + + public static class ExposingTimestampEnginePlugin extends Plugin implements EnginePlugin { + + @Override + public Optional getEngineFactory(IndexSettings indexSettings) { + if (IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.get(indexSettings.getSettings())) { + return Optional.of(EngineWithExposingTimestamp::new); + } else { + return Optional.of(new InternalEngineFactory()); + } + } + } +} diff --git a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointIT.java b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointIT.java index bb159856b965d..b952869a34d88 100644 --- a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointIT.java +++ b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointIT.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.transform.action.GetCheckpointAction; import org.elasticsearch.xpack.transform.TransformSingleNodeTestCase; @@ -25,6 +26,7 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.startsWith; @@ -46,6 +48,8 @@ public void testGetCheckpoint() throws Exception { final GetCheckpointAction.Request request = new GetCheckpointAction.Request( new String[] { indexNamePrefix + "*" }, IndicesOptions.LENIENT_EXPAND_OPEN, + null, + null, TimeValue.timeValueSeconds(5) ); @@ -61,7 +65,7 @@ public void testGetCheckpoint() throws Exception { for (int d = 0; d < docsToCreatePerShard; ++d) { for (int i = 0; i < indices; ++i) { for (int j = 0; j < shards; ++j) { - client().prepareIndex(indexNamePrefix + i).setSource("{" + "\"field\":" + j + "}", XContentType.JSON).get(); + prepareIndex(indexNamePrefix + i).setSource("{" + "\"field\":" + j + "}", XContentType.JSON).get(); } } } @@ -99,6 +103,40 @@ public void testGetCheckpoint() throws Exception { ); } + public void testGetCheckpointWithQueryThatFiltersOutEverything() throws Exception { + final String 
indexNamePrefix = "test_index-"; + final int indices = randomIntBetween(1, 5); + final int shards = randomIntBetween(1, 5); + final int docsToCreatePerShard = randomIntBetween(0, 10); + + for (int i = 0; i < indices; ++i) { + indicesAdmin().prepareCreate(indexNamePrefix + i) + .setSettings(indexSettings(shards, 1)) + .setMapping("field", "type=long", "@timestamp", "type=date") + .get(); + for (int j = 0; j < shards; ++j) { + for (int d = 0; d < docsToCreatePerShard; ++d) { + client().prepareIndex(indexNamePrefix + i) + .setSource(Strings.format("{ \"field\":%d, \"@timestamp\": %d }", j, 10_000_000 + d + i + j), XContentType.JSON) + .get(); + } + } + } + indicesAdmin().refresh(new RefreshRequest(indexNamePrefix + "*")); + + final GetCheckpointAction.Request request = new GetCheckpointAction.Request( + new String[] { indexNamePrefix + "*" }, + IndicesOptions.LENIENT_EXPAND_OPEN, + // This query does not match any documents + QueryBuilders.rangeQuery("@timestamp").gte(20_000_000), + null, + TimeValue.timeValueSeconds(5) + ); + + final GetCheckpointAction.Response response = client().execute(GetCheckpointAction.INSTANCE, request).get(); + assertThat("Response was: " + response.getCheckpoints(), response.getCheckpoints(), is(anEmptyMap())); + } + public void testGetCheckpointTimeoutExceeded() throws Exception { final String indexNamePrefix = "test_index-"; final int indices = 100; @@ -111,6 +149,8 @@ public void testGetCheckpointTimeoutExceeded() throws Exception { final GetCheckpointAction.Request request = new GetCheckpointAction.Request( new String[] { indexNamePrefix + "*" }, IndicesOptions.LENIENT_EXPAND_OPEN, + null, + null, TimeValue.ZERO ); diff --git a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointTests.java b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointTests.java index 1411576e61d58..300a075c9f1b2 100644 --- a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointTests.java +++ b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -71,6 +72,7 @@ public class TransformGetCheckpointTests extends ESSingleNodeTestCase { private IndicesService indicesService; private ThreadPool threadPool; private IndexNameExpressionResolver indexNameExpressionResolver; + private Client client; private MockTransport mockTransport; private Task transformTask; private final String indexNamePattern = "test_index-"; @@ -133,6 +135,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req .putCompatibilityVersions("node01", TransportVersions.V_8_5_0, Map.of()) .build(); + client = mock(Client.class); + transformTask = new Task( 1L, "persistent", @@ -157,6 +161,8 @@ public void testEmptyCheckpoint() throws InterruptedException { GetCheckpointAction.Request request = new GetCheckpointAction.Request( Strings.EMPTY_ARRAY, IndicesOptions.LENIENT_EXPAND_OPEN, + null, + 
null, TimeValue.timeValueSeconds(5) ); assertCheckpointAction(request, response -> { @@ -170,6 +176,8 @@ public void testSingleIndexRequest() throws InterruptedException { GetCheckpointAction.Request request = new GetCheckpointAction.Request( new String[] { indexNamePattern + "0" }, IndicesOptions.LENIENT_EXPAND_OPEN, + null, + null, TimeValue.timeValueSeconds(5) ); @@ -189,6 +197,8 @@ public void testMultiIndexRequest() throws InterruptedException { GetCheckpointAction.Request request = new GetCheckpointAction.Request( testIndices, IndicesOptions.LENIENT_EXPAND_OPEN, + null, + null, TimeValue.timeValueSeconds(5) ); assertCheckpointAction(request, response -> { @@ -208,7 +218,7 @@ public void testMultiIndexRequest() throws InterruptedException { class TestTransportGetCheckpointAction extends TransportGetCheckpointAction { TestTransportGetCheckpointAction() { - super(transportService, new ActionFilters(emptySet()), indicesService, clusterService, indexNameExpressionResolver); + super(transportService, new ActionFilters(emptySet()), indicesService, clusterService, indexNameExpressionResolver, client); } @Override diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointAction.java index 5acc2d4541559..bf18f7257d906 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointAction.java @@ -14,9 +14,15 @@ import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.UnavailableShardsException; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchShardsGroup; +import org.elasticsearch.action.search.SearchShardsRequest; +import org.elasticsearch.action.search.SearchShardsResponse; +import org.elasticsearch.action.search.TransportSearchShardsAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -31,10 +37,12 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.transport.ActionNotFoundTransportException; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.transform.action.GetCheckpointAction; import org.elasticsearch.xpack.core.transform.action.GetCheckpointAction.Request; import org.elasticsearch.xpack.core.transform.action.GetCheckpointAction.Response; @@ -57,6 +65,7 @@ public class TransportGetCheckpointAction extends HandledTransportAction listener) { - final ClusterState state = clusterService.state(); - resolveIndicesAndGetCheckpoint(task, request, listener, state); + final ClusterState clusterState = 
clusterService.state(); + resolveIndicesAndGetCheckpoint(task, request, listener, clusterState); } - protected void resolveIndicesAndGetCheckpoint(Task task, Request request, ActionListener listener, final ClusterState state) { + protected void resolveIndicesAndGetCheckpoint( + Task task, + Request request, + ActionListener listener, + final ClusterState clusterState + ) { + final String nodeId = clusterState.nodes().getLocalNode().getId(); + final TaskId parentTaskId = new TaskId(nodeId, task.getId()); + // note: when security is turned on, the indices are already resolved // TODO: do a quick check and only resolve if necessary?? - String[] concreteIndices = this.indexNameExpressionResolver.concreteIndexNames(state, request); - - Map> nodesAndShards = resolveIndicesToPrimaryShards(state, concreteIndices); - - if (nodesAndShards.size() == 0) { + String[] concreteIndices = this.indexNameExpressionResolver.concreteIndexNames(clusterState, request); + Map> nodesAndShards = resolveIndicesToPrimaryShards(clusterState, concreteIndices); + if (nodesAndShards.isEmpty()) { listener.onResponse(new Response(Collections.emptyMap())); return; } - new AsyncGetCheckpointsFromNodesAction(state, task, nodesAndShards, new OriginalIndices(request), request.getTimeout(), listener) - .start(); + if (request.getQuery() == null) { // If there is no query, then there is no point in filtering + getCheckpointsFromNodes(clusterState, task, nodesAndShards, new OriginalIndices(request), request.getTimeout(), listener); + return; + } + + SearchShardsRequest searchShardsRequest = new SearchShardsRequest( + request.indices(), + SearchRequest.DEFAULT_INDICES_OPTIONS, + request.getQuery(), + null, + null, + false, + request.getCluster() + ); + searchShardsRequest.setParentTask(parentTaskId); + ClientHelper.executeAsyncWithOrigin( + client, + ClientHelper.TRANSFORM_ORIGIN, + TransportSearchShardsAction.TYPE, + searchShardsRequest, + ActionListener.wrap(searchShardsResponse -> { + Map> filteredNodesAndShards = filterOutSkippedShards(nodesAndShards, searchShardsResponse); + getCheckpointsFromNodes( + clusterState, + task, + filteredNodesAndShards, + new OriginalIndices(request), + request.getTimeout(), + listener + ); + }, e -> { + // search_shards API failed so we just log the error here and continue as if there were no query + logger.atWarn().withThrowable(e).log("search_shards API failed for cluster [{}]", request.getCluster()); + logger.atTrace() + .withThrowable(e) + .log("search_shards API failed for cluster [{}], request was [{}]", request.getCluster(), searchShardsRequest); + getCheckpointsFromNodes(clusterState, task, nodesAndShards, new OriginalIndices(request), request.getTimeout(), listener); + }) + ); } - private static Map> resolveIndicesToPrimaryShards(ClusterState state, String[] concreteIndices) { + private static Map> resolveIndicesToPrimaryShards(ClusterState clusterState, String[] concreteIndices) { if (concreteIndices.length == 0) { return Collections.emptyMap(); } - final DiscoveryNodes nodes = state.nodes(); + final DiscoveryNodes nodes = clusterState.nodes(); Map> nodesAndShards = new HashMap<>(); - ShardsIterator shardsIt = state.routingTable().allShards(concreteIndices); + ShardsIterator shardsIt = clusterState.routingTable().allShards(concreteIndices); for (ShardRouting shard : shardsIt) { // only take primary shards, which should be exactly 1, this isn't strictly necessary // and we should consider taking any shard copy, but then we need another way to de-dup @@ -112,7 +166,7 @@ private static
Map> resolveIndicesToPrimaryShards(ClusterSt } if (shard.assignedToNode() && nodes.get(shard.currentNodeId()) != null) { // special case: The minimum TransportVersion in the cluster is on an old version - if (state.getMinTransportVersion().before(TransportVersions.V_8_2_0)) { + if (clusterState.getMinTransportVersion().before(TransportVersions.V_8_2_0)) { throw new ActionNotFoundTransportException(GetCheckpointNodeAction.NAME); } @@ -125,111 +179,128 @@ private static Map> resolveIndicesToPrimaryShards(ClusterSt return nodesAndShards; } - protected class AsyncGetCheckpointsFromNodesAction { - private final Task task; - private final ActionListener listener; - private final Map> nodesAndShards; - private final OriginalIndices originalIndices; - private final TimeValue timeout; - private final DiscoveryNodes nodes; - private final String localNodeId; - - protected AsyncGetCheckpointsFromNodesAction( - ClusterState clusterState, - Task task, - Map> nodesAndShards, - OriginalIndices originalIndices, - TimeValue timeout, - ActionListener listener - ) { - this.task = task; - this.listener = listener; - this.nodesAndShards = nodesAndShards; - this.originalIndices = originalIndices; - this.timeout = timeout; - this.nodes = clusterState.nodes(); - this.localNodeId = clusterService.localNode().getId(); + static Map> filterOutSkippedShards( + Map> nodesAndShards, + SearchShardsResponse searchShardsResponse + ) { + Map> filteredNodesAndShards = new HashMap<>(nodesAndShards.size()); + // Create a deep copy of the given nodes and shards map. + for (Map.Entry> nodeAndShardsEntry : nodesAndShards.entrySet()) { + String node = nodeAndShardsEntry.getKey(); + Set shards = nodeAndShardsEntry.getValue(); + filteredNodesAndShards.put(node, new HashSet<>(shards)); } - - public void start() { - GroupedActionListener groupedListener = new GroupedActionListener<>( - nodesAndShards.size(), - ActionListener.wrap(responses -> listener.onResponse(mergeNodeResponses(responses)), listener::onFailure) - ); - - for (Entry> oneNodeAndItsShards : nodesAndShards.entrySet()) { - if (task instanceof CancellableTask) { - // There is no point continuing this work if the task has been cancelled. - if (((CancellableTask) task).notifyIfCancelled(listener)) { - return; + // Remove (node, shard) pairs for all the skipped shards. + for (SearchShardsGroup shardGroup : searchShardsResponse.getGroups()) { + if (shardGroup.skipped()) { + for (String allocatedNode : shardGroup.allocatedNodes()) { + Set shards = filteredNodesAndShards.get(allocatedNode); + if (shards != null) { + shards.remove(shardGroup.shardId()); + if (shards.isEmpty()) { + // Remove node if no shards were left. 
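The deep copy matters here because the caller above falls back to the unfiltered nodesAndShards map when the search_shards call fails. A compact, self-contained sketch of the same copy-then-prune pattern using plain JDK types (Integer stands in for ShardId and the skipped groups are flattened into a shard-to-nodes map; all names are illustrative):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

final class SkippedShardPruneSketch {
    // Copy the node-to-shards map, drop every (node, shard) pair reported as
    // skipped, and remove any node that is left without shards.
    static Map<String, Set<Integer>> prune(Map<String, Set<Integer>> nodesAndShards, Map<Integer, Set<String>> skippedShardToNodes) {
        Map<String, Set<Integer>> filtered = new HashMap<>();
        nodesAndShards.forEach((node, shards) -> filtered.put(node, new HashSet<>(shards)));
        skippedShardToNodes.forEach((shard, nodes) -> {
            for (String node : nodes) {
                Set<Integer> shards = filtered.get(node);
                if (shards != null) {
                    shards.remove(shard);
                    if (shards.isEmpty()) {
                        filtered.remove(node); // node no longer owns a relevant shard
                    }
                }
            }
        });
        return filtered;
    }
}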
+ filteredNodesAndShards.remove(allocatedNode); + } } } - if (localNodeId.equals(oneNodeAndItsShards.getKey())) { - TransportGetCheckpointNodeAction.getGlobalCheckpoints( - indicesService, - task, - oneNodeAndItsShards.getValue(), - timeout, - Clock.systemUTC(), - groupedListener - ); - continue; - } + } + } + return filteredNodesAndShards; + } - GetCheckpointNodeAction.Request nodeCheckpointsRequest = new GetCheckpointNodeAction.Request( - oneNodeAndItsShards.getValue(), - originalIndices, - timeout - ); - DiscoveryNode node = nodes.get(oneNodeAndItsShards.getKey()); - - // paranoia: this should not be possible using the same cluster state - if (node == null) { - listener.onFailure( - new UnavailableShardsException( - oneNodeAndItsShards.getValue().iterator().next(), - "Node not found for [{}] shards", - oneNodeAndItsShards.getValue().size() - ) - ); + private void getCheckpointsFromNodes( + ClusterState clusterState, + Task task, + Map> nodesAndShards, + OriginalIndices originalIndices, + TimeValue timeout, + ActionListener listener + ) { + if (nodesAndShards.isEmpty()) { + listener.onResponse(new Response(Map.of())); + return; + } + + final String localNodeId = clusterService.localNode().getId(); + + GroupedActionListener groupedListener = new GroupedActionListener<>( + nodesAndShards.size(), + ActionListener.wrap(responses -> listener.onResponse(mergeNodeResponses(responses)), listener::onFailure) + ); + + for (Entry> oneNodeAndItsShards : nodesAndShards.entrySet()) { + if (task instanceof CancellableTask) { + // There is no point continuing this work if the task has been cancelled. + if (((CancellableTask) task).notifyIfCancelled(listener)) { return; } - - logger.trace("get checkpoints from node {}", node); - transportService.sendChildRequest( - node, - GetCheckpointNodeAction.NAME, - nodeCheckpointsRequest, + } + if (localNodeId.equals(oneNodeAndItsShards.getKey())) { + TransportGetCheckpointNodeAction.getGlobalCheckpoints( + indicesService, task, - TransportRequestOptions.EMPTY, - new ActionListenerResponseHandler<>( - groupedListener, - GetCheckpointNodeAction.Response::new, - TransportResponseHandler.TRANSPORT_WORKER + oneNodeAndItsShards.getValue(), + timeout, + Clock.systemUTC(), + groupedListener + ); + continue; + } + + DiscoveryNodes nodes = clusterState.nodes(); + DiscoveryNode node = nodes.get(oneNodeAndItsShards.getKey()); + + // paranoia: this should not be possible using the same cluster state + if (node == null) { + listener.onFailure( + new UnavailableShardsException( + oneNodeAndItsShards.getValue().iterator().next(), + "Node not found for [{}] shards", + oneNodeAndItsShards.getValue().size() ) ); + return; } + + logger.trace("get checkpoints from node {}", node); + GetCheckpointNodeAction.Request nodeCheckpointsRequest = new GetCheckpointNodeAction.Request( + oneNodeAndItsShards.getValue(), + originalIndices, + timeout + ); + transportService.sendChildRequest( + node, + GetCheckpointNodeAction.NAME, + nodeCheckpointsRequest, + task, + TransportRequestOptions.EMPTY, + new ActionListenerResponseHandler<>( + groupedListener, + GetCheckpointNodeAction.Response::new, + TransportResponseHandler.TRANSPORT_WORKER + ) + ); } + } - private static Response mergeNodeResponses(Collection responses) { - // the final list should be ordered by key - Map checkpointsByIndexReduced = new TreeMap<>(); - - // merge the node responses - for (GetCheckpointNodeAction.Response response : responses) { - response.getCheckpoints().forEach((index, checkpoint) -> { - if 
(checkpointsByIndexReduced.containsKey(index)) { - long[] shardCheckpoints = checkpointsByIndexReduced.get(index); - for (int i = 0; i < checkpoint.length; ++i) { - shardCheckpoints[i] = Math.max(shardCheckpoints[i], checkpoint[i]); - } - } else { - checkpointsByIndexReduced.put(index, checkpoint); - } - }); - } + private static Response mergeNodeResponses(Collection responses) { + // the final list should be ordered by key + Map checkpointsByIndexReduced = new TreeMap<>(); - return new Response(checkpointsByIndexReduced); + // merge the node responses + for (GetCheckpointNodeAction.Response response : responses) { + response.getCheckpoints().forEach((index, checkpoint) -> { + if (checkpointsByIndexReduced.containsKey(index)) { + long[] shardCheckpoints = checkpointsByIndexReduced.get(index); + for (int i = 0; i < checkpoint.length; ++i) { + shardCheckpoints[i] = Math.max(shardCheckpoints[i], checkpoint[i]); + } + } else { + checkpointsByIndexReduced.put(index, checkpoint); + } + }); } + + return new Response(checkpointsByIndexReduced); } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformStatsAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformStatsAction.java index 13abc427460be..f7e60b13b50a6 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformStatsAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformStatsAction.java @@ -121,14 +121,15 @@ protected void taskOperation( TransformTask transformTask, ActionListener listener ) { - // Little extra insurance, make sure we only return transforms that aren't cancelled ClusterState clusterState = clusterService.state(); String nodeId = clusterState.nodes().getLocalNode().getId(); final TaskId parentTaskId = new TaskId(nodeId, actionTask.getId()); + // If the _stats request is cancelled there is no point in continuing this work on the task level if (actionTask.notifyIfCancelled(listener)) { return; } + // Little extra insurance, make sure we only return transforms that aren't cancelled if (transformTask.isCancelled()) { listener.onResponse(new Response(Collections.emptyList())); return; diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProvider.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProvider.java index aa1332b95fe84..b9b7d9d8477cb 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProvider.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProvider.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.transport.ActionNotFoundTransportException; import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.xpack.core.ClientHelper; @@ -134,14 +135,16 @@ protected void getIndexCheckpoints(TimeValue timeout, ActionListener> remoteIndex : resolvedIndexes.getRemoteIndicesPerClusterAlias().entrySet()) { + String cluster = remoteIndex.getKey(); ParentTaskAssigningClient remoteClient = new ParentTaskAssigningClient( - client.getRemoteClusterClient(remoteIndex.getKey(), 
EsExecutors.DIRECT_EXECUTOR_SERVICE), + client.getRemoteClusterClient(cluster, EsExecutors.DIRECT_EXECUTOR_SERVICE), client.getParentTask() ); getCheckpointsFromOneCluster( @@ -149,7 +152,8 @@ protected void getIndexCheckpoints(TimeValue timeout, ActionListener headers, String[] indices, + QueryBuilder query, String cluster, ActionListener> listener ) { if (fallbackToBWC.contains(cluster)) { getCheckpointsFromOneClusterBWC(client, timeout, headers, indices, cluster, listener); } else { - getCheckpointsFromOneClusterV2(client, timeout, headers, indices, cluster, ActionListener.wrap(response -> { + getCheckpointsFromOneClusterV2(client, timeout, headers, indices, query, cluster, ActionListener.wrap(response -> { logger.debug( "[{}] Successfully retrieved checkpoints from cluster [{}] using transform checkpoint API", transformConfig.getId(), @@ -200,12 +205,15 @@ private static void getCheckpointsFromOneClusterV2( TimeValue timeout, Map headers, String[] indices, + QueryBuilder query, String cluster, ActionListener> listener ) { GetCheckpointAction.Request getCheckpointRequest = new GetCheckpointAction.Request( indices, IndicesOptions.LENIENT_EXPAND_OPEN, + query, + cluster, timeout ); ActionListener checkpointListener; @@ -239,7 +247,6 @@ private static void getCheckpointsFromOneClusterV2( getCheckpointRequest, checkpointListener ); - } /** diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProvider.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProvider.java index 7b83af1dc1405..ec4cc2dcbcbf4 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProvider.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProvider.java @@ -10,8 +10,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.index.query.BoolQueryBuilder; @@ -81,7 +81,7 @@ public void sourceHasChanged(TransformCheckpoint lastCheckpoint, ActionListener< transformConfig.getHeaders(), ClientHelper.TRANSFORM_ORIGIN, client, - SearchAction.INSTANCE, + TransportSearchAction.TYPE, searchRequest, ActionListener.wrap(r -> listener.onResponse(r.getHits().getTotalHits().value > 0L), listener::onFailure) ); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java index 107c3c2b5bd65..5d9a3971ad082 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java @@ -22,9 +22,9 @@ import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchAction; import 
org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.internal.Client; @@ -389,7 +389,7 @@ public void getTransformCheckpoint(String transformId, long checkpoint, ActionLi executeAsyncWithOrigin( client, TRANSFORM_ORIGIN, - SearchAction.INSTANCE, + TransportSearchAction.TYPE, searchRequest, ActionListener.wrap(searchResponse -> { if (searchResponse.getHits().getHits().length == 0) { @@ -426,7 +426,7 @@ public void getTransformCheckpointForUpdate( executeAsyncWithOrigin( client, TRANSFORM_ORIGIN, - SearchAction.INSTANCE, + TransportSearchAction.TYPE, searchRequest, ActionListener.wrap(searchResponse -> { if (searchResponse.getHits().getHits().length == 0) { @@ -470,7 +470,7 @@ public void getTransformConfiguration(String transformId, ActionListenerwrap(searchResponse -> { if (searchResponse.getHits().getHits().length == 0) { @@ -503,7 +503,7 @@ public void getTransformConfigurationForUpdate( .seqNoAndPrimaryTerm(true) .request(); - executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, SearchAction.INSTANCE, searchRequest, ActionListener.wrap(searchResponse -> { + executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, TransportSearchAction.TYPE, searchRequest, ActionListener.wrap(searchResponse -> { if (searchResponse.getHits().getHits().length == 0) { configAndVersionListener.onFailure( new ResourceNotFoundException(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, transformId)) @@ -637,7 +637,7 @@ public void resetTransform(String transformId, ActionListener listener) .query(QueryBuilders.termQuery(TransformField.ID.getPreferredName(), transformId)) .trackTotalHitsUpTo(1) ); - executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, SearchAction.INSTANCE, searchRequest, ActionListener.wrap(searchResponse -> { + executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, TransportSearchAction.TYPE, searchRequest, ActionListener.wrap(searchResponse -> { if (searchResponse.getHits().getTotalHits().value == 0) { listener.onFailure( new ResourceNotFoundException(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, transformId)) @@ -762,7 +762,7 @@ public void getTransformStoredDoc( executeAsyncWithOrigin( client, TRANSFORM_ORIGIN, - SearchAction.INSTANCE, + TransportSearchAction.TYPE, searchRequest, ActionListener.wrap(searchResponse -> { if (searchResponse.getHits().getHits().length == 0) { diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java index 48a6114817a8e..29be02b87cbdf 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java @@ -18,13 +18,13 @@ import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.search.ClosePointInTimeAction; import org.elasticsearch.action.search.ClosePointInTimeRequest; -import org.elasticsearch.action.search.OpenPointInTimeAction; import org.elasticsearch.action.search.OpenPointInTimeRequest; -import 
org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportClosePointInTimeAction; +import org.elasticsearch.action.search.TransportOpenPointInTimeAction; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.common.logging.LoggerMessageFormat; @@ -277,7 +277,7 @@ void doGetInitialProgress(SearchRequest request, ActionListener transformConfig.getHeaders(), ClientHelper.TRANSFORM_ORIGIN, client, - SearchAction.INSTANCE, + TransportSearchAction.TYPE, request, responseListener ); @@ -357,7 +357,7 @@ protected void persistState(TransformState state, ActionListener listener) + statsExc.getMessage() ); - if (failureHandler.handleStatePersistenceFailure(statsExc, getConfig().getSettings()) == false) { + if (failureHandler.handleStatePersistenceFailure(statsExc, getConfig().getSettings())) { // get the current seqNo and primary term, however ignore the stored state transformsConfigManager.getTransformStoredDoc( transformConfig.getId(), @@ -433,7 +433,7 @@ private void closePointInTime(String name) { transformConfig.getHeaders(), ClientHelper.TRANSFORM_ORIGIN, client, - ClosePointInTimeAction.INSTANCE, + TransportClosePointInTimeAction.TYPE, closePitRequest, ActionListener.wrap(response -> { logger.trace("[{}] closed pit search context [{}]", getJobId(), oldPit); @@ -464,12 +464,14 @@ private void injectPointInTimeIfNeeded( // no pit, create a new one OpenPointInTimeRequest pitRequest = new OpenPointInTimeRequest(searchRequest.indices()).keepAlive(PIT_KEEP_ALIVE); + // use index filter for better performance + pitRequest.indexFilter(transformConfig.getSource().getQueryConfig().getQuery()); ClientHelper.executeWithHeadersAsync( transformConfig.getHeaders(), ClientHelper.TRANSFORM_ORIGIN, client, - OpenPointInTimeAction.INSTANCE, + TransportOpenPointInTimeAction.TYPE, pitRequest, ActionListener.wrap(response -> { PointInTimeBuilder newPit = new PointInTimeBuilder(response.getPointInTimeId()).setKeepAlive(PIT_KEEP_ALIVE); @@ -532,7 +534,7 @@ void doSearch(Tuple namedSearchRequest, ActionListener { // did the pit change? 
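// ---------------------------------------------------------------------------
// A hedged sketch of the point-in-time flow the patch gives
// ClientTransformIndexer above: open a PIT whose index filter is the
// transform's source query, so shards that can never match are skipped up
// front. The surrounding names (client, searchRequest, transformConfig,
// PIT_KEEP_ALIVE, listener) are assumptions standing in for the indexer's
// fields; the request/action calls all appear in the patch itself, except
// SearchSourceBuilder#pointInTimeBuilder, which is the standard way to attach
// a PIT to a search request.
OpenPointInTimeRequest pitRequest = new OpenPointInTimeRequest(searchRequest.indices()).keepAlive(PIT_KEEP_ALIVE);
pitRequest.indexFilter(transformConfig.getSource().getQueryConfig().getQuery()); // filter shards up front
client.execute(TransportOpenPointInTimeAction.TYPE, pitRequest, ActionListener.wrap(response -> {
    PointInTimeBuilder newPit = new PointInTimeBuilder(response.getPointInTimeId()).setKeepAlive(PIT_KEEP_ALIVE);
    searchRequest.source().pointInTimeBuilder(newPit); // subsequent pages reuse this frozen view
    listener.onResponse(searchRequest);
}, e -> listener.onResponse(searchRequest) /* PIT is an optimization: fall back to a plain search */));
// ---------------------------------------------------------------------------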
@@ -558,7 +560,7 @@ void doSearch(Tuple namedSearchRequest, ActionListener namedSearchRequest, ActionListener listener); + void fail(Throwable exception, String failureMessage, ActionListener listener); } private final AtomicReference taskState; @@ -218,8 +218,8 @@ void shutdown() { taskListener.shutdown(); } - void markAsFailed(String failureMessage) { - taskListener.fail(failureMessage, ActionListener.wrap(r -> { + void markAsFailed(Throwable exception, String failureMessage) { + taskListener.fail(exception, failureMessage, ActionListener.wrap(r -> { // Successfully marked as failed, reset counter so that task can be restarted failureCount.set(0); }, e -> {})); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java index 4ac6aff416164..c7e0eda5ca5e6 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.script.ScriptException; @@ -51,28 +52,32 @@ class TransformFailureHandler { /** * Handle a search or indexing failure * - * @param e the exception caught + * @param exception the exception caught * @param settingsConfig The settings */ - void handleIndexerFailure(Exception e, SettingsConfig settingsConfig) { + void handleIndexerFailure(Exception exception, SettingsConfig settingsConfig) { // more detailed reporting in the handlers and below - logger.debug(() -> "[" + transformId + "] transform encountered an exception: ", e); - Throwable unwrappedException = ExceptionsHelper.findSearchExceptionRootCause(e); + logger.atDebug().withThrowable(exception).log("[{}] transform encountered an exception", transformId); + Throwable unwrappedException = ExceptionsHelper.findSearchExceptionRootCause(exception); boolean unattended = Boolean.TRUE.equals(settingsConfig.getUnattended()); - if (unwrappedException instanceof CircuitBreakingException circuitBreakingException) { - handleCircuitBreakingException(circuitBreakingException, unattended); - } else if (unwrappedException instanceof ScriptException scriptException) { - handleScriptException(scriptException, unattended); - } else if (unwrappedException instanceof BulkIndexingException bulkIndexingException) { - handleBulkIndexingException(bulkIndexingException, unattended, getNumFailureRetries(settingsConfig)); - } else if (unwrappedException instanceof ClusterBlockException clusterBlockException) { + if (unwrappedException instanceof CircuitBreakingException e) { + handleCircuitBreakingException(e, unattended); + } else if (unwrappedException instanceof ScriptException e) { + handleScriptException(e, unattended); + } else if (unwrappedException instanceof BulkIndexingException e) { + handleBulkIndexingException(e, unattended, getNumFailureRetries(settingsConfig)); + } else if (unwrappedException instanceof ClusterBlockException e) { // gh#89802 always retry for a cluster block exception, because a cluster block 
should be temporary. - retry(clusterBlockException, clusterBlockException.getDetailedMessage(), unattended, getNumFailureRetries(settingsConfig)); - } else if (unwrappedException instanceof ElasticsearchException elasticsearchException) { - handleElasticsearchException(elasticsearchException, unattended, getNumFailureRetries(settingsConfig)); - } else if (unwrappedException instanceof IllegalArgumentException illegalArgumentException) { - handleIllegalArgumentException(illegalArgumentException, unattended); + retry(e, e.getDetailedMessage(), unattended, getNumFailureRetries(settingsConfig)); + } else if (unwrappedException instanceof SearchPhaseExecutionException e) { + // The reason of a SearchPhaseExecutionException unfortunately contains a full stack trace. + // Instead of displaying that to the user, get the cause's message instead. + retry(e, e.getCause() != null ? e.getCause().getMessage() : null, unattended, getNumFailureRetries(settingsConfig)); + } else if (unwrappedException instanceof ElasticsearchException e) { + handleElasticsearchException(e, unattended, getNumFailureRetries(settingsConfig)); + } else if (unwrappedException instanceof IllegalArgumentException e) { + handleIllegalArgumentException(e, unattended); } else { retry( unwrappedException, @@ -88,21 +93,23 @@ void handleIndexerFailure(Exception e, SettingsConfig settingsConfig) { * * @param e the exception caught * @param settingsConfig The settings + * @return true if there is at least one more retry to be made, false otherwise */ boolean handleStatePersistenceFailure(Exception e, SettingsConfig settingsConfig) { // we use the same setting for retries, however a separate counter, because the failure // counter for search/index gets reset after a successful bulk index request int numFailureRetries = getNumFailureRetries(settingsConfig); - final int failureCount = context.incrementAndGetStatePersistenceFailureCount(e); + int failureCount = context.incrementAndGetStatePersistenceFailureCount(e); if (numFailureRetries != -1 && failureCount > numFailureRetries) { fail( + e, "task encountered more than " + numFailureRetries + " failures updating internal state; latest failure: " + e.getMessage() ); - return true; + return false; } - return false; + return true; } /** @@ -130,7 +137,7 @@ private void handleCircuitBreakingException(CircuitBreakingException circuitBrea if (unattended) { retry(circuitBreakingException, message, true, -1); } else { - fail(message); + fail(circuitBreakingException, message); } } else { String message = TransformMessages.getMessage(TransformMessages.LOG_TRANSFORM_PIVOT_REDUCE_PAGE_SIZE, pageSize, newPageSize); @@ -155,7 +162,7 @@ private void handleScriptException(ScriptException scriptException, boolean unat if (unattended) { retry(scriptException, message, true, -1); } else { - fail(message); + fail(scriptException, message); } } @@ -172,7 +179,7 @@ private void handleBulkIndexingException(BulkIndexingException bulkIndexingExcep TransformMessages.LOG_TRANSFORM_PIVOT_IRRECOVERABLE_BULK_INDEXING_ERROR, bulkIndexingException.getDetailedMessage() ); - fail(message); + fail(bulkIndexingException, message); } else { retry(bulkIndexingException, bulkIndexingException.getDetailedMessage(), unattended, numFailureRetries); } @@ -190,7 +197,7 @@ private void handleBulkIndexingException(BulkIndexingException bulkIndexingExcep private void handleElasticsearchException(ElasticsearchException elasticsearchException, boolean unattended, int numFailureRetries) { if (unattended == false && 
ExceptionRootCauseFinder.isExceptionIrrecoverable(elasticsearchException)) { String message = "task encountered irrecoverable failure: " + elasticsearchException.getDetailedMessage(); - fail(message); + fail(elasticsearchException, message); } else { retry(elasticsearchException, elasticsearchException.getDetailedMessage(), unattended, numFailureRetries); } @@ -209,7 +216,7 @@ private void handleIllegalArgumentException(IllegalArgumentException illegalArgu retry(illegalArgumentException, illegalArgumentException.getMessage(), true, -1); } else { String message = "task encountered irrecoverable failure: " + illegalArgumentException.getMessage(); - fail(message); + fail(illegalArgumentException, message); } } @@ -226,14 +233,13 @@ private void handleIllegalArgumentException(IllegalArgumentException illegalArgu */ private void retry(Throwable unwrappedException, String message, boolean unattended, int numFailureRetries) { // group failures to decide whether to report it below - final boolean repeatedFailure = context.getLastFailure() == null - ? false - : unwrappedException.getClass().equals(context.getLastFailure().getClass()); + final boolean repeatedFailure = context.getLastFailure() != null + && unwrappedException.getClass().equals(context.getLastFailure().getClass()); final int failureCount = context.incrementAndGetFailureCount(unwrappedException); if (unattended == false && numFailureRetries != -1 && failureCount > numFailureRetries) { - fail("task encountered more than " + numFailureRetries + " failures; latest failure: " + message); + fail(unwrappedException, "task encountered more than " + numFailureRetries + " failures; latest failure: " + message); return; } @@ -248,7 +254,9 @@ private void retry(Throwable unwrappedException, String message, boolean unatten numFailureRetries ); - logger.log(unattended ? Level.INFO : Level.WARN, () -> "[" + transformId + "] " + retryMessage, unwrappedException); + logger.atLevel(unattended ? Level.INFO : Level.WARN) + .withThrowable(unwrappedException) + .log("[{}] {}", transformId, retryMessage); auditor.audit(unattended ? 
            INFO : WARNING, transformId, retryMessage);
        }
    }
@@ -261,9 +269,9 @@ private void retry(Throwable unwrappedException, String message, boolean unatten
      *
      * @param failureMessage the reason of the failure
      */
-    private void fail(String failureMessage) {
+    private void fail(Throwable exception, String failureMessage) {
         // note: logging and audit is done as part of context.markAsFailed
-        context.markAsFailed(failureMessage);
+        context.markAsFailed(exception, failureMessage);
     }
 
     /**
diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java
index f43e1cb91eff2..4128ae42f53e3 100644
--- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java
+++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java
@@ -18,6 +18,7 @@
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.common.logging.LoggerMessageFormat;
 import org.elasticsearch.common.util.CollectionUtils;
+import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.index.query.BoolQueryBuilder;
 import org.elasticsearch.index.query.QueryBuilder;
@@ -45,6 +46,7 @@
 import org.elasticsearch.xpack.transform.persistence.TransformConfigManager;
 import org.elasticsearch.xpack.transform.transforms.Function.ChangeCollector;
 import org.elasticsearch.xpack.transform.transforms.RetentionPolicyToDeleteByQueryRequestConverter.RetentionPolicyException;
+import org.elasticsearch.xpack.transform.transforms.scheduling.TransformSchedulingUtils;
 
 import java.time.Instant;
 import java.util.Collection;
@@ -83,8 +85,9 @@ private enum RunState {
     private static final long RETENTION_OF_CHECKPOINTS_MS = 864000000L; // 10 days
     private static final long CHECKPOINT_CLEANUP_INTERVAL = 100L; // every 100 checkpoints
 
-    // constant for triggering state persistence, hardcoded for now
-    public static final long DEFAULT_TRIGGER_SAVE_STATE_INTERVAL_MS = 60_000; // 60s
+    // Constant for triggering state persistence, used when there are no state persistence errors.
+    // In the face of errors, an exponential backoff scheme is used.
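// ---------------------------------------------------------------------------
// A minimal, runnable illustration (JDK only; constants copied from the
// TransformSchedulingUtils code added further down in this patch) of the
// backoff that now also drives state persistence: 5s minimum, doubling per
// failure, capped at 1h. This is a sketch of the formula, not the patch code.
class BackoffSketch {
    static long delayMillis(int failureCount) {
        if (failureCount == 0) {
            return 60_000; // normal 60s schedule, no failures
        }
        // Math.min(failureCount, 32) avoids overflowing the shift.
        return Math.min(Math.max((1L << Math.min(failureCount, 32)) * 1000, 5_000), 3_600_000);
    }

    public static void main(String[] args) {
        for (int f = 0; f <= 13; f++) {
            // prints 60000, 5000, 5000, 8000, 16000, ... capped at 3600000,
            // matching the expectedDelayMillis table in the tests below
            System.out.println(f + " -> " + delayMillis(f));
        }
    }
}
// ---------------------------------------------------------------------------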
+ public static final TimeValue DEFAULT_TRIGGER_SAVE_STATE_INTERVAL = TimeValue.timeValueSeconds(60); protected final TransformConfigManager transformsConfigManager; private final CheckpointProvider checkpointProvider; @@ -189,8 +192,13 @@ protected boolean triggerSaveState() { if (saveStateListeners.get() != null) { return true; } - - return TimeUnit.NANOSECONDS.toMillis(getTimeNanos()) > lastSaveStateMilliseconds + DEFAULT_TRIGGER_SAVE_STATE_INTERVAL_MS; + long currentTimeMilliseconds = TimeUnit.NANOSECONDS.toMillis(getTimeNanos()); + long nextSaveStateMilliseconds = TransformSchedulingUtils.calculateNextScheduledTime( + lastSaveStateMilliseconds, + DEFAULT_TRIGGER_SAVE_STATE_INTERVAL, + context.getStatePersistenceFailureCount() + ); + return currentTimeMilliseconds > nextSaveStateMilliseconds; } public TransformConfig getConfig() { @@ -343,12 +351,13 @@ protected void onStart(long now, ActionListener listener) { } }, failure -> { String msg = TransformMessages.getMessage(TransformMessages.FAILED_TO_RELOAD_TRANSFORM_CONFIGURATION, getJobId()); - logger.error(msg, failure); // If the transform config index or the transform config is gone, something serious occurred // We are in an unknown state and should fail out if (failure instanceof ResourceNotFoundException) { + logger.error(msg, failure); reLoadFieldMappingsListener.onFailure(new TransformConfigLostOnReloadException(msg, failure)); } else { + logger.warn(msg, failure); auditor.warning(getJobId(), msg); reLoadFieldMappingsListener.onResponse(null); } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java index 6a8a8c8548491..ac690c625124f 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java @@ -229,7 +229,7 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable TransformTa // TODO: do not use the same error message as for loading the last checkpoint String msg = TransformMessages.getMessage(TransformMessages.FAILED_TO_LOAD_TRANSFORM_CHECKPOINT, transformId); logger.error(msg, error); - markAsFailed(buildTask, msg); + markAsFailed(buildTask, error, msg); }); // <5> load last checkpoint @@ -243,7 +243,7 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable TransformTa }, error -> { String msg = TransformMessages.getMessage(TransformMessages.FAILED_TO_LOAD_TRANSFORM_CHECKPOINT, transformId); logger.error(msg, error); - markAsFailed(buildTask, msg); + markAsFailed(buildTask, error, msg); }); // <4> Set the previous stats (if they exist), initialize the indexer, start the task (If it is STOPPED) @@ -288,7 +288,7 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable TransformTa if (error instanceof ResourceNotFoundException == false) { String msg = TransformMessages.getMessage(TransformMessages.FAILED_TO_LOAD_TRANSFORM_STATE, transformId); logger.error(msg, error); - markAsFailed(buildTask, msg); + markAsFailed(buildTask, error, msg); } else { logger.trace("[{}] No stats found (new transform), starting the task", transformId); startTask(buildTask, indexerBuilder, null, null, startTaskListener); @@ -309,7 +309,7 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable 
TransformTa TransformDeprecations.MIN_TRANSFORM_VERSION ); auditor.error(transformId, transformTooOldError); - markAsFailed(buildTask, transformTooOldError); + markAsFailed(buildTask, null, transformTooOldError); return; } @@ -321,6 +321,7 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable TransformTa auditor.error(transformId, validationException.getMessage()); markAsFailed( buildTask, + validationException, TransformMessages.getMessage( TransformMessages.TRANSFORM_CONFIGURATION_INVALID, transformId, @@ -330,8 +331,7 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable TransformTa } }, error -> { String msg = TransformMessages.getMessage(TransformMessages.FAILED_TO_LOAD_TRANSFORM_CONFIGURATION, transformId); - logger.error(msg, error); - markAsFailed(buildTask, msg); + markAsFailed(buildTask, error, msg); }); // <2> Get the transform config @@ -340,8 +340,7 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable TransformTa error -> { Throwable cause = ExceptionsHelper.unwrapCause(error); String msg = "Failed to create internal index mappings"; - logger.error(msg, cause); - markAsFailed(buildTask, msg + "[" + cause + "]"); + markAsFailed(buildTask, error, msg + "[" + cause + "]"); } ); @@ -368,10 +367,11 @@ private static IndexerState currentIndexerState(TransformState previousState) { }; } - private static void markAsFailed(TransformTask task, String reason) { + private static void markAsFailed(TransformTask task, Throwable exception, String reason) { CountDownLatch latch = new CountDownLatch(1); task.fail( + exception, reason, new LatchedActionListener<>( ActionListener.wrap( diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java index 753d61410d5a8..6ab7e7764b187 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java @@ -480,7 +480,7 @@ public void failureCountChanged() { } @Override - public void fail(String reason, ActionListener listener) { + public void fail(Throwable exception, String reason, ActionListener listener) { synchronized (context) { // If we are already flagged as failed, this probably means that a second trigger started firing while we were attempting to // flag the previously triggered indexer as failed. Exit early as we are already flagged as failed. @@ -505,7 +505,7 @@ public void fail(String reason, ActionListener listener) { return; } - logger.error("[{}] transform has failed; experienced: [{}].", transform.getId(), reason); + logger.atError().withThrowable(exception).log("[{}] transform has failed; experienced: [{}].", transform.getId(), reason); auditor.error(transform.getId(), reason); // We should not keep retrying. Either the task will be stopped, or started // If it is started again, it is registered again. 
@@ -517,7 +517,7 @@ public void fail(String reason, ActionListener listener) { // The end user should see that the task is in a failed state, and attempt to stop it again but with force=true context.setTaskStateToFailed(reason); TransformState newState = getState(); - // Even though the indexer information is persisted to an index, we still need TransformTaskState in the clusterstate + // Even though the indexer information is persisted to an index, we still need TransformTaskState in the cluster state // This keeps track of STARTED, FAILED, STOPPED // This is because a FAILED state could occur because we failed to read the config from the internal index, which would imply // that diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/common/AbstractCompositeAggFunction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/common/AbstractCompositeAggFunction.java index 80eb482da6b64..189fb26e1f969 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/common/AbstractCompositeAggFunction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/common/AbstractCompositeAggFunction.java @@ -11,9 +11,9 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.ValidationException; @@ -77,7 +77,7 @@ public void preview( headers, ClientHelper.TRANSFORM_ORIGIN, client, - SearchAction.INSTANCE, + TransportSearchAction.TYPE, buildSearchRequest(sourceConfig, timeout, numberOfBuckets), ActionListener.wrap(r -> { try { @@ -121,7 +121,7 @@ public void validateQuery( headers, ClientHelper.TRANSFORM_ORIGIN, client, - SearchAction.INSTANCE, + TransportSearchAction.TYPE, searchRequest, ActionListener.wrap(response -> { if (response == null) { diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/Pivot.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/Pivot.java index a6d4b06151a0f..3edc0b281fa41 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/Pivot.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/Pivot.java @@ -97,7 +97,15 @@ public void deduceMappings( listener.onResponse(emptyMap()); return; } - SchemaUtil.deduceMappings(client, headers, config, sourceConfig.getIndex(), sourceConfig.getRuntimeMappings(), listener); + SchemaUtil.deduceMappings( + client, + headers, + config, + sourceConfig.getIndex(), + sourceConfig.getQueryConfig().getQuery(), + sourceConfig.getRuntimeMappings(), + listener + ); } /** diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtil.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtil.java index 14259bffdb43d..5cacee644fe3c 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtil.java +++ 
b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtil.java @@ -17,6 +17,7 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; import org.elasticsearch.xpack.core.ClientHelper; @@ -89,6 +90,7 @@ public static Object dropFloatingPointComponentIfTypeRequiresIt(String type, dou * @param client Client from which to make requests against the cluster * @param config The PivotConfig for which to deduce destination mapping * @param sourceIndex Source index that contains the data to pivot + * @param sourceQuery Source index query to apply * @param runtimeMappings Source runtime mappings * @param listener Listener to alert on success or failure. */ @@ -97,6 +99,7 @@ public static void deduceMappings( final Map headers, final PivotConfig config, final String[] sourceIndex, + final QueryBuilder sourceQuery, final Map runtimeMappings, final ActionListener> listener ) { @@ -145,6 +148,7 @@ public static void deduceMappings( client, headers, sourceIndex, + sourceQuery, allFieldNames.values().stream().filter(Objects::nonNull).toArray(String[]::new), runtimeMappings, ActionListener.wrap( @@ -248,6 +252,7 @@ static void getSourceFieldMappings( Client client, Map headers, String[] index, + QueryBuilder query, String[] fields, Map runtimeMappings, ActionListener> listener @@ -257,6 +262,7 @@ static void getSourceFieldMappings( return; } FieldCapabilitiesRequest fieldCapabilitiesRequest = new FieldCapabilitiesRequest().indices(index) + .indexFilter(query) .fields(fields) .runtimeFields(runtimeMappings) .indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTask.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTask.java index 88405b3e76b79..f2d7b3a9aac70 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTask.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTask.java @@ -10,9 +10,10 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.xpack.transform.Transform; -import java.time.Duration; import java.util.Objects; +import static org.elasticsearch.xpack.transform.transforms.scheduling.TransformSchedulingUtils.calculateNextScheduledTime; + /** * {@link TransformScheduledTask} is a structure describing the scheduled task in the queue. *
    @@ -20,15 +21,6 @@ */ final class TransformScheduledTask { - /** - * Minimum delay that can be applied after a failure. - */ - private static final long MIN_DELAY_MILLIS = Duration.ofSeconds(5).toMillis(); - /** - * Maximum delay that can be applied after a failure. - */ - private static final long MAX_DELAY_MILLIS = Duration.ofHours(1).toMillis(); - private final String transformId; private final TimeValue frequency; private final Long lastTriggeredTimeMillis; @@ -69,29 +61,6 @@ final class TransformScheduledTask { ); } - // Visible for testing - - /** - * Calculates the appropriate next scheduled time taking number of failures into account. - * This method implements exponential backoff approach. - * - * @param lastTriggeredTimeMillis the last time (in millis) the task was triggered - * @param frequency the frequency of the transform - * @param failureCount the number of failures that happened since the task was triggered - * @return next scheduled time for a task - */ - static long calculateNextScheduledTime(Long lastTriggeredTimeMillis, TimeValue frequency, int failureCount) { - final long baseTime = lastTriggeredTimeMillis != null ? lastTriggeredTimeMillis : System.currentTimeMillis(); - - if (failureCount == 0) { - return baseTime + (frequency != null ? frequency : Transform.DEFAULT_TRANSFORM_FREQUENCY).millis(); - } - - // Math.min(failureCount, 32) is applied in order to avoid overflow. - long delayMillis = Math.min(Math.max((1L << Math.min(failureCount, 32)) * 1000, MIN_DELAY_MILLIS), MAX_DELAY_MILLIS); - return baseTime + delayMillis; - } - String getTransformId() { return transformId; } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformSchedulingUtils.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformSchedulingUtils.java new file mode 100644 index 0000000000000..180fbb4d48522 --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformSchedulingUtils.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.transform.transforms.scheduling; + +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xpack.transform.Transform; + +import java.time.Duration; + +public final class TransformSchedulingUtils { + + /** + * Minimum delay that can be applied after a failure. + */ + private static final long MIN_DELAY_MILLIS = Duration.ofSeconds(5).toMillis(); + /** + * Maximum delay that can be applied after a failure. + */ + private static final long MAX_DELAY_MILLIS = Duration.ofHours(1).toMillis(); + + /** + * Calculates the appropriate next scheduled time taking number of failures into account. + * This method implements exponential backoff approach. + * + * @param lastTriggeredTimeMillis the last time (in millis) the task was triggered + * @param frequency the frequency of the transform + * @param failureCount the number of failures that happened since the task was triggered + * @return next scheduled time for a task + */ + public static long calculateNextScheduledTime(Long lastTriggeredTimeMillis, TimeValue frequency, int failureCount) { + final long baseTime = lastTriggeredTimeMillis != null ? 
lastTriggeredTimeMillis : System.currentTimeMillis(); + + if (failureCount == 0) { + return baseTime + (frequency != null ? frequency : Transform.DEFAULT_TRANSFORM_FREQUENCY).millis(); + } + + // Math.min(failureCount, 32) is applied in order to avoid overflow. + long delayMillis = Math.min(Math.max((1L << Math.min(failureCount, 32)) * 1000, MIN_DELAY_MILLIS), MAX_DELAY_MILLIS); + return baseTime + delayMillis; + } +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/utils/ExceptionRootCauseFinder.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/utils/ExceptionRootCauseFinder.java index 40144bd59b127..8618b01a0440b 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/utils/ExceptionRootCauseFinder.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/utils/ExceptionRootCauseFinder.java @@ -10,6 +10,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchContextMissingException; import org.elasticsearch.tasks.TaskCancelledException; import java.util.Collection; @@ -82,6 +83,10 @@ public static boolean isExceptionIrrecoverable(ElasticsearchException elasticsea if (elasticsearchException instanceof TaskCancelledException) { return false; } + // We can safely retry SearchContextMissingException instead of failing the transform. + if (elasticsearchException instanceof SearchContextMissingException) { + return false; + } return true; } return false; diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointActionTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointActionTests.java new file mode 100644 index 0000000000000..0d2d9619aca68 --- /dev/null +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointActionTests.java @@ -0,0 +1,134 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.transform.action; + +import org.elasticsearch.action.search.SearchShardsGroup; +import org.elasticsearch.action.search.SearchShardsResponse; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESTestCase; + +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.hamcrest.Matchers.anEmptyMap; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class TransportGetCheckpointActionTests extends ESTestCase { + + private static final String NODE_0 = "node-0"; + private static final String NODE_1 = "node-1"; + private static final String NODE_2 = "node-2"; + private static final Index INDEX_A = new Index("my-index-A", "A"); + private static final Index INDEX_B = new Index("my-index-B", "B"); + private static final Index INDEX_C = new Index("my-index-C", "C"); + private static final ShardId SHARD_A_0 = new ShardId(INDEX_A, 0); + private static final ShardId SHARD_A_1 = new ShardId(INDEX_A, 1); + private static final ShardId SHARD_B_0 = new ShardId(INDEX_B, 0); + private static final ShardId SHARD_B_1 = new ShardId(INDEX_B, 1); + + private static final Map> NODES_AND_SHARDS = Map.of( + NODE_0, + Set.of(SHARD_A_0, SHARD_A_1, SHARD_B_0, SHARD_B_1), + NODE_1, + Set.of(SHARD_A_0, SHARD_A_1, SHARD_B_0, SHARD_B_1), + NODE_2, + Set.of(SHARD_A_0, SHARD_A_1, SHARD_B_0, SHARD_B_1) + ); + + public void testFilterOutSkippedShards_EmptyNodesAndShards() { + SearchShardsResponse searchShardsResponse = new SearchShardsResponse( + Set.of( + new SearchShardsGroup(SHARD_A_0, List.of(NODE_0, NODE_1), true), + new SearchShardsGroup(SHARD_B_0, List.of(NODE_1, NODE_2), false), + new SearchShardsGroup(SHARD_B_1, List.of(NODE_0, NODE_2), true) + ), + Set.of(), + Map.of() + ); + Map> filteredNodesAndShards = TransportGetCheckpointAction.filterOutSkippedShards( + Map.of(), + searchShardsResponse + ); + assertThat(filteredNodesAndShards, is(anEmptyMap())); + } + + public void testFilterOutSkippedShards_EmptySearchShardsResponse() { + SearchShardsResponse searchShardsResponse = new SearchShardsResponse(Set.of(), Set.of(), Map.of()); + Map> filteredNodesAndShards = TransportGetCheckpointAction.filterOutSkippedShards( + NODES_AND_SHARDS, + searchShardsResponse + ); + assertThat(filteredNodesAndShards, is(equalTo(NODES_AND_SHARDS))); + } + + public void testFilterOutSkippedShards_SomeNodesEmptyAfterFiltering() { + SearchShardsResponse searchShardsResponse = new SearchShardsResponse( + Set.of( + new SearchShardsGroup(SHARD_A_0, List.of(NODE_0, NODE_2), true), + new SearchShardsGroup(SHARD_A_1, List.of(NODE_0, NODE_2), true), + new SearchShardsGroup(SHARD_B_0, List.of(NODE_0, NODE_2), true), + new SearchShardsGroup(SHARD_B_1, List.of(NODE_0, NODE_2), true) + ), + Set.of(), + Map.of() + ); + Map> filteredNodesAndShards = TransportGetCheckpointAction.filterOutSkippedShards( + NODES_AND_SHARDS, + searchShardsResponse + ); + Map> expectedFilteredNodesAndShards = Map.of(NODE_1, Set.of(SHARD_A_0, SHARD_A_1, SHARD_B_0, SHARD_B_1)); + assertThat(filteredNodesAndShards, is(equalTo(expectedFilteredNodesAndShards))); + } + + public void testFilterOutSkippedShards_AllNodesEmptyAfterFiltering() { + SearchShardsResponse searchShardsResponse = new SearchShardsResponse( + Set.of( + new SearchShardsGroup(SHARD_A_0, List.of(NODE_0, NODE_1, NODE_2), true), + new SearchShardsGroup(SHARD_A_1, List.of(NODE_0, NODE_1, NODE_2), true), + new SearchShardsGroup(SHARD_B_0, 
List.of(NODE_0, NODE_1, NODE_2), true), + new SearchShardsGroup(SHARD_B_1, List.of(NODE_0, NODE_1, NODE_2), true) + ), + Set.of(), + Map.of() + ); + Map> filteredNodesAndShards = TransportGetCheckpointAction.filterOutSkippedShards( + NODES_AND_SHARDS, + searchShardsResponse + ); + assertThat(filteredNodesAndShards, is(equalTo(Map.of()))); + } + + public void testFilterOutSkippedShards() { + SearchShardsResponse searchShardsResponse = new SearchShardsResponse( + Set.of( + new SearchShardsGroup(SHARD_A_0, List.of(NODE_0, NODE_1), true), + new SearchShardsGroup(SHARD_B_0, List.of(NODE_1, NODE_2), false), + new SearchShardsGroup(SHARD_B_1, List.of(NODE_0, NODE_2), true), + new SearchShardsGroup(new ShardId(INDEX_C, 0), List.of(NODE_0, NODE_1, NODE_2), true) + ), + Set.of(), + Map.of() + ); + Map> filteredNodesAndShards = TransportGetCheckpointAction.filterOutSkippedShards( + NODES_AND_SHARDS, + searchShardsResponse + ); + Map> expectedFilteredNodesAndShards = Map.of( + NODE_0, + Set.of(SHARD_A_1, SHARD_B_0), + NODE_1, + Set.of(SHARD_A_1, SHARD_B_0, SHARD_B_1), + NODE_2, + Set.of(SHARD_A_0, SHARD_A_1, SHARD_B_0) + ); + assertThat(filteredNodesAndShards, is(equalTo(expectedFilteredNodesAndShards))); + } +} diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProviderTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProviderTests.java index 5f1c0e6bb7f76..bed646b9ddeb2 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProviderTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProviderTests.java @@ -11,11 +11,11 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.common.settings.ClusterSettings; @@ -179,7 +179,7 @@ private void testSourceHasChanged( TimeValue delay, Tuple expectedRangeQueryBounds ) throws InterruptedException { - doAnswer(withResponse(newSearchResponse(totalHits))).when(client).execute(eq(SearchAction.INSTANCE), any(), any()); + doAnswer(withResponse(newSearchResponse(totalHits))).when(client).execute(eq(TransportSearchAction.TYPE), any(), any()); String transformId = getTestName(); TransformConfig transformConfig = newTransformConfigWithDateHistogram( transformId, @@ -200,7 +200,7 @@ private void testSourceHasChanged( assertThat(latch.await(100, TimeUnit.MILLISECONDS), is(true)); ArgumentCaptor searchRequestArgumentCaptor = ArgumentCaptor.forClass(SearchRequest.class); - verify(client).execute(eq(SearchAction.INSTANCE), searchRequestArgumentCaptor.capture(), any()); + verify(client).execute(eq(TransportSearchAction.TYPE), searchRequestArgumentCaptor.capture(), any()); SearchRequest searchRequest = searchRequestArgumentCaptor.getValue(); BoolQueryBuilder boolQuery = (BoolQueryBuilder) searchRequest.source().query(); RangeQueryBuilder rangeQuery = 
(RangeQueryBuilder) boolQuery.filter().get(1); diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java index 0218f5ae86226..84c8d4e140408 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java @@ -50,7 +50,7 @@ public void failureCountChanged() { } @Override - public void fail(String failureMessage, ActionListener listener) { + public void fail(Throwable exception, String failureMessage, ActionListener listener) { failed = true; } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java index f59aaab33f0f1..d3be18a193415 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java @@ -1068,7 +1068,7 @@ public void shutdown() {} public void failureCountChanged() {} @Override - public void fail(String message, ActionListener listener) { + public void fail(Throwable exception, String message, ActionListener listener) { assertTrue(failIndexerCalled.compareAndSet(false, true)); assertTrue(failureMessage.compareAndSet(null, message)); } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java index 33ced92a8fa19..55ae653c39629 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java @@ -197,7 +197,7 @@ public void shutdown() {} public void failureCountChanged() {} @Override - public void fail(String failureMessage, ActionListener listener) { + public void fail(Throwable exception, String failureMessage, ActionListener listener) { state.set(TransformTaskState.FAILED); } }; @@ -281,7 +281,6 @@ public void fail(String failureMessage, ActionListener listener) { } ); } - } // test reset on success @@ -415,7 +414,7 @@ public void shutdown() {} public void failureCountChanged() {} @Override - public void fail(String failureMessage, ActionListener listener) { + public void fail(Throwable exception, String failureMessage, ActionListener listener) { state.set(TransformTaskState.FAILED); } }; diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java index 277553cd9f4ec..cda258c6daa81 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java +++ 
b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java @@ -222,7 +222,7 @@ public void testStopOnFailedTaskWithoutIndexer() { transformTask.init(mock(PersistentTasksService.class), taskManager, "task-id", 42); AtomicBoolean listenerCalled = new AtomicBoolean(false); - transformTask.fail("because", ActionTestUtils.assertNoFailureListener(r -> { listenerCalled.compareAndSet(false, true); })); + transformTask.fail(null, "because", ActionTestUtils.assertNoFailureListener(r -> { listenerCalled.compareAndSet(false, true); })); TransformState state = transformTask.getState(); assertEquals(TransformTaskState.FAILED, state.getTaskState()); diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationSchemaAndResultTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationSchemaAndResultTests.java index a2dda2a1603f1..9221dd36271f7 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationSchemaAndResultTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationSchemaAndResultTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.Strings; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.AggregationBuilders; @@ -147,7 +148,15 @@ public void testBasic() throws InterruptedException { .count(); this.>assertAsync( - listener -> SchemaUtil.deduceMappings(client, emptyMap(), pivotConfig, new String[] { "source-index" }, emptyMap(), listener), + listener -> SchemaUtil.deduceMappings( + client, + emptyMap(), + pivotConfig, + new String[] { "source-index" }, + QueryBuilders.matchAllQuery(), + emptyMap(), + listener + ), mappings -> { assertEquals("Mappings were: " + mappings, numGroupsWithoutScripts + 15, mappings.size()); assertEquals("long", mappings.get("max_rating")); @@ -219,7 +228,15 @@ public void testNested() throws InterruptedException { .count(); this.>assertAsync( - listener -> SchemaUtil.deduceMappings(client, emptyMap(), pivotConfig, new String[] { "source-index" }, emptyMap(), listener), + listener -> SchemaUtil.deduceMappings( + client, + emptyMap(), + pivotConfig, + new String[] { "source-index" }, + QueryBuilders.matchAllQuery(), + emptyMap(), + listener + ), mappings -> { assertEquals(numGroupsWithoutScripts + 12, mappings.size()); assertEquals("long", mappings.get("filter_1")); diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtilTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtilTests.java index 778ca4bf7767d..881d578cb4536 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtilTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtilTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.common.Strings; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.ESTestCase; 
import org.elasticsearch.test.client.NoOpClient; import org.elasticsearch.threadpool.ThreadPool; @@ -104,6 +105,7 @@ public void testGetSourceFieldMappings() throws InterruptedException { client, emptyMap(), new String[] { "index-1", "index-2" }, + QueryBuilders.matchAllQuery(), null, emptyMap(), listener @@ -120,6 +122,7 @@ public void testGetSourceFieldMappings() throws InterruptedException { client, emptyMap(), new String[] { "index-1", "index-2" }, + QueryBuilders.matchAllQuery(), new String[] {}, emptyMap(), listener @@ -136,6 +139,7 @@ public void testGetSourceFieldMappings() throws InterruptedException { client, emptyMap(), null, + QueryBuilders.matchAllQuery(), new String[] { "field-1", "field-2" }, emptyMap(), listener @@ -152,6 +156,7 @@ public void testGetSourceFieldMappings() throws InterruptedException { client, emptyMap(), new String[] {}, + QueryBuilders.matchAllQuery(), new String[] { "field-1", "field-2" }, emptyMap(), listener @@ -168,6 +173,7 @@ public void testGetSourceFieldMappings() throws InterruptedException { client, emptyMap(), new String[] { "index-1", "index-2" }, + QueryBuilders.matchAllQuery(), new String[] { "field-1", "field-2" }, emptyMap(), listener @@ -196,6 +202,7 @@ public void testGetSourceFieldMappingsWithRuntimeMappings() throws InterruptedEx client, emptyMap(), new String[] { "index-1", "index-2" }, + QueryBuilders.matchAllQuery(), new String[] { "field-1", "field-2" }, runtimeMappings, listener diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskTests.java index a63dcd8457efa..fd8a1de429c14 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskTests.java @@ -9,13 +9,9 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.transform.Transform; import org.elasticsearch.xpack.transform.transforms.scheduling.TransformScheduler.Listener; -import java.time.Instant; - import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; public class TransformScheduledTaskTests extends ESTestCase { @@ -63,49 +59,4 @@ public void testNextScheduledTimeMillis() { assertThat(task.getNextScheduledTimeMillis(), is(equalTo(105000L))); } } - - public void testCalculateNextScheduledTimeExponentialBackoff() { - long lastTriggeredTimeMillis = Instant.now().toEpochMilli(); - long[] expectedDelayMillis = { - Transform.DEFAULT_TRANSFORM_FREQUENCY.millis(), // normal schedule - 5000, // 5s - 5000, // 5s - 8000, // 8s - 16000, // 16s - 32000, // 32s - 64000, // ~1min - 128000, // ~2min - 256000, // ~4min - 512000, // ~8.5min - 1024000, // ~17min - 2048000, // ~34min - 3600000, // 1h - 3600000, // 1h - 3600000, // 1h - 3600000 // 1h - }; - for (int failureCount = 0; failureCount < 1000; ++failureCount) { - assertThat( - "failureCount = " + failureCount, - TransformScheduledTask.calculateNextScheduledTime(lastTriggeredTimeMillis, null, failureCount), - is(equalTo(lastTriggeredTimeMillis + expectedDelayMillis[Math.min(failureCount, expectedDelayMillis.length - 1)])) - ); - } - } - - public void testCalculateNextScheduledTime() { - 
long now = Instant.now().toEpochMilli(); - assertThat( - TransformScheduledTask.calculateNextScheduledTime(null, TimeValue.timeValueSeconds(10), 0), - is(greaterThanOrEqualTo(now + 10_000)) - ); - assertThat( - TransformScheduledTask.calculateNextScheduledTime(now, null, 0), - is(equalTo(now + Transform.DEFAULT_TRANSFORM_FREQUENCY.millis())) - ); - assertThat( - TransformScheduledTask.calculateNextScheduledTime(null, null, 0), - is(greaterThanOrEqualTo(now + Transform.DEFAULT_TRANSFORM_FREQUENCY.millis())) - ); - } } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformSchedulingUtilsTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformSchedulingUtilsTests.java new file mode 100644 index 0000000000000..2995412ccbc87 --- /dev/null +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformSchedulingUtilsTests.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.transform.transforms.scheduling; + +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.transform.Transform; + +import java.time.Instant; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; + +public class TransformSchedulingUtilsTests extends ESTestCase { + + public void testCalculateNextScheduledTimeExponentialBackoff() { + long lastTriggeredTimeMillis = Instant.now().toEpochMilli(); + long[] expectedDelayMillis = { + Transform.DEFAULT_TRANSFORM_FREQUENCY.millis(), // normal schedule + 5000, // 5s + 5000, // 5s + 8000, // 8s + 16000, // 16s + 32000, // 32s + 64000, // ~1min + 128000, // ~2min + 256000, // ~4min + 512000, // ~8.5min + 1024000, // ~17min + 2048000, // ~34min + 3600000, // 1h + 3600000, // 1h + 3600000, // 1h + 3600000 // 1h + }; + for (int failureCount = 0; failureCount < 1000; ++failureCount) { + assertThat( + "failureCount = " + failureCount, + TransformSchedulingUtils.calculateNextScheduledTime(lastTriggeredTimeMillis, null, failureCount), + is(equalTo(lastTriggeredTimeMillis + expectedDelayMillis[Math.min(failureCount, expectedDelayMillis.length - 1)])) + ); + } + } + + public void testCalculateNextScheduledTime() { + long now = Instant.now().toEpochMilli(); + assertThat( + TransformSchedulingUtils.calculateNextScheduledTime(null, TimeValue.timeValueSeconds(10), 0), + is(greaterThanOrEqualTo(now + 10_000)) + ); + assertThat( + TransformSchedulingUtils.calculateNextScheduledTime(now, null, 0), + is(equalTo(now + Transform.DEFAULT_TRANSFORM_FREQUENCY.millis())) + ); + assertThat( + TransformSchedulingUtils.calculateNextScheduledTime(null, null, 0), + is(greaterThanOrEqualTo(now + Transform.DEFAULT_TRANSFORM_FREQUENCY.millis())) + ); + } +} diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/utils/ExceptionRootCauseFinderTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/utils/ExceptionRootCauseFinderTests.java index ca19e9157e9e0..b71156cad5adf 100644 --- 
a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/utils/ExceptionRootCauseFinderTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/utils/ExceptionRootCauseFinderTests.java @@ -21,6 +21,8 @@ import org.elasticsearch.index.translog.TranslogException; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchContextMissingException; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentLocation; @@ -158,6 +160,11 @@ public void testGetFirstIrrecoverableExceptionFromBulkResponses() { public void testIsIrrecoverable() { assertFalse(ExceptionRootCauseFinder.isExceptionIrrecoverable(new MapperException("mappings problem"))); assertFalse(ExceptionRootCauseFinder.isExceptionIrrecoverable(new TaskCancelledException("cancelled task"))); + assertFalse( + ExceptionRootCauseFinder.isExceptionIrrecoverable( + new SearchContextMissingException(new ShardSearchContextId("session-id", 123, null)) + ) + ); assertFalse( ExceptionRootCauseFinder.isExceptionIrrecoverable( new CircuitBreakingException("circuit broken", CircuitBreaker.Durability.TRANSIENT) @@ -175,5 +182,4 @@ private static void assertFirstException(Collection bulkItemRe assertEquals(t.getClass(), expectedClass); assertEquals(t.getMessage(), message); } - } diff --git a/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java b/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java index f615dca114996..9fa7d10581353 100644 --- a/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java +++ b/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java @@ -152,7 +152,7 @@ public void testVotingOnlyNodesCannotBeMasterWithoutFullMasterNodes() throws Exc expectThrows( MasterNotDiscoveredException.class, () -> assertThat( - clusterAdmin().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().getMasterNodeId(), + clusterAdmin().prepareState().setMasterNodeTimeout("100ms").get().getState().nodes().getMasterNodeId(), nullValue() ) ); @@ -229,8 +229,7 @@ public void testBasicSnapshotRestoreWorkFlow() { .cluster() .prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); diff --git a/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePlugin.java b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePlugin.java index df089381e70b2..7b8355ec41e90 100644 --- a/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePlugin.java +++ b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePlugin.java @@ -48,6 +48,10 @@ import java.util.function.Predicate; import java.util.function.Supplier; +/** + * A voting-only node is one with the 'master' and 
'voting-only' roles, dictating + * that the node may vote in master elections but is ineligible to be master. + */ public class VotingOnlyNodePlugin extends Plugin implements ClusterCoordinationPlugin, NetworkPlugin, ActionPlugin { private static final String VOTING_ONLY_ELECTION_STRATEGY = "supports_voting_only"; @@ -146,15 +150,15 @@ public boolean satisfiesAdditionalQuorumConstraints( } private static Predicate fullMasterWithSameState(long localAcceptedTerm, long localAcceptedVersion) { - return join -> isFullMasterNode(join.getSourceNode()) - && join.getLastAcceptedTerm() == localAcceptedTerm - && join.getLastAcceptedVersion() == localAcceptedVersion; + return join -> isFullMasterNode(join.votingNode()) + && join.lastAcceptedTerm() == localAcceptedTerm + && join.lastAcceptedVersion() == localAcceptedVersion; } private static Predicate fullMasterWithOlderState(long localAcceptedTerm, long localAcceptedVersion) { - return join -> isFullMasterNode(join.getSourceNode()) - && (join.getLastAcceptedTerm() < localAcceptedTerm - || (join.getLastAcceptedTerm() == localAcceptedTerm && join.getLastAcceptedVersion() < localAcceptedVersion)); + return join -> isFullMasterNode(join.votingNode()) + && (join.lastAcceptedTerm() < localAcceptedTerm + || (join.lastAcceptedTerm() == localAcceptedTerm && join.lastAcceptedVersion() < localAcceptedVersion)); } } diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/actions/email/EmailAttachmentTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/actions/email/EmailAttachmentTests.java index 05e6be3312827..92d18dcbbdf16 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/actions/email/EmailAttachmentTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/actions/email/EmailAttachmentTests.java @@ -153,7 +153,7 @@ public void testThatEmailAttachmentsAreSent() throws Exception { createIndex("idx"); // Have a sample document in the index, the watch is going to evaluate - client().prepareIndex("idx").setSource("field", "value").get(); + prepareIndex("idx").setSource("field", "value").get(); refresh(); List attachments = new ArrayList<>(); @@ -196,7 +196,7 @@ public void testThatEmailAttachmentsAreSent() throws Exception { try { searchResponse = prepareSearch(HistoryStoreField.DATA_STREAM + "*").setQuery( QueryBuilders.termQuery("watch_id", "_test_id") - ).execute().actionGet(); + ).get(); } catch (SearchPhaseExecutionException e) { if (e.getCause() instanceof NoShardAvailableActionException) { // Nothing has created the index yet diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/ArrayCompareConditionSearchTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/ArrayCompareConditionSearchTests.java index 85e79d96b14d2..9cf2702d4404e 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/ArrayCompareConditionSearchTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/ArrayCompareConditionSearchTests.java @@ -38,8 +38,8 @@ public void testExecuteWithAggs() throws Exception { int numberOfDocuments = randomIntBetween(1, 100); int numberOfDocumentsWatchingFor = 1 + numberOfDocuments; for (int i = 0; i < numberOfDocuments; i++) { - client().prepareIndex(index).setSource(source("elastic", "you 
know, for search", i)).get(); - client().prepareIndex(index).setSource(source("fights_for_the_users", "you know, for the users", i)).get(); + prepareIndex(index).setSource(source("elastic", "you know, for search", i)).get(); + prepareIndex(index).setSource(source("fights_for_the_users", "you know, for the users", i)).get(); } refresh(); @@ -89,7 +89,7 @@ public void testExecuteWithAggs() throws Exception { } ); - client().prepareIndex(index).setSource(source("fights_for_the_users", "you know, for the users", numberOfDocuments)).get(); + prepareIndex(index).setSource(source("fights_for_the_users", "you know, for the users", numberOfDocuments)).get(); refresh(); assertResponse( diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java index a117f59212fbe..dbb7b7d93c2e3 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java @@ -36,10 +36,10 @@ public class CompareConditionSearchTests extends AbstractWatcherIntegrationTestCase { public void testExecuteWithAggs() { - client().prepareIndex("my-index").setSource("@timestamp", "2005-01-01T00:00").get(); - client().prepareIndex("my-index").setSource("@timestamp", "2005-01-01T00:10").get(); - client().prepareIndex("my-index").setSource("@timestamp", "2005-01-01T00:20").get(); - client().prepareIndex("my-index").setSource("@timestamp", "2005-01-01T00:30").get(); + prepareIndex("my-index").setSource("@timestamp", "2005-01-01T00:00").get(); + prepareIndex("my-index").setSource("@timestamp", "2005-01-01T00:10").get(); + prepareIndex("my-index").setSource("@timestamp", "2005-01-01T00:20").get(); + prepareIndex("my-index").setSource("@timestamp", "2005-01-01T00:30").get(); refresh(); CompareCondition condition = new CompareCondition( @@ -72,7 +72,7 @@ public void testExecuteWithAggs() { } ); - client().prepareIndex("my-index").setSource("@timestamp", "2005-01-01T00:40").get(); + prepareIndex("my-index").setSource("@timestamp", "2005-01-01T00:40").get(); refresh(); assertResponse( diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java index 0d914def4831e..b82622fbd4819 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java @@ -34,6 +34,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import static org.elasticsearch.index.query.QueryBuilders.termQuery; @@ -171,6 +172,7 @@ public void testActionConditionWithFailures() throws Exception { putAndTriggerWatch(id, input, actionConditionsWithFailure); assertWatchWithMinimumActionsCount(id, ExecutionState.EXECUTED, 1); + AtomicReference searchHitReference = new AtomicReference<>(); // only one action should have failed via condition assertBusy(() -> { // Watcher history is 
now written asynchronously, so we check this in an assertBusy @@ -178,38 +180,34 @@ public void testActionConditionWithFailures() throws Exception { final SearchResponse response = searchHistory(SearchSourceBuilder.searchSource().query(termQuery("watch_id", id))); try { assertThat(response.getHits().getTotalHits().value, is(oneOf(1L, 2L))); + searchHitReference.set(response.getHits().getAt(0)); } finally { response.decRef(); } }); - final SearchResponse response = searchHistory(SearchSourceBuilder.searchSource().query(termQuery("watch_id", id))); - try { - final SearchHit hit = response.getHits().getAt(0); - final List actions = getActionsFromHit(hit.getSourceAsMap()); + final SearchHit hit = searchHitReference.get(); + final List actions = getActionsFromHit(hit.getSourceAsMap()); - for (int i = 0; i < actionConditionsWithFailure.length; ++i) { - final Map action = (Map) actions.get(i); - final Map condition = (Map) action.get("condition"); - final Map logging = (Map) action.get("logging"); + for (int i = 0; i < actionConditionsWithFailure.length; ++i) { + final Map action = (Map) actions.get(i); + final Map condition = (Map) action.get("condition"); + final Map logging = (Map) action.get("logging"); - assertThat(action.get("id"), is("action" + i)); - assertThat(condition.get("type"), is(actionConditionsWithFailure[i].type())); + assertThat(action.get("id"), is("action" + i)); + assertThat(condition.get("type"), is(actionConditionsWithFailure[i].type())); - if (i == failedIndex) { - assertThat(action.get("status"), is("condition_failed")); - assertThat(condition.get("met"), is(false)); - assertThat(action.get("reason"), is("condition not met. skipping")); - assertThat(logging, nullValue()); - } else { - assertThat(action.get("status"), is("success")); - assertThat(condition.get("met"), is(true)); - assertThat(action.get("reason"), nullValue()); - assertThat(logging.get("logged_text"), is(Integer.toString(i))); - } + if (i == failedIndex) { + assertThat(action.get("status"), is("condition_failed")); + assertThat(condition.get("met"), is(false)); + assertThat(action.get("reason"), is("condition not met. 
skipping")); + assertThat(logging, nullValue()); + } else { + assertThat(action.get("status"), is("success")); + assertThat(condition.get("met"), is(true)); + assertThat(action.get("reason"), nullValue()); + assertThat(logging.get("logged_text"), is(Integer.toString(i))); } - } finally { - response.decRef(); } } @@ -234,6 +232,7 @@ public void testActionCondition() throws Exception { assertWatchWithMinimumActionsCount(id, ExecutionState.EXECUTED, 1); + AtomicReference searchHitReference = new AtomicReference<>(); // all actions should be successful assertBusy(() -> { // Watcher history is now written asynchronously, so we check this in an assertBusy @@ -241,30 +240,26 @@ public void testActionCondition() throws Exception { final SearchResponse response = searchHistory(SearchSourceBuilder.searchSource().query(termQuery("watch_id", id))); try { assertThat(response.getHits().getTotalHits().value, is(oneOf(1L, 2L))); + searchHitReference.set(response.getHits().getAt(0)); } finally { response.decRef(); } }); - final SearchResponse response = searchHistory(SearchSourceBuilder.searchSource().query(termQuery("watch_id", id))); - try { - final SearchHit hit = response.getHits().getAt(0); - final List actions = getActionsFromHit(hit.getSourceAsMap()); + final SearchHit hit = searchHitReference.get(); + final List actions = getActionsFromHit(hit.getSourceAsMap()); - for (int i = 0; i < actionConditions.size(); ++i) { - final Map action = (Map) actions.get(i); - final Map condition = (Map) action.get("condition"); - final Map logging = (Map) action.get("logging"); + for (int i = 0; i < actionConditions.size(); ++i) { + final Map action = (Map) actions.get(i); + final Map condition = (Map) action.get("condition"); + final Map logging = (Map) action.get("logging"); - assertThat(action.get("id"), is("action" + i)); - assertThat(action.get("status"), is("success")); - assertThat(condition.get("type"), is(actionConditions.get(i).type())); - assertThat(condition.get("met"), is(true)); - assertThat(action.get("reason"), nullValue()); - assertThat(logging.get("logged_text"), is(Integer.toString(i))); - } - } finally { - response.decRef(); + assertThat(action.get("id"), is("action" + i)); + assertThat(action.get("status"), is("success")); + assertThat(condition.get("type"), is(actionConditions.get(i).type())); + assertThat(condition.get("met"), is(true)); + assertThat(action.get("reason"), nullValue()); + assertThat(logging.get("logged_text"), is(Integer.toString(i))); } } diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateTransformMappingsTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateTransformMappingsTests.java index a0255616e8f7b..6efcbd87f0b33 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateTransformMappingsTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateTransformMappingsTests.java @@ -47,15 +47,10 @@ public void testTransformFields() throws Exception { client().prepareBulk() .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .add( - client().prepareIndex() - .setIndex("idx") - .setId("1") - .setSource(jsonBuilder().startObject().field("name", "first").field("foo", "bar").endObject()) + prepareIndex("idx").setId("1").setSource(jsonBuilder().startObject().field("name", "first").field("foo", "bar").endObject()) ) .add( - 
client().prepareIndex() - .setIndex("idx") - .setId("2") + prepareIndex("idx").setId("2") .setSource( jsonBuilder().startObject().field("name", "second").startObject("foo").field("what", "ever").endObject().endObject() ) diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/input/chain/ChainIntegrationTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/input/chain/ChainIntegrationTests.java index 041c03af0af3c..c9d40a7f92724 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/input/chain/ChainIntegrationTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/input/chain/ChainIntegrationTests.java @@ -55,7 +55,7 @@ protected Collection> nodePlugins() { public void testChainedInputsAreWorking() throws Exception { String index = "the-most-awesome-index-ever"; createIndex(index); - client().prepareIndex().setIndex(index).setId("id").setSource("{}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(index).setId("id").setSource("{}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); InetSocketAddress address = internalCluster().httpAddresses()[0]; HttpInput.Builder httpInputBuilder = httpInput( diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java index c6d9d9eab4525..c8dd4d42ac4a1 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java @@ -75,7 +75,7 @@ public class BasicWatcherTests extends AbstractWatcherIntegrationTestCase { public void testIndexWatch() throws Exception { createIndex("idx"); // Have a sample document in the index, the watch is going to evaluate - client().prepareIndex("idx").setSource("field", "foo").get(); + prepareIndex("idx").setSource("field", "foo").get(); refresh(); WatcherSearchTemplateRequest request = templateRequest(searchSource().query(termQuery("field", "foo")), "idx"); new PutWatchRequestBuilder(client()).setId("_name") @@ -109,7 +109,7 @@ public void testIndexWatchRegisterWatchBeforeTargetIndex() throws Exception { assertWatchWithNoActionNeeded("_name", 1); // Index sample doc after we register the watch and the watch's condition should meet - client().prepareIndex("idx").setSource("field", "value").get(); + prepareIndex("idx").setSource("field", "value").get(); refresh(); timeWarp().clock().fastForwardSeconds(5); @@ -145,7 +145,7 @@ public void testDeleteWatch() throws Exception { public void testMalformedWatch() throws Exception { createIndex("idx"); // Have a sample document in the index, the watch is going to evaluate - client().prepareIndex("idx").setSource("field", "value").get(); + prepareIndex("idx").setSource("field", "value").get(); XContentBuilder watchSource = jsonBuilder(); watchSource.startObject(); @@ -166,7 +166,7 @@ public void testMalformedWatch() throws Exception { // In watch store we fail parsing if an watch contains undefined fields. 
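
The long run of client().prepareIndex(...) and client().prepareIndex().setIndex(...) call sites in these watcher tests all collapse onto a single base-class helper. Its body is inferred here from the before/after shapes rather than copied from ESIntegTestCase, so treat this as a sketch of what the rewritten call sites assume:

import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.client.internal.Client;

abstract class PrepareIndexHelperSketch {

    abstract Client client(); // stand-in for the test base class accessor

    // One call replaces both client().prepareIndex(index) and
    // client().prepareIndex().setIndex(index); everything chained after it
    // (setId, setSource, setRefreshPolicy, get) is unchanged.
    IndexRequestBuilder prepareIndex(String index) {
        return client().prepareIndex(index);
    }
}

Since every rewritten call site keeps its setSource(...).get() chain intact, the migration is behavior-preserving; only the boilerplate shrinks.
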
} try { - client().prepareIndex().setIndex(Watch.INDEX).setId("_name").setSource(watchSource).get(); + prepareIndex(Watch.INDEX).setId("_name").setSource(watchSource).get(); fail(); } catch (Exception e) { // The watch index template the mapping is defined as strict @@ -251,7 +251,7 @@ public void testConditionSearchWithIndexedTemplate() throws Exception { public void testInputFiltering() throws Exception { createIndex("idx"); // Have a sample document in the index, the watch is going to evaluate - client().prepareIndex("idx").setSource(jsonBuilder().startObject().field("field", "foovalue").endObject()).get(); + prepareIndex("idx").setSource(jsonBuilder().startObject().field("field", "foovalue").endObject()).get(); refresh(); WatcherSearchTemplateRequest request = templateRequest(searchSource().query(termQuery("field", "foovalue")), "idx"); new PutWatchRequestBuilder(client()).setId("_name1") @@ -385,21 +385,21 @@ private void testConditionSearch(WatcherSearchTemplateRequest request) throws Ex logger.info("created watch [{}] at [{}]", watchName, ZonedDateTime.now(Clock.systemUTC())); - client().prepareIndex("events").setSource("level", "a").get(); - client().prepareIndex("events").setSource("level", "a").get(); + prepareIndex("events").setSource("level", "a").get(); + prepareIndex("events").setSource("level", "a").get(); refresh(); timeWarp().clock().fastForwardSeconds(1); timeWarp().trigger(watchName); assertWatchWithNoActionNeeded(watchName, 1); - client().prepareIndex("events").setSource("level", "b").get(); + prepareIndex("events").setSource("level", "b").get(); refresh(); timeWarp().clock().fastForwardSeconds(1); timeWarp().trigger(watchName); assertWatchWithNoActionNeeded(watchName, 2); - client().prepareIndex("events").setSource("level", "a").get(); + prepareIndex("events").setSource("level", "a").get(); refresh(); timeWarp().clock().fastForwardSeconds(1); timeWarp().trigger(watchName); diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java index a200fccfa928d..99640d1ebc3ea 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java @@ -66,9 +66,7 @@ protected boolean timeWarped() { } public void testLoadMalformedWatchRecord() throws Exception { - client().prepareIndex() - .setIndex(Watch.INDEX) - .setId("_id") + prepareIndex(Watch.INDEX).setId("_id") .setSource( jsonBuilder().startObject() .startObject(WatchField.TRIGGER.getPreferredName()) @@ -87,9 +85,7 @@ public void testLoadMalformedWatchRecord() throws Exception { Wid wid = new Wid("_id", now); ScheduleTriggerEvent event = new ScheduleTriggerEvent("_id", now, now); ExecutableCondition condition = InternalAlwaysCondition.INSTANCE; - client().prepareIndex() - .setIndex(HistoryStoreField.DATA_STREAM) - .setId(wid.value()) + prepareIndex(HistoryStoreField.DATA_STREAM).setId(wid.value()) .setOpType(DocWriteRequest.OpType.CREATE) .setSource( jsonBuilder().startObject() @@ -112,9 +108,7 @@ public void testLoadMalformedWatchRecord() throws Exception { // unknown condition: wid = new Wid("_id", now); - client().prepareIndex() - .setIndex(HistoryStoreField.DATA_STREAM) - .setId(wid.value()) + 
prepareIndex(HistoryStoreField.DATA_STREAM).setId(wid.value()) .setOpType(DocWriteRequest.OpType.CREATE) .setSource( jsonBuilder().startObject() @@ -138,9 +132,7 @@ public void testLoadMalformedWatchRecord() throws Exception { // unknown trigger: wid = new Wid("_id", now); - client().prepareIndex() - .setIndex(HistoryStoreField.DATA_STREAM) - .setId(wid.value()) + prepareIndex(HistoryStoreField.DATA_STREAM).setId(wid.value()) .setOpType(DocWriteRequest.OpType.CREATE) .setSource( jsonBuilder().startObject() @@ -180,9 +172,7 @@ public void testLoadExistingWatchesUponStartup() throws Exception { BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); for (int i = 0; i < numWatches; i++) { bulkRequestBuilder.add( - client().prepareIndex() - .setIndex(Watch.INDEX) - .setId("_id" + i) + prepareIndex(Watch.INDEX).setId("_id" + i) .setSource( watchBuilder().trigger(schedule(cron("0 0/5 * * * ? 2050"))) .input(searchInput(request)) @@ -206,12 +196,7 @@ public void testLoadExistingWatchesUponStartup() throws Exception { public void testMixedTriggeredWatchLoading() throws Exception { createIndex("output"); - client().prepareIndex() - .setIndex("my-index") - .setId("bar") - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .setSource("field", "value") - .get(); + prepareIndex("my-index").setId("bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).setSource("field", "value").get(); WatcherStatsResponse response = new WatcherStatsRequestBuilder(client()).get(); assertThat(response.getWatchesCount(), equalTo(0L)); @@ -245,9 +230,7 @@ public void testMixedTriggeredWatchLoading() throws Exception { Wid wid = new Wid(watchId, now); TriggeredWatch triggeredWatch = new TriggeredWatch(wid, event); bulkRequestBuilder.add( - client().prepareIndex() - .setIndex(TriggeredWatchStoreField.INDEX_NAME) - .setId(triggeredWatch.id().value()) + prepareIndex(TriggeredWatchStoreField.INDEX_NAME).setId(triggeredWatch.id().value()) .setSource(jsonBuilder().value(triggeredWatch)) .request() ); @@ -262,12 +245,7 @@ public void testMixedTriggeredWatchLoading() throws Exception { public void testTriggeredWatchLoading() throws Exception { cluster().wipeIndices(TriggeredWatchStoreField.INDEX_NAME); createIndex("output"); - client().prepareIndex() - .setIndex("my-index") - .setId("bar") - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .setSource("field", "value") - .get(); + prepareIndex("my-index").setId("bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).setSource("field", "value").get(); WatcherStatsResponse response = new WatcherStatsRequestBuilder(client()).get(); assertThat(response.getWatchesCount(), equalTo(0L)); @@ -295,9 +273,7 @@ public void testTriggeredWatchLoading() throws Exception { Wid wid = new Wid(watchId, now); TriggeredWatch triggeredWatch = new TriggeredWatch(wid, event); bulkRequestBuilder.add( - client().prepareIndex() - .setIndex(TriggeredWatchStoreField.INDEX_NAME) - .setId(triggeredWatch.id().value()) + prepareIndex(TriggeredWatchStoreField.INDEX_NAME).setId(triggeredWatch.id().value()) .setSource(jsonBuilder().value(triggeredWatch)) .setWaitForActiveShards(ActiveShardCount.ALL) ); @@ -374,18 +350,14 @@ public void testWatchRecordSavedTwice() throws Exception { Wid wid = new Wid(watchId, triggeredTime); TriggeredWatch triggeredWatch = new TriggeredWatch(wid, event); bulkRequestBuilder.add( - client().prepareIndex() - .setIndex(TriggeredWatchStoreField.INDEX_NAME) - .setId(triggeredWatch.id().value()) + 
prepareIndex(TriggeredWatchStoreField.INDEX_NAME).setId(triggeredWatch.id().value()) .setSource(jsonBuilder().value(triggeredWatch)) ); String id = internalCluster().getInstance(ClusterService.class).localNode().getId(); WatchRecord watchRecord = new WatchRecord.MessageWatchRecord(wid, event, ExecutionState.EXECUTED, "executed", id); bulkRequestBuilder.add( - client().prepareIndex() - .setIndex(HistoryStoreField.DATA_STREAM) - .setId(watchRecord.id().value()) + prepareIndex(HistoryStoreField.DATA_STREAM).setId(watchRecord.id().value()) .setOpType(DocWriteRequest.OpType.CREATE) .setSource(jsonBuilder().value(watchRecord)) ); diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java index a3dc49411cc86..ccd9023f745bb 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java @@ -36,7 +36,7 @@ protected boolean timeWarped() { public void testHistoryOnRejection() throws Exception { createIndex("idx"); - client().prepareIndex("idx").setSource("field", "a").get(); + prepareIndex("idx").setSource("field", "a").get(); refresh(); WatcherSearchTemplateRequest request = templateRequest(searchSource().query(termQuery("field", "a")), "idx"); new PutWatchRequestBuilder(client()).setId(randomAlphaOfLength(5)) diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java index 83a3b175819bd..49fd7218ed066 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java @@ -53,9 +53,7 @@ public class WatchAckTests extends AbstractWatcherIntegrationTestCase { @Before public void indexTestDocument() { - DocWriteResponse eventIndexResponse = client().prepareIndex() - .setIndex("events") - .setId(id) + DocWriteResponse eventIndexResponse = prepareIndex("events").setId(id) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .setSource("level", "error") .get(); diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java index d268c6df4b21b..3cd08bb671ed7 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java @@ -143,9 +143,7 @@ public void testLoadWatchWithoutAState() throws Exception { source.toXContent(builder, ToXContent.EMPTY_PARAMS); // now that we filtered out the watch status state, lets put it back in - DocWriteResponse indexResponse = client().prepareIndex() - .setIndex(".watches") - .setId("_id") + DocWriteResponse indexResponse = 
prepareIndex(".watches").setId("_id") .setSource(BytesReference.bytes(builder), XContentType.JSON) .get(); assertThat(indexResponse.getId(), is("_id")); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java index 70b6c0322dced..3413da957add3 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java @@ -614,7 +614,7 @@ public void executeTriggeredWatches(Collection triggeredWatches) private GetResponse getWatch(String id) { try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(WATCHER_ORIGIN)) { GetRequest getRequest = new GetRequest(Watch.INDEX, id).preference(Preference.LOCAL.type()).realtime(true); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); client.get(getRequest, future); return future.actionGet(); } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java index 74c72e468e9b1..d2b38f4b11ef8 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java @@ -83,7 +83,7 @@ public void putAll(final List triggeredWatches, final ActionList } public BulkResponse putAll(final List triggeredWatches) throws IOException { - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); putAll(triggeredWatches, future); return future.actionGet(defaultBulkTimeout); } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/ExecutableSearchInput.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/ExecutableSearchInput.java index eeb43c52a1e20..6dbcef08481d1 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/ExecutableSearchInput.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/ExecutableSearchInput.java @@ -18,7 +18,6 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.script.Script; -import org.elasticsearch.search.SearchHit; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; @@ -92,9 +91,6 @@ SearchInput.Result doExecute(WatchExecutionContext ctx, WatcherSearchTemplateReq if (logger.isDebugEnabled()) { logger.debug("[{}] found [{}] hits", ctx.id(), response.getHits().getTotalHits().value); - for (SearchHit hit : response.getHits()) { - logger.debug("[{}] hit [{}]", ctx.id(), hit.getSourceAsMap()); - } } final Payload payload; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java index 87c7c6e9748f8..1dede3f4e135c 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java +++ 
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java @@ -12,16 +12,16 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; -import org.elasticsearch.action.search.ClearScrollAction; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.ClearScrollResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchResponseSections; -import org.elasticsearch.action.search.SearchScrollAction; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.search.TransportClearScrollAction; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchScrollAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -196,7 +196,7 @@ void stopExecutor() {} ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(scrollSearchResponse); return null; - }).when(client).execute(eq(SearchScrollAction.INSTANCE), any(SearchScrollRequest.class), anyActionListener()); + }).when(client).execute(eq(TransportSearchScrollAction.TYPE), any(SearchScrollRequest.class), anyActionListener()); // one search response containing active and inactive watches int count = randomIntBetween(2, 200); @@ -236,13 +236,13 @@ void stopExecutor() {} ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(searchResponse); return null; - }).when(client).execute(eq(SearchAction.INSTANCE), any(SearchRequest.class), anyActionListener()); + }).when(client).execute(eq(TransportSearchAction.TYPE), any(SearchRequest.class), anyActionListener()); doAnswer(invocation -> { ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(new ClearScrollResponse(true, 1)); return null; - }).when(client).execute(eq(ClearScrollAction.INSTANCE), any(ClearScrollRequest.class), anyActionListener()); + }).when(client).execute(eq(TransportClearScrollAction.TYPE), any(ClearScrollRequest.class), anyActionListener()); service.start(clusterState, () -> {}, exception -> {}); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java index c355ec3ebb0ed..b87a176b7b5b1 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java @@ -242,7 +242,7 @@ public void testThatIndexTypeIdDynamically() throws Exception { final WatchExecutionContext ctx = WatcherTestUtils.mockExecutionContext("_id", new Payload.Simple(Maps.ofEntries(entries))); ArgumentCaptor captor = ArgumentCaptor.forClass(IndexRequest.class); - PlainActionFuture listener = PlainActionFuture.newFuture(); + PlainActionFuture listener = new PlainActionFuture<>(); listener.onResponse(new IndexResponse(new ShardId(new Index("foo", "bar"), 0), "whatever", 1, 
1, 1, true)); when(client.index(captor.capture())).thenReturn(listener); Action.Result result = executable.execute("_id", ctx, ctx.payload()); @@ -272,7 +272,7 @@ public void testThatIndexActionCanBeConfiguredWithDynamicIndexNameAndBulk() thro ); ArgumentCaptor captor = ArgumentCaptor.forClass(BulkRequest.class); - PlainActionFuture listener = PlainActionFuture.newFuture(); + PlainActionFuture listener = new PlainActionFuture<>(); IndexResponse indexResponse = new IndexResponse(new ShardId(new Index("foo", "bar"), 0), "whatever", 1, 1, 1, true); BulkItemResponse response = BulkItemResponse.success(0, DocWriteRequest.OpType.INDEX, indexResponse); BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[] { response }, 1); @@ -340,7 +340,7 @@ public void testIndexActionExecuteSingleDoc() throws Exception { WatchExecutionContext ctx = WatcherTestUtils.mockExecutionContext("_id", executionTime, payload); ArgumentCaptor captor = ArgumentCaptor.forClass(IndexRequest.class); - PlainActionFuture listener = PlainActionFuture.newFuture(); + PlainActionFuture listener = new PlainActionFuture<>(); listener.onResponse(new IndexResponse(new ShardId(new Index("test-index", "uuid"), 0), docId, 1, 1, 1, true)); when(client.index(captor.capture())).thenReturn(listener); @@ -393,7 +393,7 @@ public void testFailureResult() throws Exception { WatchExecutionContext ctx = WatcherTestUtils.mockExecutionContext("_id", ZonedDateTime.now(ZoneOffset.UTC), payload); ArgumentCaptor captor = ArgumentCaptor.forClass(BulkRequest.class); - PlainActionFuture listener = PlainActionFuture.newFuture(); + PlainActionFuture listener = new PlainActionFuture<>(); BulkItemResponse.Failure failure = new BulkItemResponse.Failure("test-index", "anything", new ElasticsearchException("anything")); BulkItemResponse firstResponse = BulkItemResponse.failure(0, DocWriteRequest.OpType.INDEX, failure); BulkItemResponse secondResponse; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java index 9264cc39f88a8..378d6b4773493 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java @@ -1136,7 +1136,7 @@ public void testUpdateWatchStatusDoesNotUpdateState() throws Exception { assertionsTriggered.set(true); } - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); future.onResponse(new UpdateResponse(null, new ShardId("test", "test", 0), "test", 0, 0, 0, DocWriteResponse.Result.CREATED)); return future; }).when(client).update(any()); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java index 01547b898e4b4..0f47df9dff12b 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java @@ -18,12 +18,12 @@ import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexResponse; -import 
org.elasticsearch.action.search.ClearScrollAction; import org.elasticsearch.action.search.ClearScrollResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchScrollAction; import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.action.search.TransportClearScrollAction; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchScrollAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -223,7 +223,7 @@ public void testFindTriggeredWatchesGoodCase() { ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(searchResponse1); return null; - }).when(client).execute(eq(SearchAction.INSTANCE), any(), any()); + }).when(client).execute(eq(TransportSearchAction.TYPE), any(), any()); // First return a scroll response with a single hit and then with no hits hit = new SearchHit(0, "second_foo"); @@ -264,7 +264,7 @@ public void testFindTriggeredWatchesGoodCase() { listener.onFailure(new ElasticsearchException("test issue")); } return null; - }).when(client).execute(eq(SearchScrollAction.INSTANCE), any(), any()); + }).when(client).execute(eq(TransportSearchScrollAction.TYPE), any(), any()); TriggeredWatch triggeredWatch = mock(TriggeredWatch.class); when(parser.parse(eq("_id"), eq(1L), any(BytesReference.class))).thenReturn(triggeredWatch); @@ -275,7 +275,7 @@ public void testFindTriggeredWatchesGoodCase() { listener.onResponse(new ClearScrollResponse(true, 1)); return null; - }).when(client).execute(eq(ClearScrollAction.INSTANCE), any(), any()); + }).when(client).execute(eq(TransportClearScrollAction.TYPE), any(), any()); assertThat(TriggeredWatchStore.validate(cs), is(true)); ZonedDateTime now = ZonedDateTime.now(ZoneOffset.UTC); @@ -301,9 +301,9 @@ public void testFindTriggeredWatchesGoodCase() { assertThat(triggeredWatches, hasSize(watches.size())); verify(client, times(1)).execute(eq(RefreshAction.INSTANCE), any(), any()); - verify(client, times(1)).execute(eq(SearchAction.INSTANCE), any(), any()); - verify(client, times(2)).execute(eq(SearchScrollAction.INSTANCE), any(), any()); - verify(client, times(1)).execute(eq(ClearScrollAction.INSTANCE), any(), any()); + verify(client, times(1)).execute(eq(TransportSearchAction.TYPE), any(), any()); + verify(client, times(2)).execute(eq(TransportSearchScrollAction.TYPE), any(), any()); + verify(client, times(1)).execute(eq(TransportClearScrollAction.TYPE), any(), any()); } // the elasticsearch migration helper is doing reindex using aliases, so we have to diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java index 28e538fc3921b..d06ee606f31ce 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java @@ -90,7 +90,7 @@ public void setup() { @SuppressWarnings("unchecked") public void testExecute() throws Exception { ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(SearchRequest.class); - PlainActionFuture searchFuture = PlainActionFuture.newFuture(); + PlainActionFuture searchFuture = 
new PlainActionFuture<>(); SearchResponse searchResponse = new SearchResponse( InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, "", @@ -131,7 +131,7 @@ public void testExecute() throws Exception { public void testDifferentSearchType() throws Exception { ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(SearchRequest.class); - PlainActionFuture searchFuture = PlainActionFuture.newFuture(); + PlainActionFuture searchFuture = new PlainActionFuture<>(); SearchResponse searchResponse = new SearchResponse( InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, "", @@ -186,7 +186,7 @@ public void testParserValid() throws Exception { // source: https://discuss.elastic.co/t/need-help-for-energy-monitoring-system-alerts/89415/3 public void testThatEmptyRequestBodyWorks() throws Exception { ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(SearchRequest.class); - PlainActionFuture searchFuture = PlainActionFuture.newFuture(); + PlainActionFuture searchFuture = new PlainActionFuture<>(); SearchResponse searchResponse = new SearchResponse( InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, "", diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/TransportAckWatchActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/TransportAckWatchActionTests.java index 338f4e56f5663..fe7293bf5e775 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/TransportAckWatchActionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/TransportAckWatchActionTests.java @@ -110,7 +110,7 @@ public void testWatchNotFound() { }).when(client).execute(eq(WatcherStatsAction.INSTANCE), any(), any()); AckWatchRequest ackWatchRequest = new AckWatchRequest(watchId); - PlainActionFuture listener = PlainActionFuture.newFuture(); + PlainActionFuture listener = new PlainActionFuture<>(); action.doExecute(ackWatchRequest, listener); ExecutionException exception = expectThrows(ExecutionException.class, listener::get); @@ -140,7 +140,7 @@ public void testThatWatchCannotBeAckedWhileRunning() { }).when(client).execute(eq(WatcherStatsAction.INSTANCE), any(), any()); AckWatchRequest ackWatchRequest = new AckWatchRequest(watchId); - PlainActionFuture listener = PlainActionFuture.newFuture(); + PlainActionFuture listener = new PlainActionFuture<>(); action.doExecute(ackWatchRequest, listener); ExecutionException exception = expectThrows(ExecutionException.class, listener::get); diff --git a/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java b/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java index 480704b89ca60..1954e291b1a7f 100644 --- a/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java +++ b/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java @@ -855,9 +855,7 @@ public Query termsQuery(Collection values, SearchExecutionContext context) { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { if (hasDocValues()) { - // TODO it'd almost certainly be faster to drop directly to doc values like we do with keyword but this'll do for now - IndexFieldData fd = new StringBinaryIndexFieldData(name(), CoreValuesSourceType.KEYWORD, null); - return BlockDocValuesReader.bytesRefsFromDocValues(context -> fd.load(context).getBytesValues()); + return new 
BlockDocValuesReader.BytesRefsFromBinaryBlockLoader(name()); } return null; } diff --git a/x-pack/plugin/write-load-forecaster/src/internalClusterTest/java/org/elasticsearch/xpack/writeloadforecaster/WriteLoadForecasterIT.java b/x-pack/plugin/write-load-forecaster/src/internalClusterTest/java/org/elasticsearch/xpack/writeloadforecaster/WriteLoadForecasterIT.java index 479f9b3662c05..d983747571b34 100644 --- a/x-pack/plugin/write-load-forecaster/src/internalClusterTest/java/org/elasticsearch/xpack/writeloadforecaster/WriteLoadForecasterIT.java +++ b/x-pack/plugin/write-load-forecaster/src/internalClusterTest/java/org/elasticsearch/xpack/writeloadforecaster/WriteLoadForecasterIT.java @@ -153,16 +153,11 @@ private void setUpDataStreamWriteDocsAndRollover(String dataStreamName, Settings client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("my-template").indexTemplate( - new ComposableIndexTemplate( - List.of("logs-*"), - new Template(indexSettings, null, null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs-*")) + .template(new Template(indexSettings, null, null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ) ) ); diff --git a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServer.java b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServer.java index eabb551d1fcd0..30b1744962e35 100644 --- a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServer.java +++ b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServer.java @@ -37,6 +37,7 @@ import javax.net.ServerSocketFactory; import static org.elasticsearch.test.ESTestCase.assertBusy; +import static org.elasticsearch.test.ESTestCase.inFipsJvm; import static org.junit.Assert.assertTrue; /** @@ -183,6 +184,13 @@ private void prepareKdcServerAndStart() throws Exception { final TimeValue maxRenewableLifeTime = new TimeValue(7, TimeUnit.DAYS); simpleKdc.getKdcConfig().setLong(KdcConfigKey.MINIMUM_TICKET_LIFETIME, minimumTicketLifeTime.getMillis()); simpleKdc.getKdcConfig().setLong(KdcConfigKey.MAXIMUM_RENEWABLE_LIFETIME, maxRenewableLifeTime.getMillis()); + if (inFipsJvm()) { + // Triple DES is not allowed when running in FIPS mode + String encryptionTypes = (String) KdcConfigKey.ENCRYPTION_TYPES.getDefaultValue(); + simpleKdc.getKdcConfig() + .setString(KdcConfigKey.ENCRYPTION_TYPES, encryptionTypes.toLowerCase().replace("des3-cbc-sha1-kd", "")); + } + simpleKdc.init(); simpleKdc.start(); } diff --git a/x-pack/qa/oidc-op-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtWithOidcAuthIT.java b/x-pack/qa/oidc-op-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtWithOidcAuthIT.java index 47e4b02d63648..2d3fc611758b0 100644 --- a/x-pack/qa/oidc-op-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtWithOidcAuthIT.java +++ b/x-pack/qa/oidc-op-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtWithOidcAuthIT.java @@ -22,6 +22,7 @@ import org.elasticsearch.rest.RestUtils; import org.elasticsearch.test.TestMatchers; import org.elasticsearch.test.TestSecurityClient; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import 
org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.oidc.C2IdOpTestCase; import org.hamcrest.Matchers; @@ -151,7 +152,10 @@ private Map authenticateWithJwtAndSharedSecret(String idJwt, Str final Map authenticateResponse = super.callAuthenticateApiUsingBearerToken( idJwt, RequestOptions.DEFAULT.toBuilder() - .addHeader(JwtRealm.HEADER_CLIENT_AUTHENTICATION, JwtRealm.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME + " " + sharedSecret) + .addHeader( + JwtRealm.HEADER_CLIENT_AUTHENTICATION, + JwtRealmSettings.HEADER_SHARED_SECRET_AUTHENTICATION_SCHEME + " " + sharedSecret + ) .build() ); return authenticateResponse; diff --git a/x-pack/qa/rolling-upgrade-basic/src/test/java/org/elasticsearch/upgrades/BasicLicenseUpgradeIT.java b/x-pack/qa/rolling-upgrade-basic/src/test/java/org/elasticsearch/upgrades/BasicLicenseUpgradeIT.java index 75fcc5cf6e7ad..da8a4c806a0f5 100644 --- a/x-pack/qa/rolling-upgrade-basic/src/test/java/org/elasticsearch/upgrades/BasicLicenseUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade-basic/src/test/java/org/elasticsearch/upgrades/BasicLicenseUpgradeIT.java @@ -8,6 +8,7 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.elasticsearch.rest.RestStatus; import java.util.Map; @@ -28,7 +29,7 @@ private void checkBasicLicense() throws Exception { final Request request = new Request("GET", "/_license"); // This avoids throwing a ResponseException when the license is not ready yet // allowing to retry the check using assertBusy - request.addParameter("ignore", "404"); + setIgnoredErrorResponseCodes(request, RestStatus.NOT_FOUND); Response licenseResponse = client().performRequest(request); assertOK(licenseResponse); Map licenseResponseMap = entityAsMap(licenseResponse); @@ -42,7 +43,7 @@ private void checkNonExpiringBasicLicense() throws Exception { final Request request = new Request("GET", "/_license"); // This avoids throwing a ResponseException when the license is not ready yet // allowing to retry the check using assertBusy - request.addParameter("ignore", "404"); + setIgnoredErrorResponseCodes(request, RestStatus.NOT_FOUND); Response licenseResponse = client().performRequest(request); assertOK(licenseResponse); Map licenseResponseMap = entityAsMap(licenseResponse); diff --git a/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java b/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java index b1e1888aba75d..27250dd4e3367 100644 --- a/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java @@ -21,6 +21,8 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -88,7 +90,6 @@ public void testUniDirectionalIndexFollowing() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102000") public void testAutoFollowing() throws Exception { String leaderIndex1 = "logs-20200101"; String leaderIndex2 = "logs-20200102"; @@ -132,7 +133,6 @@ public void testAutoFollowing() throws Exception { createLeaderIndex(leaderClient(), leaderIndex1); 
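
Both the license checks above and verifyTotalHitCount further down swap the raw ignore=404 query parameter for a typed helper. A sketch of the shape those call sites assume, inferred from the before/after rather than copied from ESRestTestCase: the listed status codes stop the low-level REST client from throwing, so a 404 surfaces as a failed assertOK that assertBusy can retry instead of an immediate ResponseException.

import org.elasticsearch.client.Request;
import org.elasticsearch.rest.RestStatus;

import java.util.Arrays;
import java.util.stream.Collectors;

final class IgnoredResponseCodesSketch {
    private IgnoredResponseCodesSketch() {}

    // Assumed implementation: equivalent to the old request.addParameter("ignore", "404"),
    // generalized to any set of status codes.
    static void setIgnoredErrorResponseCodes(Request request, RestStatus... codes) {
        String ignored = Arrays.stream(codes)
            .map(code -> Integer.toString(code.getStatus()))
            .collect(Collectors.joining(","));
        request.addParameter("ignore", ignored);
    }
}
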
index(leaderClient(), leaderIndex1, 64); assertBusy(() -> { - String followerIndex = "copy-" + leaderIndex1; assertThat(getNumberOfSuccessfulFollowedIndices(), equalTo(1)); assertTotalHitCount(followerIndex, 64, followerClient()); @@ -206,7 +206,6 @@ public void testAutoFollowing() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100291") public void testCannotFollowLeaderInUpgradedCluster() throws Exception { if (upgradeState != UpgradeState.ALL) { return; @@ -227,8 +226,20 @@ public void testCannotFollowLeaderInUpgradedCluster() throws Exception { ResponseException.class, () -> followIndex(leaderClient(), "follower", "not_supported", "not_supported") ); - assertThat(e.getMessage(), containsString("the snapshot was created with Elasticsearch version [")); - assertThat(e.getMessage(), containsString("] which is higher than the version of this node [")); + + assertThat( + e.getMessage(), + anyOf( + allOf( + containsString("the snapshot was created with index version ["), + containsString("] which is higher than the version used by this node [") + ), + allOf( + containsString("the snapshot was created with Elasticsearch version ["), + containsString("] which is higher than the version of this node [") + ) + ) + ); } else if (clusterName == ClusterName.LEADER) { // At this point all nodes in both clusters have been updated and // the leader cluster can now follow not_supported index in the follower cluster: @@ -240,7 +251,6 @@ public void testCannotFollowLeaderInUpgradedCluster() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102010") public void testBiDirectionalIndexFollowing() throws Exception { logger.info("clusterName={}, upgradeState={}", clusterName, upgradeState); @@ -372,7 +382,8 @@ private static void assertTotalHitCount(final String index, final int expectedTo private static void verifyTotalHitCount(final String index, final int expectedTotalHits, final RestClient client) throws IOException { final Request request = new Request("GET", "/" + index + "/_search"); request.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); - Map response = toMap(client.performRequest(request)); + setIgnoredErrorResponseCodes(request, RestStatus.NOT_FOUND); // trip the assertOK (i.e. 
diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataStreamsUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataStreamsUpgradeIT.java index cf2a66bc4fb5b..40ad5bba29baa 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataStreamsUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataStreamsUpgradeIT.java @@ -7,7 +7,6 @@ package org.elasticsearch.upgrades; import org.apache.http.util.EntityUtils; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.cluster.metadata.DataStream; @@ -25,7 +24,6 @@ public class DataStreamsUpgradeIT extends AbstractUpgradeTestCase { public void testDataStreams() throws IOException { - assumeTrue("no data streams in versions before " + Version.V_7_9_0, isOriginalClusterVersionAtLeast(Version.V_7_9_0)); if (CLUSTER_TYPE == ClusterType.OLD) { String requestBody = """ { @@ -110,7 +108,6 @@ public void testDataStreams() throws IOException { } public void testDataStreamValidationDoesNotBreakUpgrade() throws Exception { - assumeTrue("Bug started to occur from version: " + Version.V_7_10_2, isOriginalClusterVersionAtLeast(Version.V_7_10_2)); if (CLUSTER_TYPE == ClusterType.OLD) { String requestBody = """ {
diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlTrainedModelsUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlTrainedModelsUpgradeIT.java index 5427fb0c6cc88..0dcf13602a85f 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlTrainedModelsUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlTrainedModelsUpgradeIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.upgrades; import org.apache.http.util.EntityUtils; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.settings.Settings; @@ -57,7 +56,6 @@ protected Collection<String> templatesToWaitFor() { } public void testTrainedModelInference() throws Exception { - assumeTrue("We should only test if old cluster is after trained models went GA", isOriginalClusterVersionAtLeast(Version.V_7_13_1)); switch (CLUSTER_TYPE) { case OLD -> { createIndexWithName(INDEX_NAME);
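The guards removed in these rolling-upgrade suites all had the same shape, sketched below with a message and version constant taken from the deleted lines; presumably every upgrade-from version these suites still exercise now satisfies them:

    // Shape of the removed guards: skip the test entirely when the cluster
    // being upgraded from predates the feature under test.
    assumeTrue(
        "no data streams in versions before " + Version.V_7_9_0,
        isOriginalClusterVersionAtLeast(Version.V_7_9_0)
    );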
diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java deleted file mode 100644 index 68c4e57929852..0000000000000 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java +++ /dev/null @@ -1,279 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.upgrades; - -import org.elasticsearch.Version; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.core.Booleans; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.xcontent.ObjectPath; -import org.hamcrest.Matcher; - -import java.io.IOException; -import java.time.Instant; -import java.time.OffsetDateTime; -import java.time.ZoneOffset; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -import static org.hamcrest.Matchers.anyOf; -import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.hamcrest.Matchers.equalTo; - -public class RollupDateHistoUpgradeIT extends AbstractUpgradeTestCase { - private static final Version UPGRADE_FROM_VERSION = Version.fromString(System.getProperty("tests.upgrade_from_version")); - - public void testDateHistoIntervalUpgrade() throws Exception { - assumeTrue("DateHisto interval changed in 7.2", UPGRADE_FROM_VERSION.before(Version.V_7_2_0)); - switch (CLUSTER_TYPE) { - case OLD: - break; - case MIXED: - Request waitForYellow = new Request("GET", "/_cluster/health"); - waitForYellow.addParameter("wait_for_nodes", "3"); - waitForYellow.addParameter("wait_for_status", "yellow"); - client().performRequest(waitForYellow); - break; - case UPGRADED: - Request waitForGreen = new Request("GET", "/_cluster/health/target,rollup"); - waitForGreen.addParameter("wait_for_nodes", "3"); - waitForGreen.addParameter("wait_for_status", "green"); - // wait for long enough that we give delayed unassigned shards to stop being delayed - waitForGreen.addParameter("timeout", "70s"); - waitForGreen.addParameter("level", "shards"); - client().performRequest(waitForGreen); - break; - default: - throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); - } - - OffsetDateTime timestamp = Instant.parse("2018-01-01T00:00:01.000Z").atOffset(ZoneOffset.UTC); - - if (CLUSTER_TYPE == ClusterType.OLD) { - String recoverQuickly = """ - {"settings": {"index.unassigned.node_left.delayed_timeout": "100ms"}}"""; - - Request createTargetIndex = new Request("PUT", "/target"); - createTargetIndex.setJsonEntity(recoverQuickly); - client().performRequest(createTargetIndex); - - final Request indexRequest = new Request("POST", "/target/_doc/1"); - indexRequest.setJsonEntity("{\"timestamp\":\"" + timestamp.toString() + "\",\"value\":123}"); - client().performRequest(indexRequest); - - // create the rollup job with an old interval style - final Request createRollupJobRequest = new Request("PUT", "_rollup/job/rollup-id-test"); - createRollupJobRequest.setJsonEntity(""" - { - "index_pattern": "target", - "rollup_index": "rollup", - "cron": "*/1 * * * * ?", - "page_size": 100, - "groups": { - "date_histogram": { - "field": "timestamp", - "interval": "5m" - }, - "histogram": { - "fields": [ "value" ], - "interval": 1 - }, - "terms": { - "fields": [ "value" ] - } - }, - "metrics": [ - { - "field": "value", - "metrics": [ "min", "max", "sum" ] - } - ] - }"""); - - Map<String, Object> createRollupJobResponse = entityAsMap(client().performRequest(createRollupJobRequest)); - assertThat(createRollupJobResponse.get("acknowledged"), equalTo(Boolean.TRUE)); - - Request updateSettings = new Request("PUT", "/rollup/_settings"); - updateSettings.setJsonEntity(recoverQuickly); -
client().performRequest(updateSettings); - - // start the rollup job - final Request startRollupJobRequest = new Request("POST", "_rollup/job/rollup-id-test/_start"); - Map<String, Object> startRollupJobResponse = entityAsMap(client().performRequest(startRollupJobRequest)); - assertThat(startRollupJobResponse.get("started"), equalTo(Boolean.TRUE)); - - assertRollUpJob("rollup-id-test"); - List<String> ids = getSearchResults(1); - assertThat(ids.toString(), ids, containsInAnyOrder("rollup-id-test$AuaduUZW8tgWmFP87DgzSA")); - } - - if (CLUSTER_TYPE == ClusterType.MIXED && Booleans.parseBoolean(System.getProperty("tests.first_round"))) { - final Request indexRequest = new Request("POST", "/target/_doc/2"); - indexRequest.setJsonEntity("{\"timestamp\":\"" + timestamp.plusDays(1).toString() + "\",\"value\":345}"); - client().performRequest(indexRequest); - - assertRollUpJob("rollup-id-test"); - client().performRequest(new Request("POST", "rollup/_refresh")); - - List<String> ids = getSearchResults(2); - assertThat( - ids.toString(), - ids, - containsInAnyOrder("rollup-id-test$AuaduUZW8tgWmFP87DgzSA", "rollup-id-test$ehY4NAyVSy8xxUDZrNXXIA") - ); - } - - if (CLUSTER_TYPE == ClusterType.MIXED && Booleans.parseBoolean(System.getProperty("tests.first_round")) == false) { - final Request indexRequest = new Request("POST", "/target/_doc/3"); - indexRequest.setJsonEntity("{\"timestamp\":\"" + timestamp.plusDays(2).toString() + "\",\"value\":456}"); - client().performRequest(indexRequest); - - assertRollUpJob("rollup-id-test"); - client().performRequest(new Request("POST", "rollup/_refresh")); - - List<String> ids = getSearchResults(3); - assertThat( - ids.toString(), - ids, - containsInAnyOrder( - "rollup-id-test$AuaduUZW8tgWmFP87DgzSA", - "rollup-id-test$ehY4NAyVSy8xxUDZrNXXIA", - "rollup-id-test$60RGDSb92YI5LH4_Fnq_1g" - ) - ); - - } - - if (CLUSTER_TYPE == ClusterType.UPGRADED) { - final Request indexRequest = new Request("POST", "/target/_doc/4"); - indexRequest.setJsonEntity("{\"timestamp\":\"" + timestamp.plusDays(3).toString() + "\",\"value\":567}"); - client().performRequest(indexRequest); - - assertRollUpJob("rollup-id-test"); - client().performRequest(new Request("POST", "rollup/_refresh")); - - List<String> ids = getSearchResults(4); - assertThat( - ids.toString(), - ids, - containsInAnyOrder( - "rollup-id-test$AuaduUZW8tgWmFP87DgzSA", - "rollup-id-test$ehY4NAyVSy8xxUDZrNXXIA", - "rollup-id-test$60RGDSb92YI5LH4_Fnq_1g", - "rollup-id-test$LAKZftDeQwsUtdPixrkkzQ" - ) - ); - } - - } - - private List<String> getSearchResults(int expectedCount) throws Exception { - final List<String> collectedIDs = new ArrayList<>(); - assertBusy(() -> { - collectedIDs.clear(); - client().performRequest(new Request("POST", "rollup/_refresh")); - final Request searchRequest = new Request("GET", "rollup/_search"); - try { - Map<String, Object> searchResponse = entityAsMap(client().performRequest(searchRequest)); - assertNotNull(ObjectPath.eval("hits.total.value", searchResponse)); - assertThat(ObjectPath.eval("hits.total.value", searchResponse), equalTo(expectedCount)); - - for (int i = 0; i < expectedCount; i++) { - String id = ObjectPath.eval("hits.hits." + i + "._id", searchResponse); - collectedIDs.add(id); - Map<String, Object> doc = ObjectPath.eval("hits.hits."
+ i + "._source", searchResponse); - assertNotNull(doc); - } - } catch (IOException e) { - fail(); - } - }); - return collectedIDs; - } - - @SuppressWarnings("unchecked") - private void assertRollUpJob(final String rollupJob) throws Exception { - final Matcher<String> expectedStates = anyOf(equalTo("indexing"), equalTo("started")); - waitForRollUpJob(rollupJob, expectedStates); - - // check that the rollup job is started using the RollUp API - final Request getRollupJobRequest = new Request("GET", "_rollup/job/" + rollupJob); - Map<String, Object> getRollupJobResponse = entityAsMap(client().performRequest(getRollupJobRequest)); - Map<String, Object> job = getJob(getRollupJobResponse, rollupJob); - if (job != null) { - assertThat(ObjectPath.eval("status.job_state", job), expectedStates); - } - - // check that the rollup job is started using the Tasks API - final Request taskRequest = new Request("GET", "_tasks"); - taskRequest.addParameter("detailed", "true"); - taskRequest.addParameter("actions", "xpack/rollup/*"); - Map<String, Object> taskResponse = entityAsMap(client().performRequest(taskRequest)); - Map<String, Object> taskResponseNodes = (Map<String, Object>) taskResponse.get("nodes"); - Map<String, Object> taskResponseNode = (Map<String, Object>) taskResponseNodes.values().iterator().next(); - Map<String, Object> taskResponseTasks = (Map<String, Object>) taskResponseNode.get("tasks"); - Map<String, Object> taskResponseStatus = (Map<String, Object>) taskResponseTasks.values().iterator().next(); - assertThat(ObjectPath.eval("status.job_state", taskResponseStatus), expectedStates); - - // check that the rollup job is started using the Cluster State API - final Request clusterStateRequest = new Request("GET", "_cluster/state/metadata"); - Map<String, Object> clusterStateResponse = entityAsMap(client().performRequest(clusterStateRequest)); - List<Map<String, Object>> rollupJobTasks = ObjectPath.eval("metadata.persistent_tasks.tasks", clusterStateResponse); - - boolean hasRollupTask = false; - for (Map<String, Object> task : rollupJobTasks) { - if (ObjectPath.eval("id", task).equals(rollupJob)) { - hasRollupTask = true; - break; - } - } - if (hasRollupTask == false) { - fail("Expected persistent task for [" + rollupJob + "] but none found."); - } - - } - - private void waitForRollUpJob(final String rollupJob, final Matcher<String> expectedStates) throws Exception { - assertBusy(() -> { - final Request getRollupJobRequest = new Request("GET", "_rollup/job/" + rollupJob); - Response getRollupJobResponse = client().performRequest(getRollupJobRequest); - assertThat(getRollupJobResponse.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); - - Map<String, Object> job = getJob(getRollupJobResponse, rollupJob); - if (job != null) { - assertThat(ObjectPath.eval("status.job_state", job), expectedStates); - } - }, 30L, TimeUnit.SECONDS); - } - - private static Map<String, Object> getJob(Response response, String targetJobId) throws IOException { - return getJob(ESRestTestCase.entityAsMap(response), targetJobId); - } - - @SuppressWarnings("unchecked") - private static Map<String, Object> getJob(Map<String, Object> jobsMap, String targetJobId) throws IOException { - - List<Map<String, Object>> jobs = (List<Map<String, Object>>) XContentMapValues.extractValue("jobs", jobsMap); - - if (jobs == null) { - return null; - } - - for (Map<String, Object> job : jobs) { - String jobId = (String) ((Map<String, Object>) job.get("config")).get("id"); - if (jobId.equals(targetJobId)) { - return job; - } - } - return null; - } -}
diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SearchableSnapshotsRollingUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SearchableSnapshotsRollingUpgradeIT.java index 3cdeb6dab4d91..0f25592493a1c 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SearchableSnapshotsRollingUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SearchableSnapshotsRollingUpgradeIT.java @@ -52,15 +52,11 @@ public String storageName() { } public void testMountFullCopyAndRecoversCorrectly() throws Exception { - final Storage storage = Storage.FULL_COPY; - assumeVersion(Version.V_7_10_0, storage); - - executeMountAndRecoversCorrectlyTestCase(storage, 6789L); + executeMountAndRecoversCorrectlyTestCase(Storage.FULL_COPY, 6789L); } public void testMountPartialCopyAndRecoversCorrectly() throws Exception { final Storage storage = Storage.SHARED_CACHE; - assumeVersion(Version.V_7_12_0, Storage.SHARED_CACHE); if (CLUSTER_TYPE.equals(ClusterType.UPGRADED)) { assertBusy(() -> { @@ -116,15 +112,11 @@ private void executeMountAndRecoversCorrectlyTestCase(Storage storage, long numb } public void testBlobStoreCacheWithFullCopyInMixedVersions() throws Exception { - final Storage storage = Storage.FULL_COPY; - assumeVersion(Version.V_7_10_0, storage); - - executeBlobCacheCreationTestCase(storage, 9876L); + executeBlobCacheCreationTestCase(Storage.FULL_COPY, 9876L); } public void testBlobStoreCacheWithPartialCopyInMixedVersions() throws Exception { final Storage storage = Storage.SHARED_CACHE; - assumeVersion(Version.V_7_12_0, Storage.SHARED_CACHE); executeBlobCacheCreationTestCase(storage, 8765L); } @@ -326,13 +318,6 @@ private void executeBlobCacheCreationTestCase(Storage storage, long numberOfDocs } } - private static void assumeVersion(Version minSupportedVersion, Storage storageType) { - assumeTrue( - "Searchable snapshots with storage type [" + storageType + "] is supported since version [" + minSupportedVersion + ']', - isOriginalClusterVersionAtLeast(minSupportedVersion) - ); - } - private static void indexDocs(String indexName, long numberOfDocs) throws IOException { final StringBuilder builder = new StringBuilder(); for (long i = 0L; i < numberOfDocs; i++) { @@ -390,11 +375,7 @@ private static void mountSnapshot( Settings indexSettings ) throws IOException { final Request request = new Request(HttpPost.METHOD_NAME, "/_snapshot/" + repositoryName + '/' + snapshotName + "/_mount"); - if (isOriginalClusterVersionAtLeast(Version.V_7_12_0)) { - request.addParameter("storage", storage.storageName()); - } else { - assertThat("Parameter 'storage' was introduced in 7.12.0 with " + Storage.SHARED_CACHE, storage, equalTo(Storage.FULL_COPY)); - } + request.addParameter("storage", storage.storageName()); request.setJsonEntity(Strings.format(""" { "index": "%s",
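With the BWC branch gone, mountSnapshot sends the storage parameter unconditionally. Roughly what the resulting request looks like, as a sketch (repository, snapshot, and index names are placeholders; the storage values are assumed to mirror the Storage constants above):

    // Mount a searchable snapshot, always passing the storage type.
    final Request request = new Request("POST", "/_snapshot/my-repo/my-snapshot/_mount");
    request.addParameter("storage", "shared_cache"); // or "full_copy"
    request.setJsonEntity("""
        {
          "index": "my-index"
        }""");
    client().performRequest(request);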
diff --git a/x-pack/qa/runtime-fields/build.gradle b/x-pack/qa/runtime-fields/build.gradle index 1a9e913932eb7..dd7d0abc24b19 100644 --- a/x-pack/qa/runtime-fields/build.gradle +++ b/x-pack/qa/runtime-fields/build.gradle @@ -74,6 +74,7 @@ subprojects { 'search/115_multiple_field_collapsing/two levels fields collapsing', // Field collapsing on a runtime field does not work 'search/111_field_collapsing_with_max_score/*', // Field collapsing on a runtime field does not work 'field_caps/30_index_filter/Field caps with index filter', // We don't support filtering field caps on runtime fields. What should we do? + 'search/350_point_in_time/point-in-time with index filter', // We don't support filtering PIT on runtime fields. 'aggregations/filters_bucket/cache busting', // runtime keyword does not support split_queries_on_whitespace 'search/140_pre_filter_search_shards/pre_filter_shard_size with shards that have no hit', // completion suggester does not return options when the context field is a geo_point runtime field
diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java index 0c0a35f227c8e..1af08ffd5fafe 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java @@ -258,8 +257,7 @@ protected void assertAccessAllowed(String user, String index) throws IOException DocWriteResponse indexResponse = client.prepareIndex(index) .setSource(jsonBuilder().startObject().field("name", "value").endObject()) - .execute() - .actionGet(); + .get(); assertEquals( "user " + user + " should have write access to index " + index, @@ -281,7+280,7 @@ protected void assertAccessDenied(String user, String index) throws IOException authenticateUser(client, user, 3); try { - client.prepareIndex(index).setSource(jsonBuilder().startObject().field("name", "value").endObject()).execute().actionGet(); + client.prepareIndex(index).setSource(jsonBuilder().startObject().field("name", "value").endObject()).get(); fail("Write access to index " + index + " should not be allowed for user " + user); } catch (ElasticsearchSecurityException e) { // expected